Compare commits: master...patch-queu (3 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 8e5a10bf4f | |
| | fa3dae9fa4 | |
| | a9a2e2bf68 | |

5,976 changed files with 157,224 additions and 1,454,370 deletions
.gitignore (vendored): 136 lines changed

@@ -1,13 +1,9 @@
 /build
 /src/mongo/db/modules
-/.jsdbshell
-/.dbshell
-/.sconsign.dblite
-/.sconf_temp
-/perf.data
-/massif.out.*
-.scons
-/smoke-last.json
+.jsdbshell
+.dbshell
+.sconsign.dblite
+.sconf_temp
+perf.data
+massif.out.*
 
 *~
 *.swp

@@ -21,6 +17,7 @@
 *.ncb
 *.idb
 *.obj
+*/*.obj
 *.opt
 *.pch
 *.jsh

@@ -39,26 +36,23 @@
 *.psess
 *#
 .#*
+shell/mongo.cpp
+shell/mongo-server.cpp
 
-/src/mongo/*/*Debug*/
-/src/mongo/*/*/*Debug*/
-/src/mongo/*/*Release*/
-/src/mongo/*/*/*Release*/
-/src/ipch
-/src/mongo/*/ipch
-/src/mongo/*/*/ipch
-/src/mongo/db/.gdb*
-/src/mongo/db/makefile.local
-/src/mongo/db/_ReSharper.db
-/src/third_party/*/*.cache
-/src/third_party/*/*.tlog
-/src/third_party/*/*.lastbuildstate
+*/Debug/
+*/*/Debug/
+*/Release/
+*/*/Release/
+*/ipch/
+*/*/ipch/
+db/.gdb*
+db/makefile.local
+db/_ReSharper.db
 config.log
 settings.py
 log_config.py
 buildinfo.cpp
 tags
 TAGS
 failfile.smoke
 
 #temp dirs
 dump

@@ -71,52 +65,49 @@ docs/doxygen
 scratch
 
 # binaries
-/mongo
-/mongod
-/mongogrid
-/mongos
+mongo
+mongod
+mongogrid
+mongos
 
-/mongodump
-/mongorestore
+mongodump
+mongorestore
 
-/mongofiles
-/mongoexport
+mongofiles
+mongoexport
 
-/mongoimport
-/mongosniff
-/mongobridge
-/mongostat
-/mongotop
-/mongooplog
-/mongoperf
-/bsondump
+mongoimport
+mongosniff
+mongobridge
+mongostat
+mongotop
+bsondump
 
 *.tgz
 *.zip
 *.tar.gz
 
-#libs
-/libmongoclient.*
-/libmongotestfiles.*
-/libmongoshellfiles.*
 mongodb-*
 mongo-cxx-driver-*
 
-/emr.jar
 *.class
+#libs
+libmongoclient.*
+libmongotestfiles.*
+libmongoshellfiles.*
 
 # examples
-/firstExample
-/secondExample
-/whereExample
-/bsondemo
-/rsExample
-/tutorial
+firstExample
+secondExample
+whereExample
+bsondemo
+rsExample
 
 #tests
-/test
-/authTest
-/perftest
-/clientTest
-/httpClientTest
+test
+authTest
+perftest
+clientTest
+httpClientTest
 
 #debian
 build-stamp

@@ -127,7 +118,12 @@ debian/mongodb
 
 #osx
 .DS_Store
 ._.DS_Store
 
+#third party
+third_party/js-1.7/jsautocfg.h
+third_party/js-1.7/jsautokw.h
+third_party/js-1.7/jskwgen
+third_party/js-1.7/jscpucfg
+
 # QtCreator
 *.config

@@ -136,25 +132,3 @@ debian/mongodb
 *.files
 *.includes
 *.orig
-
-#built by Visual Studio
-src/mongo/base/error_codes.cpp
-src/mongo/base/error_codes.h
-src/mongo/db/auth/action_type.cpp
-src/mongo/db/auth/action_type.h
-src/mongo/db/fts/stop_words_list.cpp
-src/mongo/db/fts/stop_words_list.h
-src/mongo/shell/mongo-server.cpp
-src/mongo/shell/mongo.cpp
-src/third_party/js-1.7/jsautocfg.h
-src/third_party/js-1.7/jsautokw.h
-src/third_party/v8/src/experimental-libraries.cc
-src/third_party/v8/src/libraries.cc
-
-# old things that should be removed
-# maybe remove this mid 2012
-src/third_party/js-1.7/jscpucfg
-src/third_party/js-1.7/jskwgen
-src/mongo/buildinfo.cpp
-buildinfo.cpp
-/.settings/
@@ -1,77 +0,0 @@ (entire file removed)

Contributing to the MongoDB project
===================================

Pull requests are always welcome, and the MongoDB dev team appreciates any help the community can
give to help make MongoDB better.

For any particular improvement you want to make, you can begin a discussion on the
`MongoDB Developers Forum`_. This is the best place to discuss your proposed improvement (and its
implementation) with the core development team.

.. _MongoDB Developers Forum: https://groups.google.com/forum/?fromgroups#!forum/mongodb-dev


Getting Started
---------------

- Create a `MongoDB JIRA account`_.
- Create a `Github account`_.
- Fork the repository on Github at https://github.com/mongodb/mongo.

.. _MongoDB JIRA account: https://jira.mongodb.org/secure/Signup!default.jspa
.. _Github account: https://github.com/signup/free


JIRA Tickets
------------

All commits to the MongoDB repository must reference an issue in the `SERVER project`_ of the
MongoDB JIRA. Before creating any new tickets, please search the existing backlog for any open
tickets that represent your change request. If there is not one, then you should create a new
ticket.

For bugs, please clearly describe the issue you are resolving, including the platforms on which
the issue is present and clear steps to reproduce.

For improvements or feature requests, be sure to explain the goal or use case and the approach
your solution will take.

.. _SERVER project: https://jira.mongodb.org/browse/SERVER


Style Guide
-----------

All commits to the MongoDB repository must follow the `kernel development rules`_.

In particular, all code must follow the MongoDB `kernel code style guidelines`_. For anything
not covered in this document you should default to the `Google CPP Style Guide`_ and the
`Google JavaScript Style Guide`_.

Your commit message should also be prefaced with the relevant JIRA ticket, e.g. "SERVER-XXX Fixed
a bug in aggregation".

.. _kernel development rules: http://dochub.mongodb.org/core/kernelcodedevelopmentrules
.. _kernel code style guidelines: http://dochub.mongodb.org/core/kernelcodestyle
.. _Google CPP Style Guide: http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
.. _Google JavaScript Style Guide: http://google-styleguide.googlecode.com/svn/trunk/javascriptguide.xml


Testing
-------

Every non-trivial change to the code base should be accompanied by a relevant addition to or
modification of the test suite. If you don't believe this is necessary, please add an explanation
in the JIRA ticket why no such changes are either needed or possible.

All changes must also pass the full test suite (including your test additions/changes) on your
local machine before you open a pull request.


Contributor Agreement
---------------------

A patch will only be considered for merging into the upstream codebase after you have signed the
`contributor agreement`_.

.. _contributor agreement: http://www.10gen.com/contributor
README: 2 lines changed

@@ -47,7 +47,7 @@ DOCUMENTATION
 
 MAIL LISTS AND IRC
 
-  http://dochub.mongodb.org/core/community
+  http://www.mongodb.org/display/DOCS/Community
 
 32 BIT BUILD NOTES
@@ -1,66 +0,0 @@ (entire file removed)

# -*- mode: python; -*-

# This SConscript describes construction of buildinfo.cpp, which is independent of the
# build variant's target.

import os
import sys

import buildscripts.utils

Import('env windows usev8 usesm')

def getSysInfo():
    if windows:
        return "windows " + str( sys.getwindowsversion() )
    else:
        return " ".join( os.uname() )

buildinfo_filename = '#build/buildinfo.cpp'

buildinfo_template = '''
#include <string>
#include <boost/version.hpp>

#include "mongo/util/version.h"

namespace mongo {
    const char * gitVersion() { return "%(git_version)s"; }
    const char * compiledJSEngine() { return "%(js_engine)s"; }
    const char * allocator() { return "%(allocator)s"; }
    const char * loaderFlags() { return "%(loader_flags)s"; }
    const char * compilerFlags() { return "%(compiler_flags)s"; }
    std::string sysInfo() { return "%(sys_info)s BOOST_LIB_VERSION=" BOOST_LIB_VERSION ; }
}  // namespace mongo
'''

def generate_buildinfo(env, target, source, **kw):
    git_version = buildscripts.utils.getGitVersion()
    if len(env["MONGO_MODULES"]):
        git_version += " modules: " + ", ".join(env["MONGO_MODULES"])

    if usev8:
        js_engine = "V8"
    elif usesm:
        js_engine = "SpiderMonkey"
    else:
        js_engine = "Unknown"

    contents = str(source[0]) % dict(git_version=git_version,
                                     js_engine=js_engine,
                                     sys_info=getSysInfo(),
                                     allocator=GetOption('allocator'),
                                     loader_flags=env.subst('$LINKFLAGS $LDFLAGS',
                                                            source=source, target=target),
                                     compiler_flags=env.subst('$CXXFLAGS $CCFLAGS $CFLAGS',
                                                              source=source, target=target))
    out = open(str(target[0]), 'wb')
    try:
        out.write(contents)
    finally:
        out.close()

env.Command(buildinfo_filename, Value(buildinfo_template), generate_buildinfo)
env.AlwaysBuild(buildinfo_filename)
env.Install('$BUILD_DIR/mongo', buildinfo_filename)
env.Install('$BUILD_DIR/client_build/mongo', buildinfo_filename)
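The deleted SConscript above generates buildinfo.cpp by %-interpolating `buildinfo_template` and writing the result to `build/buildinfo.cpp`. For concreteness, here is a sketch of what one generated file could look like; every interpolated value below (hash, engine, allocator, flags, system string) is invented for illustration:

```cpp
// Hypothetical generate_buildinfo() output -- all interpolated values are
// illustrative, not taken from a real build.
#include <string>
#include <boost/version.hpp>

#include "mongo/util/version.h"

namespace mongo {
    const char * gitVersion() { return "8e5a10bf4f"; }
    const char * compiledJSEngine() { return "V8"; }
    const char * allocator() { return "system"; }
    const char * loaderFlags() { return ""; }
    const char * compilerFlags() { return "-O3"; }
    std::string sysInfo() { return "Linux x86_64 BOOST_LIB_VERSION=" BOOST_LIB_VERSION ; }
}  // namespace mongo
```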
SConscript.smoke: 143 lines removed

@@ -1,143 +0,0 @@ (entire file removed)

# -*- mode: python -*-
#
# This SConscript file describes the build rules for smoke tests (scons smoke,
# e.g.)

import os
from buildscripts import utils

Import( "has_option env shellEnv testEnv" )

def add_exe( v ):
    return "${PROGPREFIX}%s${PROGSUFFIX}" % v

smokeEnv = testEnv.Clone()
smokeEnv['ENV']['PATH']=os.environ['PATH']

# copy in any environment variables beginning with MONGO_; these
# are used by buildscripts/buildlogger.py
for name, value in os.environ.items():
    if name.startswith('MONGO_'):
        smokeEnv['ENV'][name] = value

smokeEnv.Alias( "dummySmokeSideEffect", [], [] )

smokeFlags = []

# Ugh. Frobbing the smokeFlags must precede using them to construct
# actions, I think.
if has_option( 'smokedbprefix'):
    smokeFlags += ['--smoke-db-prefix', GetOption( 'smokedbprefix')]

if 'startMongodSmallOplog' in COMMAND_LINE_TARGETS:
    smokeFlags += ["--small-oplog"]

if has_option('smokeauth'):
    smokeFlags += ['--auth']

def addTest(name, deps, actions):
    smokeEnv.Alias( name, deps, actions )
    smokeEnv.AlwaysBuild( name )
    # Prevent smoke tests from running in parallel
    smokeEnv.SideEffect( "dummySmokeSideEffect", name )

def addSmoketest( name, deps, extraSmokeArgs=[] ):
    # Convert from smoke to test, smokeJs to js, and foo to foo
    target = name
    if name.startswith("smoke"):
        if name == "smoke":
            target = File("test").path
        else:
            target = name[5].lower() + name[6:]

    smokeArgs = smokeFlags + [target] + extraSmokeArgs
    addTest(name, deps, utils.run_smoke_command(*smokeArgs))

def addSmokeSuite( name, suitefile, needMongod=False ):
    # Add a smoketest target which invokes smoke.py with
    # --from-file, and passes the named suitefile as the
    # command line argument.

    # resolve an initial # in the suitefile
    suitefile = str(env.File(suitefile))

    smoke_args = ['--mode', 'files', '--from-file', suitefile]
    if not needMongod:
        smoke_args.append('--dont-start-mongod')
    addTest(name, [suitefile], utils.run_smoke_command(*smoke_args))

addSmoketest( "smoke", [ add_exe( "test" ), add_exe( "mongod" ), add_exe( "mongo" ) ] )
addSmoketest( "smokePerf", [ add_exe("perftest") ] )
addSmoketest( "smokeClient", [
        add_exe('firstExample'),
        add_exe('rsExample'),
        add_exe('secondExample'),
        add_exe('whereExample'),
        add_exe('authTest'),
        add_exe('httpClientTest'),
        add_exe('bsondemo'),
        add_exe('clientTest'),
        ] )
addSmoketest( "mongosTest", [ add_exe( 'mongos' ) ])
addSmokeSuite( "smokeCppUnittests", "$UNITTEST_LIST" )
addSmokeSuite( "smokeModuleTests", "$MODULETEST_LIST" )

# These tests require the mongo shell
if shellEnv is not None:
    addSmoketest( "smokeJs", [add_exe("mongo"), add_exe("mongod")] )
    addSmoketest( "smokeClone", [ add_exe("mongo"), add_exe("mongod") ] )
    addSmoketest( "smokeRepl", [ add_exe("mongo"), add_exe("mongod"), add_exe("mongobridge") ] )
    addSmoketest( "smokeReplSets", [ add_exe("mongo"), add_exe("mongod"), add_exe("mongobridge") ] )
    addSmoketest( "smokeDur", [ add_exe( "mongo" ), add_exe( "mongod" ), add_exe('mongorestore') ] )
    addSmoketest( "smokeDisk", [ add_exe( "mongo" ), add_exe( "mongod" ), add_exe( "mongodump" ), add_exe( "mongorestore" ) ] )
    addSmoketest( "smokeAuth", [ add_exe( "mongo" ), add_exe( "mongod" ) ] )
    addSmoketest( "smokeParallel", [ add_exe( "mongo" ), add_exe( "mongod" ) ] )
    addSmoketest( "smokeSharding", [ add_exe("mongo"), add_exe("mongod"), add_exe("mongos"), add_exe('mongofiles') ] )
    addSmoketest( "smokeJsPerf", [ add_exe("mongo"), add_exe("mongod") ] )
    addSmoketest( "smokeJsSlowNightly", [add_exe("mongo"), add_exe("mongod"), add_exe("mongos") ])
    addSmoketest( "smokeJsSlowWeekly", [add_exe("mongo"), add_exe("mongod"), add_exe("mongos") ])
    addSmoketest( "smokeQuota", [ add_exe("mongo"), add_exe("mongod") ] )
    addSmoketest( "smokeTool", [ add_exe( "mongo" ), add_exe("mongod"), add_exe("mongos"), "tools" ] )
    addSmoketest( "smokeAggregation", [ add_exe( "mongo" ), add_exe( "mongod" ), add_exe( "mongos" ) ] )
    addSmoketest( "smokeMultiVersion", [ add_exe( "mongo" ), add_exe( "mongod" ), add_exe( "mongos" ) ] )
    addSmoketest( "smokeFailPoint", [ add_exe( "mongo" ), add_exe( "mongod" ), add_exe( "mongos" ) ] )
    addSmoketest( "smokeSsl", [ add_exe("mongo"), add_exe("mongod"), add_exe("mongos") ], ['--use-ssl'] )

addSmoketest( "smokeFailingTests", [ add_exe( "mongo" ), add_exe( "mongod" ) ], ['--only-old-fails', '--continue-on-failure'] )
addSmoketest( "smokeResetFails", [ add_exe( "mongo" ), add_exe( "mongod" ) ], ['--reset-old-fails'] )

smokeEnv.Alias( "startMongodSmallOplog", [add_exe("mongod")], [] );
smokeEnv.AlwaysBuild( "startMongodSmallOplog" );
smokeEnv.SideEffect( "dummySmokeSideEffect", "startMongodSmallOplog" )

def addMongodReqTargets( env, target, source ):
    mongodReqTargets = [ "smokeClient", "smokeJs" ]
    for target in mongodReqTargets:
        smokeEnv.Depends( target, "startMongod" )
        smokeEnv.Depends( "smokeAll", target )

smokeEnv.Alias( "addMongodReqTargets", [], [addMongodReqTargets] )
smokeEnv.AlwaysBuild( "addMongodReqTargets" )

smokeEnv.Alias( "smokeAll", [ "smoke", "mongosTest", "smokeClone", "smokeRepl", "addMongodReqTargets", "smokeDisk", "smokeAuth", "smokeSharding", "smokeTool" ] )
smokeEnv.AlwaysBuild( "smokeAll" )

def addMongodReqNoJsTargets( env, target, source ):
    mongodReqTargets = [ "smokeClient" ]
    for target in mongodReqTargets:
        smokeEnv.Depends( target, "startMongod" )
        smokeEnv.Depends( "smokeAllNoJs", target )

smokeEnv.Alias( "addMongodReqNoJsTargets", [], [addMongodReqNoJsTargets] )
smokeEnv.AlwaysBuild( "addMongodReqNoJsTargets" )

smokeEnv.Alias( "smokeAllNoJs", [ "smoke", "mongosTest", "addMongodReqNoJsTargets" ] )
smokeEnv.AlwaysBuild( "smokeAllNoJs" )

def run_shell_tests(env, target, source):
    from buildscripts import test_shell
    test_shell.mongo_path = windows and "mongo.exe" or "mongo"
    test_shell.run_tests()

env.Alias("test_shell", [], [run_shell_tests])
env.AlwaysBuild("test_shell")
SConstruct: 1,480 lines changed (diff suppressed because it is too large)
@@ -23,7 +23,6 @@
 #include <map>
 #include <limits>
 
 #if defined(_WIN32)
 #undef max
 #undef min

@@ -31,6 +30,10 @@
 
 namespace mongo {
 
+    inline bool isNaN(double d) {
+        return d != d;
+    }
+
     /* must be same type when called, unless both sides are #s
        this large function is in header to facilitate inline-only use of bson
     */
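The `isNaN` helper added on the right-hand side relies on the IEEE-754 rule that NaN is the only value that compares unequal to itself. A standalone check (note that aggressive compiler flags such as `-ffast-math` can break this idiom):

```cpp
#include <cmath>
#include <iostream>

// NaN is the only double for which d != d holds under IEEE-754.
inline bool isNaN(double d) {
    return d != d;
}

int main() {
    std::cout << isNaN(std::nan("")) << '\n';  // 1
    std::cout << isNaN(1.0) << '\n';           // 0
}
```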
@@ -101,7 +104,7 @@ dodouble:
         // we use memcmp as we allow zeros in UTF8 strings
         int lsz = l.valuestrsize();
         int rsz = r.valuestrsize();
-        int common = std::min(lsz, rsz);
+        int common = min(lsz, rsz);
         int res = memcmp(l.valuestr(), r.valuestr(), common);
         if( res )
             return res;
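BSON strings are length-prefixed and may contain embedded NUL bytes, which is why the comparison above uses `memcmp` over the common prefix and then falls back to length rather than calling `strcmp`. The same ordering in isolation:

```cpp
#include <algorithm>
#include <cstring>

// Order two length-prefixed strings: compare the common prefix bytewise,
// then break ties on length (shorter sorts first). Embedded '\0' is fine.
int compareLengthPrefixed(const char* l, int lsz, const char* r, int rsz) {
    int common = std::min(lsz, rsz);
    int res = std::memcmp(l, r, common);
    if (res)
        return res;
    return lsz - rsz;
}
```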
@@ -121,7 +124,7 @@ dodouble:
         int lsz = l.objsize(); // our bin data size in bytes, not including the subtype byte
         int rsz = r.objsize();
         if ( lsz - rsz != 0 ) return lsz - rsz;
-        return memcmp(l.value()+4, r.value()+4, lsz+1 /*+1 for subtype byte*/);
+        return memcmp(l.value()+4, r.value()+4, lsz+1);
     }
     case RegEx: {
         int c = strcmp(l.regex(), r.regex());

@@ -136,20 +139,18 @@ dodouble:
         f = strcmp( l.codeWScopeCode() , r.codeWScopeCode() );
         if ( f )
             return f;
-        f = strcmp( l.codeWScopeScopeDataUnsafe() , r.codeWScopeScopeDataUnsafe() );
+        f = strcmp( l.codeWScopeScopeData() , r.codeWScopeScopeData() );
         if ( f )
             return f;
         return 0;
     }
     default:
-        verify( false);
+        assert( false);
     }
     return -1;
 }
 
-    /* wo = "well ordered"
-       note: (mongodb related) : this can only change in behavior when index version # changes
-    */
+    /* wo = "well ordered" */
     inline int BSONElement::woCompare( const BSONElement &e,
                                        bool considerFieldName ) const {
         int lt = (int) canonicalType();

@@ -173,19 +174,19 @@ dodouble:
     inline BSONObj BSONElement::embeddedObjectUserCheck() const {
         if ( MONGO_likely(isABSONObj()) )
             return BSONObj(value());
-        std::stringstream ss;
+        stringstream ss;
         ss << "invalid parameter: expected an object (" << fieldName() << ")";
         uasserted( 10065 , ss.str() );
         return BSONObj(); // never reachable
     }
 
     inline BSONObj BSONElement::embeddedObject() const {
-        verify( isABSONObj() );
+        assert( isABSONObj() );
         return BSONObj(value());
     }
 
     inline BSONObj BSONElement::codeWScopeObject() const {
-        verify( type() == CodeWScope );
+        assert( type() == CodeWScope );
         int strSizeWNull = *(int *)( value() + 4 );
         return BSONObj( value() + 4 + 4 + strSizeWNull );
     }

@@ -208,12 +209,10 @@ dodouble:
     inline NOINLINE_DECL void BSONObj::_assertInvalid() const {
         StringBuilder ss;
         int os = objsize();
-        ss << "BSONObj size: " << os << " (0x" << toHex( &os, 4 ) << ") is invalid. "
-           << "Size must be between 0 and " << BSONObjMaxInternalSize
-           << "(" << ( BSONObjMaxInternalSize/(1024*1024) ) << "MB)";
+        ss << "Invalid BSONObj size: " << os << " (0x" << toHex( &os, 4 ) << ')';
         try {
             BSONElement e = firstElement();
-            ss << " First element: " << e.toString();
+            ss << " first element: " << e.toString();
         }
         catch ( ... ) { }
         massert( 10334 , ss.str() , 0 );

@@ -242,8 +241,8 @@ dodouble:
         return b.obj();
     }
 
-    inline BSONObj BSONElement::wrap( const StringData& newName ) const {
-        BSONObjBuilder b(size() + 6 + newName.size());
+    inline BSONObj BSONElement::wrap( const char * newName ) const {
+        BSONObjBuilder b(size()+6+(int)strlen(newName));
         b.appendAs(*this,newName);
         return b.obj();
     }

@@ -266,7 +265,7 @@ dodouble:
         BSONObjIterator i(*this);
         while ( i.more() ) {
             BSONElement e = i.next();
-            if ( name == e.fieldName() )
+            if ( strcmp(e.fieldName(), name.data()) == 0 )
                 return e;
         }
         return BSONElement();

@@ -300,7 +299,7 @@ dodouble:
 
     /* add all the fields from the object specified to this object if they don't exist */
     inline BSONObjBuilder& BSONObjBuilder::appendElementsUnique(BSONObj x) {
-        std::set<std::string> have;
+        set<string> have;
         {
             BSONObjIterator i = iterator();
             while ( i.more() )

@@ -333,41 +332,29 @@ dodouble:
     }
 
     inline BSONObjBuilderValueStream::BSONObjBuilderValueStream( BSONObjBuilder * builder ) {
         _fieldName = 0;
         _builder = builder;
     }
 
     template<class T>
     inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<( T value ) {
         _builder->append(_fieldName, value);
-        _fieldName = StringData();
+        _fieldName = 0;
         return *_builder;
     }
 
     inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<( const BSONElement& e ) {
         _builder->appendAs( e , _fieldName );
-        _fieldName = StringData();
+        _fieldName = 0;
         return *_builder;
     }
 
-    inline BufBuilder& BSONObjBuilderValueStream::subobjStart() {
-        StringData tmp = _fieldName;
-        _fieldName = StringData();
-        return _builder->subobjStart(tmp);
-    }
-
-    inline BufBuilder& BSONObjBuilderValueStream::subarrayStart() {
-        StringData tmp = _fieldName;
-        _fieldName = StringData();
-        return _builder->subarrayStart(tmp);
-    }
-
     inline Labeler BSONObjBuilderValueStream::operator<<( const Labeler::Label &l ) {
         return Labeler( l, this );
     }
 
-    inline void BSONObjBuilderValueStream::endField( const StringData& nextFieldName ) {
-        if ( haveSubobj() ) {
-            verify( _fieldName.rawData() );
+    inline void BSONObjBuilderValueStream::endField( const char *nextFieldName ) {
+        if ( _fieldName && haveSubobj() ) {
             _builder->append( _fieldName, subobj()->done() );
         }
         _subobj.reset();
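The `BSONObjBuilderValueStream` changed above is the plumbing behind the builder's `<<` syntax: streaming a field name parks it in `_fieldName`, and the next value (or labeler) consumes it via these `operator<<` overloads. A small usage sketch, assuming the builder API as it appears elsewhere in this diff (the include path is an assumption):

```cpp
#include "bson/bson.h"  // standalone header added later in this diff

using namespace mongo;

BSONObj makeDoc() {
    BSONObjBuilder b;
    // Each name is held by the value stream until its value arrives.
    b << "name" << "Joe" << "age" << 33;
    return b.obj();  // { name: "Joe", age: 33 }
}
```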
@@ -393,7 +380,7 @@ dodouble:
     }
 
     // {a: {b:1}} -> {a.b:1}
-    void nested2dotted(BSONObjBuilder& b, const BSONObj& obj, const std::string& base="");
+    void nested2dotted(BSONObjBuilder& b, const BSONObj& obj, const string& base="");
     inline BSONObj nested2dotted(const BSONObj& obj) {
         BSONObjBuilder b;
         nested2dotted(b, obj);

@@ -417,7 +404,7 @@ dodouble:
     inline bool BSONObjBuilder::hasField( const StringData& name ) const {
         BSONObjIterator i = iterator();
         while ( i.more() )
-            if ( name == i.next().fieldName() )
+            if ( strcmp( name.data() , i.next().fieldName() ) == 0 )
                 return true;
         return false;
     }

@@ -427,7 +414,7 @@ dodouble:
      * also, dotted2nested ignores order
      */
 
-    typedef std::map<std::string, BSONElement> BSONMap;
+    typedef map<string, BSONElement> BSONMap;
     inline BSONMap bson2map(const BSONObj& obj) {
         BSONMap m;
         BSONObjIterator it(obj);

@@ -444,7 +431,7 @@ dodouble:
         }
     };
 
-    typedef std::set<BSONElement, BSONElementFieldNameCmp> BSONSortedElements;
+    typedef set<BSONElement, BSONElementFieldNameCmp> BSONSortedElements;
     inline BSONSortedElements bson2set( const BSONObj& obj ) {
         BSONSortedElements s;
         BSONObjIterator it(obj);

@@ -453,13 +440,13 @@ dodouble:
         return s;
     }
 
-    inline std::string BSONObj::toString( bool isArray, bool full ) const {
+    inline string BSONObj::toString( bool isArray, bool full ) const {
         if ( isEmpty() ) return "{}";
         StringBuilder s;
         toString(s, isArray, full);
         return s.str();
     }
-    inline void BSONObj::toString( StringBuilder& s, bool isArray, bool full, int depth ) const {
+    inline void BSONObj::toString(StringBuilder& s, bool isArray, bool full ) const {
         if ( isEmpty() ) {
             s << "{}";
             return;

@@ -486,7 +473,7 @@ dodouble:
             first = false;
         else
             s << ", ";
-        e.toString( s, !isArray, full, depth );
+        e.toString(s, !isArray, full );
     }
     s << ( isArray ? " ]" : " }" );
 }

@@ -593,7 +580,7 @@ dodouble:
             len2 = strlen( p );
         else {
             size_t x = remain - len1 - 1;
-            verify( x <= 0x7fffffff );
+            assert( x <= 0x7fffffff );
             len2 = mongo::strnlen( p, (int) x );
         }
         //massert( 10319 , "Invalid regex options string", len2 != -1 ); // ERH - 4/28/10 - don't think this does anything

@@ -603,7 +590,7 @@ dodouble:
         default: {
             StringBuilder ss;
             ss << "BSONElement: bad type " << (int) type();
-            std::string msg = ss.str();
+            string msg = ss.str();
             massert( 13655 , msg.c_str(),false);
         }
     }

@@ -669,7 +656,7 @@ dodouble:
         {
             StringBuilder ss;
             ss << "BSONElement: bad type " << (int) type();
-            std::string msg = ss.str();
+            string msg = ss.str();
             massert(10320 , msg.c_str(),false);
         }
     }

@@ -678,25 +665,12 @@ dodouble:
         return totalSize;
     }
 
-    inline std::string BSONElement::toString( bool includeFieldName, bool full ) const {
+    inline string BSONElement::toString( bool includeFieldName, bool full ) const {
         StringBuilder s;
         toString(s, includeFieldName, full);
         return s.str();
     }
-    inline void BSONElement::toString( StringBuilder& s, bool includeFieldName, bool full, int depth ) const {
-
-        if ( depth > BSONObj::maxToStringRecursionDepth ) {
-            // check if we want the full/complete string
-            if ( full ) {
-                StringBuilder s;
-                s << "Reached maximum recursion depth of ";
-                s << BSONObj::maxToStringRecursionDepth;
-                uassert(16150, s.str(), full != true);
-            }
-            s << "...";
-            return;
-        }
-
+    inline void BSONElement::toString(StringBuilder& s, bool includeFieldName, bool full ) const {
         if ( includeFieldName && type() != EOO )
             s << fieldName() << ": ";
         switch ( type() ) {
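The left-hand side being removed here carries a recursion guard: `toString` threads a depth counter through nested objects and arrays and stops with "..." once `BSONObj::maxToStringRecursionDepth` is exceeded, so a deeply nested or corrupt document cannot overflow the stack. The same pattern in isolation:

```cpp
#include <sstream>

const int kMaxDepth = 100;  // stand-in for BSONObj::maxToStringRecursionDepth

// Depth-limited recursive printer: every nested level passes depth+1 and
// the guard bails out with "..." once the cap is exceeded.
void printNested(std::ostringstream& s, int levels, int depth = 0) {
    if (depth > kMaxDepth) {
        s << "...";
        return;
    }
    s << "{ x: ";
    if (levels > 0)
        printNested(s, levels - 1, depth + 1);
    else
        s << 1;
    s << " }";
}
```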
@@ -725,10 +699,10 @@ dodouble:
             s << ( boolean() ? "true" : "false" );
             break;
         case Object:
-            embeddedObject().toString(s, false, full, depth+1);
+            embeddedObject().toString(s, false, full);
             break;
         case mongo::Array:
-            embeddedObject().toString(s, true, full, depth+1);
+            embeddedObject().toString(s, true, full);
             break;
         case Undefined:
             s << "undefined";

@@ -803,7 +777,7 @@ dodouble:
         if ( e.eoo() ) {
             const char *p = strchr(name, '.');
             if ( p ) {
-                std::string left(name, p-name);
+                string left(name, p-name);
                 BSONObj sub = getObjectField(left.c_str());
                 return sub.isEmpty() ? BSONElement() : sub.getFieldDotted(p+1);
             }

@@ -841,25 +815,25 @@ dodouble:
 
     inline BSONObj BSONElement::Obj() const { return embeddedObjectUserCheck(); }
 
-    inline BSONElement BSONElement::operator[] (const std::string& field) const {
+    inline BSONElement BSONElement::operator[] (const string& field) const {
         BSONObj o = Obj();
         return o[field];
     }
 
-    inline void BSONObj::elems(std::vector<BSONElement> &v) const {
+    inline void BSONObj::elems(vector<BSONElement> &v) const {
         BSONObjIterator i(*this);
         while( i.more() )
             v.push_back(i.next());
     }
 
-    inline void BSONObj::elems(std::list<BSONElement> &v) const {
+    inline void BSONObj::elems(list<BSONElement> &v) const {
         BSONObjIterator i(*this);
         while( i.more() )
             v.push_back(i.next());
     }
 
     template <class T>
-    void BSONObj::Vals(std::vector<T>& v) const {
+    void BSONObj::Vals(vector<T>& v) const {
         BSONObjIterator i(*this);
         while( i.more() ) {
             T t;

@@ -868,7 +842,7 @@ dodouble:
         }
     }
     template <class T>
-    void BSONObj::Vals(std::list<T>& v) const {
+    void BSONObj::Vals(list<T>& v) const {
         BSONObjIterator i(*this);
         while( i.more() ) {
             T t;

@@ -878,7 +852,7 @@ dodouble:
     }
 
     template <class T>
-    void BSONObj::vals(std::vector<T>& v) const {
+    void BSONObj::vals(vector<T>& v) const {
         BSONObjIterator i(*this);
         while( i.more() ) {
             try {

@@ -890,7 +864,7 @@ dodouble:
         }
     }
     template <class T>
-    void BSONObj::vals(std::list<T>& v) const {
+    void BSONObj::vals(list<T>& v) const {
         BSONObjIterator i(*this);
         while( i.more() ) {
             try {

@@ -902,11 +876,11 @@ dodouble:
         }
     }
 
-    inline std::ostream& operator<<( std::ostream &s, const BSONObj &o ) {
+    inline ostream& operator<<( ostream &s, const BSONObj &o ) {
         return s << o.toString();
     }
 
-    inline std::ostream& operator<<( std::ostream &s, const BSONElement &e ) {
+    inline ostream& operator<<( ostream &s, const BSONElement &e ) {
         return s << e.toString();
     }
 

@@ -930,9 +904,9 @@ dodouble:
     }
 
     // used by jsonString()
-    inline std::string escape( const std::string& s , bool escape_slash=false) {
+    inline string escape( string s , bool escape_slash=false) {
         StringBuilder ret;
-        for ( std::string::const_iterator i = s.begin(); i != s.end(); ++i ) {
+        for ( string::iterator i = s.begin(); i != s.end(); ++i ) {
             switch ( *i ) {
             case '"':
                 ret << "\\\"";

@@ -972,14 +946,14 @@ dodouble:
         return ret.str();
     }
 
-    inline std::string BSONObj::hexDump() const {
-        std::stringstream ss;
+    inline string BSONObj::hexDump() const {
+        stringstream ss;
         const char *d = objdata();
         int size = objsize();
         for( int i = 0; i < size; ++i ) {
             ss.width( 2 );
             ss.fill( '0' );
-            ss << std::hex << (unsigned)(unsigned char)( d[ i ] ) << std::dec;
+            ss << hex << (unsigned)(unsigned char)( d[ i ] ) << dec;
             if ( ( d[ i ] >= '0' && d[ i ] <= '9' ) || ( d[ i ] >= 'A' && d[ i ] <= 'z' ) )
                 ss << '\'' << d[ i ] << '\'';
             if ( i != size - 1 )
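`hexDump` needs nothing beyond the standard manipulators: `width` and `fill` apply only to the next insertion, so they are re-set before every byte, and `std::hex`/`std::dec` bracket the numeric output. A self-contained equivalent (the printable-character annotation is dropped, and the space separator is an assumption, since the hunk above cuts off before it):

```cpp
#include <iostream>
#include <sstream>
#include <string>

// Format a buffer the way BSONObj::hexDump does: two zero-padded hex
// digits per byte.
std::string hexDump(const char* d, int size) {
    std::stringstream ss;
    for (int i = 0; i < size; ++i) {
        ss.width(2);   // width/fill only affect the next insertion,
        ss.fill('0');  // so they are reapplied for every byte
        ss << std::hex << (unsigned)(unsigned char)d[i] << std::dec;
        if (i != size - 1)
            ss << ' ';
    }
    return ss.str();
}

int main() { std::cout << hexDump("\x16\x00", 2) << '\n'; }  // "16 00"
```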
@@ -996,66 +970,19 @@ dodouble:
             appendAs( j.next() , i.next().fieldName() );
         }
 
-        verify( ! i.more() );
-        verify( ! j.more() );
+        assert( ! i.more() );
+        assert( ! j.more() );
     }
 
     inline BSONObj BSONObj::removeField(const StringData& name) const {
         BSONObjBuilder b;
         BSONObjIterator i(*this);
         while ( i.more() ) {
             BSONElement e = i.next();
             const char *fname = e.fieldName();
-            if ( name != fname )
+            if( strcmp(name.data(), fname) )
                 b.append(e);
         }
         return b.obj();
     }
 
-    template<typename T> bool BSONObj::coerceVector( std::vector<T>* out ) const {
-        BSONObjIterator i( *this );
-        while ( i.more() ) {
-            BSONElement e = i.next();
-            T t;
-            if ( ! e.coerce<T>( &t ) )
-                return false;
-            out->push_back( t );
-        }
-        return true;
-    }
-
-    template<> inline bool BSONElement::coerce<std::string>( std::string* out ) const {
-        if ( type() != mongo::String )
-            return false;
-        *out = String();
-        return true;
-    }
-
-    template<> inline bool BSONElement::coerce<int>( int* out ) const {
-        if ( !isNumber() )
-            return false;
-        *out = numberInt();
-        return true;
-    }
-
-    template<> inline bool BSONElement::coerce<double>( double* out ) const {
-        if ( !isNumber() )
-            return false;
-        *out = numberDouble();
-        return true;
-    }
-
-    template<> inline bool BSONElement::coerce<bool>( bool* out ) const {
-        *out = trueValue();
-        return true;
-    }
-
-    template<> inline bool BSONElement::coerce< std::vector<std::string> >( std::vector<std::string>* out ) const {
-        if ( type() != mongo::Array )
-            return false;
-        return Obj().coerceVector<std::string>( out );
-    }
-
 }
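The `coerce<T>` specializations that exist only on the left-hand side give callers a non-throwing extraction path: they return `false` on a type mismatch instead of raising a UserException. A hedged usage sketch (the include path and calling code are assumptions, not taken from this diff):

```cpp
#include "mongo/db/jsobj.h"  // assumed include path for the master-side API

using namespace mongo;

// Returns false if "port" is absent or not numeric, instead of throwing.
bool readPort(const BSONObj& cfg, int* port) {
    BSONElement e = cfg["port"];
    return e.ok() && e.coerce<int>(port);
}
```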
bson/bson.h: 110 lines (new file)

@@ -0,0 +1,110 @@ (entire file added)

/** @file bson.h

    Main bson include file for mongodb c++ clients. MongoDB includes ../db/jsobj.h instead.
    This file, however, pulls in much less code / dependencies.

    @see bsondemo
*/

/*
 * Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
   Main include file for C++ BSON module when using standalone (sans MongoDB client).

   "BSON" stands for "binary JSON" -- ie a binary way to represent objects that would be
   represented in JSON (plus a few extensions useful for databases & other languages).

   http://www.bsonspec.org/
*/

#pragma once

#if defined(MONGO_EXPOSE_MACROS)
#error this header is for client programs, not the mongo database itself. include jsobj.h instead.
/* because we define simplistic assert helpers here that don't pull in a bunch of util -- so that
   BSON can be used header only.
 */
#endif

#include <cstdlib>
#include <memory>
#include <iostream>
#include <sstream>
#include <boost/utility.hpp>

namespace bson {

    using std::string;
    using std::stringstream;

    class assertion : public std::exception {
    public:
        assertion( unsigned u , const string& s )
            : id( u ) , msg( s ) {
            stringstream ss;
            ss << "BsonAssertion id: " << u << " " << s;
            full = ss.str();
        }

        virtual ~assertion() throw() {}

        virtual const char* what() const throw() { return full.c_str(); }

        unsigned id;
        string msg;
        string full;
    };
}

namespace mongo {
#if !defined(assert)
    inline void assert(bool expr) {
        if(!expr) {
            throw bson::assertion( 0 , "assertion failure in bson library" );
        }
    }
#endif
#if !defined(uassert)
    inline void uasserted(unsigned msgid, std::string s) {
        throw bson::assertion( msgid , s );
    }

    inline void uassert(unsigned msgid, std::string msg, bool expr) {
        if( !expr )
            uasserted( msgid , msg );
    }
    inline void msgasserted(int msgid, const char *msg) {
        throw bson::assertion( msgid , msg );
    }
    inline void msgasserted(int msgid, const std::string &msg) { msgasserted(msgid, msg.c_str()); }
    inline void massert(unsigned msgid, std::string msg, bool expr) {
        if(!expr) {
            std::cout << "assertion failure in bson library: " << msgid << ' ' << msg << std::endl;
            throw bson::assertion( msgid , msg );
        }
    }
#endif
}

#include "util/builder.h"
#include "bsontypes.h"
#include "oid.h"
#include "bsonelement.h"
#include "bsonobj.h"
#include "bsonobjbuilder.h"
#include "bsonobjiterator.h"
#include "bson-inl.h"
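bson.h is built to work standalone and header-only: unless the full server headers already define them, it installs minimal assert/uassert/massert shims that throw `bson::assertion`, then pulls in the builder, object, and iterator headers. A minimal standalone client might look like this (the include path is assumed relative to the directory containing bson/):

```cpp
#include "bson/bson.h"
#include <iostream>

int main() {
    mongo::BSONObjBuilder b;
    b.append("name", "Joe");
    b.append("age", 33);
    mongo::BSONObj o = b.obj();
    std::cout << o.toString() << std::endl;  // { name: "Joe", age: 33 }

    try {
        o["name"].Int();  // wrong type: the standalone shim should throw
    } catch (const bson::assertion& e) {
        std::cout << e.what() << std::endl;   // "BsonAssertion id: ..."
    }
    return 0;
}
```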
bson/bson_db.h: 82 lines (new file)

@@ -0,0 +1,82 @@ (entire file added)

/** @file bson_db.h

    This file contains the implementation of BSON-related methods that are required
    by the MongoDB database server.

    Normally, for standalone BSON usage, you do not want this file - it will tend to
    pull in some other files from the MongoDB project. Thus, bson.h (the main file
    one would use) does not include this file.
*/

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "../util/optime.h"
#include "../util/time_support.h"

namespace mongo {

    /**
       Timestamps are a special BSON datatype that is used internally for replication.
       Append a timestamp element to the object being built.
       @param time - in millis (but stored in seconds)
    */
    inline BSONObjBuilder& BSONObjBuilder::appendTimestamp( const StringData& fieldName , unsigned long long time , unsigned int inc ) {
        OpTime t( (unsigned) (time / 1000) , inc );
        appendTimestamp( fieldName , t.asDate() );
        return *this;
    }

    inline OpTime BSONElement::_opTime() const {
        if( type() == mongo::Date || type() == Timestamp )
            return OpTime( *reinterpret_cast< const unsigned long long* >( value() ) );
        return OpTime();
    }

    inline string BSONElement::_asCode() const {
        switch( type() ) {
        case mongo::String:
        case Code:
            return string(valuestr(), valuestrsize()-1);
        case CodeWScope:
            return string(codeWScopeCode(), *(int*)(valuestr())-1);
        default:
            log() << "can't convert type: " << (int)(type()) << " to code" << endl;
        }
        uassert( 10062 , "not code" , 0 );
        return "";
    }

    inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<(DateNowLabeler& id) {
        _builder->appendDate(_fieldName, jsTime());
        _fieldName = 0;
        return *_builder;
    }

    inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<(MinKeyLabeler& id) {
        _builder->appendMinKey(_fieldName);
        _fieldName = 0;
        return *_builder;
    }

    inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<(MaxKeyLabeler& id) {
        _builder->appendMaxKey(_fieldName);
        _fieldName = 0;
        return *_builder;
    }

}
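`appendTimestamp` accepts milliseconds but `OpTime` stores whole seconds plus an increment, so sub-second precision is truncated on the way in, something to keep in mind when round-tripping timestamps. The conversion in isolation:

```cpp
#include <iostream>

// Mirrors the conversion in BSONObjBuilder::appendTimestamp: the OpTime
// keeps (seconds, increment), so the millisecond remainder is dropped.
int main() {
    unsigned long long timeMillis = 1234567890123ULL;
    unsigned inc = 7;
    unsigned secs = (unsigned)(timeMillis / 1000);  // 1234567890; 123 ms lost
    std::cout << "seconds=" << secs << " inc=" << inc << '\n';
}
```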
@@ -1,4 +1,16 @@
-// @file bsondemo.cpp
+/** @file bsondemo.cpp
+
+    Example of use of BSON from C++.
+
+    Requires boost (headers only).
+    Works headers only (the parts actually exercised herein that is - some functions require .cpp files).
+
+    To build and run:
+      g++ -o bsondemo bsondemo.cpp
+      ./bsondemo
+
+    Windows: project files are available in this directory for bsondemo.cpp for use with Visual Studio.
+*/
 
 /*
  * Copyright 2010 10gen Inc.

@@ -16,20 +28,6 @@
  * limitations under the License.
  */
 
-/*
-    Example of use of BSON from C++. Example is solely BSON, no MongoDB involved.
-
-    Requires boost (headers only).
-    Works as c++ "headers-only" (the parts actually exercised herein that is - some functions require .cpp files).
-
-    To build and run:
-      # "../../.." is the directory mongo/src/
-      g++ -o bsondemo -I ../../.. bsondemo.cpp
-      ./bsondemo
-
-    Windows: project files are available in this directory for bsondemo.cpp for use with Visual Studio.
-*/
-
 #include "../bson.h"
 #include <iostream>
 #include <vector>
bson/bsondemo/bsondemo.vcxproj: 193 lines (new file)

@@ -0,0 +1,193 @@ (entire file added)

<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <ItemGroup Label="ProjectConfigurations">
    <ProjectConfiguration Include="Debug|Win32">
      <Configuration>Debug</Configuration>
      <Platform>Win32</Platform>
    </ProjectConfiguration>
    <ProjectConfiguration Include="Debug|x64">
      <Configuration>Debug</Configuration>
      <Platform>x64</Platform>
    </ProjectConfiguration>
    <ProjectConfiguration Include="Release|Win32">
      <Configuration>Release</Configuration>
      <Platform>Win32</Platform>
    </ProjectConfiguration>
    <ProjectConfiguration Include="Release|x64">
      <Configuration>Release</Configuration>
      <Platform>x64</Platform>
    </ProjectConfiguration>
  </ItemGroup>
  <PropertyGroup Label="Globals">
    <ProjectGuid>{C9DB5EB7-81AA-4185-BAA1-DA035654402F}</ProjectGuid>
    <RootNamespace>bsondemo</RootNamespace>
    <Keyword>Win32Proj</Keyword>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
    <ConfigurationType>Application</ConfigurationType>
    <CharacterSet>Unicode</CharacterSet>
    <WholeProgramOptimization>true</WholeProgramOptimization>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
    <ConfigurationType>Application</ConfigurationType>
    <CharacterSet>Unicode</CharacterSet>
    <WholeProgramOptimization>true</WholeProgramOptimization>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
    <ConfigurationType>Application</ConfigurationType>
    <CharacterSet>Unicode</CharacterSet>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
    <ConfigurationType>Application</ConfigurationType>
    <CharacterSet>Unicode</CharacterSet>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
  <ImportGroup Label="ExtensionSettings">
  </ImportGroup>
  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <PropertyGroup Label="UserMacros" />
  <PropertyGroup>
    <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
    <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
    <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(SolutionDir)$(Configuration)\</OutDir>
    <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(Configuration)\</IntDir>
    <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(Configuration)\</IntDir>
    <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</LinkIncremental>
    <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</LinkIncremental>
    <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
    <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(SolutionDir)$(Configuration)\</OutDir>
    <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(Configuration)\</IntDir>
    <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(Configuration)\</IntDir>
    <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">false</LinkIncremental>
    <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</LinkIncremental>
    <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
    <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
    <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
    <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
    <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
    <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
    <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
    <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
    <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
    <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
    <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
    <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
  </PropertyGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
    <ClCompile>
      <Optimization>Disabled</Optimization>
      <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
      <PreprocessorDefinitions>_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <MinimalRebuild>No</MinimalRebuild>
      <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
      <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
      <PrecompiledHeader>
      </PrecompiledHeader>
      <WarningLevel>Level3</WarningLevel>
      <DebugInformationFormat>EditAndContinue</DebugInformationFormat>
      <MultiProcessorCompilation>true</MultiProcessorCompilation>
    </ClCompile>
    <Link>
      <GenerateDebugInformation>true</GenerateDebugInformation>
      <SubSystem>Console</SubSystem>
      <TargetMachine>MachineX86</TargetMachine>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
    <ClCompile>
      <Optimization>Disabled</Optimization>
      <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
      <PreprocessorDefinitions>_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
      <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
      <PrecompiledHeader>
      </PrecompiledHeader>
      <WarningLevel>Level3</WarningLevel>
      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
      <MinimalRebuild>No</MinimalRebuild>
      <MultiProcessorCompilation>true</MultiProcessorCompilation>
    </ClCompile>
    <Link>
      <GenerateDebugInformation>true</GenerateDebugInformation>
      <SubSystem>Console</SubSystem>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
    <ClCompile>
      <Optimization>MaxSpeed</Optimization>
      <IntrinsicFunctions>true</IntrinsicFunctions>
      <PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
      <FunctionLevelLinking>true</FunctionLevelLinking>
      <PrecompiledHeader>
      </PrecompiledHeader>
      <WarningLevel>Level3</WarningLevel>
      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
      <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
      <MinimalRebuild>No</MinimalRebuild>
      <MultiProcessorCompilation>true</MultiProcessorCompilation>
    </ClCompile>
    <Link>
      <GenerateDebugInformation>true</GenerateDebugInformation>
      <SubSystem>Console</SubSystem>
      <OptimizeReferences>true</OptimizeReferences>
      <EnableCOMDATFolding>true</EnableCOMDATFolding>
      <TargetMachine>MachineX86</TargetMachine>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
    <ClCompile>
      <Optimization>MaxSpeed</Optimization>
      <IntrinsicFunctions>true</IntrinsicFunctions>
      <PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
      <FunctionLevelLinking>true</FunctionLevelLinking>
      <PrecompiledHeader>
      </PrecompiledHeader>
      <WarningLevel>Level3</WarningLevel>
      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
      <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
      <MinimalRebuild>No</MinimalRebuild>
      <MultiProcessorCompilation>true</MultiProcessorCompilation>
    </ClCompile>
    <Link>
      <GenerateDebugInformation>true</GenerateDebugInformation>
      <SubSystem>Console</SubSystem>
      <OptimizeReferences>true</OptimizeReferences>
      <EnableCOMDATFolding>true</EnableCOMDATFolding>
    </Link>
  </ItemDefinitionGroup>
  <ItemGroup>
    <ClCompile Include="bsondemo.cpp" />
  </ItemGroup>
  <ItemGroup>
    <ClInclude Include="..\bson.h" />
    <ClInclude Include="..\bson_db.h" />
    <ClInclude Include="..\bsonelement.h" />
    <ClInclude Include="..\bsoninlines.h" />
    <ClInclude Include="..\bsonmisc.h" />
    <ClInclude Include="..\bsonobj.h" />
    <ClInclude Include="..\bsonobjbuilder.h" />
    <ClInclude Include="..\bsonobjiterator.h" />
    <ClInclude Include="..\bsontypes.h" />
    <ClInclude Include="..\oid.h" />
    <ClInclude Include="..\ordering.h" />
    <ClInclude Include="..\util\builder.h" />
    <ClInclude Include="..\util\misc.h" />
  </ItemGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
  <ImportGroup Label="ExtensionTargets">
  </ImportGroup>
</Project>
@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="utf-8"?>
 <Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
   <ItemGroup>
     <ClCompile Include="bsondemo.cpp" />

@@ -28,6 +28,12 @@
     <ClInclude Include="..\bsontypes.h">
       <Filter>bson</Filter>
     </ClInclude>
+    <ClInclude Include="..\util\builder.h">
+      <Filter>bson</Filter>
+    </ClInclude>
+    <ClInclude Include="..\util\misc.h">
+      <Filter>bson</Filter>
+    </ClInclude>
     <ClInclude Include="..\oid.h">
       <Filter>bson</Filter>
     </ClInclude>

@@ -37,19 +43,10 @@
     <ClInclude Include="..\bson.h">
       <Filter>bson</Filter>
     </ClInclude>
-    <ClInclude Include="..\util\builder.h">
-      <Filter>bson\util</Filter>
-    </ClInclude>
-    <ClInclude Include="..\util\misc.h">
-      <Filter>bson\util</Filter>
-    </ClInclude>
   </ItemGroup>
   <ItemGroup>
     <Filter Include="bson">
       <UniqueIdentifier>{ea599740-3c6f-40dd-a121-e825d82ae4aa}</UniqueIdentifier>
     </Filter>
-    <Filter Include="bson\util">
-      <UniqueIdentifier>{5c397ce3-b900-41cd-8af0-a1d456db6854}</UniqueIdentifier>
-    </Filter>
   </ItemGroup>
 </Project>
@@ -1,4 +1,4 @@
-// bsonelement.h
+// BSONElement
 
 /* Copyright 2009 10gen Inc.
  *

@@ -17,13 +17,10 @@
 
 #pragma once
 
-#include <string.h> // strlen
-#include <string>
-#include <vector>
-
-#include "mongo/bson/bsontypes.h"
-#include "mongo/bson/oid.h"
-#include "mongo/platform/float_utils.h"
+#include <string.h>
+#include "util/builder.h"
+#include "bsontypes.h"
 
 namespace mongo {
     class OpTime;

@@ -62,26 +59,26 @@ namespace mongo {
         /** These functions, which start with a capital letter, throw a UserException if the
             element is not of the required type. Example:
 
-            std::string foo = obj["foo"].String(); // std::exception if not a std::string type or DNE
+            string foo = obj["foo"].String(); // exception if not a string type or DNE
         */
-        std::string String() const { return chk(mongo::String).valuestr(); }
+        string String() const { return chk(mongo::String).valuestr(); }
         Date_t Date() const { return chk(mongo::Date).date(); }
         double Number() const { return chk(isNumber()).number(); }
         double Double() const { return chk(NumberDouble)._numberDouble(); }
         long long Long() const { return chk(NumberLong)._numberLong(); }
         int Int() const { return chk(NumberInt)._numberInt(); }
         bool Bool() const { return chk(mongo::Bool).boolean(); }
-        std::vector<BSONElement> Array() const; // see implementation for detailed comments
+        vector<BSONElement> Array() const; // see implementation for detailed comments
         mongo::OID OID() const { return chk(jstOID).__oid(); }
         void Null() const { chk(isNull()); } // throw UserException if not null
         void OK() const { chk(ok()); } // throw UserException if element DNE
 
         /** @return the embedded object associated with this field.
             Note the returned object is a reference to within the parent bson object. If that
             object is out of scope, this pointer will no longer be valid. Call getOwned() on the
             returned BSONObj if you need your own copy.
             throws UserException if the element is not of type object.
         */
         BSONObj Obj() const;
 
         /** populate v with the value of the element. If type does not match, throw exception.
@ -94,28 +91,31 @@ namespace mongo {
|
|||
void Val(mongo::OID& v) const { v = OID(); }
|
||||
void Val(int& v) const { v = Int(); }
|
||||
void Val(double& v) const { v = Double(); }
|
||||
void Val(std::string& v) const { v = String(); }
|
||||
void Val(string& v) const { v = String(); }
|
||||
|
||||
/** Use ok() to check if a value is assigned:
|
||||
if( myObj["foo"].ok() ) ...
|
||||
*/
|
||||
bool ok() const { return !eoo(); }
|
||||
|
||||
std::string toString( bool includeFieldName = true, bool full=false) const;
|
||||
void toString(StringBuilder& s, bool includeFieldName = true, bool full=false, int depth=0) const;
|
||||
std::string jsonString( JsonStringFormat format, bool includeFieldNames = true, int pretty = 0 ) const;
|
||||
operator std::string() const { return toString(); }
|
||||
string toString( bool includeFieldName = true, bool full=false) const;
|
||||
void toString(StringBuilder& s, bool includeFieldName = true, bool full=false) const;
|
||||
string jsonString( JsonStringFormat format, bool includeFieldNames = true, int pretty = 0 ) const;
|
||||
operator string() const { return toString(); }
|
||||
|
||||
/** Returns the type of the element */
|
||||
BSONType type() const { return (BSONType) *reinterpret_cast< const signed char * >(data); }
|
||||
BSONType type() const { return (BSONType) *data; }
|
||||
|
||||
/** retrieve a field within this element
|
||||
throws exception if *this is not an embedded object
|
||||
*/
|
||||
BSONElement operator[] (const std::string& field) const;
|
||||
BSONElement operator[] (const string& field) const;
|
||||
|
||||
/** See canonicalizeBSONType in bsontypes.h */
|
||||
int canonicalType() const { return canonicalizeBSONType(type()); }
|
||||
/** returns the tyoe of the element fixed for the main type
|
||||
the main purpose is numbers. any numeric type will return NumberDouble
|
||||
Note: if the order changes, indexes have to be re-built or than can be corruption
|
||||
*/
|
||||
int canonicalType() const;
|
||||
|
||||
/** Indicates if it is the end-of-object element, which is present at the end of
|
||||
every BSON object.
|
||||
|
|
@ -132,7 +132,7 @@ namespace mongo {
|
|||
BSONObj wrap() const;
|
||||
|
||||
/** Wrap this element up as a singleton object with a new name. */
|
||||
BSONObj wrap( const StringData& newName) const;
|
||||
BSONObj wrap( const char* newName) const;
|
||||
|
||||
/** field name of the element. e.g., for
|
||||
name : "Joe"
|
||||
|
|
@ -143,13 +143,6 @@ namespace mongo {
|
|||
return data + 1;
|
||||
}
|
||||
|
||||
|
||||
int fieldNameSize() const {
|
||||
if ( fieldNameSize_ == -1 )
|
||||
fieldNameSize_ = (int)strlen( fieldName() ) + 1;
|
||||
return fieldNameSize_;
|
||||
}
|
||||
|
||||
/** raw data of the element's value (so be careful). */
|
||||
const char * value() const {
|
||||
return (data + fieldNameSize() + 1);
|
||||
|
|
@ -190,26 +183,16 @@ namespace mongo {
|
|||
bool isNumber() const;
|
||||
|
||||
/** Return double value for this field. MUST be NumberDouble type. */
|
||||
double _numberDouble() const {return (reinterpret_cast< const PackedDouble* >( value() ))->d; }
|
||||
/** Return int value for this field. MUST be NumberInt type. */
|
||||
double _numberDouble() const {return *reinterpret_cast< const double* >( value() ); }
|
||||
/** Return double value for this field. MUST be NumberInt type. */
|
||||
int _numberInt() const {return *reinterpret_cast< const int* >( value() ); }
|
||||
/** Return long long value for this field. MUST be NumberLong type. */
|
||||
/** Return double value for this field. MUST be NumberLong type. */
|
||||
long long _numberLong() const {return *reinterpret_cast< const long long* >( value() ); }
|
||||
|
||||
/** Retrieve int value for the element safely. Zero returned if not a number. */
|
||||
int numberInt() const;
|
||||
/** Retrieve long value for the element safely. Zero returned if not a number.
|
||||
* Behavior is not defined for double values that are NaNs, or too large/small
|
||||
* to be represented by long longs */
|
||||
/** Retrieve long value for the element safely. Zero returned if not a number. */
|
||||
long long numberLong() const;
|
||||
|
||||
/** Like numberLong() but with well-defined behavior for doubles that
|
||||
* are NaNs, or too large/small to be represented as long longs.
|
||||
* NaNs -> 0
|
||||
* very large doubles -> LLONG_MAX
|
||||
* very small doubles -> LLONG_MIN */
|
||||
long long safeNumberLong() const;
|
||||
|
||||
/** Retrieve the numeric value of the element. If not of a numeric type, returns 0.
|
||||
Note: casts to double, data loss may occur with large (>52 bit) NumberLong values.
|
||||
*/
|
||||
|
|
@ -253,43 +236,18 @@ namespace mongo {
|
|||
return type() == mongo::String ? valuestr() : "";
|
||||
}
|
||||
/** Get the string value of the element. If not a string returns "". */
|
||||
std::string str() const {
|
||||
return type() == mongo::String ? std::string(valuestr(), valuestrsize()-1) : std::string();
|
||||
string str() const {
|
||||
return type() == mongo::String ? string(valuestr(), valuestrsize()-1) : string();
|
||||
}
|
||||
|
||||
/** Get javascript code of a CodeWScope data element. */
|
||||
const char * codeWScopeCode() const {
|
||||
massert( 16177 , "not codeWScope" , type() == CodeWScope );
|
||||
return value() + 4 + 4; //two ints precede code (see BSON spec)
|
||||
return value() + 8;
|
||||
}
|
||||
|
||||
/** Get length of the code part of the CodeWScope object
|
||||
* This INCLUDES the null char at the end */
|
||||
int codeWScopeCodeLen() const {
|
||||
massert( 16178 , "not codeWScope" , type() == CodeWScope );
|
||||
return *(int *)( value() + 4 );
|
||||
}
|
||||
|
||||
/** Get the scope SavedContext of a CodeWScope data element.
|
||||
*
|
||||
* This function is DEPRECATED, since it can error if there are
|
||||
* null chars in the codeWScopeCode. However, some existing indexes
|
||||
* may be based on an incorrect ordering derived from this function,
|
||||
* so it may still need to be used in certain cases.
|
||||
* */
|
||||
const char * codeWScopeScopeDataUnsafe() const {
|
||||
//This can error if there are null chars in the codeWScopeCode
|
||||
return codeWScopeCode() + strlen( codeWScopeCode() ) + 1;
|
||||
}
|
||||
|
||||
/* Get the scope SavedContext of a CodeWScope data element.
|
||||
*
|
||||
* This is the corrected version of codeWScopeScopeDataUnsafe(),
|
||||
* but note that existing uses might rely on the behavior of
|
||||
* that function so be careful in choosing which version to use.
|
||||
*/
|
||||
/** Get the scope SavedContext of a CodeWScope data element. */
|
||||
const char * codeWScopeScopeData() const {
|
||||
return codeWScopeCode() + codeWScopeCodeLen();
|
||||
// TODO fix
|
||||
return codeWScopeCode() + strlen( codeWScopeCode() ) + 1;
|
||||
}
|
||||
|
||||
/** Get the embedded object this element holds. */
|
||||
|
|
@ -303,7 +261,7 @@ namespace mongo {
|
|||
/** Get raw binary data. Element must be of type BinData. Doesn't handle type 2 specially */
|
||||
const char *binData(int& len) const {
|
||||
// BinData: <int len> <byte subtype> <byte[len] data>
|
||||
verify( type() == BinData );
|
||||
assert( type() == BinData );
|
||||
len = valuestrsize();
|
||||
return value() + 5;
|
||||
}
|
||||
|
|
@ -322,14 +280,14 @@ namespace mongo {
|
|||
|
||||
BinDataType binDataType() const {
|
||||
// BinData: <int len> <byte subtype> <byte[len] data>
|
||||
verify( type() == BinData );
|
||||
assert( type() == BinData );
|
||||
unsigned char c = (value() + 4)[0];
|
||||
return (BinDataType)c;
|
||||
}
|
||||
|
||||
/** Retrieve the regex string for a Regex element */
|
||||
const char *regex() const {
|
||||
verify(type() == RegEx);
|
||||
assert(type() == RegEx);
|
||||
return value();
|
||||
}
|
||||
|
||||
|
|
@ -433,7 +391,7 @@ namespace mongo {
|
|||
fieldNameSize_ = -1;
|
||||
if ( maxLen != -1 ) {
|
||||
int size = (int) strnlen( fieldName(), maxLen - 1 );
|
||||
uassert( 10333 , "Invalid field name", size != -1 );
|
||||
massert( 10333 , "Invalid field name", size != -1 );
|
||||
fieldNameSize_ = size + 1;
|
||||
}
|
||||
}
|
||||
|
|
@ -448,15 +406,17 @@ namespace mongo {
|
|||
}
|
||||
}
|
||||
|
||||
std::string _asCode() const;
|
||||
string _asCode() const;
|
||||
OpTime _opTime() const;
|
||||
|
||||
template<typename T> bool coerce( T* out ) const;
|
||||
|
||||
private:
|
||||
const char *data;
|
||||
mutable int fieldNameSize_; // cached value
|
||||
|
||||
int fieldNameSize() const {
|
||||
if ( fieldNameSize_ == -1 )
|
||||
fieldNameSize_ = (int)strlen( fieldName() ) + 1;
|
||||
return fieldNameSize_;
|
||||
}
|
||||
mutable int totalSize; /* caches the computed size */
|
||||
|
||||
friend class BSONObjIterator;
|
||||
|
|
@ -468,23 +428,68 @@ namespace mongo {
|
|||
ss << "field not found, expected type " << t;
|
||||
else
|
||||
ss << "wrong type for field (" << fieldName() << ") " << type() << " != " << t;
|
||||
msgasserted(13111, ss.str() );
|
||||
uasserted(13111, ss.str() );
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
const BSONElement& chk(bool expr) const {
|
||||
massert(13118, "unexpected or missing type value in BSON object", expr);
|
||||
uassert(13118, "unexpected or missing type value in BSON object", expr);
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
inline int BSONElement::canonicalType() const {
|
||||
BSONType t = type();
|
||||
switch ( t ) {
|
||||
case MinKey:
|
||||
case MaxKey:
|
||||
return t;
|
||||
case EOO:
|
||||
case Undefined:
|
||||
return 0;
|
||||
case jstNULL:
|
||||
return 5;
|
||||
case NumberDouble:
|
||||
case NumberInt:
|
||||
case NumberLong:
|
||||
return 10;
|
||||
case mongo::String:
|
||||
case Symbol:
|
||||
return 15;
|
||||
case Object:
|
||||
return 20;
|
||||
case mongo::Array:
|
||||
return 25;
|
||||
case BinData:
|
||||
return 30;
|
||||
case jstOID:
|
||||
return 35;
|
||||
case mongo::Bool:
|
||||
return 40;
|
||||
case mongo::Date:
|
||||
case Timestamp:
|
||||
return 45;
|
||||
case RegEx:
|
||||
return 50;
|
||||
case DBRef:
|
||||
return 55;
|
||||
case Code:
|
||||
return 60;
|
||||
case CodeWScope:
|
||||
return 65;
|
||||
default:
|
||||
assert(0);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
inline bool BSONElement::trueValue() const {
|
||||
// NOTE Behavior changes must be replicated in Value::coerceToBool().
|
||||
switch( type() ) {
|
||||
case NumberLong:
|
||||
return *reinterpret_cast< const long long* >( value() ) != 0;
|
||||
case NumberDouble:
|
||||
return (reinterpret_cast < const PackedDouble* >(value ()))->d != 0;
|
||||
return *reinterpret_cast< const double* >( value() ) != 0;
|
||||
case NumberInt:
|
||||
return *reinterpret_cast< const int* >( value() ) != 0;
|
||||
case mongo::Bool:
|
||||
|
|
@ -568,30 +573,6 @@ namespace mongo {
|
|||
}
|
||||
}
|
||||
|
||||
/** Like numberLong() but with well-defined behavior for doubles that
|
||||
* are NaNs, or too large/small to be represented as long longs.
|
||||
* NaNs -> 0
|
||||
* very large doubles -> LLONG_MAX
|
||||
* very small doubles -> LLONG_MIN */
|
||||
inline long long BSONElement::safeNumberLong() const {
|
||||
double d;
|
||||
switch( type() ) {
|
||||
case NumberDouble:
|
||||
d = numberDouble();
|
||||
if ( isNaN( d ) ){
|
||||
return 0;
|
||||
}
|
||||
if ( d > (double) std::numeric_limits<long long>::max() ){
|
||||
return std::numeric_limits<long long>::max();
|
||||
}
|
||||
if ( d < std::numeric_limits<long long>::min() ){
|
||||
return std::numeric_limits<long long>::min();
|
||||
}
|
||||
default:
|
||||
return numberLong();
|
||||
}
|
||||
}
|
||||
|
||||
inline BSONElement::BSONElement() {
|
||||
static char z = 0;
|
||||
data = &z;
|
||||
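For orientation before the next file: a minimal sketch of how the typed accessors and safe numeric conversions diffed above behave in practice. The object and values are illustrative, not from the patch:

    BSONObj obj = BSON( "name" << "joe" << "age" << 33 << "pi" << 3.14 );

    string name = obj["name"].String();   // throws a UserException if not a string
    int age = obj["age"].Int();           // exact-type accessor: must be NumberInt
    double pi = obj["pi"].Number();       // any numeric type, widened to double

    // numberInt()/numberLong() return 0 for non-numeric elements;
    // safeNumberLong() additionally maps NaN to 0 and clamps out-of-range doubles.
    long long trunc = obj["pi"].safeNumberLong();  // 3 (double truncated toward zero)
    long long zero = obj["name"].safeNumberLong(); // 0: element is not a number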
203 bson/bsonmisc.h Normal file
@@ -0,0 +1,203 @@
// @file bsonmisc.h

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

namespace mongo {

int getGtLtOp(const BSONElement& e);

struct BSONElementCmpWithoutField {
bool operator()( const BSONElement &l, const BSONElement &r ) const {
return l.woCompare( r, false ) < 0;
}
};

class BSONObjCmp {
public:
BSONObjCmp( const BSONObj &order = BSONObj() ) : _order( order ) {}
bool operator()( const BSONObj &l, const BSONObj &r ) const {
return l.woCompare( r, _order ) < 0;
}
BSONObj order() const { return _order; }
private:
BSONObj _order;
};

typedef set<BSONObj,BSONObjCmp> BSONObjSet;

enum FieldCompareResult {
LEFT_SUBFIELD = -2,
LEFT_BEFORE = -1,
SAME = 0,
RIGHT_BEFORE = 1 ,
RIGHT_SUBFIELD = 2
};

FieldCompareResult compareDottedFieldNames( const string& l , const string& r );

/** Use BSON macro to build a BSONObj from a stream

e.g.,
BSON( "name" << "joe" << "age" << 33 )

with auto-generated object id:
BSON( GENOID << "name" << "joe" << "age" << 33 )

The labels GT, GTE, LT, LTE, NE can be helpful for stream-oriented construction
of a BSONObj, particularly when assembling a Query. For example,
BSON( "a" << GT << 23.4 << NE << 30 << "b" << 2 ) produces the object
{ a: { \$gt: 23.4, \$ne: 30 }, b: 2 }.
*/
#define BSON(x) (( mongo::BSONObjBuilder(64) << x ).obj())

/** Use BSON_ARRAY macro like BSON macro, but without keys

BSONArray arr = BSON_ARRAY( "hello" << 1 << BSON( "foo" << BSON_ARRAY( "bar" << "baz" << "qux" ) ) );

*/
#define BSON_ARRAY(x) (( mongo::BSONArrayBuilder() << x ).arr())

/* Utility class to auto assign object IDs.
Example:
cout << BSON( GENOID << "z" << 3 ); // { _id : ..., z : 3 }
*/
extern struct GENOIDLabeler { } GENOID;

/* Utility class to add a Date element with the current time
Example:
cout << BSON( "created" << DATENOW ); // { created : "2009-10-09 11:41:42" }
*/
extern struct DateNowLabeler { } DATENOW;

/* Utility class to add the minKey (minus infinity) to a given attribute
Example:
cout << BSON( "a" << MINKEY ); // { "a" : { "$minKey" : 1 } }
*/
extern struct MinKeyLabeler { } MINKEY;
extern struct MaxKeyLabeler { } MAXKEY;

// Utility class to implement GT, GTE, etc as described above.
class Labeler {
public:
struct Label {
Label( const char *l ) : l_( l ) {}
const char *l_;
};
Labeler( const Label &l, BSONObjBuilderValueStream *s ) : l_( l ), s_( s ) {}
template<class T>
BSONObjBuilder& operator<<( T value );

/* the value of the element e is appended i.e. for
"age" << GT << someElement
one gets
{ age : { $gt : someElement's value } }
*/
BSONObjBuilder& operator<<( const BSONElement& e );
private:
const Label &l_;
BSONObjBuilderValueStream *s_;
};

extern Labeler::Label GT;
extern Labeler::Label GTE;
extern Labeler::Label LT;
extern Labeler::Label LTE;
extern Labeler::Label NE;
extern Labeler::Label SIZE;

// $or helper: OR(BSON("x" << GT << 7), BSON("y" << LT << 6));
// becomes : {$or: [{x: {$gt: 7}}, {y: {$lt: 6}}]}
inline BSONObj OR(const BSONObj& a, const BSONObj& b);
inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c);
inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d);
inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d, const BSONObj& e);
inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d, const BSONObj& e, const BSONObj& f);
// definitions in bsonobjbuilder.h b/c of incomplete types

// Utility class to implement BSON( key << val ) as described above.
class BSONObjBuilderValueStream : public boost::noncopyable {
public:
friend class Labeler;
BSONObjBuilderValueStream( BSONObjBuilder * builder );

BSONObjBuilder& operator<<( const BSONElement& e );

template<class T>
BSONObjBuilder& operator<<( T value );

BSONObjBuilder& operator<<(DateNowLabeler& id);

BSONObjBuilder& operator<<(MinKeyLabeler& id);
BSONObjBuilder& operator<<(MaxKeyLabeler& id);

Labeler operator<<( const Labeler::Label &l );

void endField( const char *nextFieldName = 0 );
bool subobjStarted() const { return _fieldName != 0; }

private:
const char * _fieldName;
BSONObjBuilder * _builder;

bool haveSubobj() const { return _subobj.get() != 0; }
BSONObjBuilder *subobj();
auto_ptr< BSONObjBuilder > _subobj;
};

/**
used in conjuction with BSONObjBuilder, allows for proper buffer size to prevent crazy memory usage
*/
class BSONSizeTracker {
public:
BSONSizeTracker() {
_pos = 0;
for ( int i=0; i<SIZE; i++ )
_sizes[i] = 512; // this is the default, so just be consistent
}

~BSONSizeTracker() {
}

void got( int size ) {
_sizes[_pos++] = size;
if ( _pos >= SIZE )
_pos = 0;
}

/**
* right now choosing largest size
*/
int getSize() const {
int x = 16; // sane min
for ( int i=0; i<SIZE; i++ ) {
if ( _sizes[i] > x )
x = _sizes[i];
}
return x;
}

private:
enum { SIZE = 10 };
int _pos;
int _sizes[SIZE];
};

// considers order
bool fieldsMatch(const BSONObj& lhs, const BSONObj& rhs);
}
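A short usage sketch of the stream macros and labels declared above (values are illustrative, not from the patch):

    // { name: "joe", age: 33 }
    BSONObj p = BSON( "name" << "joe" << "age" << 33 );

    // { _id: ObjectId("..."), z: 3 } -- GENOID auto-assigns the object id
    BSONObj q = BSON( GENOID << "z" << 3 );

    // { a: { $gt: 23.4, $ne: 30 }, b: 2 } -- labels open a query subobject
    BSONObj query = BSON( "a" << GT << 23.4 << NE << 30 << "b" << 2 );

    // [ "hello", 1 ] -- BSON_ARRAY assigns positional keys "0", "1", ...
    BSONArray arr = BSON_ARRAY( "hello" << 1 );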
@@ -18,21 +18,17 @@

#pragma once

#include <boost/intrusive_ptr.hpp>
#include <boost/noncopyable.hpp>
#include <set>
#include <list>
#include <string>
#include <vector>

#include "mongo/bson/bsonelement.h"
#include "mongo/base/string_data.h"
#include "mongo/bson/util/atomic_int.h"
#include "mongo/bson/util/builder.h"
#include "util/atomic_int.h"
#include "util/builder.h"
#include "stringdata.h"

namespace mongo {

typedef std::set< BSONElement, BSONElementCmpWithoutField > BSONElementSet;
typedef std::multiset< BSONElement, BSONElementCmpWithoutField > BSONElementMSet;
typedef set< BSONElement, BSONElementCmpWithoutField > BSONElementSet;
typedef multiset< BSONElement, BSONElementCmpWithoutField > BSONElementMSet;

/**
C++ representation of a "BSON" object -- that is, an extended JSON-style

@@ -74,7 +70,7 @@ namespace mongo {
*/
class BSONObj {
public:

/** Construct a BSONObj from data in the proper format.
* Use this constructor when something else owns msgdata's buffer
*/

@@ -91,11 +87,11 @@ namespace mongo {
init(holder);
}

explicit BSONObj(const Record *r);

/** Construct an empty BSONObj -- that is, {}. */
BSONObj();

static BSONObj make( const Record* r );

~BSONObj() {
_objdata = 0; // defensive
}

@@ -141,18 +137,16 @@ namespace mongo {
/** Readable representation of a BSON object in an extended JSON-style notation.
This is an abbreviated representation which might be used for logging.
*/
enum { maxToStringRecursionDepth = 100 };

std::string toString( bool isArray = false, bool full=false ) const;
void toString( StringBuilder& s, bool isArray = false, bool full=false, int depth=0 ) const;
string toString( bool isArray = false, bool full=false ) const;
void toString(StringBuilder& s, bool isArray = false, bool full=false ) const;

/** Properly formatted JSON string.
@param pretty if true we try to add some lf's and indentation
*/
std::string jsonString( JsonStringFormat format = Strict, int pretty = 0 ) const;
string jsonString( JsonStringFormat format = Strict, int pretty = 0 ) const;

/** note: addFields always adds _id even if not specified */
int addFields(BSONObj& from, std::set<std::string>& fields); /* returns n added */
int addFields(BSONObj& from, set<string>& fields); /* returns n added */

/** remove specified field and return a new object with the remaining fields.
slowish as builds a full new object

@@ -165,7 +159,7 @@ namespace mongo {
int nFields() const;

/** adds the field names to the fields set. does NOT clear it (appends). */
int getFieldNames(std::set<std::string>& fields) const;
int getFieldNames(set<string>& fields) const;

/** @return the specified element. element.eoo() will be true if not found.
@param name field to find. supports dot (".") notation to reach into embedded objects.

@@ -176,7 +170,7 @@ namespace mongo {
@param name field to find. supports dot (".") notation to reach into embedded objects.
for example "x.y" means "in the nested object in field x, retrieve field y"
*/
BSONElement getFieldDotted(const std::string& name) const {
BSONElement getFieldDotted(const string& name) const {
return getFieldDotted( name.c_str() );
}

@@ -212,21 +206,21 @@ namespace mongo {
return getField(field);
}

BSONElement operator[] (const std::string& field) const {
BSONElement operator[] (const string& field) const {
return getField(field);
}

BSONElement operator[] (int field) const {
StringBuilder ss;
ss << field;
std::string s = ss.str();
string s = ss.str();
return getField(s.c_str());
}

/** @return true if field exists */
bool hasField( const StringData& name ) const { return !getField(name).eoo(); }
bool hasField( const char * name ) const { return !getField(name).eoo(); }
/** @return true if field exists */
bool hasElement(const StringData& name) const { return hasField(name); }
bool hasElement(const char *name) const { return hasField(name); }

/** @return "" if DNE or wrong type */
const char * getStringField(const char *name) const;

@@ -242,19 +236,12 @@ namespace mongo {
*/
bool getBoolField(const char *name) const;

/** @param pattern a BSON obj indicating a set of (un-dotted) field
* names. Element values are ignored.
* @return a BSON obj constructed by taking the elements of this obj
* that correspond to the fields in pattern. Field names of the
* returned object are replaced with the empty string. If field in
* pattern is missing, it is omitted from the returned object.
*
* Example: if this = {a : 4 , b : 5 , c : 6})
* this.extractFieldsUnDotted({a : 1 , c : 1}) -> {"" : 4 , "" : 6 }
* this.extractFieldsUnDotted({b : "blah"}) -> {"" : 5}
*
/**
sets element field names to empty string
If a field in pattern is missing, it is omitted from the returned
object.
*/
BSONObj extractFieldsUnDotted(const BSONObj& pattern) const;
BSONObj extractFieldsUnDotted(BSONObj pattern) const;

/** extract items from object which match a pattern object.
e.g., if pattern is { x : 1, y : 1 }, builds an object with

@@ -293,7 +280,7 @@ namespace mongo {
void dump() const;

/** Alternative output format */
std::string hexDump() const;
string hexDump() const;

/**wo='well ordered'. fields must be in same order in each object.
Ordering is with respect to the signs of the elements

@@ -323,22 +310,6 @@ namespace mongo {

bool equal(const BSONObj& r) const;

/**
* @param otherObj
* @return true if 'this' is a prefix of otherObj- in other words if
* otherObj contains the same field names and field vals in the same
* order as 'this', plus optionally some additional elements.
*/
bool isPrefixOf( const BSONObj& otherObj ) const;

/**
* @param otherObj
* @return returns true if the list of field names in 'this' is a prefix
* of the list of field names in otherObj. Similar to 'isPrefixOf',
* but ignores the field values and only looks at field names.
*/
bool isFieldNamePrefixOf( const BSONObj& otherObj ) const;

/** This is "shallow equality" -- ints and doubles won't match. for a
deep equality test use woCompare (which is slower).
*/

@@ -361,11 +332,6 @@ namespace mongo {
return *p == EOO ? "" : p+1;
}

BSONType firstElementType() const {
const char *p = objdata() + 4;
return (BSONType) *p;
}

/** Get the _id field from the object. For good performance drivers should
assure that _id is the first element of the object; however, correct operation
is assured regardless.

@@ -396,7 +362,7 @@ namespace mongo {
bool valid() const;

/** @return an md5 value for this object. */
std::string md5() const;
string md5() const;

bool operator==( const BSONObj& other ) const { return equal( other ); }
bool operator!=(const BSONObj& other) const { return !operator==( other); }

@@ -420,35 +386,34 @@ namespace mongo {
opELEM_MATCH = 0x12,
opNEAR = 0x13,
opWITHIN = 0x14,
opMAX_DISTANCE = 0x15,
opGEO_INTERSECTS = 0x16,
opMAX_DISTANCE=0x15
};

/** add all elements of the object to the specified vector */
void elems(std::vector<BSONElement> &) const;
void elems(vector<BSONElement> &) const;
/** add all elements of the object to the specified list */
void elems(std::list<BSONElement> &) const;
void elems(list<BSONElement> &) const;

/** add all values of the object to the specified vector. If type mismatches, exception.
this is most useful when the BSONObj is an array, but can be used with non-arrays too in theory.

example:
bo sub = y["subobj"].Obj();
std::vector<int> myints;
vector<int> myints;
sub.Vals(myints);
*/
template <class T>
void Vals(std::vector<T> &) const;
void Vals(vector<T> &) const;
/** add all values of the object to the specified list. If type mismatches, exception. */
template <class T>
void Vals(std::list<T> &) const;
void Vals(list<T> &) const;

/** add all values of the object to the specified vector. If type mismatches, skip. */
template <class T>
void vals(std::vector<T> &) const;
void vals(vector<T> &) const;
/** add all values of the object to the specified list. If type mismatches, skip. */
template <class T>
void vals(std::list<T> &) const;
void vals(list<T> &) const;

friend class BSONObjIterator;
typedef BSONObjIterator iterator;

@@ -462,12 +427,10 @@ namespace mongo {
BSONObjIterator begin() const;

void appendSelfToBufBuilder(BufBuilder& b) const {
verify( objsize() );
assert( objsize() );
b.appendBuf(reinterpret_cast<const void *>( objdata() ), objsize());
}

template<typename T> bool coerceVector( std::vector<T>* out ) const;

#pragma pack(1)
class Holder : boost::noncopyable {
private:

@@ -482,12 +445,12 @@ namespace mongo {
friend void intrusive_ptr_add_ref(Holder* h) { h->refCount++; }
friend void intrusive_ptr_release(Holder* h) {
#if defined(_DEBUG) // cant use dassert or DEV here
verify((int)h->refCount > 0); // make sure we haven't already freed the buffer
assert((int)h->refCount > 0); // make sure we haven't already freed the buffer
#endif
if(--(h->refCount) == 0){
#if defined(_DEBUG)
unsigned sz = (unsigned&) *h->data;
verify(sz < BSONObjMaxInternalSize * 3);
assert(sz < BSONObjMaxInternalSize * 3);
memset(h->data, 0xdd, sz);
#endif
free(h);

@@ -496,18 +459,6 @@ namespace mongo {
};
#pragma pack()

BSONObj(const BSONObj &rO):
_objdata(rO._objdata), _holder(rO._holder) {
}

BSONObj &operator=(const BSONObj &rRHS) {
if (this != &rRHS) {
_objdata = rRHS._objdata;
_holder = rRHS._holder;
}
return *this;
}

private:
const char *_objdata;
boost::intrusive_ptr< Holder > _holder;

@@ -525,8 +476,8 @@ namespace mongo {
}
};

std::ostream& operator<<( std::ostream &s, const BSONObj &o );
std::ostream& operator<<( std::ostream &s, const BSONElement &e );
ostream& operator<<( ostream &s, const BSONObj &o );
ostream& operator<<( ostream &s, const BSONElement &e );

StringBuilder& operator<<( StringBuilder &s, const BSONObj &o );
StringBuilder& operator<<( StringBuilder &s, const BSONElement &e );
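As a reading aid for the BSONObj interface diffed above, a minimal sketch of field access, including dotted-path lookup (the object is illustrative):

    BSONObj o = BSON( "x" << BSON( "y" << 42 ) << "ok" << true );

    BSONElement e = o["ok"];                   // operator[] is getField()
    bool has = o.hasField( "ok" );             // true
    BSONElement y = o.getFieldDotted( "x.y" ); // reaches into the embedded object
    int v = y.numberInt();                     // 42

    cout << o.toString() << endl;              // logging form: { x: { y: 42 }, ok: true }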
@@ -22,33 +22,68 @@

#pragma once

#include <boost/static_assert.hpp>
#include <map>
#include <cmath>
#include <limits>

#include "mongo/base/parse_number.h"
#include "mongo/bson/bsonelement.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bson_builder_base.h"
#include "mongo/bson/bson_field.h"

#if defined(_DEBUG) && defined(MONGO_EXPOSE_MACROS)
#include "mongo/util/log.h"
#endif
#include <cmath>
#include <boost/static_assert.hpp>
#include "bsonelement.h"
#include "bsonobj.h"
#include "bsonmisc.h"

namespace mongo {

using namespace std;

#if defined(_WIN32)
// warning: 'this' : used in base member initializer list
#pragma warning( disable : 4355 )
#endif

template<typename T>
class BSONFieldValue {
public:
BSONFieldValue( const string& name , const T& t ) {
_name = name;
_t = t;
}

const T& value() const { return _t; }
const string& name() const { return _name; }

private:
string _name;
T _t;
};

template<typename T>
class BSONField {
public:
BSONField( const string& name , const string& longName="" )
: _name(name), _longName(longName) {}
const string& name() const { return _name; }
operator string() const { return _name; }

BSONFieldValue<T> make( const T& t ) const {
return BSONFieldValue<T>( _name , t );
}

BSONFieldValue<BSONObj> gt( const T& t ) const { return query( "$gt" , t ); }
BSONFieldValue<BSONObj> lt( const T& t ) const { return query( "$lt" , t ); }

BSONFieldValue<BSONObj> query( const char * q , const T& t ) const;

BSONFieldValue<T> operator()( const T& t ) const {
return BSONFieldValue<T>( _name , t );
}

private:
string _name;
string _longName;
};

/** Utility for creating a BSONObj.
See also the BSON() and BSON_ARRAY() macros.
*/
class BSONObjBuilder : public BSONBuilderBase, private boost::noncopyable {
class BSONObjBuilder : boost::noncopyable {
public:
/** @param initsize this is just a hint as to the final size of the object */
BSONObjBuilder(int initsize=512) : _b(_buf), _buf(initsize + sizeof(unsigned)), _offset( sizeof(unsigned) ), _s( this ) , _tracker(0) , _doneCalled(false) {

@@ -82,14 +117,14 @@ namespace mongo {

/** append element to the object we are building */
BSONObjBuilder& append( const BSONElement& e) {
verify( !e.eoo() ); // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
assert( !e.eoo() ); // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
_b.appendBuf((void*) e.rawdata(), e.size());
return *this;
}

/** append an element but with a new name */
BSONObjBuilder& appendAs(const BSONElement& e, const StringData& fieldName) {
verify( !e.eoo() ); // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
assert( !e.eoo() ); // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
_b.appendNum((char) e.type());
_b.appendStr(fieldName);
_b.appendBuf((void *) e.value(), e.valuesize());

@@ -106,12 +141,12 @@ namespace mongo {

/** add a subobject as a member */
BSONObjBuilder& appendObject(const StringData& fieldName, const char * objdata , int size = 0 ) {
verify( objdata );
assert( objdata );
if ( size == 0 ) {
size = *((int*)objdata);
}

verify( size > 4 && size < 100000000 );
assert( size > 4 && size < 100000000 );

_b.appendNum((char) Object);
_b.appendStr(fieldName);

@@ -199,7 +234,7 @@ namespace mongo {
long long x = n;
if ( x < 0 )
x = x * -1;
if ( x < ( (std::numeric_limits<int>::max)() / 2 ) ) // extra () to avoid max macro on windows
if ( x < ( (numeric_limits<int>::max)() / 2 ) ) // extra () to avoid max macro on windows
append( fieldName , (int)n );
else
append( fieldName , n );

@@ -219,26 +254,25 @@ namespace mongo {
}

BSONObjBuilder& appendNumber( const StringData& fieldName , size_t n ) {
static const size_t maxInt = ( 1 << 30 );
static size_t maxInt = (size_t)pow( 2.0 , 30.0 );

if ( n < maxInt )
append( fieldName, static_cast<int>( n ) );
append( fieldName , (int)n );
else
append( fieldName, static_cast<long long>( n ) );
append( fieldName , (long long)n );
return *this;
}

BSONObjBuilder& appendNumber( const StringData& fieldName, long long llNumber ) {
static const long long maxInt = ( 1LL << 30 );
static const long long maxDouble = ( 1LL << 40 );

long long nonNegative = llNumber >= 0 ? llNumber : -llNumber;
if ( nonNegative < maxInt )
append( fieldName, static_cast<int>( llNumber ) );
else if ( nonNegative < maxDouble )
append( fieldName, static_cast<double>( llNumber ) );
BSONObjBuilder& appendNumber( const StringData& fieldName , long long l ) {
static long long maxInt = (int)pow( 2.0 , 30.0 );
static long long maxDouble = (long long)pow( 2.0 , 40.0 );
long long x = l >= 0 ? l : -l;
if ( x < maxInt )
append( fieldName , (int)l );
else if ( x < maxDouble )
append( fieldName , (double)l );
else
append( fieldName, llNumber );
append( fieldName , l );
return *this;
}
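The appendNumber() overloads above pick the narrowest BSON numeric type that can hold the value; a sketch of the resulting storage types, using the 2^30 and 2^40 thresholds from the code (field names are illustrative):

    BSONObjBuilder b;
    b.appendNumber( "a", (long long)1000 );         // < 2^30: stored as NumberInt
    b.appendNumber( "b", (long long)(1LL << 35) );  // < 2^40: stored as NumberDouble
    b.appendNumber( "c", (long long)(1LL << 50) );  // otherwise: stored as NumberLong
    BSONObj nums = b.obj();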
@@ -253,7 +287,7 @@ namespace mongo {
/** tries to append the data as a number
* @return true if the data was able to be converted to a number
*/
bool appendAsNumber( const StringData& fieldName , const std::string& data );
bool appendAsNumber( const StringData& fieldName , const string& data );

/** Append a BSON Object ID (OID type).
@deprecated Generally, it is preferred to use the append append(name, oid)

@@ -315,7 +349,7 @@ namespace mongo {
if( dt > 0 && dt <= 0xffffffff ) {
static int n;
if( n++ == 0 )
log() << "DEV WARNING appendDate() called with a tiny (but nonzero) date" << std::endl;
log() << "DEV WARNING appendDate() called with a tiny (but nonzero) date" << endl;
}
#endif
_b.appendNum((char) Date);

@@ -339,10 +373,6 @@ namespace mongo {
return *this;
}

BSONObjBuilder& append(const StringData& fieldName, const BSONRegEx& regex) {
return appendRegex(fieldName, regex.pattern, regex.flags);
}

BSONObjBuilder& appendCode(const StringData& fieldName, const StringData& code) {
_b.appendNum((char) Code);
_b.appendStr(fieldName);

@@ -351,10 +381,6 @@ namespace mongo {
return *this;
}

BSONObjBuilder& append(const StringData& fieldName, const BSONCode& code) {
return appendCode(fieldName, code.code);
}

/** Append a string element.
@param sz size includes terminating null character */
BSONObjBuilder& append(const StringData& fieldName, const char *str, int sz) {

@@ -369,17 +395,9 @@ namespace mongo {
return append(fieldName, str, (int) strlen(str)+1);
}
/** Append a string element */
BSONObjBuilder& append(const StringData& fieldName, const std::string& str) {
BSONObjBuilder& append(const StringData& fieldName, const string& str) {
return append(fieldName, str.c_str(), (int) str.size()+1);
}
/** Append a string element */
BSONObjBuilder& append(const StringData& fieldName, const StringData& str) {
_b.appendNum((char) String);
_b.appendStr(fieldName);
_b.appendNum((int)str.size()+1);
_b.appendStr(str, true);
return *this;
}

BSONObjBuilder& appendSymbol(const StringData& fieldName, const StringData& symbol) {
_b.appendNum((char) Symbol);

@@ -389,15 +407,6 @@ namespace mongo {
return *this;
}

BSONObjBuilder& append(const StringData& fieldName, const BSONSymbol& symbol) {
return appendSymbol(fieldName, symbol.symbol);
}

/** Implements builder interface but no-op in ObjBuilder */
void appendNull() {
msgasserted(16234, "Invalid call to appendNull in BSONObj Builder.");
}

/** Append a Null element to the object */
BSONObjBuilder& appendNull( const StringData& fieldName ) {
_b.appendNum( (char) jstNULL );

@@ -426,19 +435,6 @@ namespace mongo {
return *this;
}

/**
* To store an OpTime in BSON, use this function.
* This captures both the secs and inc fields.
*/
BSONObjBuilder& append(const StringData& fieldName, OpTime optime);

/**
* Alternative way to store an OpTime in BSON. Pass the OpTime as a Date, as follows:
*
* builder.appendTimestamp("field", optime.asDate());
*
* This captures both the secs and inc fields.
*/
BSONObjBuilder& appendTimestamp( const StringData& fieldName , unsigned long long val ) {
_b.appendNum( (char) Timestamp );
_b.appendStr( fieldName );

@@ -466,10 +462,6 @@ namespace mongo {
return *this;
}

BSONObjBuilder& append(const StringData& fieldName, const BSONDBRef& dbref) {
return appendDBRef(fieldName, dbref.ns, dbref.oid);
}

/** Append a binary data element
@param fieldName name of the field
@param len length of the binary data in bytes

@@ -486,10 +478,6 @@ namespace mongo {
return *this;
}

BSONObjBuilder& append(const StringData& fieldName, const BSONBinData& bd) {
return appendBinData(fieldName, bd.length, bd.type, bd.data);
}

/**
Subtype 2 is deprecated.
Append a BSON bindata bytearray element.

@@ -519,10 +507,6 @@ namespace mongo {
return *this;
}

BSONObjBuilder& append(const StringData& fieldName, const BSONCodeWScope& cws) {
return appendCodeWScope(fieldName, cws.code, cws.scope);
}

void appendUndefined( const StringData& fieldName ) {
_b.appendNum( (char) Undefined );
_b.appendStr( fieldName );

@@ -541,21 +525,14 @@ namespace mongo {

/** Append an array of values. */
template < class T >
BSONObjBuilder& append( const StringData& fieldName, const std::vector< T >& vals );
BSONObjBuilder& append( const StringData& fieldName, const vector< T >& vals );

template < class T >
BSONObjBuilder& append( const StringData& fieldName, const std::list< T >& vals );
BSONObjBuilder& append( const StringData& fieldName, const list< T >& vals );

/** Append a set of values. */
template < class T >
BSONObjBuilder& append( const StringData& fieldName, const std::set< T >& vals );

/**
* Append a map of values as a sub-object.
* Note: the keys of the map should be StringData-compatible (i.e. strings).
*/
template < class K, class T >
BSONObjBuilder& append( const StringData& fieldName, const std::map< K, T >& vals );
BSONObjBuilder& append( const StringData& fieldName, const set< T >& vals );

/**
* destructive

@@ -596,13 +573,21 @@ namespace mongo {
return temp;
}

/* assume ownership of the buffer - you must then free it (with free()) */
char* decouple(int& l) {
char *x = _done();
assert( x );
l = _b.len();
_b.decouple();
return x;
}
void decouple() {
_b.decouple(); // post done() call version. be sure jsobj frees...
}

void appendKeys( const BSONObj& keyPattern , const BSONObj& values );

static std::string numStr( int i ) {
static string numStr( int i ) {
if (i>=0 && i<100 && numStrsReady)
return numStrs[i];
StringBuilder o;

@@ -611,7 +596,7 @@ namespace mongo {
}

/** Stream oriented way to add field names and values. */
BSONObjBuilderValueStream &operator<<( const StringData& name ) {
BSONObjBuilderValueStream &operator<<(const char * name ) {
_s.endField( name );
return _s;
}

@@ -619,6 +604,17 @@ namespace mongo {
/** Stream oriented way to add field names and values. */
BSONObjBuilder& operator<<( GENOIDLabeler ) { return genOID(); }

// prevent implicit string conversions which would allow bad things like BSON( BSON( "foo" << 1 ) << 2 )
struct ForceExplicitString {
ForceExplicitString( const string &str ) : str_( str ) {}
string str_;
};

/** Stream oriented way to add field names and values. */
BSONObjBuilderValueStream &operator<<( const ForceExplicitString& name ) {
return operator<<( name.str_.c_str() );
}

Labeler operator<<( const Labeler::Label &l ) {
massert( 10336 , "No subobject started", _s.subobjStarted() );
return _s << l;

@@ -626,24 +622,16 @@ namespace mongo {

template<typename T>
BSONObjBuilderValueStream& operator<<( const BSONField<T>& f ) {
_s.endField( f.name() );
_s.endField( f.name().c_str() );
return _s;
}

template<typename T>
BSONObjBuilder& operator<<( const BSONFieldValue<T>& v ) {
append( v.name(), v.value() );
append( v.name().c_str() , v.value() );
return *this;
}

BSONObjBuilder& operator<<( const BSONElement& e ){
append( e );
return *this;
}

bool isArray() const {
return false;
}

/** @return true if we are using our own bufbuilder, and not an alternate that was given to us in our constructor */
bool owned() const { return &_b == &_buf; }
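A small sketch of the stream-oriented building these operators enable, mixed with direct appends (field names and values are illustrative):

    BSONObjBuilder b;
    b.append( "user", "joe" );       // direct append
    b << "n" << 1 << "ok" << true;   // stream form: field name, then value
    b.appendNull( "missing" );
    BSONObj result = b.obj();        // buffer ownership moves to result; builder is spent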
@@ -679,11 +667,11 @@ namespace mongo {
BSONSizeTracker * _tracker;
bool _doneCalled;

static const std::string numStrs[100]; // cache of 0 to 99 inclusive
static const string numStrs[100]; // cache of 0 to 99 inclusive
static bool numStrsReady; // for static init safety. see comments in db/jsobj.cpp
};

class BSONArrayBuilder : public BSONBuilderBase, private boost::noncopyable {
class BSONArrayBuilder : boost::noncopyable {
public:
BSONArrayBuilder() : _i(0), _b() {}
BSONArrayBuilder( BufBuilder &_b ) : _i(0), _b(_b) {}

@@ -700,53 +688,25 @@ namespace mongo {
return *this;
}

BSONArrayBuilder& operator<<(const BSONElement& e) {
return append(e);
}

template <typename T>
BSONArrayBuilder& operator<<(const T& x) {
_b << num().c_str() << x;
return *this;
return append(x);
}

void appendNull() {
_b.appendNull(num());
}

void appendUndefined() {
_b.appendUndefined(num());
}

/**
* destructive - ownership moves to returned BSONArray
* @return owned BSONArray
*/
BSONArray arr() { return BSONArray(_b.obj()); }
BSONObj obj() { return _b.obj(); }

BSONObj done() { return _b.done(); }

void doneFast() { _b.doneFast(); }

BSONArrayBuilder& append(const StringData& name, int n) {
fill( name );
append( n );
return *this;
}

BSONArrayBuilder& append(const StringData& name, long long n) {
fill( name );
append( n );
return *this;
}

BSONArrayBuilder& append(const StringData& name, double n) {
fill( name );
append( n );
return *this;
}

template <typename T>
BSONArrayBuilder& append(const StringData& name, const T& x) {
fill( name );

@@ -754,12 +714,6 @@ namespace mongo {
return *this;
}

template < class T >
BSONArrayBuilder& append( const std::list< T >& vals );

template < class T >
BSONArrayBuilder& append( const std::set< T >& vals );

// These two just use next position
BufBuilder &subobjStart() { return _b.subobjStart( num() ); }
BufBuilder &subarrayStart() { return _b.subarrayStart( num() ); }

@@ -781,52 +735,29 @@ namespace mongo {
return _b.subobjStart( num() );
}

BufBuilder &subarrayStart( const StringData& name ) {
BufBuilder &subarrayStart( const char *name ) {
fill( name );
return _b.subarrayStart( num() );
}

BSONArrayBuilder& appendArray( const StringData& name, const BSONObj& subObj ) {
void appendArray( const StringData& name, BSONObj subObj ) {
fill( name );
_b.appendArray( num(), subObj );
return *this;
}

BSONArrayBuilder& appendAs( const BSONElement &e, const StringData& name) {
void appendAs( const BSONElement &e, const char *name) {
fill( name );
append( e );
return *this;
}

BSONArrayBuilder& appendTimestamp(unsigned int sec, unsigned int inc) {
_b.appendTimestamp(num(), sec, inc);
return *this;
}

BSONArrayBuilder& append(const StringData& s) {
_b.append(num(), s);
return *this;
}

bool isArray() const {
return true;
}

int len() const { return _b.len(); }
int arrSize() const { return _i; }

private:
// These two are undefined privates to prevent their accidental
// use as we don't support unsigned ints in BSON
BSONObjBuilder& append(const StringData& fieldName, unsigned int val);
BSONObjBuilder& append(const StringData& fieldName, unsigned long long val);

void fill( const StringData& name ) {
long int n;
Status status = parseNumberFromStringWithBase( name, 10, &n );
uassert( 13048,
(string)"can't append to array using string field name: " + name.toString(),
status.isOK() );
char *r;
long int n = strtol( name.data(), &r, 10 );
if ( *r )
uasserted( 13048, (string)"can't append to array using string field name [" + name.data() + "]" );
fill(n);
}

@@ -837,16 +768,27 @@ namespace mongo {
uassert(15891, "can't backfill array to larger than 1,500,000 elements", upTo <= maxElems);

while( _i < upTo )
appendNull();
append( nullElt() );
}

std::string num() { return _b.numStr(_i++); }
static BSONElement nullElt() {
static BSONObj n = nullObj();
return n.firstElement();
}

static BSONObj nullObj() {
BSONObjBuilder _b;
_b.appendNull( "" );
return _b.obj();
}

string num() { return _b.numStr(_i++); }
int _i;
BSONObjBuilder _b;
};

template < class T >
inline BSONObjBuilder& BSONObjBuilder::append( const StringData& fieldName, const std::vector< T >& vals ) {
inline BSONObjBuilder& BSONObjBuilder::append( const StringData& fieldName, const vector< T >& vals ) {
BSONObjBuilder arrBuilder;
for ( unsigned int i = 0; i < vals.size(); ++i )
arrBuilder.append( numStr( i ), vals[ i ] );

@@ -865,41 +807,13 @@ namespace mongo {
}

template < class T >
inline BSONObjBuilder& BSONObjBuilder::append( const StringData& fieldName, const std::list< T >& vals ) {
return _appendIt< std::list< T > >( *this, fieldName, vals );
inline BSONObjBuilder& BSONObjBuilder::append( const StringData& fieldName, const list< T >& vals ) {
return _appendIt< list< T > >( *this, fieldName, vals );
}

template < class T >
inline BSONObjBuilder& BSONObjBuilder::append( const StringData& fieldName, const std::set< T >& vals ) {
return _appendIt< std::set< T > >( *this, fieldName, vals );
}

template < class K, class T >
inline BSONObjBuilder& BSONObjBuilder::append( const StringData& fieldName, const std::map< K, T >& vals ) {
BSONObjBuilder bob;
for( typename std::map<K,T>::const_iterator i = vals.begin(); i != vals.end(); ++i ){
bob.append(i->first, i->second);
}
append(fieldName, bob.obj());
return *this;
}

template < class L >
inline BSONArrayBuilder& _appendArrayIt( BSONArrayBuilder& _this, const L& vals ) {
for( typename L::const_iterator i = vals.begin(); i != vals.end(); i++ )
_this.append( *i );
return _this;
}

template < class T >
inline BSONArrayBuilder& BSONArrayBuilder::append( const std::list< T >& vals ) {
return _appendArrayIt< std::list< T > >( *this, vals );
}

template < class T >
inline BSONArrayBuilder& BSONArrayBuilder::append( const std::set< T >& vals ) {
return _appendArrayIt< std::set< T > >( *this, vals );
inline BSONObjBuilder& BSONObjBuilder::append( const StringData& fieldName, const set< T >& vals ) {
return _appendIt< set< T > >( *this, fieldName, vals );
}
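End of the bsonobjbuilder.h hunks. For orientation, a sketch of BSONArrayBuilder, whose fill()/backfill logic above null-pads skipped numeric positions (values are illustrative):

    BSONArrayBuilder ab;
    ab.append( 1 );          // position 0
    ab.append( "two" );      // position 1
    ab.append( "5", 6 );     // named form: fill() backfills positions 2..4 with nulls
    BSONArray a = ab.arr();  // [ 1, "two", null, null, null, 6 ]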
161 bson/bsonobjiterator.h Normal file
@ -0,0 +1,161 @@
|
|||
// bsonobjiterator.h
|
||||
|
||||
/* Copyright 2009 10gen Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <boost/preprocessor/cat.hpp> // like the ## operator but works with __LINE__

namespace mongo {

    /** iterator for a BSONObj

        Note each BSONObj ends with an EOO element: so you will get more() on an empty
        object, although next().eoo() will be true.

        The BSONObj must stay in scope for the duration of the iterator's execution.

        todo: we may want to make a more stl-like iterator interface for this
              with things like begin() and end()
    */
    class BSONObjIterator {
    public:
        /** Create an iterator for a BSON object. */
        BSONObjIterator(const BSONObj& jso) {
            int sz = jso.objsize();
            if ( MONGO_unlikely(sz == 0) ) {
                _pos = _theend = 0;
                return;
            }
            _pos = jso.objdata() + 4;
            _theend = jso.objdata() + sz - 1;
        }

        BSONObjIterator( const char * start , const char * end ) {
            _pos = start + 4;
            _theend = end - 1;
        }

        /** @return true if more elements exist to be enumerated. */
        bool more() { return _pos < _theend; }

        /** @return true if more elements exist to be enumerated INCLUDING the EOO element which is always at the end. */
        bool moreWithEOO() { return _pos <= _theend; }

        /** @return the next element in the object. For the final element, element.eoo() will be true. */
        BSONElement next( bool checkEnd ) {
            assert( _pos <= _theend );
            BSONElement e( _pos, checkEnd ? (int)(_theend + 1 - _pos) : -1 );
            _pos += e.size( checkEnd ? (int)(_theend + 1 - _pos) : -1 );
            return e;
        }
        BSONElement next() {
            assert( _pos <= _theend );
            BSONElement e(_pos);
            _pos += e.size();
            return e;
        }
        void operator++() { next(); }
        void operator++(int) { next(); }

        BSONElement operator*() {
            assert( _pos <= _theend );
            return BSONElement(_pos);
        }

    private:
        const char* _pos;
        const char* _theend;
    };

    class BSONObjIteratorSorted {
    public:
        BSONObjIteratorSorted( const BSONObj& o );

        ~BSONObjIteratorSorted() {
            assert( _fields );
            delete[] _fields;
            _fields = 0;
        }

        bool more() {
            return _cur < _nfields;
        }

        BSONElement next() {
            assert( _fields );
            if ( _cur < _nfields )
                return BSONElement( _fields[_cur++] );
            return BSONElement();
        }

    private:
        const char ** _fields;
        int _nfields;
        int _cur;
    };

    /** transform a BSON array into a vector of BSONElements.
        we match array # positions with their vector position, and ignore
        any fields with non-numeric field names.
    */
    inline vector<BSONElement> BSONElement::Array() const {
        chk(mongo::Array);
        vector<BSONElement> v;
        BSONObjIterator i(Obj());
        while( i.more() ) {
            BSONElement e = i.next();
            const char *f = e.fieldName();
            try {
                unsigned u = stringToNum(f);
                assert( u < 1000000 );
                if( u >= v.size() )
                    v.resize(u+1);
                v[u] = e;
            }
            catch(unsigned) { }
        }
        return v;
    }

    /** Similar to BOOST_FOREACH
     *
     *  Because the iterator is defined outside of the for, you must use {} around
     *  the surrounding scope. Don't do this:
     *
     *      if (foo)
     *          BSONForEach(e, obj)
     *              doSomething(e);
     *
     *  but this is OK:
     *
     *      if (foo) {
     *          BSONForEach(e, obj)
     *              doSomething(e);
     *      }
     */

#define BSONForEach(e, obj) \
    BSONObjIterator BOOST_PP_CAT(it_,__LINE__)(obj); \
    for ( BSONElement e; \
            (BOOST_PP_CAT(it_,__LINE__).more() ? \
                (e = BOOST_PP_CAT(it_,__LINE__).next(), true) : \
                false) ; \
            /*nothing*/ )

}
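A minimal usage sketch for the iterator and the macro above (illustrative only, not part of the diff; dumpFieldNames is a hypothetical function, and the BSONObjBuilder/BSONObj types are from this same tree):

    // sketch: enumerate field names of a small object, two ways
    #include <iostream>

    void dumpFieldNames() {
        mongo::BSONObjBuilder b;
        b.append("a", 1).append("b", "hello");
        mongo::BSONObj obj = b.obj();

        // explicit iterator: more() stays true until the trailing EOO element
        mongo::BSONObjIterator it(obj);
        while (it.more()) {
            mongo::BSONElement e = it.next();
            std::cout << e.fieldName() << std::endl;
        }

        // same traversal via the macro; note the braces, as the doc comment warns
        {
            BSONForEach(e, obj)
                std::cout << e.fieldName() << std::endl;
        }
    }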
bson/bsontypes.h (new file, 107 lines)
@@ -0,0 +1,107 @@
// bsontypes.h

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "util/misc.h"

namespace bson { }

namespace mongo {

    using namespace std;

    class BSONArrayBuilder;
    class BSONElement;
    class BSONObj;
    class BSONObjBuilder;
    class BSONObjBuilderValueStream;
    class BSONObjIterator;
    class Ordering;
    class Record;
    struct BSONArray; // empty subclass of BSONObj useful for overloading
    struct BSONElementCmpWithoutField;

    extern BSONObj maxKey;
    extern BSONObj minKey;

    /**
        the complete list of valid BSON types
        see also bsonspec.org
    */
    enum BSONType {
        /** smaller than all other types */
        MinKey=-1,
        /** end of object */
        EOO=0,
        /** double precision floating point value */
        NumberDouble=1,
        /** character string, stored in utf8 */
        String=2,
        /** an embedded object */
        Object=3,
        /** an embedded array */
        Array=4,
        /** binary data */
        BinData=5,
        /** Undefined type */
        Undefined=6,
        /** ObjectId */
        jstOID=7,
        /** boolean type */
        Bool=8,
        /** date type */
        Date=9,
        /** null type */
        jstNULL=10,
        /** regular expression, a pattern with options */
        RegEx=11,
        /** deprecated / will be redesigned */
        DBRef=12,
        /** deprecated / use CodeWScope */
        Code=13,
        /** a programming language (e.g., Python) symbol */
        Symbol=14,
        /** javascript code that can execute on the database server, with SavedContext */
        CodeWScope=15,
        /** 32 bit signed integer */
        NumberInt = 16,
        /** Updated to a Date with value next OpTime on insert */
        Timestamp = 17,
        /** 64 bit integer */
        NumberLong = 18,
        /** max type that is not MaxKey */
        JSTypeMax=18,
        /** larger than all other types */
        MaxKey=127
    };

    /* subtypes of BinData.
       bdtCustom and above are ones that the JS compiler understands, but are
       opaque to the database.
    */
    enum BinDataType {
        BinDataGeneral=0,
        Function=1,
        ByteArrayDeprecated=2, /* use BinDataGeneral instead */
        bdtUUID = 3,
        MD5Type=5,
        bdtCustom=128
    };

}
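Illustrative sketch (not part of the diff): dispatching on BSONType is the usual way to consume an element of unknown type; describe is a hypothetical function, and numberDouble()/type() are the standard BSONElement accessors in this tree.

    // sketch: handle a BSONElement according to its declared type
    #include <iostream>

    void describe(const mongo::BSONElement& e) {
        switch (e.type()) {
        case mongo::NumberDouble:
        case mongo::NumberInt:
        case mongo::NumberLong:
            // all numeric types can be read out as a double
            std::cout << e.fieldName() << " is numeric: " << e.numberDouble() << std::endl;
            break;
        case mongo::String:
            std::cout << e.fieldName() << " is a string" << std::endl;
            break;
        case mongo::EOO:
            std::cout << "end of object" << std::endl;
            break;
        default:
            std::cout << e.fieldName() << " has type " << (int) e.type() << std::endl;
        }
    }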
@@ -20,17 +20,14 @@
#if defined(__GNUC__)

#define NOINLINE_DECL __attribute__((noinline))
#define PACKED_DECL __attribute__((packed))

#elif defined(_MSC_VER)

#define NOINLINE_DECL __declspec(noinline)
#define PACKED_DECL

#else

#define NOINLINE_DECL
#define PACKED_DECL

#endif
bson/oid.cpp (new file, 173 lines)
@@ -0,0 +1,173 @@
// @file oid.cpp

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "pch.h"
#include "oid.h"
#include "util/atomic_int.h"
#include "../db/nonce.h"
#include "bsonobjbuilder.h"

BOOST_STATIC_ASSERT( sizeof(mongo::OID) == 12 );

namespace mongo {

    // machine # before folding in the process id
    OID::MachineAndPid OID::ourMachine;

    unsigned OID::ourPid() {
        unsigned pid;
#if defined(_WIN32)
        pid = (unsigned short) GetCurrentProcessId();
#elif defined(__linux__) || defined(__APPLE__) || defined(__sunos__)
        pid = (unsigned short) getpid();
#else
        pid = (unsigned short) Security::getNonce();
#endif
        return pid;
    }

    void OID::foldInPid(OID::MachineAndPid& x) {
        unsigned p = ourPid();
        x._pid ^= (unsigned short) p;
        // when the pid is greater than 16 bits, let the high bits modulate the machine id field.
        unsigned short& rest = (unsigned short &) x._machineNumber[1];
        rest ^= p >> 16;
    }

    OID::MachineAndPid OID::genMachineAndPid() {
        BOOST_STATIC_ASSERT( sizeof(mongo::OID::MachineAndPid) == 5 );

        // this is not called often, so the following is not expensive, and gives us some
        // testing that nonce generation is working right and that our OIDs are (perhaps) ok.
        {
            nonce64 a = Security::getNonceDuringInit();
            nonce64 b = Security::getNonceDuringInit();
            nonce64 c = Security::getNonceDuringInit();
            assert( !(a==b && b==c) );
        }

        unsigned long long n = Security::getNonceDuringInit();
        OID::MachineAndPid x = ourMachine = (OID::MachineAndPid&) n;
        foldInPid(x);
        return x;
    }

    // after folding in the process id
    OID::MachineAndPid OID::ourMachineAndPid = OID::genMachineAndPid();

    void OID::regenMachineId() {
        ourMachineAndPid = genMachineAndPid();
    }

    inline bool OID::MachineAndPid::operator!=(const OID::MachineAndPid& rhs) const {
        // compare the machine number bytes, not the array addresses
        return _pid != rhs._pid || memcmp(_machineNumber, rhs._machineNumber, 3) != 0;
    }

    unsigned OID::getMachineId() {
        unsigned char x[4];
        x[0] = ourMachineAndPid._machineNumber[0];
        x[1] = ourMachineAndPid._machineNumber[1];
        x[2] = ourMachineAndPid._machineNumber[2];
        x[3] = 0;
        return (unsigned&) x[0];
    }

    void OID::justForked() {
        MachineAndPid x = ourMachine;
        // we let the random # for machine go into all 5 bytes of MachineAndPid, and then
        // xor in the pid into _pid. this reduces the probability of collisions.
        foldInPid(x);
        ourMachineAndPid = genMachineAndPid();
        assert( x != ourMachineAndPid );
        ourMachineAndPid = x;
    }

    void OID::init() {
        static AtomicUInt inc = (unsigned) Security::getNonce();

        {
            unsigned t = (unsigned) time(0);
            unsigned char *T = (unsigned char *) &t;
            _time[0] = T[3]; // big endian order because we use memcmp() to compare OID's
            _time[1] = T[2];
            _time[2] = T[1];
            _time[3] = T[0];
        }

        _machineAndPid = ourMachineAndPid;

        {
            int new_inc = inc++;
            unsigned char *T = (unsigned char *) &new_inc;
            _inc[0] = T[2];
            _inc[1] = T[1];
            _inc[2] = T[0];
        }
    }

    void OID::init( string s ) {
        assert( s.size() == 24 );
        const char *p = s.c_str();
        for( int i = 0; i < 12; i++ ) {
            data[i] = fromHex(p);
            p += 2;
        }
    }

    void OID::init(Date_t date, bool max) {
        int time = (int) (date / 1000);
        char* T = (char *) &time;
        data[0] = T[3];
        data[1] = T[2];
        data[2] = T[1];
        data[3] = T[0];

        if (max)
            *(long long*)(data + 4) = 0xFFFFFFFFFFFFFFFFll;
        else
            *(long long*)(data + 4) = 0x0000000000000000ll;
    }

    time_t OID::asTimeT() {
        int time;
        char* T = (char *) &time;
        T[0] = data[3];
        T[1] = data[2];
        T[2] = data[1];
        T[3] = data[0];
        return time;
    }

    const string BSONObjBuilder::numStrs[] = {
        "0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
        "10", "11", "12", "13", "14", "15", "16", "17", "18", "19",
        "20", "21", "22", "23", "24", "25", "26", "27", "28", "29",
        "30", "31", "32", "33", "34", "35", "36", "37", "38", "39",
        "40", "41", "42", "43", "44", "45", "46", "47", "48", "49",
        "50", "51", "52", "53", "54", "55", "56", "57", "58", "59",
        "60", "61", "62", "63", "64", "65", "66", "67", "68", "69",
        "70", "71", "72", "73", "74", "75", "76", "77", "78", "79",
        "80", "81", "82", "83", "84", "85", "86", "87", "88", "89",
        "90", "91", "92", "93", "94", "95", "96", "97", "98", "99",
    };

    // This is to ensure that BSONObjBuilder doesn't try to use numStrs before the strings have been constructed
    // I've tested just making numStrs a char[][], but the overhead of constructing the strings each time was too high
    // numStrsReady will be 0 until after numStrs is initialized because it is a static variable
    bool BSONObjBuilder::numStrsReady = (numStrs[0].size() > 0);

}
bson/oid.h (new file, 132 lines)
@@ -0,0 +1,132 @@
// oid.h

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "../util/hex.h"

namespace mongo {

#pragma pack(1)
    /** Object ID type.
        BSON objects typically have an _id field for the object id. This field should be the first
        member of the object when present. class OID is a special type that is a 12 byte id which
        is likely to be unique to the system. You may also use other types for _id's.
        When the _id field is missing from a BSON object, on an insert the database may insert one
        automatically in certain circumstances.

        Warning: You must call OID::justForked() after a fork().

        Typical contents of the BSON ObjectID is a 12-byte value consisting of a 4-byte timestamp (seconds since epoch),
        a 3-byte machine id, a 2-byte process id, and a 3-byte counter. Note that the timestamp and counter fields must
        be stored big endian unlike the rest of BSON. This is because they are compared byte-by-byte and we want to ensure
        a mostly increasing order.
    */
    class OID {
    public:
        OID() : a(0), b(0) { }

        /** init from a 24 char hex string */
        explicit OID(const string &s) { init(s); }

        /** initialize to 'null' */
        void clear() { a = 0; b = 0; }

        const unsigned char *getData() const { return data; }

        bool operator==(const OID& r) const { return a==r.a && b==r.b; }
        bool operator!=(const OID& r) const { return a!=r.a || b!=r.b; }
        int compare( const OID& other ) const { return memcmp( data , other.data , 12 ); }
        bool operator<( const OID& other ) const { return compare( other ) < 0; }
        bool operator<=( const OID& other ) const { return compare( other ) <= 0; }

        /** @return the object ID output as 24 hex digits */
        string str() const { return toHexLower(data, 12); }
        string toString() const { return str(); }

        static OID gen() { OID o; o.init(); return o; }

        /** sets the contents to a new oid / randomized value */
        void init();

        /** init from a 24 char hex string */
        void init( string s );

        /** Set to the min/max OID that could be generated at given timestamp. */
        void init( Date_t date, bool max=false );

        time_t asTimeT();
        Date_t asDateT() { return asTimeT() * (long long)1000; }

        bool isSet() const { return a || b; }

        /** call this after a fork to update the process id */
        static void justForked();

        static unsigned getMachineId(); // features command uses
        static void regenMachineId(); // used by unit tests

    private:
        struct MachineAndPid {
            unsigned char _machineNumber[3];
            unsigned short _pid;
            bool operator!=(const OID::MachineAndPid& rhs) const;
        };
        static MachineAndPid ourMachine, ourMachineAndPid;
        union {
            struct {
                // 12 bytes total
                unsigned char _time[4];
                MachineAndPid _machineAndPid;
                unsigned char _inc[3];
            };
            struct {
                long long a;
                unsigned b;
            };
            unsigned char data[12];
        };

        static unsigned ourPid();
        static void foldInPid(MachineAndPid& x);
        static MachineAndPid genMachineAndPid();
    };
#pragma pack()

    ostream& operator<<( ostream &s, const OID &o );
    inline StringBuilder& operator<< (StringBuilder& s, const OID& o) { return (s << o.str()); }

    /** Formatting mode for generating JSON from BSON.
        See <http://mongodb.onconfluence.com/display/DOCS/Mongo+Extended+JSON>
        for details.
    */
    enum JsonStringFormat {
        /** strict RFC format */
        Strict,
        /** 10gen format, which is close to JS format. This form is understandable by
            javascript running inside the Mongo server via eval() */
        TenGen,
        /** Javascript JSON compatible */
        JS
    };

    inline ostream& operator<<( ostream &s, const OID &o ) {
        s << o.str();
        return s;
    }

}
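Illustrative sketch (not part of the diff; oidDemo is a hypothetical function): generating an OID, round-tripping it through its hex form, and recovering the embedded timestamp.

    // sketch: the public OID surface in three lines
    #include <cassert>

    void oidDemo() {
        mongo::OID id = mongo::OID::gen();   // 4-byte time + machine/pid + counter
        std::string hex = id.str();          // 24 lowercase hex digits
        mongo::OID same(hex);                // explicit ctor parses the hex form back
        assert( same == id );
        time_t t = id.asTimeT();             // creation time, seconds since epoch
        (void) t;
    }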
@@ -46,8 +46,8 @@ namespace mongo {
         // for woCompare...
         unsigned descending(unsigned mask) const { return bits & mask; }

-        /*operator std::string() const {
-            StringBuilder buf;
+        /*operator string() const {
+            StringBuilder buf(32);
             for ( unsigned i=0; i<nkeys; i++)
                 buf.append( get(i) > 0 ? "+" : "-" );
             return buf.str();
bson/stringdata.h (new file, 71 lines)
@@ -0,0 +1,71 @@
// stringdata.h

/* Copyright 2010 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <string>
#include <cstring>

namespace mongo {

    using std::string;

    /** A StringData object wraps a 'const string&' or a 'const char*' without
     *  copying its contents. The most common usage is as a function argument that
     *  takes any of the two forms of strings above. Fundamentally, this class tries
     *  to go around the fact that string literals in C++ are char[N]'s.
     *
     *  Note that the object StringData wraps around must be alive while the StringData
     *  is.
     */
    class StringData {
    public:
        /** Construct a StringData, for the case where the length of
         *  string is not known. 'c' must be a pointer to a null-terminated string.
         */
        StringData( const char* c )
            : _data(c), _size((unsigned) strlen(c)) {}

        /** Construct a StringData explicitly, for the case where the length of the string
         *  is already known. 'c' must be a pointer to a null-terminated string, and 'len'
         *  must be the length that std::strlen(c) would return, a.k.a the index of the
         *  terminator in c.
         */
        StringData( const char* c, unsigned len )
            : _data(c), _size(len) {}

        /** Construct a StringData, for the case of a std::string. */
        StringData( const string& s )
            : _data(s.c_str()), _size((unsigned) s.size()) {}

        // Construct a StringData explicitly, for the case of a literal whose size is
        // known at compile time.
        struct LiteralTag {};
        template<size_t N>
        StringData( const char (&val)[N], LiteralTag )
            : _data(&val[0]), _size(N-1) {}

        // accessors
        const char* data() const { return _data; }
        unsigned size() const { return _size; }

    private:
        const char* const _data;  // is always null terminated
        const unsigned _size;     // 'size' does not include the null terminator
    };

} // namespace mongo
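Illustrative sketch (not part of the diff): the point of StringData is that a single function signature accepts literals, char pointers, and std::strings without copying; logValue and stringDataDemo are hypothetical functions.

    // sketch: one signature, three kinds of callers, zero copies
    size_t logValue(const mongo::StringData& s) {
        // s.data() is only valid while the caller's string is alive
        return s.size();
    }

    void stringDataDemo() {
        std::string name = "world";
        logValue("hello");                     // from a literal (strlen taken)
        logValue(name);                        // from a std::string, no copy
        logValue(mongo::StringData("hi", 2));  // length supplied explicitly
    }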
bson/util/atomic_int.h (new file, 106 lines)
@@ -0,0 +1,106 @@
// atomic_int.h
// atomic wrapper for unsigned

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#if defined(_WIN32)
# include <windows.h>
#endif

namespace mongo {

    struct AtomicUInt {
        AtomicUInt() : x(0) {}
        AtomicUInt(unsigned z) : x(z) { }

        operator unsigned() const { return x; }
        unsigned get() const { return x; }

        inline AtomicUInt operator++();    // ++prefix
        inline AtomicUInt operator++(int); // postfix++
        inline AtomicUInt operator--();    // --prefix
        inline AtomicUInt operator--(int); // postfix--

        inline void zero();

        volatile unsigned x;
    };

#if defined(_WIN32)
    void AtomicUInt::zero() {
        InterlockedExchange((volatile long*)&x, 0);
    }
    AtomicUInt AtomicUInt::operator++() {
        return InterlockedIncrement((volatile long*)&x);
    }
    AtomicUInt AtomicUInt::operator++(int) {
        return InterlockedIncrement((volatile long*)&x)-1;
    }
    AtomicUInt AtomicUInt::operator--() {
        return InterlockedDecrement((volatile long*)&x);
    }
    AtomicUInt AtomicUInt::operator--(int) {
        return InterlockedDecrement((volatile long*)&x)+1;
    }
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
    // this is in GCC >= 4.1
    inline void AtomicUInt::zero() { x = 0; } // TODO: this isn't thread safe - maybe
    AtomicUInt AtomicUInt::operator++() {
        return __sync_add_and_fetch(&x, 1);
    }
    AtomicUInt AtomicUInt::operator++(int) {
        return __sync_fetch_and_add(&x, 1);
    }
    AtomicUInt AtomicUInt::operator--() {
        return __sync_add_and_fetch(&x, -1);
    }
    AtomicUInt AtomicUInt::operator--(int) {
        return __sync_fetch_and_add(&x, -1);
    }
#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
    inline void AtomicUInt::zero() { x = 0; } // TODO: this isn't thread safe
    // from boost 1.39 interprocess/detail/atomic.hpp
    inline unsigned atomic_int_helper(volatile unsigned *x, int val) {
        int r;
        asm volatile
        (
            "lock\n\t"
            "xadd %1, %0":
            "+m"( *x ), "=r"( r ): // outputs (%0, %1)
            "1"( val ): // inputs (%2 == %1)
            "memory", "cc" // clobbers
        );
        return r;
    }
    AtomicUInt AtomicUInt::operator++() {
        return atomic_int_helper(&x, 1)+1;
    }
    AtomicUInt AtomicUInt::operator++(int) {
        return atomic_int_helper(&x, 1);
    }
    AtomicUInt AtomicUInt::operator--() {
        return atomic_int_helper(&x, -1)-1;
    }
    AtomicUInt AtomicUInt::operator--(int) {
        return atomic_int_helper(&x, -1);
    }
#else
# error "unsupported compiler or platform"
#endif

} // namespace mongo
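Illustrative sketch (not part of the diff; requestCount and handleRequest are hypothetical): AtomicUInt is a drop-in counter whose increments are atomic, and whose postfix forms return the old value, as the platform implementations above show.

    // sketch: a shared counter incremented from many threads
    static mongo::AtomicUInt requestCount;

    unsigned handleRequest() {
        unsigned ticket = requestCount++;  // atomic fetch-and-add; returns the pre-increment value
        return ticket;
    }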
bson/util/builder.h (new file, 331 lines)
@@ -0,0 +1,331 @@
/* builder.h */

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cfloat>
#include <string>
#include <string.h>
#include <stdio.h>
#include "../inline_decls.h"
#include "../stringdata.h"

namespace mongo {

    /* Note the limit here is rather arbitrary and is simply a standard. Generally the code works
       with any object that fits in ram.

       Also note that the server has some basic checks to enforce this limit but those checks are not exhaustive;
       for example, we need to check for size too big after:
         update $push (append) operation
         various db.eval() type operations
    */
    const int BSONObjMaxUserSize = 16 * 1024 * 1024;

    /*
       Sometimes we need objects slightly larger - an object in the replication local.oplog
       is slightly larger than a user object for example.
    */
    const int BSONObjMaxInternalSize = BSONObjMaxUserSize + ( 16 * 1024 );

    const int BufferMaxSize = 64 * 1024 * 1024;

    class StringBuilder;

    void msgasserted(int msgid, const char *msg);

    class TrivialAllocator {
    public:
        void* Malloc(size_t sz) { return malloc(sz); }
        void* Realloc(void *p, size_t sz) { return realloc(p, sz); }
        void Free(void *p) { free(p); }
    };

    class StackAllocator {
    public:
        enum { SZ = 512 };
        void* Malloc(size_t sz) {
            if( sz <= SZ ) return buf;
            return malloc(sz);
        }
        void* Realloc(void *p, size_t sz) {
            if( p == buf ) {
                if( sz <= SZ ) return buf;
                void *d = malloc(sz);
                if ( d == 0 )
                    msgasserted( 15912 , "out of memory StackAllocator::Realloc" );
                memcpy(d, p, SZ);
                return d;
            }
            return realloc(p, sz);
        }
        void Free(void *p) {
            if( p != buf )
                free(p);
        }
    private:
        char buf[SZ];
    };

    template< class Allocator >
    class _BufBuilder {
        // non-copyable, non-assignable
        _BufBuilder( const _BufBuilder& );
        _BufBuilder& operator=( const _BufBuilder& );
        Allocator al;
    public:
        _BufBuilder(int initsize = 512) : size(initsize) {
            if ( size > 0 ) {
                data = (char *) al.Malloc(size);
                if( data == 0 )
                    msgasserted(10000, "out of memory BufBuilder");
            }
            else {
                data = 0;
            }
            l = 0;
        }
        ~_BufBuilder() { kill(); }

        void kill() {
            if ( data ) {
                al.Free(data);
                data = 0;
            }
        }

        void reset() {
            l = 0;
        }
        void reset( int maxSize ) {
            l = 0;
            if ( maxSize && size > maxSize ) {
                al.Free(data);
                data = (char*)al.Malloc(maxSize);
                if ( data == 0 )
                    msgasserted( 15913 , "out of memory BufBuilder::reset" );
                size = maxSize;
            }
        }

        /** leave room for some stuff later
            @return point to region that was skipped. pointer may change later (on realloc), so for immediate use only
        */
        char* skip(int n) { return grow(n); }

        /* note this may be deallocated (realloced) if you keep writing. */
        char* buf() { return data; }
        const char* buf() const { return data; }

        /* assume ownership of the buffer - you must then free() it */
        void decouple() { data = 0; }

        void appendUChar(unsigned char j) {
            *((unsigned char*)grow(sizeof(unsigned char))) = j;
        }
        void appendChar(char j) {
            *((char*)grow(sizeof(char))) = j;
        }
        void appendNum(char j) {
            *((char*)grow(sizeof(char))) = j;
        }
        void appendNum(short j) {
            *((short*)grow(sizeof(short))) = j;
        }
        void appendNum(int j) {
            *((int*)grow(sizeof(int))) = j;
        }
        void appendNum(unsigned j) {
            *((unsigned*)grow(sizeof(unsigned))) = j;
        }
        void appendNum(bool j) {
            *((bool*)grow(sizeof(bool))) = j;
        }
        void appendNum(double j) {
            *((double*)grow(sizeof(double))) = j;
        }
        void appendNum(long long j) {
            *((long long*)grow(sizeof(long long))) = j;
        }
        void appendNum(unsigned long long j) {
            *((unsigned long long*)grow(sizeof(unsigned long long))) = j;
        }

        void appendBuf(const void *src, size_t len) {
            memcpy(grow((int) len), src, len);
        }

        template<class T>
        void appendStruct(const T& s) {
            appendBuf(&s, sizeof(T));
        }

        void appendStr(const StringData &str , bool includeEndingNull = true ) {
            const int len = str.size() + ( includeEndingNull ? 1 : 0 );
            memcpy(grow(len), str.data(), len);
        }

        /** @return length of current string */
        int len() const { return l; }
        void setlen( int newLen ) { l = newLen; }
        /** @return size of the buffer */
        int getSize() const { return size; }

        /* returns the pre-grow write position */
        inline char* grow(int by) {
            int oldlen = l;
            l += by;
            if ( l > size ) {
                grow_reallocate();
            }
            return data + oldlen;
        }

    private:
        /* "slow" portion of 'grow()' */
        void NOINLINE_DECL grow_reallocate() {
            int a = size * 2;
            if ( a == 0 )
                a = 512;
            if ( l > a )
                a = l + 16 * 1024;
            if ( a > BufferMaxSize )
                msgasserted(13548, "BufBuilder grow() > 64MB");
            data = (char *) al.Realloc(data, a);
            size = a;
        }

        char *data;
        int l;
        int size;

        friend class StringBuilder;
    };

    typedef _BufBuilder<TrivialAllocator> BufBuilder;

    /** The StackBufBuilder builds smaller datasets on the stack instead of using malloc.
        This can be significantly faster for small bufs. However, you can not decouple() the
        buffer with StackBufBuilder.
        While designed to be a variable on the stack, if you were to dynamically allocate one,
        nothing bad would happen. In fact in some circumstances this might make sense, say,
        embedded in some other object.
    */
    class StackBufBuilder : public _BufBuilder<StackAllocator> {
    public:
        StackBufBuilder() : _BufBuilder<StackAllocator>(StackAllocator::SZ) { }
        void decouple(); // not allowed. not implemented.
    };

    namespace {
#if defined(_WIN32)
        int (*mongo_snprintf)(char *str, size_t size, const char *format, ...) = &sprintf_s;
#else
        int (*mongo_snprintf)(char *str, size_t size, const char *format, ...) = &snprintf;
#endif
    }

    /** std::stringstream deals with locale, so this is a lot faster than std::stringstream for UTF8 */
    class StringBuilder {
    public:
        static const size_t MONGO_DBL_SIZE = 3 + DBL_MANT_DIG - DBL_MIN_EXP;
        static const size_t MONGO_S32_SIZE = 12;
        static const size_t MONGO_U32_SIZE = 11;
        static const size_t MONGO_S64_SIZE = 23;
        static const size_t MONGO_U64_SIZE = 22;
        static const size_t MONGO_S16_SIZE = 7;

        StringBuilder( int initsize=256 )
            : _buf( initsize ) {
        }

        StringBuilder& operator<<( double x ) {
            return SBNUM( x , MONGO_DBL_SIZE , "%g" );
        }
        StringBuilder& operator<<( int x ) {
            return SBNUM( x , MONGO_S32_SIZE , "%d" );
        }
        StringBuilder& operator<<( unsigned x ) {
            return SBNUM( x , MONGO_U32_SIZE , "%u" );
        }
        StringBuilder& operator<<( long x ) {
            return SBNUM( x , MONGO_S64_SIZE , "%ld" );
        }
        StringBuilder& operator<<( unsigned long x ) {
            return SBNUM( x , MONGO_U64_SIZE , "%lu" );
        }
        StringBuilder& operator<<( long long x ) {
            return SBNUM( x , MONGO_S64_SIZE , "%lld" );
        }
        StringBuilder& operator<<( unsigned long long x ) {
            return SBNUM( x , MONGO_U64_SIZE , "%llu" );
        }
        StringBuilder& operator<<( short x ) {
            return SBNUM( x , MONGO_S16_SIZE , "%hd" );
        }
        StringBuilder& operator<<( char c ) {
            _buf.grow( 1 )[0] = c;
            return *this;
        }

        void appendDoubleNice( double x ) {
            const int prev = _buf.l;
            const int maxSize = 32;
            char * start = _buf.grow( maxSize );
            int z = mongo_snprintf( start , maxSize , "%.16g" , x );
            assert( z >= 0 );
            assert( z < maxSize );
            _buf.l = prev + z;
            if( strchr(start, '.') == 0 && strchr(start, 'E') == 0 && strchr(start, 'N') == 0 ) {
                write( ".0" , 2 );
            }
        }

        void write( const char* buf, int len) { memcpy( _buf.grow( len ) , buf , len ); }

        void append( const StringData& str ) { memcpy( _buf.grow( str.size() ) , str.data() , str.size() ); }

        StringBuilder& operator<<( const StringData& str ) {
            append( str );
            return *this;
        }

        void reset( int maxSize = 0 ) { _buf.reset( maxSize ); }

        std::string str() const { return std::string(_buf.data, _buf.l); }

        int len() const { return _buf.l; }

    private:
        BufBuilder _buf;

        // non-copyable, non-assignable
        StringBuilder( const StringBuilder& );
        StringBuilder& operator=( const StringBuilder& );

        template <typename T>
        StringBuilder& SBNUM(T val,int maxSize,const char *macro) {
            int prev = _buf.l;
            int z = mongo_snprintf( _buf.grow(maxSize) , maxSize , macro , (val) );
            assert( z >= 0 );
            assert( z < maxSize );
            _buf.l = prev + z;
            return *this;
        }
    };

} // namespace mongo
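Illustrative sketch (not part of the diff; statusLine is a hypothetical function): StringBuilder composes numbers and strings via snprintf without locale overhead, with BufBuilder as the raw byte-level primitive underneath.

    // sketch: building a small status line
    std::string statusLine(int port, double uptimeHours) {
        mongo::StringBuilder sb;
        sb << "port " << port << ", uptime ";
        sb.appendDoubleNice(uptimeHours);  // integral values get a ".0" suffix for readability
        sb << "h";
        return sb.str();
    }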
bson/util/misc.h (new file, 113 lines)
@@ -0,0 +1,113 @@
/* @file misc.h
*/

/*
 * Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <ctime>

namespace mongo {

    using namespace std;

    inline void time_t_to_String(time_t t, char *buf) {
#if defined(_WIN32)
        ctime_s(buf, 32, &t);
#else
        ctime_r(&t, buf);
#endif
        buf[24] = 0; // don't want the \n
    }

    inline string time_t_to_String(time_t t = time(0) ) {
        char buf[64];
#if defined(_WIN32)
        ctime_s(buf, sizeof(buf), &t);
#else
        ctime_r(&t, buf);
#endif
        buf[24] = 0; // don't want the \n
        return buf;
    }

    inline string time_t_to_String_no_year(time_t t) {
        char buf[64];
#if defined(_WIN32)
        ctime_s(buf, sizeof(buf), &t);
#else
        ctime_r(&t, buf);
#endif
        buf[19] = 0;
        return buf;
    }

    inline string time_t_to_String_short(time_t t) {
        char buf[64];
#if defined(_WIN32)
        ctime_s(buf, sizeof(buf), &t);
#else
        ctime_r(&t, buf);
#endif
        buf[19] = 0;
        if( buf[0] && buf[1] && buf[2] && buf[3] )
            return buf + 4; // skip day of week
        return buf;
    }

    struct Date_t {
        // TODO: make signed (and look for related TODO's)
        unsigned long long millis;
        Date_t(): millis(0) {}
        Date_t(unsigned long long m): millis(m) {}
        operator unsigned long long&() { return millis; }
        operator const unsigned long long&() const { return millis; }
        string toString() const {
            char buf[64];
            time_t_to_String(millis/1000, buf);
            return buf;
        }
    };

    // Like strlen, but only scans up to n bytes.
    // Returns -1 if no '\0' found.
    inline int strnlen( const char *s, int n ) {
        for( int i = 0; i < n; ++i )
            if ( !s[ i ] )
                return i;
        return -1;
    }

    inline bool isNumber( char c ) {
        return c >= '0' && c <= '9';
    }

    inline unsigned stringToNum(const char *str) {
        unsigned x = 0;
        const char *p = str;
        while( 1 ) {
            if( !isNumber(*p) ) {
                if( *p == 0 && p != str )
                    break;
                throw 0u; // thrown as unsigned so callers' catch(unsigned) handlers match
            }
            x = x * 10 + *p++ - '0';
        }
        return x;
    }

}
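Illustrative sketch (not part of the diff; miscDemo is a hypothetical function): Date_t stores milliseconds but formats via seconds, and stringToNum signals non-numeric input by throwing an unsigned (per the fix noted above).

    // sketch: the two small utilities in this header
    void miscDemo() {
        mongo::Date_t now( 1000ULL * time(0) );  // millis since epoch
        std::string when = now.toString();       // ctime-style text, no trailing newline

        unsigned n = mongo::stringToNum("42");   // -> 42
        try {
            mongo::stringToNum("4x");            // non-digit: throws
        }
        catch (unsigned) { /* rejected */ }
        (void) when; (void) n;
    }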
@@ -1,5 +1,13 @@

import hacks_mandriva
import hacks_ubuntu
import os

def findHacks( un ):
    if un[0] == 'Linux' and (os.path.exists("/etc/debian_version") or
                             un[3].find("Ubuntu") >= 0):
        return hacks_ubuntu
    if un[0] == 'Linux' and (os.path.exists("/etc/mandriva-release") or
                             un[3].find("mnb") >= 0):
        return hacks_mandriva
    return None
@@ -1,54 +0,0 @@
import subprocess
import os
import sys
from optparse import OptionParser

""" This script aggregates several tracefiles into one tracefile
    All but the last argument are input tracefiles or .txt files which list tracefiles.
    The last argument is the tracefile to which the output will be written
"""

def aggregate(inputs, output):
    """Aggregates the tracefiles given in inputs to a tracefile given by output"""
    args = ['lcov']

    for name in inputs:
        args += ['-a', name]

    args += ['-o', output]

    print ' '.join(args)

    return subprocess.call(args)

def getfilesize(path):
    if not os.path.isfile(path):
        return 0
    return os.path.getsize(path)

def main():
    inputs = []

    usage = "usage: %prog input1.info input2.info ... output.info"
    parser = OptionParser(usage=usage)

    (options, args) = parser.parse_args()
    if len(args) < 2:
        return "must supply input files"

    for path in args[:-1]:
        name, ext = os.path.splitext(path)

        if ext == '.info':
            if getfilesize(path) > 0:
                inputs.append(path)

        elif ext == '.txt':
            inputs += [line.strip() for line in open(path)
                       if getfilesize(line.strip()) > 0]
        else:
            return "unrecognized file type"

    return aggregate(inputs, args[-1])

if __name__ == '__main__':
    sys.exit(main())
buildscripts/bb.py (new file, 23 lines)
@@ -0,0 +1,23 @@
# bb tools

import os
import re

def checkOk():
    dir = os.getcwd()
    m = re.compile( ".*/.*_V(\d+\.\d+)/mongo" ).findall( dir )
    if len(m) == 0:
        return
    if len(m) > 1:
        raise Exception( "unexpected: " + str(m) )

    m = "v" + m[0]
    print( m )
    print( "expected version [" + m + "]" )

    from subprocess import Popen, PIPE
    diff = Popen( [ "git", "diff", "origin/v1.2" ], stdout=PIPE ).communicate()[ 0 ]
    if len(diff) > 0:
        print( diff )
        raise Exception( "build bot broken?" )
@@ -1,40 +0,0 @@

import utils
import os
import shutil
import sys

def go( boost_root ):

    OUTPUT = "src/third_party/boost"
    if os.path.exists( OUTPUT ):
        shutil.rmtree( OUTPUT )

    cmd = [ "bcp" , "--scan" , "--boost=%s" % boost_root ]

    src = utils.getAllSourceFiles()

    cmd += src
    cmd.append( OUTPUT )

    if not os.path.exists( OUTPUT ):
        os.makedirs( OUTPUT )

    res = utils.execsys( cmd )

    out = open( OUTPUT + "/bcp-out.txt" , 'w' )
    out.write( res[0] )
    out.close()

    out = open( OUTPUT + "/notes.txt" , 'w' )
    out.write( "command: " + " ".join( cmd ) )
    out.close()

    print( res[1] )

if __name__ == "__main__":
    if len(sys.argv) == 1:
        print( "usage: python %s <boost root directory>" % sys.argv[0] )
        sys.exit(1)
    go( sys.argv[1] )
buildscripts/benchmark_tools.py (new file, 62 lines)
@@ -0,0 +1,62 @@
import os
import urllib
import urllib2
import sys

try:
    import json
except:
    import simplejson as json # need simplejson for python < 2.6

sys.path.append( "." )
sys.path.append( ".." )
sys.path.append( "../../" )
sys.path.append( "../../../" )


import settings

def machine_info(extra_info=""):
    """Get a dict representing the "machine" section of a benchmark result.

    ie:
    {
        "os_name": "OS X",
        "os_version": "10.5",
        "processor": "2.4 GHz Intel Core 2 Duo",
        "memory": "3 GB 667 MHz DDR2 SDRAM",
        "extra_info": "Python 2.6"
    }

    Must have a settings.py file on sys.path that defines "processor" and "memory"
    variables.
    """
    machine = {}
    (machine["os_name"], _, machine["os_version"], _, _) = os.uname()
    machine["processor"] = settings.processor
    machine["memory"] = settings.memory
    machine["extra_info"] = extra_info
    return machine

def post_data(data, machine_extra_info="", post_url="http://mongo-db.appspot.com/benchmark"):
    """Post a benchmark data point.

    data should be a Python dict that looks like:
    {
        "benchmark": {
            "project": "http://github.com/mongodb/mongo-python-driver",
            "name": "insert test",
            "description": "test inserting 10000 documents with the C extension enabled",
            "tags": ["insert", "python"]
        },
        "trial": {
            "server_hash": "4f5a8d52f47507a70b6c625dfb5dbfc87ba5656a",
            "client_hash": "8bf2ad3d397cbde745fd92ad41c5b13976fac2b5",
            "result": 67.5,
            "extra_info": "some logs or something"
        }
    }
    """
    data["machine"] = machine_info(machine_extra_info)
    urllib2.urlopen(post_url, urllib.urlencode({"payload": json.dumps(data)}))
    return data
@@ -1,75 +0,0 @@
#!/usr/bin/python

'''Script to attempt an isolated build of the C++ driver and its examples.

Working directory must be the repository root.

Usage:

./buildscripts/build_and_test_client.py <mongo client archive file> [optional scons arguments]

The client is built in a temporary directory, and the sample programs are run against a mongod
instance found in the current working directory. The temporary directory and its contents are
destroyed at the end of execution.
'''

import os
import shutil
import subprocess
import sys
import tempfile
import tarfile
import zipfile

import utils

def main(args):
    archive_file = args[1]
    scons_args = args[2:]
    build_and_test(archive_file, scons_args)

def build_and_test(archive_name, scons_args):
    work_dir = tempfile.mkdtemp()
    try:
        archive = open_archive(archive_name)
        extracted_root = extract_archive(work_dir, archive)
        run_scons(extracted_root, scons_args)
        smoke_client(extracted_root)
    finally:
        shutil.rmtree(work_dir)

def open_tar(archive_name):
    return tarfile.open(archive_name, 'r')

def open_zip(archive_name):
    class ZipWrapper(zipfile.ZipFile):
        def getnames(self):
            return self.namelist()
    return ZipWrapper(archive_name, 'r')

def open_archive(archive_name):
    try:
        return open_tar(archive_name)
    except:
        return open_zip(archive_name)

def extract_archive(work_dir, archive_file):
    archive_file.extractall(path=work_dir)
    return os.path.join(
        work_dir,
        os.path.dirname([n for n in archive_file.getnames() if n.endswith('SConstruct')][0])
        )

def run_scons(extracted_root, scons_args):
    rc = subprocess.call(['scons', '-C', extracted_root, ] + scons_args + ['clientTests'])
    if rc != 0:
        sys.exit(rc)

def smoke_client(extracted_root):
    rc = subprocess.call(utils.smoke_command("--test-path", extracted_root, "client"))
    if rc != 0:
        sys.exit(rc)

if __name__ == '__main__':
    main(sys.argv)
    sys.exit(0)
@@ -1,480 +0,0 @@
"""
|
||||
buildlogger.py
|
||||
|
||||
Wrap a command (specified on the command line invocation of buildlogger.py)
|
||||
and send output in batches to the buildlogs web application via HTTP POST.
|
||||
|
||||
The script configures itself from environment variables:
|
||||
|
||||
required env vars:
|
||||
MONGO_BUILDER_NAME (e.g. "Nightly Linux 64-bit")
|
||||
MONGO_BUILD_NUMBER (an integer)
|
||||
MONGO_TEST_FILENAME (not required when invoked with -g)
|
||||
|
||||
optional env vars:
|
||||
MONGO_PHASE (e.g. "core", "slow nightly", etc)
|
||||
MONGO_* (any other environment vars are passed to the web app)
|
||||
BUILDLOGGER_CREDENTIALS (see below)
|
||||
|
||||
This script has two modes: a "test" mode, intended to wrap the invocation of
|
||||
an individual test file, and a "global" mode, intended to wrap the mongod
|
||||
instances that run throughout the duration of a mongo test phase (the logs
|
||||
from "global" invocations are displayed interspersed with the logs of each
|
||||
test, in order to let the buildlogs web app display the full output sensibly.)
|
||||
|
||||
If the BUILDLOGGER_CREDENTIALS environment variable is set, it should be a
|
||||
path to a valid Python file containing "username" and "password" variables,
|
||||
which should be valid credentials for authenticating to the buildlogger web
|
||||
app. For example:
|
||||
|
||||
username = "hello"
|
||||
password = "world"
|
||||
|
||||
If BUILDLOGGER_CREDENTIALS is a relative path, then the working directory
|
||||
and the directories one, two, and three levels up, are searched, in that
|
||||
order.
|
||||
"""
|
||||
|
||||
import functools
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
import signal
|
||||
import socket
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
import urllib2
|
||||
import utils
|
||||
|
||||
# suppress deprecation warnings that happen when
|
||||
# we import the 'buildbot.tac' file below
|
||||
import warnings
|
||||
warnings.simplefilter('ignore', DeprecationWarning)
|
||||
|
||||
try:
|
||||
import json
|
||||
except:
|
||||
try:
|
||||
import simplejson as json
|
||||
except:
|
||||
json = None
|
||||
|
||||
# try to load the shared secret from settings.py
|
||||
# which will be one, two, or three directories up
|
||||
# from this file's location
|
||||
credentials_file = os.environ.get('BUILDLOGGER_CREDENTIALS', 'buildbot.tac')
|
||||
credentials_loc, credentials_name = os.path.split(credentials_file)
|
||||
if not credentials_loc:
|
||||
here = os.path.abspath(os.path.dirname(__file__))
|
||||
possible_paths = [
|
||||
os.path.abspath(os.path.join(here, '..')),
|
||||
os.path.abspath(os.path.join(here, '..', '..')),
|
||||
os.path.abspath(os.path.join(here, '..', '..', '..')),
|
||||
]
|
||||
else:
|
||||
possible_paths = [credentials_loc]
|
||||
|
||||
username, password = None, None
|
||||
for path in possible_paths:
|
||||
credentials_path = os.path.join(path, credentials_name)
|
||||
if os.path.isfile(credentials_path):
|
||||
credentials = {}
|
||||
try:
|
||||
execfile(credentials_path, credentials, credentials)
|
||||
username = credentials.get('slavename', credentials.get('username'))
|
||||
password = credentials.get('passwd', credentials.get('password'))
|
||||
break
|
||||
except:
|
||||
pass
|
||||
|
||||
|
||||
URL_ROOT = 'http://buildlogs.mongodb.org/'
|
||||
TIMEOUT_SECONDS = 10
|
||||
socket.setdefaulttimeout(TIMEOUT_SECONDS)
|
||||
|
||||
digest_handler = urllib2.HTTPDigestAuthHandler()
|
||||
digest_handler.add_password(
|
||||
realm='buildlogs',
|
||||
uri=URL_ROOT,
|
||||
user=username,
|
||||
passwd=password)
|
||||
|
||||
# This version of HTTPErrorProcessor is copied from
|
||||
# Python 2.7, and allows REST response codes (e.g.
|
||||
# "201 Created") which are treated as errors by
|
||||
# older versions.
|
||||
class HTTPErrorProcessor(urllib2.HTTPErrorProcessor):
|
||||
def http_response(self, request, response):
|
||||
code, msg, hdrs = response.code, response.msg, response.info()
|
||||
|
||||
# According to RFC 2616, "2xx" code indicates that the client's
|
||||
# request was successfully received, understood, and accepted.
|
||||
if not (200 <= code < 300):
|
||||
response = self.parent.error(
|
||||
'http', request, response, code, msg, hdrs)
|
||||
|
||||
return response
|
||||
|
||||
url_opener = urllib2.build_opener(digest_handler, HTTPErrorProcessor())
|
||||
|
||||
def url(endpoint):
|
||||
if not endpoint.endswith('/'):
|
||||
endpoint = '%s/' % endpoint
|
||||
|
||||
return '%s/%s' % (URL_ROOT.rstrip('/'), endpoint)
|
||||
|
||||
def post(endpoint, data, headers=None):
|
||||
data = json.dumps(data, encoding='utf-8')
|
||||
|
||||
headers = headers or {}
|
||||
headers.update({'Content-Type': 'application/json; charset=utf-8'})
|
||||
|
||||
req = urllib2.Request(url=url(endpoint), data=data, headers=headers)
|
||||
try:
|
||||
response = url_opener.open(req)
|
||||
except urllib2.URLError:
|
||||
import traceback
|
||||
traceback.print_exc(file=sys.stderr)
|
||||
sys.stderr.flush()
|
||||
# indicate that the request did not succeed
|
||||
return None
|
||||
|
||||
response_headers = dict(response.info())
|
||||
|
||||
# eg "Content-Type: application/json; charset=utf-8"
|
||||
content_type = response_headers.get('content-type')
|
||||
match = re.match(r'(?P<mimetype>[^;]+).*(?:charset=(?P<charset>[^ ]+))?$', content_type)
|
||||
if match and match.group('mimetype') == 'application/json':
|
||||
encoding = match.group('charset') or 'utf-8'
|
||||
return json.load(response, encoding=encoding)
|
||||
|
||||
return response.read()
|
||||
|
||||
def traceback_to_stderr(func):
|
||||
"""
|
||||
decorator which logs any exceptions encountered to stderr
|
||||
and returns none.
|
||||
"""
|
||||
@functools.wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
try:
|
||||
return func(*args, **kwargs)
|
||||
except urllib2.HTTPError, err:
|
||||
sys.stderr.write('error: HTTP code %d\n----\n' % err.code)
|
||||
if hasattr(err, 'hdrs'):
|
||||
for k, v in err.hdrs.items():
|
||||
sys.stderr.write("%s: %s\n" % (k, v))
|
||||
sys.stderr.write('\n')
|
||||
sys.stderr.write(err.read())
|
||||
sys.stderr.write('\n----\n')
|
||||
sys.stderr.flush()
|
||||
except:
|
||||
sys.stderr.write('Traceback from buildlogger:\n')
|
||||
traceback.print_exc(file=sys.stderr)
|
||||
sys.stderr.flush()
|
||||
return None
|
||||
return wrapper
|
||||
|
||||
|
||||
@traceback_to_stderr
|
||||
def get_or_create_build(builder, buildnum, extra={}):
|
||||
data = {'builder': builder, 'buildnum': buildnum}
|
||||
data.update(extra)
|
||||
response = post('build', data)
|
||||
if response is None:
|
||||
return None
|
||||
return response['id']
|
||||
|
||||
@traceback_to_stderr
|
||||
def create_test(build_id, test_filename, test_command, test_phase):
|
||||
response = post('build/%s/test' % build_id, {
|
||||
'test_filename': test_filename,
|
||||
'command': test_command,
|
||||
'phase': test_phase,
|
||||
})
|
||||
if response is None:
|
||||
return None
|
||||
return response['id']
|
||||
|
||||
@traceback_to_stderr
|
||||
def append_test_logs(build_id, test_id, log_lines):
|
||||
response = post('build/%s/test/%s' % (build_id, test_id), data=log_lines)
|
||||
if response is None:
|
||||
return False
|
||||
return True
|
||||
|
||||
@traceback_to_stderr
|
||||
def append_global_logs(build_id, log_lines):
|
||||
"""
|
||||
"global" logs are for the mongod(s) started by smoke.py
|
||||
that last the duration of a test phase -- since there
|
||||
may be output in here that is important but spans individual
|
||||
tests, the buildlogs webapp handles these logs specially.
|
||||
"""
|
||||
response = post('build/%s' % build_id, data=log_lines)
|
||||
if response is None:
|
||||
return False
|
||||
return True
|
||||
|
||||
@traceback_to_stderr
|
||||
def finish_test(build_id, test_id, failed=False):
|
||||
response = post('build/%s/test/%s' % (build_id, test_id), data=[], headers={
|
||||
'X-Sendlogs-Test-Done': 'true',
|
||||
'X-Sendlogs-Test-Failed': failed and 'true' or 'false',
|
||||
})
|
||||
if response is None:
|
||||
return False
|
||||
return True
|
||||
|
||||
def run_and_echo(command):
|
||||
"""
|
||||
this just calls the command, and returns its return code,
|
||||
allowing stdout and stderr to work as normal. it is used
|
||||
as a fallback when environment variables or python
|
||||
dependencies cannot be configured, or when the logging
|
||||
webapp is unavailable, etc
|
||||
"""
|
||||
proc = subprocess.Popen(command)
|
||||
|
||||
def handle_sigterm(signum, frame):
|
||||
try:
|
||||
proc.send_signal(signum)
|
||||
except AttributeError:
|
||||
os.kill(proc.pid, signum)
|
||||
orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)
|
||||
|
||||
proc.wait()
|
||||
|
||||
signal.signal(signal.SIGTERM, orig_handler)
|
||||
return proc.returncode
|
||||
|
||||
class LogAppender(object):
|
||||
def __init__(self, callback, args, send_after_lines=200, send_after_seconds=2):
|
||||
        self.callback = callback
        self.callback_args = args

        self.send_after_lines = send_after_lines
        self.send_after_seconds = send_after_seconds

        self.buf = []
        self.retrybuf = []
        self.last_sent = time.time()

    def __call__(self, line):
        self.buf.append((time.time(), line))

        delay = time.time() - self.last_sent
        if len(self.buf) >= self.send_after_lines or delay >= self.send_after_seconds:
            self.submit()

        # no return value is expected

    def submit(self):
        if len(self.buf) + len(self.retrybuf) == 0:
            return True

        args = list(self.callback_args)
        args.append(list(self.buf) + self.retrybuf)

        self.last_sent = time.time()

        if self.callback(*args):
            self.buf = []
            self.retrybuf = []
            return True
        else:
            self.retrybuf += self.buf
            self.buf = []
            return False


def wrap_test(command):
    """
    call the given command, intercept its stdout and stderr,
    and send results in batches of 100 lines or 10s to the
    buildlogger webapp
    """

    # get builder name and build number from environment
    builder = os.environ.get('MONGO_BUILDER_NAME')
    buildnum = os.environ.get('MONGO_BUILD_NUMBER')

    if builder is None or buildnum is None:
        return run_and_echo(command)

    try:
        buildnum = int(buildnum)
    except ValueError:
        sys.stderr.write('buildlogger: build number ("%s") was not an int\n' % buildnum)
        sys.stderr.flush()
        return run_and_echo(command)

    # a test takes some extra info
    phase = os.environ.get('MONGO_PHASE', 'unknown')
    test_filename = os.environ.get('MONGO_TEST_FILENAME', 'unknown')

    build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
    build_info.pop('MONGO_BUILDER_NAME', None)
    build_info.pop('MONGO_BUILD_NUMBER', None)
    build_info.pop('MONGO_PHASE', None)
    build_info.pop('MONGO_TEST_FILENAME', None)

    build_id = get_or_create_build(builder, buildnum, extra=build_info)
    if not build_id:
        return run_and_echo(command)

    test_id = create_test(build_id, test_filename, ' '.join(command), phase)
    if not test_id:
        return run_and_echo(command)

    # the peculiar formatting here matches what is printed by
    # smoke.py when starting tests
    output_url = '%s/build/%s/test/%s/' % (URL_ROOT.rstrip('/'), build_id, test_id)
    sys.stdout.write(' (output suppressed; see %s)\n' % output_url)
    sys.stdout.flush()

    callback = LogAppender(callback=append_test_logs, args=(build_id, test_id))
    returncode = loop_and_callback(command, callback)
    failed = bool(returncode != 0)

    # this will append any remaining unsubmitted logs, or
    # return True if there are none left to submit
    tries = 5
    while not callback.submit() and tries > 0:
        sys.stderr.write('failed to finish sending test logs, retrying in 1s\n')
        sys.stderr.flush()
        time.sleep(1)
        tries -= 1

    tries = 5
    while not finish_test(build_id, test_id, failed) and tries > 0:
        sys.stderr.write('failed to mark test finished, retrying in 1s\n')
        sys.stderr.flush()
        time.sleep(1)
        tries -= 1

    return returncode

def wrap_global(command):
    """
    call the given command, intercept its stdout and stderr,
    and send results in batches of 100 lines or 10s to the
    buildlogger webapp. see :func:`append_global_logs` for the
    difference between "global" and "test" log output.
    """

    # get builder name and build number from environment
    builder = os.environ.get('MONGO_BUILDER_NAME')
    buildnum = os.environ.get('MONGO_BUILD_NUMBER')

    if builder is None or buildnum is None:
        return run_and_echo(command)

    try:
        buildnum = int(buildnum)
    except ValueError:
        sys.stderr.write('int(os.environ["MONGO_BUILD_NUMBER"]):\n')
        sys.stderr.write(traceback.format_exc())
        sys.stderr.flush()
        return run_and_echo(command)

    build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
    build_info.pop('MONGO_BUILDER_NAME', None)
    build_info.pop('MONGO_BUILD_NUMBER', None)

    build_id = get_or_create_build(builder, buildnum, extra=build_info)
    if not build_id:
        return run_and_echo(command)

    callback = LogAppender(callback=append_global_logs, args=(build_id, ))
    returncode = loop_and_callback(command, callback)

    # this will append any remaining unsubmitted logs, or
    # return True if there are none left to submit
    tries = 5
    while not callback.submit() and tries > 0:
        sys.stderr.write('failed to finish sending global logs, retrying in 1s\n')
        sys.stderr.flush()
        time.sleep(1)
        tries -= 1

    return returncode

def loop_and_callback(command, callback):
    """
    run the given command (a sequence of arguments, ordinarily
    from sys.argv), and call the given callback with each line
    of stdout or stderr encountered. after the command is finished,
    callback is called once more with None instead of a string.
    """
    proc = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )

    def handle_sigterm(signum, frame):
        try:
            proc.send_signal(signum)
        except AttributeError:
            os.kill(proc.pid, signum)

    # register a handler to delegate SIGTERM
    # to the child process
    orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)

    while proc.poll() is None:
        try:
            line = proc.stdout.readline().strip('\r\n')
            line = utils.unicode_dammit(line)
            callback(line)
        except IOError:
            # if the signal handler is called while
            # we're waiting for readline() to return,
            # don't show a traceback
            break

    # there may be additional buffered output
    for line in proc.stdout.readlines():
        callback(line.strip('\r\n'))

    # restore the original signal handler, if any
    signal.signal(signal.SIGTERM, orig_handler)
    return proc.returncode


if __name__ == '__main__':
    # argv[0] is 'buildlogger.py'
    del sys.argv[0]

    if sys.argv[0] in ('-g', '--global'):
        # then this is wrapping a "global" command, and should
        # submit global logs to the build, not test logs to a
        # test within the build
        del sys.argv[0]
        wrapper = wrap_global
    else:
        wrapper = wrap_test

    # if we are missing credentials or the json module, then
    # we can't use buildlogger; so just echo output, but also
    # log why we can't work.
    if json is None:
        sys.stderr.write('buildlogger: could not import a json module\n')
        sys.stderr.flush()
        wrapper = run_and_echo
    elif username is None or password is None:
        sys.stderr.write('buildlogger: could not find or import %s for authentication\n' % credentials_file)
        sys.stderr.flush()
        wrapper = run_and_echo

    # otherwise wrap a test command as normal; the
    # wrapper functions return the return code of
    # the wrapped command, so that should be our
    # exit code as well.
    sys.exit(wrapper(sys.argv))
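As a quick illustration of the batching in LogAppender above, the following minimal sketch drives it with a stub callback instead of the real webapp POST. Everything except LogAppender itself is hypothetical, and the keyword names send_after_lines/send_after_seconds are assumed to match the attributes the constructor assigns:

    # Illustrative only: exercise LogAppender batching with a stub callback.
    def stub_callback(batch):
        # batch is a list of (timestamp, line) tuples built up by __call__
        print('submitting %d lines' % len(batch))
        return True  # pretend the webapp accepted the POST

    appender = LogAppender(callback=stub_callback, args=(),
                           send_after_lines=3, send_after_seconds=10)
    for n in range(7):
        appender('line %d' % n)   # auto-flushes after every 3rd line
    appender.submit()             # flush the one remaining buffered line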
@@ -14,34 +14,18 @@ if os.path.basename(cwd) == 'buildscripts':
print( "cwd [" + cwd + "]" )

def shouldKill( c ):

    if "smoke.py" in c:
        return False

    if "emr.py" in c:
        return False

    if "java" in c:
        return False

    if c.find( cwd ) >= 0:
        return True

    if ( c.find( "buildbot" ) >= 0 or c.find( "slave" ) >= 0 ) and c.find( "/mongo/" ) >= 0:
        return True

    if c.find( "xml-data/build-dir" ) >= 0: # for bamboo
        return True

    return False

def killprocs( signal="" ):

    killed = 0

    if sys.platform == 'win32':
        return killed

    l = utils.getprocesslist()
    print( "num procs:" + str( len( l ) ) )
    if len(l) == 0:
103 buildscripts/confluence_export.py Normal file
@@ -0,0 +1,103 @@
#! /usr/bin/env python

# Export the contents of confluence
#
# Dependencies:
#   - suds
#
# User: soap, Password: soap
from __future__ import with_statement
import cookielib
import datetime
import os
import shutil
import subprocess
import sys
import urllib2
sys.path[0:0] = [""]

import simples3
from suds.client import Client

import settings

HTML_URI = "http://mongodb.onconfluence.com/rpc/soap-axis/confluenceservice-v1?wsdl"
PDF_URI = "http://www.mongodb.org/rpc/soap-axis/pdfexport?wsdl"
USERNAME = "soap"
PASSWORD = "soap"
AUTH_URI = "http://www.mongodb.org/login.action?os_authType=basic"
TMP_DIR = "confluence-tmp"
TMP_FILE = "confluence-tmp.zip"


def export_html_and_get_uri():
    client = Client(HTML_URI)
    auth = client.service.login(USERNAME, PASSWORD)
    return client.service.exportSpace(auth, "DOCS", "TYPE_HTML")


def export_pdf_and_get_uri():
    client = Client(PDF_URI)
    auth = client.service.login(USERNAME, PASSWORD)
    return client.service.exportSpace(auth, "DOCS")


def login_and_download(docs):
    cookie_jar = cookielib.CookieJar()
    cookie_handler = urllib2.HTTPCookieProcessor(cookie_jar)
    password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
    password_manager.add_password(None, AUTH_URI, USERNAME, PASSWORD)
    auth_handler = urllib2.HTTPBasicAuthHandler(password_manager)
    urllib2.build_opener(cookie_handler, auth_handler).open(AUTH_URI)
    return urllib2.build_opener(cookie_handler).open(docs)


def extract_to_dir(data, dir):
    with open(TMP_FILE, "w") as f:
        f.write(data.read())
        data.close()
    # This is all really annoying but zipfile doesn't do extraction on 2.5
    subprocess.call(["unzip", "-d", dir, TMP_FILE])
    os.unlink(TMP_FILE)


def rmdir(dir):
    try:
        shutil.rmtree(dir)
    except:
        pass


def overwrite(src, dest):
    target = "%s/DOCS-%s/" % (dest, datetime.date.today())
    current = "%s/current" % dest
    rmdir(target)
    shutil.copytree(src, target)
    try:
        os.unlink(current)
    except:
        pass
    os.symlink(os.path.abspath(target), os.path.abspath(current))


def write_to_s3(pdf):
    s3 = simples3.S3Bucket(settings.bucket, settings.id, settings.key)
    name = "docs/mongodb-docs-%s.pdf" % datetime.date.today()
    s3.put(name, pdf, acl="public-read")


def main(dir):
    # HTML
    rmdir(TMP_DIR)
    extract_to_dir(login_and_download(export_html_and_get_uri()), TMP_DIR)
    overwrite("%s/DOCS/" % TMP_DIR, dir)

    # PDF
    write_to_s3(login_and_download(export_pdf_and_get_uri()).read())


if __name__ == "__main__":
    try:
        main(sys.argv[1])
    except IndexError:
        print "pass outdir as first arg"
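For reference, the dated snapshot name that overwrite() builds can be previewed on its own; the dest path here is made up for illustration:

    import datetime

    dest = "/var/docs"   # hypothetical destination directory
    target = "%s/DOCS-%s/" % (dest, datetime.date.today())
    current = "%s/current" % dest
    print(target)    # e.g. /var/docs/DOCS-2012-06-01/
    print(current)   # /var/docs/current, symlinked to the latest snapshot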
3361 buildscripts/cpplint.py vendored
File diff suppressed because it is too large
139 buildscripts/distmirror.py Normal file
@@ -0,0 +1,139 @@
#!/usr/bin/env python

# Download mongodb stuff (at present builds, sources, docs, but not
# drivers).

# Usage: <progname> [directory] # directory defaults to cwd.

# FIXME: this script is fairly sloppy.
import sys
import os
import urllib2
import time
import hashlib
import warnings

written_files = []
def get(url, filename):
    # A little safety check.
    if filename in written_files:
        raise Exception('not overwriting file %s (already written in this session)' % filename)
    else:
        written_files.append(filename)
    print "downloading %s to %s" % (url, filename)
    open(filename, 'w').write(urllib2.urlopen(url).read())


def checkmd5(md5str, filename):
    m = hashlib.md5()
    m.update(open(filename, 'rb').read())
    d = m.hexdigest()
    if d != md5str:
        warnings.warn("md5sum mismatch for file %s: wanted %s; got %s" % (filename, md5str, d))

osarches=(("osx", ("i386", "i386-tiger", "x86_64"), ("tgz", )),
          ("linux", ("i686", "x86_64"), ("tgz", )),
          ("win32", ("i386", "x86_64"), ("zip", )),
          ("sunos5", ("i86pc", "x86_64"), ("tgz", )),
          ("src", ("src", ), ("tar.gz", "zip")), )

# KLUDGE: this will need constant editing.
versions = ("1.4.2", "1.5.1", "latest")

url_format = "http://downloads.mongodb.org/%s/mongodb-%s-%s.%s"
filename_format = "mongodb-%s-%s.%s"

def core_server():
    for version in versions:
        for (os, architectures, archives) in osarches:
            for architecture in architectures:
                for archive in archives:
                    osarch = os + '-' + architecture if architecture != 'src' else 'src'
                    # ugh.
                    if architecture == 'src' and version == 'latest':
                        if archive == 'tar.gz':
                            archive2 = 'tarball'
                        elif archive == 'zip':
                            archive2 = 'zipball'
                        url = "http://github.com/mongodb/mongo/"+archive2+"/master"
                        version2 = "master"
                    else:
                        version2 = version if architecture != 'src' else 'r'+version
                        url = url_format % (os, osarch, version2, archive)
                    # ugh ugh
                    md5url = url+'.md5' if architecture != 'src' else None
                    filename = filename_format % (osarch, version2, archive)
                    get(url, filename)
                    if md5url:
                        print "fetching md5 url " + md5url
                        md5str = urllib2.urlopen(md5url).read()
                        checkmd5(md5str, filename)

def drivers():
    # Drivers... FIXME: drivers.
    driver_url_format = "http://github.com/mongodb/mongo-%s-driver/%s/%s"
    driver_filename_format = "mongo-%s-driver-%s.%s"
    drivers=(("python", ("1.6", "master"), ("zipball", "tarball"), None),
             ("ruby", ("0.20", "master"), ("zipball", "tarball"), None),
             ("c", ("v0.1", "master"), ("zipball", "tarball"), None),
             # FIXME: PHP, Java, and Csharp also have zips and jars of
             # precompiled releases.
             ("php", ("1.0.6", "master"), ("zipball", "tarball"), None),
             ("java", ("r1.4", "r2.0rc1", "master"), ("zipball", "tarball"), None),
             # And Csharp is in a different github place, too.
             ("csharp", ("0.82.2", "master"), ("zipball", "tarball"),
              "http://github.com/samus/mongodb-%s/%s/%s"),
             )

    for (lang, releases, archives, url_format) in drivers:
        for release in releases:
            for archive in archives:
                url = (url_format if url_format else driver_url_format) % (lang, archive, release)
                if archive == 'zipball':
                    extension = 'zip'
                elif archive == 'tarball':
                    extension = 'tgz'
                else:
                    raise Exception('unknown archive format %s' % archive)
                filename = driver_filename_format % (lang, release, extension)
                get(url, filename)
                # ugh ugh ugh
                if lang == 'csharp' and release != 'master':
                    url = 'http://github.com/downloads/samus/mongodb-csharp/MongoDBDriver-Release-%s.zip' % (release)
                    filename = 'MongoDBDriver-Release-%s.zip' % (release)
                    get(url, filename)
                if lang == 'java' and release != 'master':
                    get('http://github.com/downloads/mongodb/mongo-java-driver/mongo-%s.jar' % (release), 'mongo-%s.jar' % (release))
                # I have no idea what's going on with the PHP zipfiles.
                if lang == 'php' and release == '1.0.6':
                    get('http://github.com/downloads/mongodb/mongo-php-driver/mongo-1.0.6-php5.2-osx.zip', 'mongo-1.0.6-php5.2-osx.zip')
                    get('http://github.com/downloads/mongodb/mongo-php-driver/mongo-1.0.6-php5.3-osx.zip', 'mongo-1.0.6-php5.3-osx.zip')

def docs():
    # FIXME: in principle, the doc PDFs could be out of date.
    docs_url = time.strftime("http://downloads.mongodb.org/docs/mongodb-docs-%Y-%m-%d.pdf")
    docs_filename = time.strftime("mongodb-docs-%Y-%m-%d.pdf")
    get(docs_url, docs_filename)

def extras():
    # Extras
    extras = ("http://media.mongodb.org/zips.json", )
    for extra in extras:
        if extra.rfind('/') > -1:
            filename = extra[extra.rfind('/')+1:]
        else:
            raise Exception('URL %s lacks a slash?' % extra)
        get(extra, filename)

if len(sys.argv) > 1:
    dir=sys.argv[1]
    os.makedirs(dir)
    os.chdir(dir)

print """NOTE: the md5sums for all the -latest tarballs are out of
date. You will probably see warnings as this script runs. (If you
don't, feel free to delete this note.)"""
core_server()
drivers()
docs()
extras()
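To make the format strings above concrete, here is how one download URL and filename expand for a sample platform/version combination (the values are picked purely for illustration):

    url_format = "http://downloads.mongodb.org/%s/mongodb-%s-%s.%s"
    filename_format = "mongodb-%s-%s.%s"

    os_, osarch, version, archive = "linux", "linux-x86_64", "1.4.2", "tgz"
    print(url_format % (os_, osarch, version, archive))
    # http://downloads.mongodb.org/linux/mongodb-linux-x86_64-1.4.2.tgz
    print(filename_format % (osarch, version, archive))
    # mongodb-linux-x86_64-1.4.2.tgz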
@@ -1,107 +0,0 @@
// FileLock.java

import java.io.*;
import java.util.*;
import java.util.concurrent.*;

/**
 * "locks" a resource by using the file system as storage
 * file has 1 line
 *   <incarnation> <last ping time in millis>
 */
public class FileLock {

    public FileLock( String logicalName )
        throws IOException {

        _file = new File( "/tmp/java-fileLock-" + logicalName );
        _incarnation = "xxx" + Math.random() + "yyy";

        if ( ! _file.exists() ) {
            FileOutputStream fout = new FileOutputStream( _file );
            fout.write( "\n".getBytes() );
            fout.close();
        }

    }

    /**
     * takes lock
     * if someone else has it, blocks until the other one finishes
     */
    public void lock()
        throws IOException {
        if ( _lock != null )
            throw new IllegalStateException( "can't lock when you're locked" );

        try {
            _semaphore.acquire();
        }
        catch ( InterruptedException ie ) {
            throw new RuntimeException( "sad" , ie );
        }

        _raf = new RandomAccessFile( _file , "rw" );
        _lock = _raf.getChannel().lock();
    }

    public void unlock()
        throws IOException {

        if ( _lock == null )
            throw new IllegalStateException( "can't unlock when you're not locked" );

        _lock.release();
        _semaphore.release();

        _locked = false;
    }

    final File _file;
    final String _incarnation;

    private RandomAccessFile _raf;
    private java.nio.channels.FileLock _lock;

    private boolean _locked;

    private static Semaphore _semaphore = new Semaphore(1);


    public static void main( final String[] args )
        throws Exception {

        List<Thread> threads = new ArrayList<Thread>();

        for ( int i=0; i<3; i++ ) {

            threads.add( new Thread() {
                    public void run() {
                        try {
                            FileLock lock = new FileLock( args[0] );

                            long start = System.currentTimeMillis();

                            lock.lock();
                            System.out.println( "time to lock:\t" + (System.currentTimeMillis()-start) );
                            Thread.sleep( Integer.parseInt( args[1] ) );
                            lock.unlock();
                            System.out.println( "total time:\t" + (System.currentTimeMillis()-start) );
                        }
                        catch ( Exception e ) {
                            e.printStackTrace();
                        }
                    }
                } );
        }

        for ( Thread t : threads ) {
            t.start();
        }

        for ( Thread t : threads ) {
            t.join();
        }

    }
}
@@ -1,156 +0,0 @@
// IOUtil.java

import java.io.*;
import java.net.*;
import java.util.*;

public class IOUtil {

    public static String urlFileName( String url ) {
        int idx = url.lastIndexOf( "/" );
        if ( idx < 0 )
            return url;
        return url.substring( idx + 1 );
    }

    public static long pipe( InputStream in , OutputStream out )
        throws IOException {

        long bytes = 0;

        byte[] buf = new byte[2048];

        while ( true ) {
            int x = in.read( buf );
            if ( x < 0 )
                break;

            bytes += x;
            out.write( buf , 0 , x );
        }

        return bytes;
    }

    public static class PipingThread extends Thread {
        public PipingThread( InputStream in , OutputStream out ) {
            _in = in;
            _out = out;

            _wrote = 0;
        }

        public void run() {
            try {
                _wrote = pipe( _in , _out );
            }
            catch ( IOException ioe ) {
                ioe.printStackTrace();
                _wrote = -1;
            }
        }

        public long wrote() {
            return _wrote;
        }

        long _wrote;

        final InputStream _in;
        final OutputStream _out;
    }

    public static String readStringFully( InputStream in )
        throws IOException {

        ByteArrayOutputStream bout = new ByteArrayOutputStream();
        pipe( in , bout );
        return new String( bout.toByteArray() , "UTF8" );

    }

    public static Map<String,Object> readPythonSettings( File file )
        throws IOException {

        String all = readStringFully( new FileInputStream( file ) );

        Map<String,Object> map = new TreeMap<String,Object>();

        for ( String line : all.split( "\n" ) ) {
            line = line.trim();
            if ( line.length() == 0 )
                continue;

            String[] pcs = line.split( "=" );
            if ( pcs.length != 2 )
                continue;

            String name = pcs[0].trim();
            String value = pcs[1].trim();

            if ( value.startsWith( "\"" ) ) {
                map.put( name , value.substring( 1 , value.length() - 1 ) );
            }
            else {
                map.put( name , Long.parseLong( value ) );
            }

        }

        return map;
    }

    public static String[] runCommand( String cmd , File dir )
        throws IOException {

        Process p = Runtime.getRuntime().exec( cmd.split( " +" ) , new String[]{} , dir );
        String[] results = new String[]{ IOUtil.readStringFully( p.getInputStream() ) , IOUtil.readStringFully( p.getErrorStream() ) };
        try {
            if ( p.waitFor() != 0 )
                throw new RuntimeException( "command failed [" + cmd + "]\n" + results[0] + "\n" + results[1] );
        }
        catch ( InterruptedException ie ) {
            throw new RuntimeException( "uh oh" );
        }
        return results;
    }


    public static void download( String http , File localDir )
        throws IOException {

        File f = localDir;
        f.mkdirs();

        f = new File( f.toString() + File.separator + urlFileName( http ) );

        System.out.println( "downloading\n\t" + http + "\n\t" + f );

        if ( f.exists() ) {
            System.out.println( "\t already exists" );
            return;
        }

        URL url = new URL( http );

        InputStream in = url.openConnection().getInputStream();
        OutputStream out = new FileOutputStream( f );

        pipe( in , out );

        out.close();
        in.close();

    }

    public static void main( String[] args )
        throws Exception {

        byte[] data = new byte[]{ 'e' , 'r' , 'h' , 0 };
        System.out.write( data );
        System.out.println( "yo" );

    }

}
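The name=value parsing done by IOUtil.readPythonSettings above (quoted values become strings, bare values become integers, malformed lines are skipped) is easy to mirror in Python for comparison. This is only an illustrative sketch, not part of the tree:

    def read_python_settings(path):
        # Mirrors IOUtil.readPythonSettings from the Java helper above.
        settings = {}
        for line in open(path):
            line = line.strip()
            if not line or line.count('=') != 1:
                continue
            name, value = [s.strip() for s in line.split('=')]
            if value.startswith('"'):
                settings[name] = value[1:-1]       # strip surrounding quotes
            else:
                settings[name] = long(value)       # Python 2, like these scripts
        return settings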
@@ -1,2 +0,0 @@
Manifest-Version: 1.0
Main-Class: emr
@@ -1,380 +0,0 @@
// emr.java

import java.io.*;
import java.util.*;
import java.net.*;

import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.fs.*;


public class emr {

    static class MongoSuite {
        String mongo;
        String code;
        String workingDir;

        String suite;

        void copy( MongoSuite c ) {
            mongo = c.mongo;
            code = c.code;
            workingDir = c.workingDir;

            suite = c.suite;
        }

        void downloadTo( File localDir )
            throws IOException {
            IOUtil.download( mongo , localDir );
            IOUtil.download( code , localDir );
        }

        boolean runTest()
            throws IOException {

            // mkdir
            File dir = new File( workingDir , suite );
            dir.mkdirs();

            // download
            System.out.println( "going to download" );
            downloadTo( dir );

            // explode
            System.out.println( "going to explode" );
            IOUtil.runCommand( "tar zxvf " + IOUtil.urlFileName( code ) , dir );
            String[] res = IOUtil.runCommand( "tar zxvf " + IOUtil.urlFileName( mongo ) , dir );
            for ( String x : res[0].split( "\n" ) ) {
                if ( x.indexOf( "/bin/" ) < 0 )
                    continue;
                File f = new File( dir.toString() , x );
                if ( ! f.renameTo( new File( dir , IOUtil.urlFileName( x ) ) ) )
                    throw new RuntimeException( "rename failed" );
            }

            List<String> cmd = new ArrayList<String>();
            cmd.add( "/usr/bin/python" );
            cmd.add( "buildscripts/smoke.py" );

            File log_config = new File( dir , "log_config.py" );
            System.out.println( "log_config: " + log_config.exists() );
            if ( log_config.exists() ) {

                java.util.Map<String,Object> properties = IOUtil.readPythonSettings( log_config );

                cmd.add( "--buildlogger-builder" );
                cmd.add( properties.get( "name" ).toString() );

                cmd.add( "--buildlogger-buildnum" );
                cmd.add( properties.get( "number" ).toString() );

                cmd.add( "--buildlogger-credentials" );
                cmd.add( "log_config.py" );

                cmd.add( "--buildlogger-phase" );
                {
                    int idx = suite.lastIndexOf( "/" );
                    if ( idx < 0 )
                        cmd.add( suite );
                    else
                        cmd.add( suite.substring( 0 , idx ) );
                }

            }

            cmd.add( suite );

            System.out.println( cmd );

            Process p = Runtime.getRuntime().exec( cmd.toArray( new String[cmd.size()] ) , new String[]{} , dir );

            List<Thread> threads = new ArrayList<Thread>();
            threads.add( new IOUtil.PipingThread( p.getInputStream() , System.out ) );
            threads.add( new IOUtil.PipingThread( p.getErrorStream() , System.out ) );

            for ( Thread t : threads )
                t.start();

            try {
                for ( Thread t : threads ) {
                    t.join();
                }
                int rc = p.waitFor();
                return rc == 0;
            }
            catch ( InterruptedException ie ) {
                ie.printStackTrace();
                throw new RuntimeException( "sad" , ie );
            }

        }

        public void readFields( DataInput in )
            throws IOException {
            mongo = in.readUTF();
            code = in.readUTF();
            workingDir = in.readUTF();

            suite = in.readUTF();
        }

        public void write( final DataOutput out )
            throws IOException {
            out.writeUTF( mongo );
            out.writeUTF( code );
            out.writeUTF( workingDir );

            out.writeUTF( suite );
        }

        public String toString() {
            return "mongo: " + mongo + " code: " + code + " suite: " + suite + " workingDir: " + workingDir;
        }
    }

    public static class Map implements Mapper<Text, MongoSuite, Text, IntWritable> {

        public void map( Text key, MongoSuite value, OutputCollector<Text,IntWritable> output, Reporter reporter )
            throws IOException {

            FileLock lock = new FileLock( "mapper" );
            try {
                lock.lock();

                System.out.println( "key: " + key );
                System.out.println( "value: " + value );

                long start = System.currentTimeMillis();
                boolean passed = value.runTest();
                long end = System.currentTimeMillis();

                output.collect( new Text( passed ? "passed" : "failed" ) , new IntWritable( 1 ) );
                output.collect( new Text( key.toString() + "-time-seconds" ) , new IntWritable( (int)((end-start)/(1000)) ) );
                output.collect( new Text( key.toString() + "-passed" ) , new IntWritable( passed ? 1 : 0 ) );

                String ip = IOUtil.readStringFully( new URL( "http://myip.10gen.com/" ).openConnection().getInputStream() );
                ip = ip.substring( ip.indexOf( ":" ) + 1 ).trim();
                output.collect( new Text( ip ) , new IntWritable(1) );
            }
            catch ( RuntimeException re ) {
                re.printStackTrace();
                throw re;
            }
            catch ( IOException ioe ) {
                ioe.printStackTrace();
                throw ioe;
            }
            finally {
                lock.unlock();
            }

        }

        public void configure(JobConf job) {}
        public void close(){}
    }

    public static class Reduce implements Reducer<Text, IntWritable, Text, IntWritable> {

        public void reduce( Text key, Iterator<IntWritable> values, OutputCollector<Text,IntWritable> output , Reporter reporter )
            throws IOException {

            int sum = 0;
            while ( values.hasNext() ) {
                sum += values.next().get();
            }
            output.collect( key , new IntWritable( sum ) );
        }

        public void configure(JobConf job) {}
        public void close(){}
    }

    public static class MySplit implements InputSplit , Writable {

        public MySplit(){
        }

        MySplit( MongoSuite config , int length ) {
            _config = config;
            _length = length;
        }

        public long getLength() {
            return _length;
        }

        public String[] getLocations() {
            return new String[0];
        }

        public void readFields( DataInput in )
            throws IOException {
            _config = new MongoSuite();
            _config.readFields( in );
            _length = in.readInt();
        }

        public void write( final DataOutput out )
            throws IOException {
            _config.write( out );
            out.writeInt( _length );
        }

        MongoSuite _config;
        int _length;
    }

    public static class InputMagic implements InputFormat<Text,MongoSuite> {

        public RecordReader<Text,MongoSuite> getRecordReader( InputSplit split, JobConf job , Reporter reporter ){
            final MySplit s = (MySplit)split;
            return new RecordReader<Text,MongoSuite>() {

                public void close(){}

                public Text createKey() {
                    return new Text();
                }

                public MongoSuite createValue() {
                    return new MongoSuite();
                }

                public long getPos() {
                    return _seen ? 1 : 0;
                }

                public float getProgress() {
                    return getPos();
                }

                public boolean next( Text key , MongoSuite value ) {
                    key.set( s._config.suite );
                    value.copy( s._config );

                    boolean x = _seen;
                    _seen = true;
                    return !x;
                }

                boolean _seen = false;
            };
        }

        public InputSplit[] getSplits( JobConf job , int numSplits ){
            String[] pcs = job.get( "suites" ).split(",");
            InputSplit[] splits = new InputSplit[pcs.length];
            for ( int i=0; i<splits.length; i++ ) {
                MongoSuite c = new MongoSuite();
                c.suite = pcs[i];

                c.mongo = job.get( "mongo" );
                c.code = job.get( "code" );
                c.workingDir = job.get( "workingDir" );

                splits[i] = new MySplit( c , 100 /* XXX */);
            }
            return splits;
        }

        public void validateInput(JobConf job){}

    }

    /**
     * args
     *   mongo tgz
     *   code tgz
     *   output path
     *   tests to run ?
     */
    public static void main( String[] args ) throws Exception{

        JobConf conf = new JobConf();
        conf.setJarByClass(emr.class);

        String workingDir = "/data/db/emr/";

        // parse args

        int pos = 0;
        for ( ; pos < args.length; pos++ ) {
            if ( ! args[pos].startsWith( "--" ) )
                break;

            String arg = args[pos].substring(2);
            if ( arg.equals( "workingDir" ) ) {
                workingDir = args[++pos];
            }
            else {
                System.err.println( "unknown arg: " + arg );
                throw new RuntimeException( "unknown arg: " + arg );
            }
        }

        String mongo = args[pos++];
        String code = args[pos++];
        String output = args[pos++];

        String suites = "";
        for ( ; pos < args.length; pos++ ) {
            if ( suites.length() > 0 )
                suites += ",";
            suites += args[pos];
        }

        if ( suites.length() == 0 )
            throw new RuntimeException( "no suites" );

        System.out.println( "workingDir:\t" + workingDir );
        System.out.println( "mongo:\t" + mongo );
        System.out.println( "code:\t " + code );
        System.out.println( "output\t: " + output );
        System.out.println( "suites\t: " + suites );

        if ( false ) {
            MongoSuite s = new MongoSuite();
            s.mongo = mongo;
            s.code = code;
            s.workingDir = workingDir;
            s.suite = suites;
            s.runTest();
            return;
        }

        // main hadoop setup
        conf.set( "mongo" , mongo );
        conf.set( "code" , code );
        conf.set( "workingDir" , workingDir );
        conf.set( "suites" , suites );

        conf.set( "mapred.map.tasks" , "1" );
        conf.setLong( "mapred.task.timeout" , 4 * 3600 * 1000 /* 4 hours */);

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);

        conf.setMapperClass(Map.class);
        conf.setReducerClass(Reduce.class);

        conf.setInputFormat(InputMagic.class);
        conf.setOutputFormat(TextOutputFormat.class);

        FileOutputFormat.setOutputPath(conf, new Path(output) );

        // actually run

        JobClient.runJob( conf );
    }
}
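The split logic in InputMagic.getSplits above amounts to one map task per comma-separated suite name, each carrying the same mongo/code/workingDir job properties. In Python terms (purely illustrative; the property values are made up):

    # One Hadoop split per suite name in the "suites" job property.
    job = {"suites": "jstests,disk,tool",          # hypothetical job properties
           "mongo": "mongodb-linux-x86_64.tgz",
           "code": "test-code-emr.tgz",
           "workingDir": "/data/db/emr/"}

    splits = [dict(suite=s, mongo=job["mongo"], code=job["code"],
                   workingDir=job["workingDir"])
              for s in job["suites"].split(",")]
    print(len(splits))   # 3 -> three map tasks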
@@ -1,385 +0,0 @@

import os
import sys
import shutil
import datetime
import time
import subprocess
import urllib
import urllib2
import json
import pprint

import boto
import simples3

import pymongo

def findSettingsSetup():
    sys.path.append( "./" )
    sys.path.append( "../" )
    sys.path.append( "../../" )
    sys.path.append( "../../../" )

findSettingsSetup()
import settings
import buildscripts.utils as utils
import buildscripts.smoke as smoke

bucket = simples3.S3Bucket( settings.emr_bucket , settings.emr_id , settings.emr_key )

def _get_status():

    def gh( cmds ):
        txt = ""
        for cmd in cmds:
            res = utils.execsys( "git " + cmd )
            txt = txt + res[0] + res[1]
        return utils.md5string( txt )

    return "%s-%s" % ( utils.execsys( "git describe" )[0].strip(), gh( [ "diff" , "status" ] ) )

def _get_most_recent_tgz( prefix ):
    # this is icky, but works for now
    all = []
    for x in os.listdir( "." ):
        if not x.startswith( prefix ) or not x.endswith( ".tgz" ):
            continue
        all.append( ( x , os.stat(x).st_mtime ) )

    if len(all) == 0:
        raise Exception( "can't find file with prefix: " + prefix )

    all.sort( lambda x,y: int(y[1] - x[1]) )

    return all[0][0]

def get_build_info():
    return ( os.environ.get('MONGO_BUILDER_NAME') , os.environ.get('MONGO_BUILD_NUMBER') )

def make_tarball():

    m = _get_most_recent_tgz( "mongodb-" )

    c = "test-code-emr.tgz"
    tar = "tar zcf %s src jstests buildscripts" % c

    log_config = "log_config.py"
    if os.path.exists( log_config ):
        os.unlink( log_config )

    credentials = do_credentials()
    if credentials:

        builder , buildnum = get_build_info()

        if builder and buildnum:

            file = open( log_config , "wb" )
            file.write( 'username="%s"\npassword="%s"\n' % credentials )
            file.write( 'name="%s"\nnumber=%s\n'% ( builder , buildnum ) )
            file.close()

            tar = tar + " " + log_config

    utils.execsys( tar )
    return ( m , c )

def _put_ine( bucket , local , remote ):
    print( "going to put\n\t%s\n\thttp://%s.s3.amazonaws.com/%s" % ( local , settings.emr_bucket , remote ) )

    for x in bucket.listdir( prefix=remote ):
        print( "\talready existed" )
        return remote

    bucket.put( remote , open( local , "rb" ).read() , acl="public-read" )
    return remote

def build_jar():
    root = "build/emrjar"
    src = "buildscripts/emr"

    if os.path.exists( root ):
        shutil.rmtree( root )
    os.makedirs( root )

    for x in os.listdir( src ):
        if not x.endswith( ".java" ):
            continue
        shutil.copyfile( src + "/" + x , root + "/" + x )
    shutil.copyfile( src + "/MANIFEST.MF" , root + "/MANIFEST.FM" )

    classpath = os.listdir( src + "/lib" )
    for x in classpath:
        shutil.copyfile( src + "/lib/" + x , root + "/" + x )
    classpath.append( "." )
    classpath = ":".join(classpath)

    for x in os.listdir( root ):
        if x.endswith( ".java" ):
            if subprocess.call( [ "javac" , "-cp" , classpath , x ] , cwd=root) != 0:
                raise Exception( "compile failed" )

    args = [ "jar" , "-cfm" , "emr.jar" , "MANIFEST.FM" ]
    for x in os.listdir( root ):
        if x.endswith( ".class" ):
            args.append( x )
    subprocess.call( args , cwd=root )

    shutil.copyfile( root + "/emr.jar" , "emr.jar" )

    return "emr.jar"

def push():
    mongo , test_code = make_tarball()
    print( mongo )
    print( test_code )

    root = "emr/%s/%s" % ( datetime.date.today().strftime("%Y-%m-%d") , os.uname()[0].lower() )

    def make_long_name(local,hash):
        pcs = local.rpartition( "." )
        h = _get_status()
        if hash:
            h = utils.md5sum( local )
        return "%s/%s-%s.%s" % ( root , pcs[0] , h , pcs[2] )

    mongo = _put_ine( bucket , mongo , make_long_name( mongo , False ) )
    test_code = _put_ine( bucket , test_code , make_long_name( test_code , True ) )

    jar = build_jar()
    jar = _put_ine( bucket , jar , make_long_name( jar , False ) )

    setup = "buildscripts/emr/emrnodesetup.sh"
    setup = _put_ine( bucket , setup , make_long_name( setup , True ) )

    return mongo , test_code , jar , setup

def run_tests( things , tests ):
    if len(tests) == 0:
        raise Exception( "no tests" )
    oldNum = len(tests)
    tests = fix_suites( tests )
    print( "tests expanded from %d to %d" % ( oldNum , len(tests) ) )

    print( "things:%s\ntests:%s\n" % ( things , tests ) )

    emr = boto.connect_emr( settings.emr_id , settings.emr_key )

    def http(path):
        return "http://%s.s3.amazonaws.com/%s" % ( settings.emr_bucket , path )

    run_s3_path = "emr/%s/%s/%s/" % ( os.getenv( "USER" ) ,
                                      os.getenv( "HOST" ) ,
                                      datetime.datetime.today().strftime( "%Y%m%d-%H%M" ) )

    run_s3_root = "s3n://%s/%s/" % ( settings.emr_bucket , run_s3_path )

    out = run_s3_root + "out"
    logs = run_s3_root + "logs"

    jar="s3n://%s/%s" % ( settings.emr_bucket , things[2] )
    step_args=[ http(things[0]) , http(things[1]) , out , ",".join(tests) ]

    step = boto.emr.step.JarStep( "emr main" , jar=jar,step_args=step_args )
    print( "jar:%s\nargs:%s" % ( jar , step_args ) )

    setup = boto.emr.BootstrapAction( "setup" , "s3n://%s/%s" % ( settings.emr_bucket , things[3] ) , [] )

    jobid = emr.run_jobflow( name = "Mongo EMR for %s from %s" % ( os.getenv( "USER" ) , os.getenv( "HOST" ) ) ,
                             ec2_keyname = "emr1" ,
                             slave_instance_type = "m1.large" ,
                             ami_version = "latest" ,
                             num_instances=5 ,
                             log_uri = logs ,
                             bootstrap_actions = [ setup ] ,
                             steps = [ step ] )

    print( "%s jobid: %s" % ( datetime.datetime.today() , jobid ) )

    while ( True ):
        flow = emr.describe_jobflow( jobid )
        print( "%s status: %s" % ( datetime.datetime.today() , flow.state ) )
        if flow.state == "COMPLETED" or flow.state == "FAILED":
            break
        time.sleep(30)

    syncdir = "build/emrout/" + jobid + "/"
    sync_s3( run_s3_path , syncdir )

    final_out = "build/emrout/" + jobid + "/"

    print("output in: " + final_out )
    do_output( final_out )

def sync_s3( remote_dir , local_dir ):
    for x in bucket.listdir( remote_dir ):
        out = local_dir + "/" + x[0]

        if os.path.exists( out ) and x[2].find( utils.md5sum( out ) ) >= 0:
            continue

        dir = out.rpartition( "/" )[0]
        if not os.path.exists( dir ):
            os.makedirs( dir )

        thing = bucket.get( x[0] )
        open( out , "wb" ).write( thing.read() )

def fix_suites( suites ):
    fixed = []
    for name,x in smoke.expand_suites( suites , False ):
        idx = name.find( "/jstests" )
        if idx >= 0:
            name = name[idx+1:]
        fixed.append( name )
    return fixed

def do_credentials():
    root = "buildbot.tac"

    while len(root) < 40 :
        if os.path.exists( root ):
            break
        root = "../" + root

    if not os.path.exists( root ):
        return None

    credentials = {}
    execfile(root, credentials, credentials)

    if "slavename" not in credentials:
        return None

    if "passwd" not in credentials:
        return None

    return ( credentials["slavename"] , credentials["passwd"] )


def do_output( dir ):

    def go_down( start ):
        lst = os.listdir(dir)
        if len(lst) != 1:
            raise Exception( "sad: " + start )
        return start + "/" + lst[0]

    while "out" not in os.listdir( dir ):
        dir = go_down( dir )

    dir = dir + "/out"

    pieces = os.listdir(dir)
    pieces.sort()

    passed = []
    failed = []
    times = {}

    for x in pieces:
        if not x.startswith( "part" ):
            continue
        full = dir + "/" + x

        for line in open( full , "rb" ):
            if line.find( "-passed" ) >= 0:
                passed.append( line.partition( "-passed" )[0] )
                continue

            if line.find( "-failed" ) >= 0:
                failed.append( line.partition( "-failed" )[0] )
                continue

            if line.find( "-time-seconds" ) >= 0:
                p = line.partition( "-time-seconds" )
                times[p[0]] = p[2].strip()
                continue

            print( "\t" + line.strip() )

    def print_list(name,lst):
        print( name )
        for x in lst:
            print( "\t%s\t%s" % ( x , times[x] ) )

    print_list( "passed" , passed )
    print_list( "failed" , failed )

    if do_credentials():
        builder , buildnum = get_build_info()
        if builder and buildnum:
            conn = pymongo.Connection( "bbout1.10gen.cc" )
            db = conn.buildlogs
            q = { "builder" : builder , "buildnum" : int(buildnum) }
            doc = db.builds.find_one( q )

            if doc:
                print( "\nhttp://buildlogs.mongodb.org/build/%s" % doc["_id"] )


if __name__ == "__main__":
    if len(sys.argv) == 1:
        print( "need an arg" )

    elif sys.argv[1] == "tarball":
        make_tarball()
    elif sys.argv[1] == "jar":
        build_jar()
    elif sys.argv[1] == "push":
        print( push() )

    elif sys.argv[1] == "sync":
        sync_s3( sys.argv[2] , sys.argv[3] )

    elif sys.argv[1] == "fix_suites":
        for x in fix_suites( sys.argv[2:] ):
            print(x)

    elif sys.argv[1] == "credentials":
        print( do_credentials() )

    elif sys.argv[1] == "test":
        m , c = make_tarball()
        build_jar()
        cmd = [ "java" , "-cp" , os.environ.get( "CLASSPATH" , "." ) + ":emr.jar" , "emr" ]

        workingDir = "/data/emr/test"
        cmd.append( "--workingDir" )
        cmd.append( workingDir )
        if os.path.exists( workingDir ):
            shutil.rmtree( workingDir )

        cmd.append( "file://" + os.getcwd() + "/" + m )
        cmd.append( "file://" + os.getcwd() + "/" + c )

        out = "/tmp/emrresults"
        cmd.append( out )
        if os.path.exists( out ):
            shutil.rmtree( out )

        cmd.append( "jstests/basic1.js" )

        subprocess.call( cmd )

        for x in os.listdir( out ):
            if x.startswith( "." ):
                continue
            print( x )
            for z in open( out + "/" + x ):
                print( "\t" + z.strip() )

    elif sys.argv[1] == "output":
        do_output( sys.argv[2] )

    elif sys.argv[1] == "full":
        things = push()
        run_tests( things , sys.argv[2:] )

    else:
        things = push()
        run_tests( things , sys.argv[1:] )
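A worked expansion of make_long_name from push() above, with a made-up date and md5 for illustration:

    root = "emr/2012-06-01/linux"           # date/uname-derived prefix
    local = "test-code-emr.tgz"
    h = "d41d8cd98f00b204e9800998ecf8427e"  # hypothetical md5sum of the file
    pcs = local.rpartition(".")
    print("%s/%s-%s.%s" % (root, pcs[0], h, pcs[2]))
    # emr/2012-06-01/linux/test-code-emr-d41d8cd98f00b204e9800998ecf8427e.tgz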
@@ -1,7 +0,0 @@
#!/bin/sh

sudo mkdir /mnt/data
sudo ln -s /mnt/data /data
sudo chown hadoop /mnt/data

sudo easy_install pymongo
@@ -6,7 +6,7 @@ import re
import utils


assertNames = [ "uassert" , "massert", "fassert", "fassertFailed" ]
assertNames = [ "uassert" , "massert" ]

def assignErrorCodes():
    cur = 10000

@@ -32,15 +32,10 @@ def assignErrorCodes():
codes = []

def readErrorCodes( callback, replaceZero = False ):

    quick = [ "assert" , "Exception"]

    ps = [ re.compile( "(([umsgf]asser(t|ted))) *\(( *)(\d+)" ) ,
           re.compile( "((User|Msg|MsgAssertion)Exceptio(n))\(( *)(\d+)" ),
           re.compile( "((fassertFailed)()) *\(( *)(\d+)" )
    ps = [ re.compile( "(([umsg]asser(t|ted))) *\(( *)(\d+)" ) ,
           re.compile( "((User|Msg|MsgAssertion)Exceptio(n))\(( *)(\d+)" ) ,
           re.compile( "(((verify))) *\(( *)(\d+)" )
         ]

    bad = [ re.compile( "\sassert *\(" ) ]

    for x in utils.getAllSourceFiles():

@@ -50,46 +45,30 @@ def readErrorCodes( callback, replaceZero = False ):
        lineNum = 1

        for line in open( x ):

            found = False
            for zz in quick:
                if line.find( zz ) >= 0:
                    found = True
                    break

            if found:

                for p in ps:

            if x.find( "src/mongo/" ) >= 0:
                for b in bad:
                    if len(b.findall( line )) > 0:
                        print( x )
                        print( line )
                        raise Exception( "you can't use a bare assert" )

            for p in ps:

                def repl( m ):
                    m = m.groups()

                    start = m[0]
                    spaces = m[3]
                    code = m[4]
                    if code == '0' and replaceZero :
                        code = getNextCode( lastCodes )
                        lastCodes.append( code )
                        code = str( code )
                        needReplace[0] = True

                        print( "Adding code " + code + " to line " + x + ":" + str( lineNum ) )

                    else :
                        codes.append( ( x , lineNum , line , code ) )
                        callback( x , lineNum , line , code )

                    return start + "(" + spaces + code

                line = re.sub( p, repl, line )
                # end if ps loop
            def repl( m ):
                m = m.groups()

                start = m[0]
                spaces = m[3]
                code = m[4]
                if code == '0' and replaceZero :
                    code = getNextCode( lastCodes )
                    lastCodes.append( code )
                    code = str( code )
                    needReplace[0] = True

                    print( "Adding code " + code + " to line " + x + ":" + str( lineNum ) )

                else :
                    codes.append( ( x , lineNum , line , code ) )
                    callback( x , lineNum , line , code )

                return start + "(" + spaces + code

            line = re.sub( p, repl, line )

            if replaceZero : lines.append( line )
            lineNum = lineNum + 1

@@ -99,7 +78,6 @@ def readErrorCodes( callback, replaceZero = False ):
            of = open( x + ".tmp", 'w' )
            of.write( "".join( lines ) )
            of.close()
            os.remove(x)
            os.rename( x + ".tmp", x )
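To see what the patterns above actually capture, here is the first one run against a sample assertion line (the sample line is made up; output shown as a comment):

    import re

    p = re.compile( "(([umsg]asser(t|ted))) *\(( *)(\d+)" )
    m = p.search( 'uassert( 10098 , "bad value" , x > 0 );' )
    print( m.groups() )
    # ('uassert', 'uassert', 't', ' ', '10098')
    # groups 0, 3 and 4 are the start/spaces/code values that repl() above uses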
@@ -1,202 +0,0 @@
#!/bin/sh -e
#
# Copyright (c) 2005-2011 The OpenSSL Project.
#
# Depending on output file name, the script either embeds fingerprint
# into libcrypto.so or static application. "Static" refers to static
# libcrypto.a, not [necessarily] application per se.
#
# Even though this script is called fipsld, it expects C compiler
# command line syntax and $FIPSLD_CC or $CC environment variable set
# and can even be used to compile source files.

#set -x

# Note: edit this to point to your copy of g++ if this is not correct.
CC="/usr/bin/g++"

# Initially -c wasn't intended to be interpreted here, but it might
# make life easier for those who want to build FIPS-ified applications
# with minimal [if any] modifications to their Makefiles...
( while [ "x$1" != "x" -a "x$1" != "x-c" -a "x$1" != "x-E" ]; do shift; done;
  [ $# -ge 1 ]
) && exec ${CC} "$@"

TARGET=`(while [ "x$1" != "x" -a "x$1" != "x-o" ]; do shift; done; echo $2)`

# If using an auto-tooled (autoconf/automake/libtool) project,
# configure will fail when testing the compiler or even performing
# simple checks. Pass-through to compiler directly if application
# is not being linked with libcrypto, allowing auto-tooled applications
# to utilize fipsld (e.g. CC=/usr/local/ssl/bin/fipsld FIPSLD_CC=gcc
# ./configure && make). But keep in mind[!] that if certified code
# resides in a shared library, then fipsld *may not* be used and
# end-developer should not modify application configuration and build
# procedures. This is because in-core fingerprint and associated
# procedures are already embedded into and executed in shared library
# context.
case `basename "${TARGET}"` in
libcrypto*|libfips*|*.dll) ;;
*)  case "$*" in
    *libcrypto.a*|*-lcrypto*|*fipscanister.o*) ;;
    *) exec ${CC} "$@" ;;
    esac
esac

[ -n "${TARGET}" ] || { echo 'no -o specified'; exit 1; }

# Turn on debugging output?
( while [ "x$1" != "x" -a "x$1" != "x-DDEBUG_FINGERPRINT_PREMAIN" ]; do shift; done;
  [ $# -ge 1 ]
) && set -x

THERE="`echo $0 | sed -e 's|[^/]*$||'`"..

# fipscanister.o can appear in command line
CANISTER_O=`(while [ "x$1" != "x" ]; do case "$1" in *fipscanister.o) echo $1; exit;; esac; shift; done)`
if [ -z "${CANISTER_O}" ]; then
    # If set, FIPSLIBDIR is location of installed validated FIPS module
    if [ -n "${FIPSLIBDIR}" ]; then
        CANISTER_O="${FIPSLIBDIR}/fipscanister.o"
    elif [ -f "${THERE}/fips/fipscanister.o" ]; then
        CANISTER_O="${THERE}/fips/fipscanister.o"
    elif [ -f "${THERE}/lib/fipscanister.o" ]; then
        CANISTER_O="${THERE}/lib/fipscanister.o"
    fi
    CANISTER_O_CMD="${CANISTER_O}"
fi
[ -f ${CANISTER_O} ] || { echo "unable to find ${CANISTER_O}"; exit 1; }

PREMAIN_C=`dirname "${CANISTER_O}"`/fips_premain.c
PREMAIN_O=`dirname "${CANISTER_O}"`/fips_premain.o

HMAC_KEY="etaonrishdlcupfm"

case "`(uname -s) 2>/dev/null`" in
OSF1|IRIX*) _WL_PREMAIN="-Wl,-init,FINGERPRINT_premain" ;;
HP-UX)      _WL_PREMAIN="-Wl,+init,FINGERPRINT_premain" ;;
AIX)        _WL_PREMAIN="-Wl,-binitfini:FINGERPRINT_premain,-bnoobjreorder";;
Darwin)     ( while [ "x$1" != "x" -a "x$1" != "x-dynamiclib" ]; do shift; done;
              [ $# -ge 1 ]
            ) && _WL_PREMAIN="-Wl,-init,_FINGERPRINT_premain" ;;
esac

case "${TARGET}" in
[!/]*) TARGET=./${TARGET} ;;
esac

case `basename "${TARGET}"` in
lib*|*.dll) # must be linking a shared lib...
    # Shared lib creation can be taking place in the source
    # directory only, but fipscanister.o can reside elsewhere...

    if [ -x "${THERE}/fips/fips_standalone_sha1" ]; then
        FINGERTYPE="${THERE}/fips/fips_standalone_sha1"
        PREMAIN_DSO="${THERE}/fips/fips_premain_dso"
    elif [ -x "${THERE}/bin/fips_standalone_sha1" ]; then
        FINGERTYPE="${THERE}/bin/fips_standalone_sha1"
        PREMAIN_DSO="./fips_premain_dso"
    fi

    # verify fips_premain.c against its detached signature...
    ${FINGERTYPE} "${PREMAIN_C}" | sed "s/(.*\//(/" | \
        diff -w "${PREMAIN_C}.sha1" - || \
        { echo "${PREMAIN_C} fingerprint mismatch"; exit 1; }
    # verify fipscanister.o against its detached signature...
    ${FINGERTYPE} "${CANISTER_O}" | sed "s/(.*\//(/" | \
        diff -w "${CANISTER_O}.sha1" - || \
        { echo "${CANISTER_O} fingerprint mismatch"; exit 1; }

    [ -z "${FIPSLD_LIBCRYPTO}" -a -f "${THERE}/libcrypto.a" ] && \
        FIPSLD_LIBCRYPTO="${THERE}/libcrypto.a"

    # Temporarily remove fipscanister.o from libcrypto.a!
    # We are required to use the standalone copy...
    if [ -n "${FIPSLD_LIBCRYPTO}" ]; then
        if ar d "${FIPSLD_LIBCRYPTO}" fipscanister.o; then
            (ranlib "${FIPSLD_LIBCRYPTO}") 2>/dev/null || :
            trap 'ar r "${FIPSLD_LIBCRYPTO}" "${CANISTER_O}";
                  (ranlib "${FIPSLD_LIBCRYPTO}") 2>/dev/null || :;
                  sleep 1;
                  touch -c "${TARGET}"' 0
        fi
    fi

    /bin/rm -f "${TARGET}"
    ${CC} -x c "${PREMAIN_C}" -c -o "${PREMAIN_O}"
    ${CC} ${CANISTER_O_CMD:+"${CANISTER_O_CMD}"} \
        "${PREMAIN_O}" \
        ${_WL_PREMAIN} "$@"

    if [ "x${FIPS_SIG}" != "x" ]; then
        # embed signature
        "${FIPS_SIG}" "${TARGET}"
        [ $? -ne 42 ] && exit $?
    fi

    # generate signature...
    SIG=`"${PREMAIN_DSO}" "${TARGET}"`

    /bin/rm -f "${TARGET}"
    if [ -z "${SIG}" ]; then
        echo "unable to collect signature"; exit 1
    fi

    # recompile with signature...
    ${CC} -x c -DHMAC_SHA1_SIG=\"${SIG}\" "${PREMAIN_C}" -c -o "${PREMAIN_O}"
    ${CC} ${CANISTER_O_CMD:+"${CANISTER_O_CMD}"} \
        -DHMAC_SHA1_SIG=\"${SIG}\" "${PREMAIN_O}" \
        ${_WL_PREMAIN} "$@"
    ;;

*) # must be linking statically...
    # Static linking can be taking place either in the source
    # directory or off the installed binary target destination.
    if [ -x "${THERE}/fips/fips_standalone_sha1" ]; then
        FINGERTYPE="${THERE}/fips/fips_standalone_sha1"
    elif [ -x "${THERE}/bin/fips_standalone_sha1" ]; then
        FINGERTYPE="${THERE}/bin/fips_standalone_sha1"
    else # Installed tree is expected to contain
         # lib/fipscanister.o, lib/fipscanister.o.sha1 and
         # lib/fips_premain.c [not to mention bin/openssl].
        FINGERTYPE="openssl sha1 -hmac ${HMAC_KEY}"
    fi

    # verify fipscanister.o against its detached signature...
    ${FINGERTYPE} "${CANISTER_O}" | sed "s/(.*\//(/" | \
        diff -w "${CANISTER_O}.sha1" - || \
        { echo "${CANISTER_O} fingerprint mismatch"; exit 1; }

    # verify fips_premain.c against its detached signature...
    ${FINGERTYPE} "${PREMAIN_C}" | sed "s/(.*\//(/" | \
        diff -w "${PREMAIN_C}.sha1" - || \
        { echo "${PREMAIN_C} fingerprint mismatch"; exit 1; }

    /bin/rm -f "${TARGET}"
    ${CC} -x c "${PREMAIN_C}" -c -o "${PREMAIN_O}"
    ${CC} ${CANISTER_O_CMD:+"${CANISTER_O_CMD}"} \
        "${PREMAIN_O}" \
        ${_WL_PREMAIN} "$@"

    if [ "x${FIPS_SIG}" != "x" ]; then
        # embed signature
        "${FIPS_SIG}" "${TARGET}"
        [ $? -ne 42 ] && exit $?
    fi

    # generate signature...
    SIG=`"${TARGET}"`

    /bin/rm -f "${TARGET}"
    if [ -z "${SIG}" ]; then
        echo "unable to collect signature"; exit 1
    fi

    # recompile with signature...
    ${CC} -x c -DHMAC_SHA1_SIG=\"${SIG}\" "${PREMAIN_C}" -c -o "${PREMAIN_O}"
    ${CC} ${CANISTER_O_CMD:+"${CANISTER_O_CMD}"} \
        -DHMAC_SHA1_SIG=\"${SIG}\" "${PREMAIN_O}" \
        ${_WL_PREMAIN} "$@"
    ;;
esac
9 buildscripts/hacks_mandriva.py Normal file
@@ -0,0 +1,9 @@

import os
import glob

def insert( env , options ):
    jslibPaths = glob.glob('/usr/include/js-*/')
    if len(jslibPaths) >= 1:
        jslibPath = jslibPaths.pop()
        env.Append( CPPPATH=[ jslibPath ] )
54 buildscripts/hacks_ubuntu.py Normal file
@@ -0,0 +1,54 @@

import os

def insert( env , options ):

    # now that sm is in the source tree, don't need this
    # if not foundxulrunner( env , options ):
    #     if os.path.exists( "usr/include/mozjs/" ):
    #         env.Append( CPPDEFINES=[ "MOZJS" ] )

    return

def foundxulrunner( env , options ):
    best = None

    for x in os.listdir( "/usr/include" ):
        if x.find( "xulrunner" ) != 0:
            continue
        if x == "xulrunner":
            best = x
            break
        best = x


    if best is None:
        print( "warning: using ubuntu without xulrunner-dev. we recommend installing it" )
        return False

    incroot = "/usr/include/" + best + "/"
    libroot = "/usr/lib"
    if options["linux64"] and os.path.exists("/usr/lib64"):
        libroot += "64";
    libroot += "/" + best


    if not os.path.exists( libroot ):
        print( "warning: found xulrunner include but not lib for: " + best )
        return False

    env.Prepend( LIBPATH=[ libroot ] )
    env.Prepend( RPATH=[ libroot ] )

    env.Prepend( CPPPATH=[ incroot + "stable/" ,
                           incroot + "unstable/" ,
                           incroot ] )
    env.Prepend( CPPPATH=[ "/usr/include/nspr/" ] )

    env.Append( CPPDEFINES=[ "XULRUNNER" , "OLDJS" ] )
    if best.find( "1.9.0" ) >= 0 or best.endswith("1.9"):
        if best.endswith( "1.9.1.9" ):
            pass
        else:
            env.Append( CPPDEFINES=[ "XULRUNNER190" ] )
    return True
@@ -1,107 +0,0 @@

import sys
import codecs

import cpplint
import utils


def run_lint( paths, nudgeOn=False ):
    # errors are as of 10/14
    # idea is not to let in any new type of error
    # as we knock one out, we should remove the line
    # note: not all of these are things we want, so please check first

    nudge = [] # things we'd like to turn on soon, so don't make worse
    later = [] # things that are unlikely anytime soon, so meh
    never = [] # things we totally disagree with

    never.append( '-build/header_guard' ) # errors found: 345
    nudge.append( '-build/include' ) # errors found: 924
    nudge.append( '-build/include_order' ) # errors found: 511
    nudge.append( '-build/include_what_you_use' ) # errors found: 986
    nudge.append( '-build/namespaces' ) # errors found: 131
    never.append( '-readability/braces' ) # errors found: 880
    later.append( '-readability/casting' ) # errors found: 748
    nudge.append( '-readability/function' ) # errors found: 49
    later.append( '-readability/streams' ) # errors found: 72
    later.append( '-readability/todo' ) # errors found: 309
    nudge.append( '-runtime/arrays' ) # errors found: 5
    later.append( '-runtime/explicit' ) # errors found: 322
    later.append( '-runtime/int' ) # errors found: 1420
    later.append( '-runtime/printf' ) # errors found: 29
    nudge.append( '-runtime/references' ) # errors found: 1338
    nudge.append( '-runtime/rtti' ) # errors found: 36
    nudge.append( '-runtime/sizeof' ) # errors found: 57
    nudge.append( '-runtime/string' ) # errors found: 6
    nudge.append( '-runtime/threadsafe_fn' ) # errors found: 46
    never.append( '-whitespace/blank_line' ) # errors found: 2080
    never.append( '-whitespace/braces' ) # errors found: 962
    later.append( '-whitespace/comma' ) # errors found: 621
    later.append( '-whitespace/comments' ) # errors found: 2189
    later.append( '-whitespace/end_of_line' ) # errors found: 4340
    later.append( '-whitespace/labels' ) # errors found: 58
    later.append( '-whitespace/line_length' ) # errors found: 14500
    later.append( '-whitespace/newline' ) # errors found: 1520
    nudge.append( '-whitespace/operators' ) # errors found: 2297
    never.append( '-whitespace/parens' ) # errors found: 49058
    nudge.append( '-whitespace/semicolon' ) # errors found: 121
    nudge.append( '-whitespace/tab' ) # errors found: 233

    filters = later + never
    if not nudgeOn:
        filters = filters + nudge


    sourceFiles = []
    for x in paths:
        utils.getAllSourceFiles( sourceFiles, x )


    args = [ "--filter=" + ",".join( filters ) , "--counting=detailed" ] + sourceFiles
    filenames = cpplint.ParseArguments( args )

    def _ourIsTestFilename(fn):
        if fn.find( "dbtests" ) >= 0:
            return True
        if fn.endswith( "_test.cpp" ):
            return True
        return False

    cpplint._IsTestFilename = _ourIsTestFilename

    # Change stderr to write with replacement characters so we don't die
    # if we try to print something containing non-ASCII characters.
    sys.stderr = codecs.StreamReaderWriter(sys.stderr,
                                           codecs.getreader('utf8'),
                                           codecs.getwriter('utf8'),
                                           'replace')

    cpplint._cpplint_state.ResetErrorCounts()
    for filename in filenames:
        cpplint.ProcessFile(filename, cpplint._cpplint_state.verbose_level)
    cpplint._cpplint_state.PrintErrorCounts()

    return cpplint._cpplint_state.error_count == 0


if __name__ == "__main__":
    paths = []
    nudge = False

    for arg in sys.argv[1:]:
        if arg.startswith( "--" ):
            arg = arg[2:]
if arg == "nudge":
|
||||
nudge = True
|
||||
continue
|
||||
else:
|
||||
print( "unknown arg [%s]" % arg )
|
||||
sys.exit(-1)
|
||||
paths.append( arg )
|
||||
|
||||
if len(paths) == 0:
|
||||
paths.append( "src/mongo/" )
|
||||
|
||||
if not run_lint( paths, nudge ):
|
||||
sys.exit(-1)
|
||||
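The deleted lint wrapper above drives cpplint through its `--filter` flag, which takes a comma-separated list of `-category` entries to suppress. A standalone sketch of just the argument assembly, with a toy category list (no cpplint install needed to run it):

def build_cpplint_args(source_files, suppressed):
    """Assemble a cpplint argument vector from suppressed categories."""
    filter_arg = "--filter=" + ",".join("-" + cat for cat in suppressed)
    return [filter_arg, "--counting=detailed"] + source_files

args = build_cpplint_args(
    ["src/mongo/db/example.cpp"],               # hypothetical file
    ["whitespace/parens", "readability/todo"])  # sample suppressions
print(" ".join(args))
# --filter=-whitespace/parens,-readability/todo --counting=detailed src/mongo/db/example.cpp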
@@ -1,116 +0,0 @@
#!/usr/bin/python

'''Helper script for constructing an archive (zip or tar) from a list of files.

The output format (tar, tgz, zip) is determined from the file name, unless the user specifies
--format on the command line.

This script simplifies the specification of filename transformations, so that, e.g.,
src/mongo/foo.cpp and build/linux2/normal/buildinfo.cpp can get put into the same
directory in the archive, perhaps mongodb-2.0.2/src/mongo.

Usage:

make_archive.py -o <output-file> [--format (tar|tgz|zip)] \
    [--transform match1=replacement1 [--transform match2=replacement2 [...]]] \
    <input file 1> [...]

If the input file names start with "@", the file is expected to contain a list of
whitespace-separated file names to include in the archive.  This helps get around the Windows
command line length limit.

Transformations are processed in command-line order and are short-circuiting.  So, if a file matches
match1, it is never compared against match2 or later.  Matches are just python startswith()
comparisons.

For a detailed usage example, see src/SConscript.client or src/mongo/SConscript.
'''

import optparse
import os
import sys

def main(argv):
    opts = parse_options(argv[1:])
    archive = open_archive_for_write(opts.output_filename, opts.archive_format)
    try:
        for input_filename in opts.input_filenames:
            archive.add(input_filename, arcname=get_preferred_filename(input_filename,
                                                                       opts.transformations))
    finally:
        archive.close()

def parse_options(args):
    parser = optparse.OptionParser()
    parser.add_option('-o', dest='output_filename', default=None,
                      help='Name of the archive to output.', metavar='FILE')
    parser.add_option('--format', dest='archive_format', default=None,
                      choices=('zip', 'tar', 'tgz'),
                      help='Format of archive to create.  '
                      'If omitted, use the suffix of the output filename to decide.')
    parser.add_option('--transform', action='append', dest='transformations', default=[])

    (opts, input_filenames) = parser.parse_args(args)
    opts.input_filenames = []

    for input_filename in input_filenames:
        if input_filename.startswith('@'):
            opts.input_filenames.extend(open(input_filename[1:], 'r').read().split())
        else:
            opts.input_filenames.append(input_filename)

    if opts.output_filename is None:
        parser.error('-o switch is required')

    if opts.archive_format is None:
        if opts.output_filename.endswith('.zip'):
            opts.archive_format = 'zip'
        elif opts.output_filename.endswith('tar.gz') or opts.output_filename.endswith('.tgz'):
            opts.archive_format = 'tgz'
        elif opts.output_filename.endswith('.tar'):
            opts.archive_format = 'tar'
        else:
            parser.error('Could not deduce archive format from output filename "%s"' %
                         opts.output_filename)

    try:
        opts.transformations = [
            xform.replace(os.path.altsep or os.path.sep, os.path.sep).split('=', 1)
            for xform in opts.transformations]
    except Exception, e:
        parser.error(e)

    return opts

def open_archive_for_write(filename, archive_format):
    '''Open a tar or zip archive for write, with the given format, and return it.

    The type of archive is determined by the "archive_format" parameter, which should be
    "tar", "tgz" (for gzipped tar) or "zip".
    '''
    if archive_format in ('tar', 'tgz'):
        import tarfile
        mode = 'w'
        if archive_format == 'tgz':
            mode += '|gz'
        return tarfile.open(filename, mode)
    if archive_format == 'zip':
        import zipfile
        # Infuriatingly, ZipFile calls the "add" method "write", but they're otherwise identical,
        # for our purposes.  WrappedZipFile is a minimal adapter class.
        class WrappedZipFile(zipfile.ZipFile):
            def add(self, filename, arcname):
                return self.write(filename, arcname)
        return WrappedZipFile(filename, 'w', zipfile.ZIP_DEFLATED)
    raise ValueError('Unsupported archive format "%s"' % archive_format)

def get_preferred_filename(input_filename, transformations):
    for match, replace in transformations:
        if input_filename.startswith(match):
            return replace + input_filename[len(match):]
    return input_filename

if __name__ == '__main__':
    main(sys.argv)
    sys.exit(0)
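The `--transform` matching above is an ordered prefix rewrite. A quick illustration of the short-circuiting behavior, reusing the same startswith() logic with made-up paths:

def get_preferred_filename(input_filename, transformations):
    # first matching prefix wins; later transforms are never consulted
    for match, replace in transformations:
        if input_filename.startswith(match):
            return replace + input_filename[len(match):]
    return input_filename

transforms = [
    ('build/linux2/normal', 'mongodb-2.0.2/src/mongo'),  # sample mapping
    ('src/mongo',           'mongodb-2.0.2/src/mongo'),
]
print(get_preferred_filename('src/mongo/foo.cpp', transforms))
print(get_preferred_filename('build/linux2/normal/buildinfo.cpp', transforms))
# both land under mongodb-2.0.2/src/mongo/ in the archive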
@@ -1,143 +0,0 @@
"""Utility functions for SCons to discover and configure MongoDB modules.

A MongoDB module is an organized collection of source code and build rules that can be provided at
compile-time to alter or extend the behavior of MongoDB.  The files comprising a single MongoDB
module are arranged in a directory hierarchy, rooted in a directory whose name is by convention the
module name, and containing in that root directory at least two files: a build.py file and a
SConscript file.

MongoDB modules are discovered by a call to the discover_modules() function, whose sole parameter is
the directory which is the immediate parent of all module directories.  The exact directory is
chosen by the SConstruct file, which is the direct consumer of this python module.  The only rule is
that it must be a subdirectory of the src/ directory, to correctly work with the SCons variant
directory system that separates build products from source.

Once discovered, modules are configured by the configure_modules() function, and the build system
integrates their SConscript files into the rest of the build.

MongoDB module build.py files implement a single function, configure(conf, env), which they may use
to configure the supplied "env" object.  The configure functions may add extra LIBDEPS to mongod,
mongos and the mongo shell (TODO: other mongo tools and the C++ client), and through those libraries
alter those programs' behavior.

MongoDB module SConscript files can describe libraries, programs and unit tests, just as other
MongoDB SConscript files do.
"""

__all__ = ('discover_modules', 'configure_modules', 'register_module_test')

import imp
import inspect
import os

def discover_modules(module_root):
    """Scan module_root for subdirectories that look like MongoDB modules.

    Returns a list of imported build.py module objects.
    """
    found_modules = []

    if not os.path.isdir(module_root):
        return found_modules

    for name in os.listdir(module_root):
        root = os.path.join(module_root, name)
        if name.startswith('.') or not os.path.isdir(root):
            continue

        build_py = os.path.join(root, 'build.py')
        module = None

        if os.path.isfile(build_py):
            print "adding module: %s" % name
            fp = open(build_py, "r")
            try:
                module = imp.load_module("module_" + name, fp, build_py,
                                         (".py", "r", imp.PY_SOURCE))
                if getattr(module, "name", None) is None:
                    module.name = name
                found_modules.append(module)
            finally:
                fp.close()

    return found_modules

def configure_modules(modules, conf, env):
    """Run the configure() function in the build.py python module for each module in "modules"
    (as created by discover_modules).

    The configure() function should prepare the Mongo build system for building the module.
    """
    for module in modules:
        name = module.name
        print "configuring module: %s" % name

        root = os.path.dirname(module.__file__)
        module.configure(conf, env)

def get_module_sconscripts(modules):
    sconscripts = []
    for m in modules:
        module_dir_path = __get_src_relative_path(os.path.join(os.path.dirname(m.__file__)))
        sconscripts.append(os.path.join(module_dir_path, 'SConscript'))
    return sconscripts

def __get_src_relative_path(path):
    """Return a path relative to ./src.

    The src directory is important because of its relationship to BUILD_DIR,
    established in the SConstruct file.  For variant directories to work properly
    in SCons, paths relative to the src or BUILD_DIR must often be generated.
    """
    src_dir = os.path.abspath('src')
    path = os.path.abspath(os.path.normpath(path))
    if not path.startswith(src_dir):
        raise ValueError('Path "%s" is not relative to the src directory "%s"' % (path, src_dir))
    result = path[len(src_dir) + 1:]
    return result

def __get_module_path(module_frame_depth):
    """Return the path to the MongoDB module whose build.py is executing "module_frame_depth" frames
    above this function, relative to the "src" directory.
    """
    module_filename = inspect.stack()[module_frame_depth + 1][1]
    return os.path.dirname(__get_src_relative_path(module_filename))

def __get_module_src_path(module_frame_depth):
    """Return the path relative to the SConstruct file of the MongoDB module's source tree.

    module_frame_depth is the number of frames above the current one in which one can find a
    function from the MongoDB module's build.py file.
    """
    return os.path.join('src', __get_module_path(module_frame_depth + 1))

def __get_module_build_path(module_frame_depth):
    """Return the path relative to the SConstruct file of the MongoDB module's build tree.

    module_frame_depth is the number of frames above the current one in which one can find a
    function from the MongoDB module's build.py file.
    """
    return os.path.join('$BUILD_DIR', __get_module_path(module_frame_depth + 1))

def get_current_module_src_path():
    """Return the path relative to the SConstruct file of the current MongoDB module's source tree.

    May only meaningfully be called from within build.py.
    """
    return __get_module_src_path(1)

def get_current_module_build_path():
    """Return the path relative to the SConstruct file of the current MongoDB module's build tree.

    May only meaningfully be called from within build.py.
    """
    return __get_module_build_path(1)

def get_current_module_libdep_name(libdep_rel_path):
    """Return a $BUILD_DIR relative path to a "libdep_rel_path", where "libdep_rel_path"
    is specified relative to the MongoDB module's build.py file.

    May only meaningfully be called from within build.py.
    """
    return os.path.join(__get_module_build_path(1), libdep_rel_path)
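Per the docstring above, each module ships a build.py exposing configure(conf, env). A minimal hypothetical example of such a file (the directory path and the env key used here are made up for illustration; only the configure(conf, env) contract comes from the docstring):

# src/mongo/db/modules/example/build.py -- hypothetical module
name = 'example'   # optional; discover_modules() falls back to the directory name

def configure(conf, env):
    # sketch: extend the supplied env object, e.g. with an extra library
    # dependency; 'EXAMPLE_LIBDEPS' is an assumed, illustrative key
    env.Append(EXAMPLE_LIBDEPS=['example_lib'])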
@@ -376,7 +376,7 @@ def make_deb(distro, arch, spec, srcdir):
    oldcwd=os.getcwd()
    try:
        os.chdir(sdir)
        sysassert(["dpkg-buildpackage", "-a"+distro_arch, "-k Richard Kreuter <richard@10gen.com>"])
        sysassert(["dpkg-buildpackage", "-a"+distro_arch])
    finally:
        os.chdir(oldcwd)
    r=distro.repodir(arch)
@@ -576,7 +576,7 @@ Description: An object/document-oriented database
"""
    s=re.sub("@@PACKAGE_BASENAME@@", "mongodb%s" % spec.suffix(), s)
    conflict_suffixes=["", "-stable", "-unstable", "-nightly", "-10gen", "-10gen-unstable"]
    conflict_suffixes = [suff for suff in conflict_suffixes if suff != spec.suffix()]
    conflict_suffixes.remove(spec.suffix())
    s=re.sub("@@PACKAGE_CONFLICTS@@", ", ".join(["mongodb"+suffix for suffix in conflict_suffixes]), s)
    f=open(path, 'w')
    try:
@@ -686,8 +686,7 @@ binary-arch: build install
#\tdh_installinfo
\tdh_installman
\tdh_link
# Appears to be broken on Ubuntu 11.10...?
#\tdh_strip
\tdh_strip
\tdh_compress
\tdh_fixperms
\tdh_installdeb
@@ -902,10 +901,8 @@ fi
%{_bindir}/mongo
%{_bindir}/mongodump
%{_bindir}/mongoexport
#@@VERSION!=2.1.0@@%{_bindir}/mongofiles
%{_bindir}/mongofiles
%{_bindir}/mongoimport
#@@VERSION>=2.1.0@@%{_bindir}/mongooplog
#@@VERSION>=2.1.0@@%{_bindir}/mongoperf
%{_bindir}/mongorestore
#@@VERSION>1.9@@%{_bindir}/mongotop
%{_bindir}/mongostat
@@ -924,9 +921,6 @@ fi
%{_mandir}/man1/mongostat.1*
# FIXME: uncomment when mongosniff is back in the package
#%{_mandir}/man1/mongosniff.1*
#@@VERSION>2.4.0@@%{_mandir}/man1/mongotop.1*
#@@VERSION>2.4.0@@%{_mandir}/man1/mongoperf.1*
#@@VERSION>2.4.0@@%{_mandir}/man1/mongooplog.1*

%files server
%defattr(-,root,root,-)
@@ -958,9 +952,9 @@ fi
    s=re.sub("@@PACKAGE_REVISION@@", str(int(spec.param("revision"))+1) if spec.param("revision") else "1", s)
    s=re.sub("@@BINARYDIR@@", BINARYDIR, s)
    conflict_suffixes=["", "-10gen", "-10gen-unstable"]
    conflict_suffixes = [suff for suff in conflict_suffixes if suff != spec.suffix()]
    conflict_suffixes.remove(suffix)
    s=re.sub("@@PACKAGE_CONFLICTS@@", ", ".join(["mongo"+_ for _ in conflict_suffixes]), s)
    if suffix.endswith("-10gen"):
        if suffix == "-10gen":
            s=re.sub("@@PACKAGE_PROVIDES@@", "mongo-stable", s)
            s=re.sub("@@PACKAGE_OBSOLETES@@", "mongo-stable", s)
        elif suffix == "-10gen-unstable":
@@ -971,25 +965,9 @@ fi

    lines=[]
    for line in s.split("\n"):
        m = re.search("@@VERSION(>|>=|!=)(\d.*)@@(.*)", line)
        if m:
            op = m.group(1)
            ver = m.group(2)
            fn = m.group(3)
            if op == '>':
                if spec.version_better_than(ver):
                    lines.append(fn)
            elif op == '>=':
                if spec.version() == ver or spec.version_better_than(ver):
                    lines.append(fn)
            elif op == '!=':
                if spec.version() != ver:
                    lines.append(fn)
            else:
                # Since we're inventing our own template system for RPM
                # specfiles here, we oughtn't use template syntax we don't
                # support.
                raise Exception("BUG: probable bug in packager script: %s, %s, %s" % (m.group(1), m.group(2), m.group(3)))
        m = re.search("@@VERSION>(.*)@@(.*)", line)
        if m and spec.version_better_than(m.group(1)):
            lines.append(m.group(2))
        else:
            lines.append(line)
    s="\n".join(lines)
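The @@VERSION...@@ lines in the specfile are a homegrown template gate: a line's payload is kept only if the package version satisfies the comparison. A standalone sketch of that gating, with a simple tuple comparator standing in for the script's spec.version_better_than():

import re

def version_tuple(v):
    # stand-in comparator; the packager script compares via spec.version_better_than()
    return tuple(int(x) for x in re.findall(r'\d+', v))

def gate_line(line, version):
    """Return the payload if the @@VERSION...@@ guard passes, the line itself
    if unguarded, or None if the guard fails."""
    m = re.search(r'@@VERSION(>=|>|!=)([\d.]+)@@(.*)', line)
    if not m:
        return line
    op, ver, payload = m.groups()
    cur, ref = version_tuple(version), version_tuple(ver)
    passed = {'>': cur > ref, '>=': cur >= ref, '!=': cur != ref}[op]
    return payload if passed else None

print(gate_line('#@@VERSION>=2.1.0@@%{_bindir}/mongooplog', '2.2.0'))  # payload kept
print(gate_line('#@@VERSION>=2.1.0@@%{_bindir}/mongooplog', '2.0.6'))  # None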
@@ -16,30 +16,11 @@ import subprocess

def check_dir( bucket , prefix , todel ):

    deleteAll = False

    for ( key , modify , etag , size ) in bucket.listdir( prefix=prefix ):
        if key.find( todel ) < 0:
            continue
        print( key )

        if not deleteAll:

            val = raw_input( "Delete (Y,y,n,N):" ).strip()

            if val == "n":
                print( "skipping this one" )
                continue
            elif val == "N":
                break

            if val == "Y":
                val = "y"
                deleteAll = True

            if val != "y":
                raise Exception( "invalid input :(" )

        time.sleep( 2 )
        bucket.delete( key )

def clean( todel ):
@@ -18,7 +18,7 @@ def check_dir( bucket , prefix ):
    zips = {}
    md5s = {}
    for ( key , modify , etag , size ) in bucket.listdir( prefix=prefix ):
        if key.endswith( ".tgz" ) or key.endswith( ".zip" ) or key.endswith( ".tar.gz" ):
        if key.endswith( ".tgz" ) or key.endswith( ".zip" ):
            zips[key] = etag.replace( '"' , '' )
        elif key.endswith( ".md5" ):
            md5s[key] = True
@@ -40,7 +40,7 @@ def run():

    bucket = simples3.S3Bucket( settings.bucket , settings.id , settings.key )

    for x in [ "osx" , "linux" , "win32" , "sunos5" , "src" ]:
    for x in [ "osx" , "linux" , "win32" , "sunos5" ]:
        check_dir( bucket , x )
@@ -1,181 +0,0 @@
#!/usr/bin/python

import re
import sys
import os
import tempfile
import urllib2
import subprocess
import tarfile
import shutil
import errno
# To ensure it exists on the system
import gzip

#
# Useful script for installing multiple versions of MongoDB on a machine
# Only really tested/works on Linux.
#

class MultiVersionDownloader :

    def __init__(self, install_dir, link_dir, platform):
        self.install_dir = install_dir
        self.link_dir = link_dir
        match = re.compile("(.*)\/(.*)").match(platform)
        self.platform = match.group(1)
        self.arch = match.group(2)
        self.links = self.download_links()

    def download_links(self):
        href = "http://dl.mongodb.org/dl/%s/%s" \
               % (self.platform.lower(), self.arch)

        html = urllib2.urlopen(href).read()

        links = {}
        for line in html.split():
            match = re.compile("http:\/\/downloads\.mongodb\.org\/%s/mongodb-%s-%s-([^\"]*)\.tgz" \
                % (self.platform.lower(), self.platform.lower(), self.arch)).search(line)

            if match == None: continue

            link = match.group(0)
            version = match.group(1)
            links[version] = link

        return links

    def download_version(self, version):

        try:
            os.makedirs(self.install_dir)
        except OSError as exc:
            if exc.errno == errno.EEXIST and os.path.isdir(self.install_dir):
                pass
            else: raise

        urls = []
        for link_version, link_url in self.links.iteritems():
            if link_version.startswith(version):
                # If we have a "-" in our version, exact match only
                if version.find("-") >= 0:
                    if link_version != version: continue
                elif link_version.find("-") >= 0:
                    continue

                urls.append((link_version, link_url))

        if len(urls) == 0:
            raise Exception("Cannot find a link for version %s, versions %s found." \
                % (version, self.links))

        urls.sort()
        full_version = urls[-1][0]
        url = urls[-1][1]

        temp_dir = tempfile.mkdtemp()
        temp_file = tempfile.mktemp(suffix=".tgz")

        data = urllib2.urlopen(url)

        print "Downloading data for version %s (%s)..." % (version, full_version)

        with open(temp_file, 'wb') as f:
            f.write(data.read())
        print "Uncompressing data for version %s (%s)..." % (version, full_version)

        # Can't use the nicer "with" syntax b/c of python 2.6
        tf = tarfile.open(temp_file, 'r:gz')

        try:
            tf.extractall(path=temp_dir)
        except:
            tf.close()
            raise

        tf.close()

        extract_dir = os.listdir(temp_dir)[0]
        temp_install_dir = os.path.join(temp_dir, extract_dir)

        shutil.move(temp_install_dir, self.install_dir)

        shutil.rmtree(temp_dir)
        os.remove(temp_file)

        self.symlink_version(version, os.path.abspath(os.path.join(self.install_dir, extract_dir)))

    def symlink_version(self, version, installed_dir):

        try:
            os.makedirs(self.link_dir)
        except OSError as exc:
            if exc.errno == errno.EEXIST and os.path.isdir(self.link_dir):
                pass
            else: raise

        for executable in os.listdir(os.path.join(installed_dir, "bin")):

            link_name = "%s-%s" % (executable, version)

            os.symlink(os.path.join(installed_dir, "bin", executable),
                       os.path.join(self.link_dir, link_name))

CL_HELP_MESSAGE = \
"""
Downloads and installs particular mongodb versions into an install directory and symlinks the binaries with versions to
another directory.

Usage: install_multiversion_mongodb.sh INSTALL_DIR LINK_DIR PLATFORM_AND_ARCH VERSION1 [VERSION2 VERSION3 ...]

Ex: install_multiversion_mongodb.sh ./install ./link "Linux/x86_64" "2.0.6" "2.0.3-rc0" "2.0" "2.2" "2.3"

If "rc" is included in the version name, we'll use the exact rc, otherwise we'll pull the highest non-rc
version compatible with the version specified.
"""

def parse_cl_args(args):

    def raise_exception(msg):
        print CL_HELP_MESSAGE
        raise Exception(msg)

    if len(args) == 0: raise_exception("Missing INSTALL_DIR")

    install_dir = args[0]

    args = args[1:]
    if len(args) == 0: raise_exception("Missing LINK_DIR")

    link_dir = args[0]

    args = args[1:]
    if len(args) == 0: raise_exception("Missing PLATFORM_AND_ARCH")

    platform = args[0]

    args = args[1:]
    if re.compile(".*\/.*").match(platform) == None:
        raise_exception("PLATFORM_AND_ARCH isn't of the correct format")

    if len(args) == 0: raise_exception("Missing VERSION1")

    versions = args

    return (MultiVersionDownloader(install_dir, link_dir, platform), versions)

def main():

    downloader, versions = parse_cl_args(sys.argv[1:])

    for version in versions:
        downloader.download_version(version)


if __name__ == '__main__':
    main()
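The downloader above scrapes dl.mongodb.org for release links, then picks the highest compatible non-rc build unless the request names an rc explicitly. A small offline sketch of just that selection rule, with made-up version strings:

def pick_version(candidates, requested):
    """Mirror the selection rule: prefix match; '-' in the request demands an
    exact match; otherwise pre-release ('-') candidates are skipped."""
    matches = []
    for v in candidates:
        if not v.startswith(requested):
            continue
        if "-" in requested:
            if v != requested:
                continue
        elif "-" in v:
            continue
        matches.append(v)
    return max(matches) if matches else None

versions = ["2.0.3-rc0", "2.0.3", "2.0.6", "2.2.0"]   # sample data
print(pick_version(versions, "2.0"))        # 2.0.6 -- highest non-rc
print(pick_version(versions, "2.0.3-rc0"))  # exact rc match only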
@@ -33,16 +33,15 @@
# off all mongods on a box, which means you can't run two smoke.py
# jobs on the same host at once. So something's gotta change.

from datetime import datetime
from __future__ import with_statement

import glob
from optparse import OptionParser
import os
import parser
import re
import shutil
import shlex
import socket
import stat
from subprocess import (Popen,
                        PIPE,
                        call)
@@ -50,44 +49,20 @@ import sys
import time

from pymongo import Connection
from pymongo.errors import OperationFailure

import utils

try:
    import cPickle as pickle
except ImportError:
    import pickle

try:
    from hashlib import md5 # new in 2.5
except ImportError:
    from md5 import md5 # deprecated in 2.5

try:
    import json
except:
    try:
        import simplejson as json
    except:
        json = None


# TODO clean this up so we don't need globals...
mongo_repo = os.getcwd() #'./'
failfile = os.path.join(mongo_repo, 'failfile.smoke')
test_path = None
mongod_executable = None
mongod_port = None
shell_executable = None
continue_on_failure = None
file_of_commands_mode = False
start_mongod = True

tests = []
winners = []
losers = {}
fails = [] # like losers but in format of tests

# For replication hash checking
replicated_collections = []
@@ -97,9 +72,6 @@ screwy_in_slave = {}

smoke_db_prefix = ''
small_oplog = False
small_oplog_rs = False

all_test_results = []

# This class just implements the with statement API, for a sneaky
# purpose below.
@@ -109,24 +81,10 @@ class Nothing(object):
    def __exit__(self, type, value, traceback):
        return not isinstance(value, Exception)

def buildlogger(cmd, is_global=False):
    # if the environment variable MONGO_USE_BUILDLOGGER
    # is set to 'true', then wrap the command with a call
    # to buildlogger.py, which sends output to the buildlogger
    # machine; otherwise, return the command as usual.
    if os.environ.get('MONGO_USE_BUILDLOGGER', '').lower().strip() == 'true':
        if is_global:
            return [utils.find_python(), 'buildscripts/buildlogger.py', '-g'] + cmd
        else:
            return [utils.find_python(), 'buildscripts/buildlogger.py'] + cmd
    return cmd


class mongod(object):
    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self.proc = None
        self.auth = False

    def __enter__(self):
        self.start()
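buildlogger() above is a conditional command wrapper keyed off an environment variable. A tiny standalone sketch of the same pattern (the wrapper script path is illustrative):

import os
import sys

def wrap_command(cmd, enabled_var='MONGO_USE_BUILDLOGGER'):
    # prepend a logging wrapper only when the env var is exactly 'true'
    if os.environ.get(enabled_var, '').lower().strip() == 'true':
        return [sys.executable, 'buildscripts/buildlogger.py'] + cmd
    return cmd

print(wrap_command(['./mongod', '--port', '32000']))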
@@ -164,15 +122,6 @@ class mongod(object):
            print >> sys.stderr, "timeout starting mongod"
            return False

    def setup_admin_user(self, port=mongod_port):
        try:
            Connection( "localhost" , int(port) ).admin.add_user("admin","password")
        except OperationFailure, e:
            if e.message == 'need to login':
                pass # SERVER-4225
            else:
                raise e

    def start(self):
        global mongod_port
        global mongod
@@ -190,45 +139,25 @@ class mongod(object):
            self.slave = True
        if os.path.exists(dir_name):
            if 'slave' in self.kwargs:
                argv = [utils.find_python(), "buildscripts/cleanbb.py", '--nokill', dir_name]
                argv = ["python", "buildscripts/cleanbb.py", '--nokill', dir_name]
            else:
                argv = [utils.find_python(), "buildscripts/cleanbb.py", dir_name]
                argv = ["python", "buildscripts/cleanbb.py", dir_name]
            call(argv)
        utils.ensureDir(dir_name)
        argv = [mongod_executable, "--port", str(self.port), "--dbpath", dir_name]
        # This should always be set for tests
        argv += ['--setParameter', 'enableTestCommands=1']
        if self.kwargs.get('small_oplog'):
            argv += ["--master", "--oplogSize", "511"]
        if self.kwargs.get('small_oplog_rs'):
            argv += ["--replSet", "foo", "--oplogSize", "511"]
            argv += ["--master", "--oplogSize", "256"]
        if self.slave:
            argv += ['--slave', '--source', 'localhost:' + str(srcport)]
        if self.kwargs.get('no_journal'):
            argv += ['--nojournal']
        if self.kwargs.get('no_preallocj'):
            argv += ['--nopreallocj']
        if self.kwargs.get('auth'):
            argv += ['--auth']
            authMechanism = self.kwargs.get('authMechanism', 'MONGODB-CR')
            if authMechanism != 'MONGODB-CR':
                argv.append('--setParameter=authenticationMechanisms=' + authMechanism)
            self.auth = True
        if self.kwargs.get('use_ssl'):
            argv += ['--sslOnNormalPorts',
                     '--sslPEMKeyFile', 'jstests/libs/server.pem',
                     '--sslCAFile', 'jstests/libs/ca.pem',
                     '--sslWeakCertificateValidation']

        print "running " + " ".join(argv)
        self.proc = self._start(buildlogger(argv, is_global=True))
        self.proc = Popen(argv)
        if not self.did_mongod_start(self.port):
            raise Exception("Failed to start mongod")

        if self.auth:
            self.setup_admin_user(self.port)

        if self.slave:
            local = Connection(port=self.port, slave_okay=True).local
            synced = False
@@ -237,53 +166,20 @@ class mongod(object):
            for source in local.sources.find(fields=["syncedTo"]):
                synced = synced and "syncedTo" in source and source["syncedTo"]

    def _start(self, argv):
        """In most cases, just call subprocess.Popen(). On windows,
        add the started process to a new Job Object, so that any
        child processes of this process can be killed with a single
        call to TerminateJobObject (see self.stop()).
        """
        proc = Popen(argv)

        if os.sys.platform == "win32":
            # Create a job object with the "kill on job close"
            # flag; this is inherited by child processes (ie
            # the mongod started on our behalf by buildlogger)
            # and lets us terminate the whole tree of processes
            # rather than orphaning the mongod.
            import win32job

            self.job_object = win32job.CreateJobObject(None, '')

            job_info = win32job.QueryInformationJobObject(
                self.job_object, win32job.JobObjectExtendedLimitInformation)
            job_info['BasicLimitInformation']['LimitFlags'] |= win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
            win32job.SetInformationJobObject(
                self.job_object,
                win32job.JobObjectExtendedLimitInformation,
                job_info)

            win32job.AssignProcessToJobObject(self.job_object, proc._handle)

        return proc

    def stop(self):
        if not self.proc:
            print >> sys.stderr, "probable bug: self.proc unset in stop()"
            return
        try:
            if os.sys.platform == "win32":
                import win32job
                win32job.TerminateJobObject(self.job_object, -1)
                import time
                # Windows doesn't seem to kill the process immediately, so give it some time to die
                time.sleep(5)
            else:
                # This function is not available in Python 2.5
                self.proc.terminate()
            # This function is not available in Python 2.5
            self.proc.terminate()
        except AttributeError:
            from os import kill
            kill(self.proc.pid, 15)
            if os.sys.platform == "win32":
                import win32process
                win32process.TerminateProcess(self.proc._handle, -1)
            else:
                from os import kill
                kill(self.proc.pid, 15)
        self.proc.wait()
        sys.stderr.flush()
        sys.stdout.flush()
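The stop() logic above is a portability dance: prefer proc.terminate(), fall back to a raw signal (or TerminateProcess on Windows) when it is missing. A minimal cross-platform sketch of the same fallback, without the pywin32 job-object machinery:

import os
import signal
import subprocess

def stop_process(proc):
    """Terminate `proc`, falling back to a raw signal on old Pythons."""
    try:
        proc.terminate()                   # absent before Python 2.6
    except AttributeError:
        os.kill(proc.pid, signal.SIGTERM)  # POSIX fallback
    proc.wait()

proc = subprocess.Popen(['sleep', '60'])   # any long-running child (POSIX example)
stop_process(proc)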
@@ -345,53 +241,12 @@ def check_db_hashes(master, slave):
            lost_in_master.append(db)


def ternary( b , l="true", r="false" ):
    if b:
        return l
    return r


# Blech.
def skipTest(path):
    basename = os.path.basename(path)
    parentPath = os.path.dirname(path)
    parentDir = os.path.basename(parentPath)
    if small_oplog: # For tests running in parallel
        if basename in ["cursor8.js", "indexh.js", "dropdb.js", "connections_opened.js", "opcounters.js"]:
    if small_oplog:
        if os.path.basename(path) in ["cursor8.js", "indexh.js", "dropdb.js"]:
            return True
    if os.sys.platform == "sunos5":
        if basename == "geo_update_btree.js":
            return True
    if auth or keyFile: # For tests running with auth
        # Skip any tests that run with auth explicitly
        if parentDir == "auth" or "auth" in basename:
            return True
        if parentPath == mongo_repo: # Skip client tests
            return True
        if parentDir == "tool": # SERVER-6368
            return True
        if parentDir == "dur": # SERVER-7317
            return True
        if parentDir == "disk": # SERVER-7356
            return True

        authTestsToSkip = [("sharding", "gle_with_conf_servers.js"), # SERVER-6972
                           ("sharding", "read_pref.js"), # SERVER-6972
                           ("sharding", "read_pref_cmd.js"), # SERVER-6972
                           ("sharding", "read_pref_rs_client.js"), # SERVER-6972
                           ("sharding", "sync_conn_cmd.js"), # SERVER-6327
                           ("sharding", "sync3.js"), # SERVER-6388 for this and those below
                           ("sharding", "sync6.js"),
                           ("sharding", "parallel.js"),
                           ("jstests", "bench_test1.js"),
                           ("jstests", "bench_test2.js"),
                           ("jstests", "bench_test3.js"),
                           ("jstests", "drop2.js") # SERVER-8589
                          ]

        if os.path.join(parentDir,basename) in [ os.path.join(*test) for test in authTestsToSkip ]:
            return True

    return False

def runTest(test):
@@ -404,117 +259,55 @@ def runTest(test):
    if skipTest(path):
        print "skipping " + path
        return
    if file_of_commands_mode:
        # smoke.py was invoked like "--mode files --from-file foo",
        # so don't try to interpret the test path too much
        if os.sys.platform == "win32":
            argv = [path]
        else:
            argv = shlex.split(path)
        path = argv[0]
        # if the command is a python script, use the script name
        if os.path.basename(path) in ('python', 'python.exe'):
            path = argv[1]
    elif ext == ".js":
        argv = [shell_executable, "--port", mongod_port, '--authenticationMechanism', authMechanism]
    if ext == ".js":
        argv = [shell_executable, "--port", mongod_port]
        if not usedb:
            argv += ["--nodb"]
        if small_oplog or small_oplog_rs:
        if small_oplog:
            argv += ["--eval", 'testingReplication = true;']
        if use_ssl:
            argv += ["--ssl",
                     "--sslPEMKeyFile", "jstests/libs/client.pem",
                     "--sslCAFile", "jstests/libs/ca.pem"]
        argv += [path]
    elif ext in ["", ".exe"]:
        # Blech.
        if os.path.basename(path) in ["test", "test.exe", "perftest", "perftest.exe"]:
            argv = [path]
        # more blech
        elif os.path.basename(path) in ['mongos', 'mongos.exe']:
        elif os.path.basename(path) == 'mongos':
            argv = [path, "--test"]
        else:
            argv = [test_path and os.path.abspath(os.path.join(test_path, path)) or path,
                    "--port", mongod_port]
    else:
        raise Bug("fell off in extension case: %s" % path)

    if keyFile:
        f = open(keyFile, 'r')
        keyFileData = re.sub(r'\s', '', f.read()) # Remove all whitespace
        f.close()
        os.chmod(keyFile, stat.S_IRUSR | stat.S_IWUSR)
    else:
        keyFileData = None

    # sys.stdout.write() is more atomic than print, so using it prevents
    # lines being interrupted by, e.g., child processes
    sys.stdout.write(" *******************************************\n")
    sys.stdout.write(" Test : %s ...\n" % os.path.basename(path))
    sys.stdout.flush()

    raise Bug("fell off in extension case: %s" % path)
    sys.stderr.write( "starting test : %s \n" % os.path.basename(path) )
    sys.stderr.flush()
    print " *******************************************"
    print " Test : " + os.path.basename(path) + " ..."
    t1 = time.time()
    # FIXME: we don't handle the case where the subprocess
    # hangs... that's bad.
    if ( argv[0].endswith( 'mongo' ) or argv[0].endswith( 'mongo.exe' ) ) and not '--eval' in argv :
        evalString = 'TestData = new Object();' + \
                     'TestData.testPath = "' + path + '";' + \
                     'TestData.testFile = "' + os.path.basename( path ) + '";' + \
                     'TestData.testName = "' + re.sub( ".js$", "", os.path.basename( path ) ) + '";' + \
                     'TestData.noJournal = ' + ternary( no_journal ) + ";" + \
                     'TestData.noJournalPrealloc = ' + ternary( no_preallocj ) + ";" + \
                     'TestData.auth = ' + ternary( auth ) + ";" + \
                     'TestData.keyFile = ' + ternary( keyFile , '"' + str(keyFile) + '"' , 'null' ) + ";" + \
                     'TestData.keyFileData = ' + ternary( keyFile , '"' + str(keyFileData) + '"' , 'null' ) + ";"
        if os.sys.platform == "win32":
            # double the backslashes in the evalString on windows; this
            # prevents the backslashes from being stripped when
            # the shell (i.e. bash) evaluates this string. yuck.
            evalString = evalString.replace('\\', '\\\\')

        if auth and usedb:
            evalString += 'jsTest.authenticate(db.getMongo());'

        argv = argv + [ '--eval', evalString]
    if argv[0].endswith( 'mongo' ) and not '--eval' in argv :
        argv = argv + [ '--eval', 'TestData = new Object();' +
                        'TestData.testPath = "' + path + '";' +
                        'TestData.testFile = "' + os.path.basename( path ) + '";' +
                        'TestData.testName = "' + re.sub( ".js$", "", os.path.basename( path ) ) + '";' +
                        'TestData.noJournal = ' + ( 'true' if no_journal else 'false' ) + ";" +
                        'TestData.noJournalPrealloc = ' + ( 'true' if no_preallocj else 'false' ) + ";" ]

    if argv[0].endswith( 'test' ) and no_preallocj :
        argv = argv + [ '--nopreallocj' ]

    sys.stdout.write(" Command : %s\n" % ' '.join(argv))
    sys.stdout.write(" Date : %s\n" % datetime.now().ctime())
    sys.stdout.flush()

    os.environ['MONGO_TEST_FILENAME'] = os.path.basename(path)
    t1 = time.time()
    r = call(buildlogger(argv), cwd=test_path)
    print argv
    r = call(argv, cwd=test_path)
    t2 = time.time()
    del os.environ['MONGO_TEST_FILENAME']

    timediff = t2 - t1
    # timediff is in seconds by default
    scale = 1
    suffix = "seconds"
    # if timediff is less than 10 seconds use ms
    if timediff < 10:
        scale = 1000
        suffix = "ms"
    # if timediff is more than 60 seconds use minutes
    elif timediff > 60:
        scale = 1.0 / 60.0
        suffix = "minutes"
    sys.stdout.write(" %10.4f %s\n" % ((timediff) * scale, suffix))
    sys.stdout.flush()

    print " " + str((t2 - t1) * 1000) + "ms"
    if r != 0:
        raise TestExitFailure(path, r)

    if start_mongod:
        try:
            c = Connection(host="127.0.0.1", port=int(mongod_port), ssl=use_ssl)
        except Exception,e:
            print "Exception from pymongo: ", e
            raise TestServerFailure(path)
    try:
        c = Connection( "127.0.0.1" , int(mongod_port) )
    except Exception,e:
        raise TestServerFailure(path)

    print ""
@@ -524,81 +317,18 @@ def run_tests(tests):
    # dbpath, etc., and so long as we shut ours down properly,
    # starting this mongod shouldn't break anything, though.)

    # The reason we want to use "with" is so that we get __exit__ semantics
    # but "with" is only supported on Python 2.5+
    # The reason we use "with" is so that we get __exit__ semantics

    if start_mongod:
        master = mongod(small_oplog_rs=small_oplog_rs,
                        small_oplog=small_oplog,
                        no_journal=no_journal,
                        no_preallocj=no_preallocj,
                        auth=auth,
                        authMechanism=authMechanism,
                        use_ssl=use_ssl).__enter__()
    else:
        master = Nothing()
    try:
        if small_oplog:
            slave = mongod(slave=True).__enter__()
        elif small_oplog_rs:
            slave = mongod(slave=True,
                           small_oplog_rs=small_oplog_rs,
                           small_oplog=small_oplog,
                           no_journal=no_journal,
                           no_preallocj=no_preallocj,
                           auth=auth,
                           authMechanism=authMechanism,
                           use_ssl=use_ssl).__enter__()
            primary = Connection(port=master.port, slave_okay=True)

            primary.admin.command({'replSetInitiate' : {'_id' : 'foo', 'members' : [
                                  {'_id': 0, 'host':'localhost:%s' % master.port},
                                  {'_id': 1, 'host':'localhost:%s' % slave.port,'priority':0}]}})

            ismaster = False
            while not ismaster:
                result = primary.admin.command("ismaster")
                ismaster = result["ismaster"]
                time.sleep(1)
        else:
            slave = Nothing()

        try:
    if small_oplog or small_oplog_rs:
    with mongod(small_oplog=small_oplog,no_journal=no_journal,no_preallocj=no_preallocj) as master:
        with mongod(slave=True) if small_oplog else Nothing() as slave:
            if small_oplog:
                master.wait_for_repl()

            tests_run = 0
            for tests_run, test in enumerate(tests):
                test_result = { "test": test[0], "start": time.time() }
            for test in tests:
                try:
                    fails.append(test)
                    runTest(test)
                    fails.pop()
                    winners.append(test)

                    test_result["passed"] = True
                    test_result["end"] = time.time()
                    all_test_results.append( test_result )

                    if small_oplog or small_oplog_rs:
                        master.wait_for_repl()
                    elif test[1]: # reach inside test and see if "usedb" is true
                        if (tests_run+1) % 20 == 0:
                            # restart mongod every 20 tests, for our 32-bit machines
                            master.__exit__(None, None, None)
                            master = mongod(small_oplog_rs=small_oplog_rs,
                                            small_oplog=small_oplog,
                                            no_journal=no_journal,
                                            no_preallocj=no_preallocj,
                                            auth=auth,
                                            authMechanism=authMechanism,
                                            use_ssl=use_ssl).__enter__()

                except TestFailure, f:
                    test_result["passed"] = False
                    test_result["end"] = time.time()
                    test_result["error"] = str(f)
                    all_test_results.append( test_result )
                    try:
                        print f
                        # Record the failing test and re-raise.
@@ -611,15 +341,12 @@ def run_tests(tests):
                return 1
        if isinstance(slave, mongod):
            check_db_hashes(master, slave)
    finally:
        slave.__exit__(None, None, None)
    finally:
        master.__exit__(None, None, None)

    return 0


def report():
    print "%d tests succeeded" % len(winners)
    print "%d test%s succeeded" % (len(winners), '' if len(winners) == 1 else 's')
    num_missed = len(tests) - (len(winners) + len(losers.keys()))
    if num_missed:
        print "%d tests didn't get run" % num_missed
@@ -641,39 +368,18 @@ at the end of testing:""" % (src, dst)
at the end of testing:"""
        for db in screwy_in_slave.keys():
            print "%s\t %s" % (db, screwy_in_slave[db])
    if (small_oplog or small_oplog_rs) and not (lost_in_master or lost_in_slave or screwy_in_slave):
    if small_oplog and not (lost_in_master or lost_in_slave or screwy_in_slave):
        print "replication ok for %d collections" % (len(replicated_collections))
    if losers or lost_in_slave or lost_in_master or screwy_in_slave:
        raise Exception("Test failures")

# Keys are the suite names (passed on the command line to smoke.py)
# Values are pairs: (filenames, <start mongod before running tests>)
suiteGlobalConfig = {"js": ("[!_]*.js", True),
                     "quota": ("quota/*.js", True),
                     "jsPerf": ("perf/*.js", True),
                     "disk": ("disk/*.js", True),
                     "jsSlowNightly": ("slowNightly/*.js", True),
                     "jsSlowWeekly": ("slowWeekly/*.js", False),
                     "parallel": ("parallel/*.js", True),
                     "clone": ("clone/*.js", False),
                     "repl": ("repl/*.js", False),
                     "replSets": ("replsets/*.js", False),
                     "dur": ("dur/*.js", False),
                     "auth": ("auth/*.js", False),
                     "sharding": ("sharding/*.js", False),
                     "tool": ("tool/*.js", False),
                     "aggregation": ("aggregation/*.js", True),
                     "multiVersion": ("multiVersion/*.js", True),
                     "failPoint": ("fail_point/*.js", False),
                     "ssl": ("ssl/*.js", True)
                     }

def expand_suites(suites,expandUseDB=True):
def expand_suites(suites):
    globstr = None
    tests = []
    for suite in suites:
        if suite == 'all':
            return expand_suites(['test', 'perf', 'client', 'js', 'jsPerf', 'jsSlowNightly', 'jsSlowWeekly', 'clone', 'parallel', 'repl', 'auth', 'sharding', 'tool'],expandUseDB=expandUseDB)
            return expand_suites(['test', 'perf', 'client', 'js', 'jsPerf', 'jsSlowNightly', 'jsSlowWeekly', 'parallel', 'clone', 'parallel', 'repl', 'auth', 'sharding', 'tool'])
        if suite == 'test':
            if os.sys.platform == "win32":
                program = 'test.exe'

@@ -699,31 +405,31 @@ def expand_suites(suites,expandUseDB=True):
                program = 'mongos'
            tests += [(os.path.join(mongo_repo, program), False)]
        elif os.path.exists( suite ):
            usedb = True
            for name in suiteGlobalConfig:
                if suite in glob.glob( "jstests/" + suiteGlobalConfig[name][0] ):
                    usedb = suiteGlobalConfig[name][1]
                    break
            tests += [ ( os.path.join( mongo_repo , suite ) , usedb ) ]
            tests += [ ( os.path.join( mongo_repo , suite ) , True ) ]
        else:
            try:
                globstr, usedb = suiteGlobalConfig[suite]
                globstr, usedb = {"js": ("[!_]*.js", True),
                                  "quota": ("quota/*.js", True),
                                  "jsPerf": ("perf/*.js", True),
                                  "disk": ("disk/*.js", True),
                                  "jsSlowNightly": ("slowNightly/*.js", True),
                                  "jsSlowWeekly": ("slowWeekly/*.js", True),
                                  "parallel": ("parallel/*.js", True),
                                  "clone": ("clone/*.js", False),
                                  "repl": ("repl/*.js", False),
                                  "replSets": ("replsets/*.js", False),
                                  "dur": ("dur/*.js", False),
                                  "auth": ("auth/*.js", False),
                                  "sharding": ("sharding/*.js", False),
                                  "tool": ("tool/*.js", False)}[suite]
            except KeyError:
                raise Exception('unknown test suite %s' % suite)

        if globstr:
            if usedb and not expandUseDB:
                tests += [ (suite,False) ]
            else:
                if globstr.endswith('.js'):
                    loc = 'jstests/'
                else:
                    loc = ''
                globstr = os.path.join(mongo_repo, (os.path.join(loc, globstr)))
                globstr = os.path.normpath(globstr)
                paths = glob.glob(globstr)
                paths.sort()
                tests += [(path, usedb) for path in paths]
            globstr = os.path.join(mongo_repo, (os.path.join(('jstests/' if globstr.endswith('.js') else ''), globstr)))
            paths = glob.glob(globstr)
            paths.sort()
            tests += [(path, usedb) for path in paths]

    return tests
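expand_suites() above maps a suite name to a glob pattern plus a start-mongod flag, then expands the glob to concrete test paths. A compact sketch of that lookup-and-glob step, with a trimmed version of the suite table:

import glob
import os

SUITES = {  # (glob under jstests/, start mongod before running)
    "js":   ("[!_]*.js", True),
    "repl": ("repl/*.js", False),
}

def expand(suite, repo="."):
    globstr, usedb = SUITES[suite]          # KeyError means unknown suite
    pattern = os.path.join(repo, "jstests", globstr)
    return [(path, usedb) for path in sorted(glob.glob(pattern))]

print(expand("repl"))  # empty list unless run from a mongo checkout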
@@ -732,15 +438,44 @@ def add_exe(e):
        e += ".exe"
    return e

def set_globals(options, tests):
    global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog, small_oplog_rs
    global no_journal, no_preallocj, auth, authMechanism, keyFile, smoke_db_prefix, test_path, start_mongod
    global use_ssl
    global file_of_commands_mode
    start_mongod = options.start_mongod
    if hasattr(options, 'use_ssl'):
        use_ssl = options.use_ssl
    # Careful, this can be called multiple times
def main():
    global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog, no_journal, no_preallocj, smoke_db_prefix, test_path
    parser = OptionParser(usage="usage: smoke.py [OPTIONS] ARGS*")
    parser.add_option('--mode', dest='mode', default='suite',
                      help='If "files", ARGS are filenames; if "suite", ARGS are sets of tests (%default)')
    # Some of our tests hard-code pathnames, e.g., to execute, so until
    # that changes we don't have the freedom to run from anyplace.
    # parser.add_option('--mongo-repo', dest='mongo_repo', default=None,
    parser.add_option('--test-path', dest='test_path', default=None,
                      help="Path to the test executables to run, "
                           "currently only used for 'client' (%default)")
    parser.add_option('--mongod', dest='mongod_executable', default=os.path.join(mongo_repo, 'mongod'),
                      help='Path to mongod to run (%default)')
    parser.add_option('--port', dest='mongod_port', default="32000",
                      help='Port the mongod will bind to (%default)')
    parser.add_option('--mongo', dest='shell_executable', default=os.path.join(mongo_repo, 'mongo'),
                      help='Path to mongo, for .js test files (%default)')
    parser.add_option('--continue-on-failure', dest='continue_on_failure',
                      action="store_true", default=False,
                      help='If supplied, continue testing even after a test fails')
    parser.add_option('--from-file', dest='File',
                      help="Run tests/suites named in FILE, one test per line, '-' means stdin")
    parser.add_option('--smoke-db-prefix', dest='smoke_db_prefix', default=smoke_db_prefix,
                      help="Prefix to use for the mongods' dbpaths ('%default')")
    parser.add_option('--small-oplog', dest='small_oplog', default=False,
                      action="store_true",
                      help='Run tests with master/slave replication & use a small oplog')
    parser.add_option('--nojournal', dest='no_journal', default=False,
                      action="store_true",
                      help='Do not turn on journaling in tests')
    parser.add_option('--nopreallocj', dest='no_preallocj', default=False,
                      action="store_true",
                      help='Do not preallocate journal files in tests')
    global tests
    (options, tests) = parser.parse_args()

    print tests

    test_path = options.test_path

    mongod_executable = add_exe(options.mongod_executable)
|
@ -756,250 +491,31 @@ def set_globals(options, tests):
|
|||
continue_on_failure = options.continue_on_failure
|
||||
smoke_db_prefix = options.smoke_db_prefix
|
||||
small_oplog = options.small_oplog
|
||||
if hasattr(options, "small_oplog_rs"):
|
||||
small_oplog_rs = options.small_oplog_rs
|
||||
no_journal = options.no_journal
|
||||
no_preallocj = options.no_preallocj
|
||||
if options.mode == 'suite' and tests == ['client']:
|
||||
# The client suite doesn't work with authentication
|
||||
if options.auth:
|
||||
print "Not running client suite with auth even though --auth was provided"
|
||||
auth = False;
|
||||
keyFile = False;
|
||||
authMechanism = None
|
||||
else:
|
||||
auth = options.auth
|
||||
authMechanism = options.authMechanism
|
||||
keyFile = options.keyFile
|
||||
|
||||
if auth and not keyFile:
|
||||
# if only --auth was given to smoke.py, load the
|
||||
# default keyFile from jstests/libs/authTestsKey
|
||||
keyFile = os.path.join(mongo_repo, 'jstests', 'libs', 'authTestsKey')
|
||||
|
||||
# if smoke.py is running a list of commands read from a
|
||||
# file (or stdin) rather than running a suite of js tests
|
||||
file_of_commands_mode = options.File and options.mode == 'files'
|
||||
|
||||
def file_version():
|
||||
return md5(open(__file__, 'r').read()).hexdigest()
|
||||
|
||||
def clear_failfile():
|
||||
if os.path.exists(failfile):
|
||||
os.remove(failfile)
|
||||
|
||||
def run_old_fails():
|
||||
global tests
|
||||
|
||||
try:
|
||||
f = open(failfile, 'r')
|
||||
state = pickle.load(f)
|
||||
f.close()
|
||||
except Exception:
|
||||
try:
|
||||
f.close()
|
||||
except:
|
||||
pass
|
||||
clear_failfile()
|
||||
return # This counts as passing so we will run all tests
|
||||
|
||||
if ('version' not in state or state['version'] != file_version()):
|
||||
print "warning: old version of failfile.smoke detected. skipping recent fails"
|
||||
clear_failfile()
|
||||
return
|
||||
|
||||
testsAndOptions = state['testsAndOptions']
|
||||
tests = [x[0] for x in testsAndOptions]
|
||||
passed = []
|
||||
try:
|
||||
for (i, (test, options)) in enumerate(testsAndOptions):
|
||||
# SERVER-5102: until we can figure out a better way to manage
|
||||
# dependencies of the --only-old-fails build phase, just skip
|
||||
# tests which we can't safely run at this point
|
||||
path, usedb = test
|
||||
|
||||
if not os.path.exists(path):
|
||||
passed.append(i)
|
||||
winners.append(test)
|
||||
continue
|
||||
|
||||
filename = os.path.basename(path)
|
||||
if filename in ('test', 'test.exe') or filename.endswith('.js'):
|
||||
set_globals(options, [filename])
|
||||
oldWinners = len(winners)
|
||||
run_tests([test])
|
||||
if len(winners) != oldWinners: # can't use return value due to continue_on_failure
|
||||
passed.append(i)
|
||||
finally:
|
||||
for offset, i in enumerate(passed):
|
||||
testsAndOptions.pop(i - offset)
|
||||
|
||||
if testsAndOptions:
|
||||
f = open(failfile, 'w')
|
||||
state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
|
||||
pickle.dump(state, f)
|
||||
else:
|
||||
clear_failfile()
|
||||
|
||||
report() # exits with failure code if there is an error
|
||||
|
||||
def add_to_failfile(tests, options):
|
||||
try:
|
||||
f = open(failfile, 'r')
|
||||
testsAndOptions = pickle.load(f)["testsAndOptions"]
|
||||
except Exception:
|
||||
testsAndOptions = []
|
||||
|
||||
for test in tests:
|
||||
if (test, options) not in testsAndOptions:
|
||||
testsAndOptions.append( (test, options) )
|
||||
|
||||
state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
|
||||
f = open(failfile, 'w')
|
||||
pickle.dump(state, f)
|
||||
|
||||
|
||||
|
||||
def main():
    global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog, no_journal, no_preallocj, auth, keyFile, smoke_db_prefix, test_path
    parser = OptionParser(usage="usage: smoke.py [OPTIONS] ARGS*")
    parser.add_option('--mode', dest='mode', default='suite',
                      help='If "files", ARGS are filenames; if "suite", ARGS are sets of tests (%default)')
    # Some of our tests hard-code pathnames e.g., to execute, so until
    # that changes we don't have the freedom to run from anyplace.
    # parser.add_option('--mongo-repo', dest='mongo_repo', default=None,
    parser.add_option('--test-path', dest='test_path', default=None,
                      help="Path to the test executables to run, "
                      "currently only used for 'client' (%default)")
    parser.add_option('--mongod', dest='mongod_executable', default=os.path.join(mongo_repo, 'mongod'),
                      help='Path to mongod to run (%default)')
    parser.add_option('--port', dest='mongod_port', default="27999",
                      help='Port the mongod will bind to (%default)')
    parser.add_option('--mongo', dest='shell_executable', default=os.path.join(mongo_repo, 'mongo'),
                      help='Path to mongo, for .js test files (%default)')
    parser.add_option('--continue-on-failure', dest='continue_on_failure',
                      action="store_true", default=False,
                      help='If supplied, continue testing even after a test fails')
    parser.add_option('--from-file', dest='File',
                      help="Run tests/suites named in FILE, one test per line, '-' means stdin")
    parser.add_option('--smoke-db-prefix', dest='smoke_db_prefix', default=smoke_db_prefix,
                      help="Prefix to use for the mongods' dbpaths ('%default')")
    parser.add_option('--small-oplog', dest='small_oplog', default=False,
                      action="store_true",
                      help='Run tests with master/slave replication & use a small oplog')
    parser.add_option('--small-oplog-rs', dest='small_oplog_rs', default=False,
                      action="store_true",
                      help='Run tests with replica set replication & use a small oplog')
    parser.add_option('--nojournal', dest='no_journal', default=False,
                      action="store_true",
                      help='Do not turn on journaling in tests')
    parser.add_option('--nopreallocj', dest='no_preallocj', default=False,
                      action="store_true",
                      help='Do not preallocate journal files in tests')
    parser.add_option('--auth', dest='auth', default=False,
                      action="store_true",
                      help='Run standalone mongods in tests with authentication enabled')
    parser.add_option('--authMechanism', dest='authMechanism', default='MONGODB-CR',
                      help='Use the given authentication mechanism, when --auth is used.')
    parser.add_option('--keyFile', dest='keyFile', default=None,
                      help='Path to keyFile to use to run replSet and sharding tests with authentication enabled')
    parser.add_option('--ignore', dest='ignore_files', default=None,
                      help='Pattern of files to ignore in tests')
    parser.add_option('--only-old-fails', dest='only_old_fails', default=False,
                      action="store_true",
                      help='Check the failfile and only run all tests that failed last time')
    parser.add_option('--reset-old-fails', dest='reset_old_fails', default=False,
                      action="store_true",
                      help='Clear the failfile. Do this if all tests pass')
    parser.add_option('--with-cleanbb', dest='with_cleanbb', default=False,
                      action="store_true",
                      help='Clear database files from previous smoke.py runs')
    parser.add_option('--dont-start-mongod', dest='start_mongod', default=True,
                      action='store_false',
                      help='Do not start mongod before commencing test running')
    parser.add_option('--use-ssl', dest='use_ssl', default=False,
                      action='store_true',
                      help='Run mongo shell and mongod instances with SSL encryption')

    # Buildlogger invocation from command line
    parser.add_option('--buildlogger-builder', dest='buildlogger_builder', default=None,
                      action="store", help='Set the "builder name" for buildlogger')
    parser.add_option('--buildlogger-buildnum', dest='buildlogger_buildnum', default=None,
                      action="store", help='Set the "build number" for buildlogger')
    parser.add_option('--buildlogger-credentials', dest='buildlogger_credentials', default=None,
                      action="store", help='Path to Python file containing buildlogger credentials')
    parser.add_option('--buildlogger-phase', dest='buildlogger_phase', default=None,
                      action="store", help='Set the "phase" for buildlogger (e.g. "core", "auth") for display in the webapp (optional)')

    global tests
    (options, tests) = parser.parse_args()

    set_globals(options, tests)

    buildlogger_opts = (options.buildlogger_builder, options.buildlogger_buildnum, options.buildlogger_credentials)
    if all(buildlogger_opts):
        os.environ['MONGO_USE_BUILDLOGGER'] = 'true'
        os.environ['MONGO_BUILDER_NAME'] = options.buildlogger_builder
        os.environ['MONGO_BUILD_NUMBER'] = options.buildlogger_buildnum
        os.environ['BUILDLOGGER_CREDENTIALS'] = options.buildlogger_credentials
        if options.buildlogger_phase:
            os.environ['MONGO_PHASE'] = options.buildlogger_phase
    elif any(buildlogger_opts):
        # some but not all of the required options were set
        raise Exception("you must set all of --buildlogger-builder, --buildlogger-buildnum, --buildlogger-credentials")

    if options.File:
        if options.File == '-':
            tests = sys.stdin.readlines()
        else:
            with open(options.File) as f:
                tests = f.readlines()
        tests = [t.rstrip('\n') for t in tests]

    if options.only_old_fails:
        run_old_fails()
        return
    elif options.reset_old_fails:
        clear_failfile()
        return

    # If we're in suite mode, tests is a list of names of sets of tests.
    if options.mode == 'suite':
        tests = expand_suites(tests)
    elif options.mode == 'files':
        tests = [(os.path.abspath(test), start_mongod) for test in tests]

    if options.ignore_files != None :
        ignore_patt = re.compile( options.ignore_files )
        print "Ignoring files with pattern: ", ignore_patt

        def ignore_test( test ):
            if ignore_patt.search( test[0] ) != None:
                print "Ignoring test ", test[0]
                return False
            else:
                return True

        tests = filter( ignore_test, tests )

    if not tests:
        print "warning: no tests specified"
        return

    if options.with_cleanbb:
        dbroot = os.path.join(options.smoke_db_prefix, 'data', 'db')
        call([utils.find_python(), "buildscripts/cleanbb.py", "--nokill", dbroot])

    try:
        run_tests(tests)
    finally:
        add_to_failfile(fails, options)

        f = open( "smoke-last.json", "wb" )
        f.write( json.dumps( { "results" : all_test_results } ) )
        f.close()

        report()


if __name__ == "__main__":
    main()

@@ -57,21 +57,14 @@ def push_tag( bucket , tag , extension , gzip=False ):
    os.remove( localName )


def push_all( filter=None ):
    tags = run_git("tag -l").strip().split( "\n" )

    bucket = simples3.S3Bucket( settings.bucket , settings.id , settings.key )

    for tag in tags:
        if filter and tag.find( filter ) < 0:
            print( "skipping %s because it doesn't match filter %s" % ( tag, filter ) )
            continue
        push_tag( bucket , tag , "tar" , True )
        push_tag( bucket , tag , "zip" )

if __name__ == "__main__":
    filter = None
    if len(sys.argv) > 1:
        filter = sys.argv[1]
        print( "filter: %s" % filter )
    push_all(filter)

@@ -1,26 +1,16 @@
import codecs
import re
import socket
import time
import os
import os.path
import itertools
import subprocess
import sys
import hashlib

# various utilities that are handy

def getAllSourceFiles( arr=None , prefix="." ):
    if arr is None:
        arr = []

    if not os.path.isdir( prefix ):
        # assume a file
        arr.append( prefix )
        return arr

    for x in os.listdir( prefix ):
        if x.startswith( "." ) or x.startswith( "pcre-" ) or x.startswith( "32bit" ) or x.startswith( "mongodb-" ) or x.startswith("debian") or x.startswith( "mongo-cxx-driver" ):
            continue

@@ -29,7 +19,6 @@ def getAllSourceFiles( arr=None , prefix="." ):
            getAllSourceFiles( arr , full )
        else:
            if full.endswith( ".cpp" ) or full.endswith( ".h" ) or full.endswith( ".c" ):
                full = full.replace( "//" , "/" )
                arr.append( full )

    return arr
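The same traversal can also be expressed with os.walk, pruning the skipped directories in place so they are never descended into. A sketch of the equivalent filter (illustrative only; all_source_files is a hypothetical name, not part of utils.py):

import os

SKIP_PREFIXES = (".", "pcre-", "32bit", "mongodb-", "debian", "mongo-cxx-driver")
SOURCE_SUFFIXES = (".cpp", ".h", ".c")

def all_source_files(prefix="."):
    found = []
    for root, dirs, files in os.walk(prefix):
        # prune skipped directories in place so os.walk never enters them
        dirs[:] = [d for d in dirs if not d.startswith(SKIP_PREFIXES)]
        for name in files:
            if name.endswith(SOURCE_SUFFIXES) and not name.startswith(SKIP_PREFIXES):
                found.append(os.path.join(root, name))
    return found
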
@@ -147,84 +136,34 @@ def didMongodStart( port=27017 , timeout=20 ):
        timeout = timeout - 1
    return False

def which(executable):
    if sys.platform == 'win32':
        paths = os.environ.get('Path', '').split(';')
    else:
        paths = os.environ.get('PATH', '').split(':')

    for path in paths:
        path = os.path.expandvars(path)
        path = os.path.expanduser(path)
        path = os.path.abspath(path)
        executable_path = os.path.join(path, executable)
        if os.path.exists(executable_path):
            return executable_path

    return executable

def md5sum( file ):
    #TODO error handling, etc..
    return execsys( "md5sum " + file )[0].partition(" ")[0]

def md5string( a_string ):
    return hashlib.md5(a_string).hexdigest()
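The TODO in md5sum() is worth taking seriously: shelling out to the md5sum binary fails on systems that do not ship it (OS X provides md5 instead). A portable sketch using the hashlib module already imported at the top of the file (md5sum_portable is a hypothetical name, not part of utils.py):

import hashlib

def md5sum_portable(path, chunk_size=65536):
    # stream the file through hashlib instead of shelling out to md5sum
    h = hashlib.md5()
    f = open(path, 'rb')
    try:
        while True:
            chunk = f.read(chunk_size)
            if not chunk:
                break
            h.update(chunk)
    finally:
        f.close()
    return h.hexdigest()
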
def find_python(min_version=(2, 5)):
    try:
        if sys.version_info >= min_version:
            return sys.executable
    except AttributeError:
        # In case the version of Python is somehow missing sys.version_info or sys.executable.
        pass

    version = re.compile(r'[Pp]ython ([\d\.]+)', re.MULTILINE)
    binaries = ('python27', 'python2.7', 'python26', 'python2.6', 'python25', 'python2.5', 'python')
    for binary in binaries:
        try:
            # py-2.4 compatible replacement for shell backticks
            out, err = subprocess.Popen([binary, '-V'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
            for stream in (out, err):
                match = version.search(stream)
                if match:
                    versiontuple = tuple(map(int, match.group(1).split('.')))
                    if versiontuple >= min_version:
                        return which(binary)
        except:
            pass

    raise Exception('could not find suitable Python (version >= %s)' % '.'.join(str(v) for v in min_version))

def smoke_python_name():
    # if this script is being run by py2.5 or greater,
    # then we assume that "python" points to a 2.5 or
    # greater python VM. otherwise, explicitly use 2.5
    # which we assume to be installed.
    min_version_tuple = (2, 5)
    try:
        if sys.version_info >= min_version_tuple:
            return sys.executable
    except AttributeError:
        pass

    import subprocess
    version = re.compile(r'[Pp]ython ([\d\.]+)', re.MULTILINE)
    binaries = ['python2.5', 'python2.6', 'python2.7', 'python25', 'python26', 'python27', 'python']
    for binary in binaries:
        try:
            # py-2.4 compatible replacement for shell backticks
            out, err = subprocess.Popen([binary, '-V'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
            for stream in (out, err):
                match = version.search(stream)
                if match:
                    versiontuple = tuple(map(int, match.group(1).split('.')))
                    if versiontuple >= min_version_tuple:
                        return binary
        except:
            pass

    # if that all fails, fall back to "python"
    return "python"

def smoke_command(*args):
    # return a list of arguments that comprises a complete
    # invocation of smoke.py
    here = os.path.dirname(__file__)
    smoke_py = os.path.abspath(os.path.join(here, 'smoke.py'))
    # the --with-cleanbb argument causes smoke.py to run
    # buildscripts/cleanbb.py before each test phase; this
    # prevents us from running out of disk space on slaves
    return [find_python(), smoke_py, '--with-cleanbb'] + list(args)

def run_smoke_command(*args):
    # to run a command line script from a scons Alias (or any
    # Action), the command sequence must be enclosed in a list,
    # otherwise SCons treats it as a list of dependencies.
    return [smoke_command(*args)]

# unicode is a pain. some strings cannot be unicode()'d
# but we want to just preserve the bytes in a human-readable
# fashion. this codec error handler will substitute the
# repr() of the offending bytes into the decoded string
# at the position they occurred
def replace_with_repr(unicode_error):
    offender = unicode_error.object[unicode_error.start:unicode_error.end]
    return (unicode(repr(offender).strip("'").strip('"')), unicode_error.end)

codecs.register_error('repr', replace_with_repr)

def unicode_dammit(string, encoding='utf8'):
    # convert a string to a unicode, using the Python
    # representation of non-ascii bytes when necessary
    #
    # name inspired by BeautifulSoup's "UnicodeDammit"
    return string.decode(encoding, 'repr')
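A quick demonstration of the 'repr' error handler, using unicode_dammit as defined above: the second string is not valid UTF-8, so the offending bytes survive as their escaped repr() form instead of raising UnicodeDecodeError:

print unicode_dammit('plain ascii passes through')
print unicode_dammit('broken utf8: \xe9\xff')   # offending bytes come out as literal \x escapes
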
client/clientOnly.cpp (new file, 79 lines)
@@ -0,0 +1,79 @@
// clientOnly.cpp

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "pch.h"
#include "../client/dbclient.h"
#include "../db/cmdline.h"
#include "../s/shard.h"

namespace mongo {

    CmdLine cmdLine;

    const char * curNs = "in client mode";

    bool dbexitCalled = false;

    void exitCleanly( ExitCode code ) {
        dbexit( code );
    }

    void dbexit( ExitCode returnCode, const char *whyMsg , bool tryToGetLock ) {
        dbexitCalled = true;
        out() << "dbexit called" << endl;
        if ( whyMsg )
            out() << " b/c " << whyMsg << endl;
        out() << "exiting" << endl;
        ::exit( returnCode );
    }

    bool inShutdown() {
        return dbexitCalled;
    }

    void setupSignals() {
        // maybe should do SIGPIPE here, not sure
    }

    string getDbContext() {
        return "in client only mode";
    }

    bool haveLocalShardingInfo( const string& ns ) {
        return false;
    }

    DBClientBase * createDirectClient() {
        uassert( 10256 , "no createDirectClient in clientOnly" , 0 );
        return 0;
    }

    void Shard::getAllShards( vector<Shard>& all ) {
        assert(0);
    }

    bool Shard::isAShardNode( const string& ident ) {
        assert(0);
        return false;
    }

    string prettyHostName() {
        assert(0);
        return "";
    }

}
client/connpool.cpp (new file, 459 lines)
@@ -0,0 +1,459 @@
/* connpool.cpp
*/

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// _ todo: reconnect?

#include "pch.h"
#include "connpool.h"
#include "../db/commands.h"
#include "syncclusterconnection.h"
#include "../s/shard.h"

namespace mongo {

    // ------ PoolForHost ------

    PoolForHost::~PoolForHost() {
        while ( ! _pool.empty() ) {
            StoredConnection sc = _pool.top();
            delete sc.conn;
            _pool.pop();
        }
    }

    void PoolForHost::done( DBConnectionPool * pool, DBClientBase * c ) {
        if ( _pool.size() >= _maxPerHost ) {
            pool->onDestroy( c );
            delete c;
        }
        else {
            _pool.push(c);
        }
    }

    DBClientBase * PoolForHost::get( DBConnectionPool * pool , double socketTimeout ) {

        time_t now = time(0);

        while ( ! _pool.empty() ) {
            StoredConnection sc = _pool.top();
            _pool.pop();

            if ( ! sc.ok( now ) ) {
                pool->onDestroy( sc.conn );
                delete sc.conn;
                continue;
            }

            assert( sc.conn->getSoTimeout() == socketTimeout );

            return sc.conn;
        }

        return NULL;
    }

    void PoolForHost::flush() {
        vector<StoredConnection> all;
        while ( ! _pool.empty() ) {
            StoredConnection c = _pool.top();
            _pool.pop();
            all.push_back( c );
            bool res;
            c.conn->isMaster( res );
        }

        for ( vector<StoredConnection>::iterator i=all.begin(); i != all.end(); ++i ) {
            _pool.push( *i );
        }
    }

    void PoolForHost::getStaleConnections( vector<DBClientBase*>& stale ) {
        time_t now = time(0);

        vector<StoredConnection> all;
        while ( ! _pool.empty() ) {
            StoredConnection c = _pool.top();
            _pool.pop();

            if ( c.ok( now ) )
                all.push_back( c );
            else
                stale.push_back( c.conn );
        }

        for ( size_t i=0; i<all.size(); i++ ) {
            _pool.push( all[i] );
        }
    }


    PoolForHost::StoredConnection::StoredConnection( DBClientBase * c ) {
        conn = c;
        when = time(0);
    }

    bool PoolForHost::StoredConnection::ok( time_t now ) {
        // if connection has been idle for 30 minutes, kill it
        return ( now - when ) < 1800;
    }

    void PoolForHost::createdOne( DBClientBase * base) {
        if ( _created == 0 )
            _type = base->type();
        _created++;
    }

    unsigned PoolForHost::_maxPerHost = 50;

    // ------ DBConnectionPool ------

    DBConnectionPool pool;

    DBConnectionPool::DBConnectionPool()
        : _mutex("DBConnectionPool") ,
          _name( "dbconnectionpool" ) ,
          _hooks( new list<DBConnectionHook*>() ) {
    }

    DBClientBase* DBConnectionPool::_get(const string& ident , double socketTimeout ) {
        assert( ! inShutdown() );
        scoped_lock L(_mutex);
        PoolForHost& p = _pools[PoolKey(ident,socketTimeout)];
        return p.get( this , socketTimeout );
    }

    DBClientBase* DBConnectionPool::_finishCreate( const string& host , double socketTimeout , DBClientBase* conn ) {
        {
            scoped_lock L(_mutex);
            PoolForHost& p = _pools[PoolKey(host,socketTimeout)];
            p.createdOne( conn );
        }

        try {
            onCreate( conn );
            onHandedOut( conn );
        }
        catch ( std::exception& e ) {
            delete conn;
            throw;
        }

        return conn;
    }

    DBClientBase* DBConnectionPool::get(const ConnectionString& url, double socketTimeout) {
        DBClientBase * c = _get( url.toString() , socketTimeout );
        if ( c ) {
            try {
                onHandedOut( c );
            }
            catch ( std::exception& e ) {
                delete c;
                throw;
            }
            return c;
        }

        string errmsg;
        c = url.connect( errmsg, socketTimeout );
        uassert( 13328 , _name + ": connect failed " + url.toString() + " : " + errmsg , c );

        return _finishCreate( url.toString() , socketTimeout , c );
    }

    DBClientBase* DBConnectionPool::get(const string& host, double socketTimeout) {
        DBClientBase * c = _get( host , socketTimeout );
        if ( c ) {
            try {
                onHandedOut( c );
            }
            catch ( std::exception& e ) {
                delete c;
                throw;
            }
            return c;
        }

        string errmsg;
        ConnectionString cs = ConnectionString::parse( host , errmsg );
        uassert( 13071 , (string)"invalid hostname [" + host + "]" + errmsg , cs.isValid() );

        c = cs.connect( errmsg, socketTimeout );
        if ( ! c )
            throw SocketException( SocketException::CONNECT_ERROR , host , 11002 , str::stream() << _name << " error: " << errmsg );
        return _finishCreate( host , socketTimeout , c );
    }

    void DBConnectionPool::release(const string& host, DBClientBase *c) {
        if ( c->isFailed() ) {
            onDestroy( c );
            delete c;
            return;
        }
        scoped_lock L(_mutex);
        _pools[PoolKey(host,c->getSoTimeout())].done(this,c);
    }


    DBConnectionPool::~DBConnectionPool() {
        // connection closing is handled by ~PoolForHost
    }

    void DBConnectionPool::flush() {
        scoped_lock L(_mutex);
        for ( PoolMap::iterator i = _pools.begin(); i != _pools.end(); i++ ) {
            PoolForHost& p = i->second;
            p.flush();
        }
    }

    void DBConnectionPool::addHook( DBConnectionHook * hook ) {
        _hooks->push_back( hook );
    }

    void DBConnectionPool::onCreate( DBClientBase * conn ) {
        if ( _hooks->size() == 0 )
            return;

        for ( list<DBConnectionHook*>::iterator i = _hooks->begin(); i != _hooks->end(); i++ ) {
            (*i)->onCreate( conn );
        }
    }

    void DBConnectionPool::onHandedOut( DBClientBase * conn ) {
        if ( _hooks->size() == 0 )
            return;

        for ( list<DBConnectionHook*>::iterator i = _hooks->begin(); i != _hooks->end(); i++ ) {
            (*i)->onHandedOut( conn );
        }
    }

    void DBConnectionPool::onDestroy( DBClientBase * conn ) {
        if ( _hooks->size() == 0 )
            return;

        for ( list<DBConnectionHook*>::iterator i = _hooks->begin(); i != _hooks->end(); i++ ) {
            (*i)->onDestroy( conn );
        }
    }

    void DBConnectionPool::appendInfo( BSONObjBuilder& b ) {

        int avail = 0;
        long long created = 0;

        map<ConnectionString::ConnectionType,long long> createdByType;

        set<string> replicaSets;

        BSONObjBuilder bb( b.subobjStart( "hosts" ) );
        {
            scoped_lock lk( _mutex );
            for ( PoolMap::iterator i=_pools.begin(); i!=_pools.end(); ++i ) {
                if ( i->second.numCreated() == 0 )
                    continue;

                string s = str::stream() << i->first.ident << "::" << i->first.timeout;

                BSONObjBuilder temp( bb.subobjStart( s ) );
                temp.append( "available" , i->second.numAvailable() );
                temp.appendNumber( "created" , i->second.numCreated() );
                temp.done();

                avail += i->second.numAvailable();
                created += i->second.numCreated();

                long long& x = createdByType[i->second.type()];
                x += i->second.numCreated();

                {
                    string setName = i->first.ident;
                    if ( setName.find( "/" ) != string::npos ) {
                        setName = setName.substr( 0 , setName.find( "/" ) );
                        replicaSets.insert( setName );
                    }
                }
            }
        }
        bb.done();

        BSONObjBuilder setBuilder( b.subobjStart( "replicaSets" ) );
        for ( set<string>::iterator i=replicaSets.begin(); i!=replicaSets.end(); ++i ) {
            string rs = *i;
            ReplicaSetMonitorPtr m = ReplicaSetMonitor::get( rs );
            if ( ! m ) {
                warning() << "no monitor for set: " << rs << endl;
                continue;
            }

            BSONObjBuilder temp( setBuilder.subobjStart( rs ) );
            m->appendInfo( temp );
            temp.done();
        }
        setBuilder.done();

        {
            BSONObjBuilder temp( b.subobjStart( "createdByType" ) );
            for ( map<ConnectionString::ConnectionType,long long>::iterator i=createdByType.begin(); i!=createdByType.end(); ++i ) {
                temp.appendNumber( ConnectionString::typeToString( i->first ) , i->second );
            }
            temp.done();
        }

        b.append( "totalAvailable" , avail );
        b.appendNumber( "totalCreated" , created );
    }

    bool DBConnectionPool::serverNameCompare::operator()( const string& a , const string& b ) const {
        const char* ap = a.c_str();
        const char* bp = b.c_str();

        while (true){
            if (*ap == '\0' || *ap == '/'){
                if (*bp == '\0' || *bp == '/')
                    return false; // equal strings
                else
                    return true; // a is shorter
            }

            if (*bp == '\0' || *bp == '/')
                return false; // b is shorter

            if ( *ap < *bp)
                return true;
            else if (*ap > *bp)
                return false;

            ++ap;
            ++bp;
        }
        assert(false);
    }

    bool DBConnectionPool::poolKeyCompare::operator()( const PoolKey& a , const PoolKey& b ) const {
        if (DBConnectionPool::serverNameCompare()( a.ident , b.ident ))
            return true;

        if (DBConnectionPool::serverNameCompare()( b.ident , a.ident ))
            return false;

        return a.timeout < b.timeout;
    }
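The intent of these two comparators is easier to see flattened out: hosts compare only up to the first '/', so two connection strings naming the same replica set land in the same bucket, and the socket timeout then disambiguates. A conceptual Python port (illustrative only; the hostnames are made up, and the real map is the C++ std::map above):

def server_name_key(ident):
    # compare only the portion before the first '/', i.e. the set name if present
    return ident.split('/', 1)[0]

def pool_key(ident, timeout):
    return (server_name_key(ident), timeout)

# two strings naming the same set collapse to the same key...
assert pool_key('shard1/a.example.com,b.example.com', 0) == pool_key('shard1/b.example.com', 0)
# ...while different socket timeouts still get distinct pools
assert pool_key('shard1/a.example.com', 0) != pool_key('shard1/a.example.com', 30)
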

    void DBConnectionPool::taskDoWork() {
        vector<DBClientBase*> toDelete;

        {
            // we need to get the connections inside the lock
            // but we can actually delete them outside
            scoped_lock lk( _mutex );
            for ( PoolMap::iterator i=_pools.begin(); i!=_pools.end(); ++i ) {
                i->second.getStaleConnections( toDelete );
            }
        }

        for ( size_t i=0; i<toDelete.size(); i++ ) {
            try {
                onDestroy( toDelete[i] );
                delete toDelete[i];
            }
            catch ( ... ) {
                // we don't care if there was a socket error
            }
        }
    }

    // ------ ScopedDbConnection ------

    ScopedDbConnection * ScopedDbConnection::steal() {
        assert( _conn );
        ScopedDbConnection * n = new ScopedDbConnection( _host , _conn, _socketTimeout );
        _conn = 0;
        return n;
    }

    void ScopedDbConnection::_setSocketTimeout(){
        if( ! _conn ) return;
        if( _conn->type() == ConnectionString::MASTER )
            (( DBClientConnection* ) _conn)->setSoTimeout( _socketTimeout );
        else if( _conn->type() == ConnectionString::SYNC )
            (( SyncClusterConnection* ) _conn)->setAllSoTimeouts( _socketTimeout );
    }

    ScopedDbConnection::~ScopedDbConnection() {
        if ( _conn ) {
            if ( ! _conn->isFailed() ) {
                /* see done() comments above for why we log this line */
                log() << "scoped connection to " << _conn->getServerAddress() << " not being returned to the pool" << endl;
            }
            kill();
        }
    }

    ScopedDbConnection::ScopedDbConnection(const Shard& shard, double socketTimeout )
        : _host( shard.getConnString() ) , _conn( pool.get(_host, socketTimeout) ), _socketTimeout( socketTimeout ) {
        _setSocketTimeout();
    }

    ScopedDbConnection::ScopedDbConnection(const Shard* shard, double socketTimeout )
        : _host( shard->getConnString() ) , _conn( pool.get(_host, socketTimeout) ), _socketTimeout( socketTimeout ) {
        _setSocketTimeout();
    }


    class PoolFlushCmd : public Command {
    public:
        PoolFlushCmd() : Command( "connPoolSync" , false , "connpoolsync" ) {}
        virtual void help( stringstream &help ) const { help<<"internal"; }
        virtual LockType locktype() const { return NONE; }
        virtual bool run(const string&, mongo::BSONObj&, int, std::string&, mongo::BSONObjBuilder& result, bool) {
            pool.flush();
            return true;
        }
        virtual bool slaveOk() const {
            return true;
        }

    } poolFlushCmd;

    class PoolStats : public Command {
    public:
        PoolStats() : Command( "connPoolStats" ) {}
        virtual void help( stringstream &help ) const { help<<"stats about connection pool"; }
        virtual LockType locktype() const { return NONE; }
        virtual bool run(const string&, mongo::BSONObj&, int, std::string&, mongo::BSONObjBuilder& result, bool) {
            pool.appendInfo( result );
            result.append( "numDBClientConnection" , DBClientConnection::getNumConnections() );
            result.append( "numAScopedConnection" , AScopedConnection::getNumConnections() );
            return true;
        }
        virtual bool slaveOk() const {
            return true;
        }

    } poolStatsCmd;

    AtomicUInt AScopedConnection::_numConnections;

} // namespace mongo
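The whole lifecycle above reduces to a small amount of bookkeeping: a LIFO stack per (host, timeout) key, a 30-minute idle cutoff checked as connections come off the stack, and a periodic sweep that evicts stale entries. A conceptual Python sketch of just that bookkeeping (no sockets; any object with a close() method stands in for DBClientBase, and PoolForHostSketch is a made-up name):

import time

IDLE_CUTOFF = 1800  # seconds; matches StoredConnection::ok above

class PoolForHostSketch(object):
    def __init__(self):
        self._pool = []  # stack of (conn, stored_at) pairs

    def done(self, conn, max_per_host=50):
        # return a connection; overflow beyond the per-host cap is discarded
        if len(self._pool) >= max_per_host:
            conn.close()
        else:
            self._pool.append((conn, time.time()))

    def get(self):
        # pop until we find a connection that has not sat idle too long
        while self._pool:
            conn, stored_at = self._pool.pop()
            if time.time() - stored_at < IDLE_CUTOFF:
                return conn
            conn.close()
        return None  # caller creates a fresh connection instead

    def sweep(self):
        # what taskDoWork() does periodically: drop stale, keep fresh
        now = time.time()
        fresh = [(c, t) for (c, t) in self._pool if now - t < IDLE_CUTOFF]
        for c, t in self._pool:
            if now - t >= IDLE_CUTOFF:
                c.close()
        self._pool = fresh
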
client/connpool.h (new file, 291 lines)
@@ -0,0 +1,291 @@
/** @file connpool.h */

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <stack>
#include "dbclient.h"
#include "redef_macros.h"

#include "../util/background.h"

namespace mongo {

    class Shard;
    class DBConnectionPool;

    /**
     * not thread safe
     * thread safety is handled by DBConnectionPool
     */
    class PoolForHost {
    public:
        PoolForHost()
            : _created(0) {}

        PoolForHost( const PoolForHost& other ) {
            assert(other._pool.size() == 0);
            _created = other._created;
            assert( _created == 0 );
        }

        ~PoolForHost();

        int numAvailable() const { return (int)_pool.size(); }

        void createdOne( DBClientBase * base );
        long long numCreated() const { return _created; }

        ConnectionString::ConnectionType type() const { assert(_created); return _type; }

        /**
         * gets a connection or returns NULL
         */
        DBClientBase * get( DBConnectionPool * pool , double socketTimeout );

        void done( DBConnectionPool * pool , DBClientBase * c );

        void flush();

        void getStaleConnections( vector<DBClientBase*>& stale );

        static void setMaxPerHost( unsigned max ) { _maxPerHost = max; }
        static unsigned getMaxPerHost() { return _maxPerHost; }
    private:

        struct StoredConnection {
            StoredConnection( DBClientBase * c );

            bool ok( time_t now );

            DBClientBase* conn;
            time_t when;
        };

        std::stack<StoredConnection> _pool;

        long long _created;
        ConnectionString::ConnectionType _type;

        static unsigned _maxPerHost;
    };

    class DBConnectionHook {
    public:
        virtual ~DBConnectionHook() {}
        virtual void onCreate( DBClientBase * conn ) {}
        virtual void onHandedOut( DBClientBase * conn ) {}
        virtual void onDestroy( DBClientBase * conn ) {}
    };

    /** Database connection pool.

        Generally, use ScopedDbConnection and do not call these directly.

        This class, so far, is suitable for use with unauthenticated connections.
        Support for authenticated connections requires some adjustments: please
        request...

        Usage:

        {
           ScopedDbConnection c("myserver");
           c.conn()...
        }
    */
    class DBConnectionPool : public PeriodicTask {

    public:

        DBConnectionPool();
        ~DBConnectionPool();

        /** right now just controls some asserts. defaults to "dbconnectionpool" */
        void setName( const string& name ) { _name = name; }

        void onCreate( DBClientBase * conn );
        void onHandedOut( DBClientBase * conn );
        void onDestroy( DBClientBase * conn );

        void flush();

        DBClientBase *get(const string& host, double socketTimeout = 0);
        DBClientBase *get(const ConnectionString& host, double socketTimeout = 0);

        void release(const string& host, DBClientBase *c);

        void addHook( DBConnectionHook * hook ); // we take ownership
        void appendInfo( BSONObjBuilder& b );

        /** compares server names, but is smart about replica set names */
        struct serverNameCompare {
            bool operator()( const string& a , const string& b ) const;
        };

        virtual string taskName() const { return "DBConnectionPool-cleaner"; }
        virtual void taskDoWork();

    private:
        DBConnectionPool( DBConnectionPool& p );

        DBClientBase* _get( const string& ident , double socketTimeout );

        DBClientBase* _finishCreate( const string& ident , double socketTimeout, DBClientBase* conn );

        struct PoolKey {
            PoolKey( string i , double t ) : ident( i ) , timeout( t ) {}
            string ident;
            double timeout;
        };

        struct poolKeyCompare {
            bool operator()( const PoolKey& a , const PoolKey& b ) const;
        };

        typedef map<PoolKey,PoolForHost,poolKeyCompare> PoolMap; // servername -> pool

        mongo::mutex _mutex;
        string _name;

        PoolMap _pools;

        // pointers owned by me, right now they leak on shutdown
        // _hooks itself also leaks because it creates a shutdown race condition
        list<DBConnectionHook*> * _hooks;

    };

    extern DBConnectionPool pool;

    class AScopedConnection : boost::noncopyable {
    public:
        AScopedConnection() { _numConnections++; }
        virtual ~AScopedConnection() { _numConnections--; }

        virtual DBClientBase* get() = 0;
        virtual void done() = 0;
        virtual string getHost() const = 0;

        /**
         * @return true iff this has a connection to the db
         */
        virtual bool ok() const = 0;

        /**
         * @return total number of current instances of AScopedConnection
         */
        static int getNumConnections() { return _numConnections; }

    private:
        static AtomicUInt _numConnections;
    };

    /** Use to get a connection from the pool. On exceptions things
        clean up nicely (i.e. the socket gets closed automatically when the
        scopeddbconnection goes out of scope).
    */
    class ScopedDbConnection : public AScopedConnection {
    public:
        /** the main constructor you want to use
            throws UserException if can't connect
        */
        explicit ScopedDbConnection(const string& host, double socketTimeout = 0) : _host(host), _conn( pool.get(host, socketTimeout) ), _socketTimeout( socketTimeout ) {
            _setSocketTimeout();
        }

        ScopedDbConnection() : _host( "" ) , _conn(0), _socketTimeout( 0 ) {}

        /* @param conn - bind to an existing connection */
        ScopedDbConnection(const string& host, DBClientBase* conn, double socketTimeout = 0 ) : _host( host ) , _conn( conn ), _socketTimeout( socketTimeout ) {
            _setSocketTimeout();
        }

        /** throws UserException if can't connect */
        explicit ScopedDbConnection(const ConnectionString& url, double socketTimeout = 0 ) : _host(url.toString()), _conn( pool.get(url, socketTimeout) ), _socketTimeout( socketTimeout ) {
            _setSocketTimeout();
        }

        /** throws UserException if can't connect */
        explicit ScopedDbConnection(const Shard& shard, double socketTimeout = 0 );
        explicit ScopedDbConnection(const Shard* shard, double socketTimeout = 0 );

        ~ScopedDbConnection();

        /** get the associated connection object */
        DBClientBase* operator->() {
            uassert( 11004 , "connection was returned to the pool already" , _conn );
            return _conn;
        }

        /** get the associated connection object */
        DBClientBase& conn() {
            uassert( 11005 , "connection was returned to the pool already" , _conn );
            return *_conn;
        }

        /** get the associated connection object */
        DBClientBase* get() {
            uassert( 13102 , "connection was returned to the pool already" , _conn );
            return _conn;
        }

        bool ok() const { return _conn > 0; }

        string getHost() const { return _host; }

        /** Force closure of the connection. You should call this if you leave it in
            a bad state. Destructor will do this too, but it is verbose.
        */
        void kill() {
            delete _conn;
            _conn = 0;
        }

        /** Call this when you are done with the connection.

            If you do not call done() before this object goes out of scope,
            we can't be sure we fully read all expected data of a reply on the socket. so
            we don't try to reuse the connection in that situation.
        */
        void done() {
            if ( ! _conn )
                return;

            /* we could do this, but instead we assume one is using autoreconnect mode on the connection
            if ( _conn->isFailed() )
                kill();
            else
            */
            pool.release(_host, _conn);
            _conn = 0;
        }

        ScopedDbConnection * steal();

    private:

        void _setSocketTimeout();

        const string _host;
        DBClientBase *_conn;
        const double _socketTimeout;

    };

} // namespace mongo

#include "undef_macros.h"
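The done()/kill() contract is the part worth internalizing: a connection goes back to the pool only on an explicit done(); if the guard dies without it, the socket is closed instead, because a half-read reply could poison the next user. A conceptual Python rendering of the same rule as a context manager (illustrative only; scoped_db_connection and the pool/conn methods it calls are stand-ins, not a real driver API):

from contextlib import contextmanager

@contextmanager
def scoped_db_connection(pool, host):
    conn = pool.get(host)
    done = [False]

    def mark_done():
        done[0] = True

    try:
        yield conn, mark_done
    finally:
        if done[0] and not conn.is_failed():
            pool.release(host, conn)   # clean handoff back to the pool
        else:
            conn.close()               # never reuse a connection in an unknown state

A caller would write: with scoped_db_connection(pool, 'myserver') as (c, done): run queries on c, then call done() only after the last reply has been fully read.
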
client/constants.h (new file, 26 lines)
@@ -0,0 +1,26 @@
// constants.h

#pragma once

namespace mongo {

    /* query results include a 32 bit result flag word consisting of these bits */
    enum ResultFlagType {
        /* returned, with zero results, when getMore is called but the cursor id
           is not valid at the server. */
        ResultFlag_CursorNotFound = 1,

        /* { $err : ... } is being returned */
        ResultFlag_ErrSet = 2,

        /* Have to update config from the server, usually $err is also set */
        ResultFlag_ShardConfigStale = 4,

        /* for backward compatibility: this lets us know the server supports
           the QueryOption_AwaitData option. if it doesn't, a repl slave client should sleep
           a little between getMore's.
        */
        ResultFlag_AwaitCapable = 8
    };

}
client/dbclient.cpp (new file, 1053 lines)
(file diff suppressed because it is too large)

client/dbclient.h (new file, 983 lines)
@@ -0,0 +1,983 @@
/** @file dbclient.h

    Core MongoDB C++ driver interfaces are defined here.
*/

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "../pch.h"
#include "../util/net/message.h"
#include "../util/net/message_port.h"
#include "../db/jsobj.h"
#include "../db/json.h"
#include <stack>

namespace mongo {

    /** the query field 'options' can have these bits set: */
    enum QueryOptions {
        /** Tailable means cursor is not closed when the last data is retrieved. rather, the cursor marks
            the final object's position. you can resume using the cursor later, from where it was located,
            if more data were received. Set on dbQuery and dbGetMore.

            like any "latent cursor", the cursor may become invalid at some point -- for example if that
            final object it references were deleted. Thus, you should be prepared to requery if you get back
            ResultFlag_CursorNotFound.
        */
        QueryOption_CursorTailable = 1 << 1,

        /** allow query of replica slave. normally these return an error except for namespace "local".
        */
        QueryOption_SlaveOk = 1 << 2,

        // findingStart mode is used to find the first operation of interest when
        // we are scanning through a repl log. For efficiency in the common case,
        // where the first operation of interest is closer to the tail than the head,
        // we start from the tail of the log and work backwards until we find the
        // first operation of interest. Then we scan forward from that first operation,
        // actually returning results to the client. During the findingStart phase,
        // we release the db mutex occasionally to avoid blocking the db process for
        // an extended period of time.
        QueryOption_OplogReplay = 1 << 3,

        /** The server normally times out idle cursors after an inactivity period to prevent excess memory use.
            Set this option to prevent that.
        */
        QueryOption_NoCursorTimeout = 1 << 4,

        /** Use with QueryOption_CursorTailable. If we are at the end of the data, block for a while rather
            than returning no data. After a timeout period, we do return as normal.
        */
        QueryOption_AwaitData = 1 << 5,

        /** Stream the data down full blast in multiple "more" packages, on the assumption that the client
            will fully read all data queried. Faster when you are pulling a lot of data and know you want to
            pull it all down. Note: it is not allowed to not read all the data unless you close the connection.

            Use the query( boost::function<void(const BSONObj&)> f, ... ) version of the connection's query()
            method, and it will take care of all the details for you.
        */
        QueryOption_Exhaust = 1 << 6,

        /** When sharded, this means it's ok to return partial results.
            Usually we will fail a query if all required shards aren't up.
            If this is set, it'll be a partial result set.
        */
        QueryOption_PartialResults = 1 << 7 ,

        QueryOption_AllSupported = QueryOption_CursorTailable | QueryOption_SlaveOk | QueryOption_OplogReplay | QueryOption_NoCursorTimeout | QueryOption_AwaitData | QueryOption_Exhaust | QueryOption_PartialResults

    };
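These are plain bits in the wire protocol's query flag word. A small sketch of composing and checking them (values copied from the enum above; note that AwaitData is only meaningful together with CursorTailable):

# bit values from the QueryOptions enum above
QueryOption_CursorTailable  = 1 << 1
QueryOption_SlaveOk         = 1 << 2
QueryOption_OplogReplay     = 1 << 3
QueryOption_NoCursorTimeout = 1 << 4
QueryOption_AwaitData       = 1 << 5
QueryOption_Exhaust         = 1 << 6
QueryOption_PartialResults  = 1 << 7

# a tailable oplog-style cursor that blocks briefly at the end of data
flags = QueryOption_CursorTailable | QueryOption_AwaitData | QueryOption_SlaveOk

assert flags & QueryOption_AwaitData
assert not flags & QueryOption_Exhaust
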
    enum UpdateOptions {
        /** Upsert - that is, insert the item if no matching item is found. */
        UpdateOption_Upsert = 1 << 0,

        /** Update multiple documents (if multiple documents match query expression).
           (Default is update a single document and stop.) */
        UpdateOption_Multi = 1 << 1,

        /** flag from mongo saying this update went everywhere */
        UpdateOption_Broadcast = 1 << 2
    };

    enum RemoveOptions {
        /** only delete one matching document */
        RemoveOption_JustOne = 1 << 0,

        /** flag from mongo saying this update went everywhere */
        RemoveOption_Broadcast = 1 << 1
    };


    /**
     * need to put in DbMessage::ReservedOptions as well
     */
    enum InsertOptions {
        /** With multi-insert keep processing inserts if one fails */
        InsertOption_ContinueOnError = 1 << 0
    };

    class DBClientBase;

    /**
     * ConnectionString handles parsing different ways to connect to mongo and determining method
     * samples:
     *    server
     *    server:port
     *    foo/server:port,server:port   SET
     *    server,server,server          SYNC
     *
     * typical use
     * string errmsg,
     * ConnectionString cs = ConnectionString::parse( url , errmsg );
     * if ( ! cs.isValid() ) throw "bad: " + errmsg;
     * DBClientBase * conn = cs.connect( errmsg );
     */
    class ConnectionString {
    public:
        enum ConnectionType { INVALID , MASTER , PAIR , SET , SYNC };

        ConnectionString() {
            _type = INVALID;
        }

        ConnectionString( const HostAndPort& server ) {
            _type = MASTER;
            _servers.push_back( server );
            _finishInit();
        }

        ConnectionString( ConnectionType type , const string& s , const string& setName = "" ) {
            _type = type;
            _setName = setName;
            _fillServers( s );

            switch ( _type ) {
            case MASTER:
                assert( _servers.size() == 1 );
                break;
            case SET:
                assert( _setName.size() );
                assert( _servers.size() >= 1 ); // 1 is ok since we can derive
                break;
            case PAIR:
                assert( _servers.size() == 2 );
                break;
            default:
                assert( _servers.size() > 0 );
            }

            _finishInit();
        }

        ConnectionString( const string& s , ConnectionType favoredMultipleType ) {
            _type = INVALID;

            _fillServers( s );
            if ( _type != INVALID ) {
                // set already
            }
            else if ( _servers.size() == 1 ) {
                _type = MASTER;
            }
            else {
                _type = favoredMultipleType;
                assert( _type == SET || _type == SYNC );
            }
            _finishInit();
        }

        bool isValid() const { return _type != INVALID; }

        string toString() const { return _string; }

        DBClientBase* connect( string& errmsg, double socketTimeout = 0 ) const;

        string getSetName() const { return _setName; }

        vector<HostAndPort> getServers() const { return _servers; }

        ConnectionType type() const { return _type; }

        static ConnectionString parse( const string& url , string& errmsg );

        static string typeToString( ConnectionType type );

    private:

        void _fillServers( string s );
        void _finishInit();

        ConnectionType _type;
        vector<HostAndPort> _servers;
        string _string;
        string _setName;
    };
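The format table in the comment is essentially the whole grammar: a set name before '/' means SET, multiple bare hosts take the caller's favored multi-host type (SYNC in this sketch, matching the sync-cluster sample), and a single host means MASTER. A conceptual sketch of that dispatch (illustrative only; the real parsing lives in ConnectionString::parse in dbclient.cpp):

def classify_connection_string(s):
    # "setName/host:port,host:port" -> replica SET
    if '/' in s:
        set_name, hosts = s.split('/', 1)
        return 'SET', set_name, hosts.split(',')
    hosts = s.split(',')
    if len(hosts) > 1:
        return 'SYNC', None, hosts       # multiple bare hosts, sync-cluster style
    return 'MASTER', None, hosts         # single server

assert classify_connection_string('server:27017')[0] == 'MASTER'
assert classify_connection_string('foo/a:27017,b:27017')[0] == 'SET'
assert classify_connection_string('a:1,b:2,c:3')[0] == 'SYNC'
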
|
||||
/**
|
||||
* controls how much a clients cares about writes
|
||||
* default is NORMAL
|
||||
*/
|
||||
enum WriteConcern {
|
||||
W_NONE = 0 , // TODO: not every connection type fully supports this
|
||||
W_NORMAL = 1
|
||||
// TODO SAFE = 2
|
||||
};
|
||||
|
||||
class BSONObj;
|
||||
class ScopedDbConnection;
|
||||
class DBClientCursor;
|
||||
class DBClientCursorBatchIterator;
|
||||
|
||||
/** Represents a Mongo query expression. Typically one uses the QUERY(...) macro to construct a Query object.
|
||||
Examples:
|
||||
QUERY( "age" << 33 << "school" << "UCLA" ).sort("name")
|
||||
QUERY( "age" << GT << 30 << LT << 50 )
|
||||
*/
|
||||
class Query {
|
||||
public:
|
||||
BSONObj obj;
|
||||
Query() : obj(BSONObj()) { }
|
||||
Query(const BSONObj& b) : obj(b) { }
|
||||
Query(const string &json) :
|
||||
obj(fromjson(json)) { }
|
||||
Query(const char * json) :
|
||||
obj(fromjson(json)) { }
|
||||
|
||||
/** Add a sort (ORDER BY) criteria to the query expression.
|
||||
@param sortPattern the sort order template. For example to order by name ascending, time descending:
|
||||
{ name : 1, ts : -1 }
|
||||
i.e.
|
||||
BSON( "name" << 1 << "ts" << -1 )
|
||||
or
|
||||
fromjson(" name : 1, ts : -1 ")
|
||||
*/
|
||||
Query& sort(const BSONObj& sortPattern);
|
||||
|
||||
/** Add a sort (ORDER BY) criteria to the query expression.
|
||||
This version of sort() assumes you want to sort on a single field.
|
||||
@param asc = 1 for ascending order
|
||||
asc = -1 for descending order
|
||||
*/
|
||||
Query& sort(const string &field, int asc = 1) { sort( BSON( field << asc ) ); return *this; }
|
||||
|
||||
/** Provide a hint to the query.
|
||||
@param keyPattern Key pattern for the index to use.
|
||||
Example:
|
||||
hint("{ts:1}")
|
||||
*/
|
||||
Query& hint(BSONObj keyPattern);
|
||||
Query& hint(const string &jsonKeyPatt) { return hint(fromjson(jsonKeyPatt)); }
|
||||
|
||||
/** Provide min and/or max index limits for the query.
|
||||
min <= x < max
|
||||
*/
|
||||
Query& minKey(const BSONObj &val);
|
||||
/**
|
||||
max is exclusive
|
||||
*/
|
||||
Query& maxKey(const BSONObj &val);
|
||||
|
||||
/** Return explain information about execution of this query instead of the actual query results.
|
||||
Normally it is easier to use the mongo shell to run db.find(...).explain().
|
||||
*/
|
||||
Query& explain();
|
||||
|
||||
/** Use snapshot mode for the query. Snapshot mode assures no duplicates are returned, or objects missed, which were
|
||||
present at both the start and end of the query's execution (if an object is new during the query, or deleted during
|
||||
the query, it may or may not be returned, even with snapshot mode).
|
||||
|
||||
Note that short query responses (less than 1MB) are always effectively snapshotted.
|
||||
|
||||
Currently, snapshot mode may not be used with sorting or explicit hints.
|
||||
*/
|
||||
Query& snapshot();
|
||||
|
||||
/** Queries to the Mongo database support a $where parameter option which contains
|
||||
a javascript function that is evaluated to see whether objects being queried match
|
||||
its criteria. Use this helper to append such a function to a query object.
|
||||
Your query may also contain other traditional Mongo query terms.
|
||||
|
||||
@param jscode The javascript function to evaluate against each potential object
|
||||
match. The function must return true for matched objects. Use the this
|
||||
variable to inspect the current object.
|
||||
@param scope SavedContext for the javascript object. List in a BSON object any
|
||||
variables you would like defined when the jscode executes. One can think
|
||||
of these as "bind variables".
|
||||
|
||||
Examples:
|
||||
conn.findOne("test.coll", Query("{a:3}").where("this.b == 2 || this.c == 3"));
|
||||
Query badBalance = Query().where("this.debits - this.credits < 0");
|
||||
*/
|
||||
Query& where(const string &jscode, BSONObj scope);
|
||||
Query& where(const string &jscode) { return where(jscode, BSONObj()); }
|
||||
|
||||
/**
|
||||
* @return true if this query has an orderby, hint, or some other field
|
||||
*/
|
||||
bool isComplex( bool * hasDollar = 0 ) const;
|
||||
|
||||
BSONObj getFilter() const;
|
||||
BSONObj getSort() const;
|
||||
BSONObj getHint() const;
|
||||
bool isExplain() const;
|
||||
|
||||
string toString() const;
|
||||
operator string() const { return toString(); }
|
||||
private:
|
||||
void makeComplex();
|
||||
template< class T >
|
||||
void appendComplex( const char *fieldName, const T& val ) {
|
||||
makeComplex();
|
||||
BSONObjBuilder b;
|
||||
b.appendElements(obj);
|
||||
b.append(fieldName, val);
|
||||
obj = b.obj();
|
||||
}
|
||||
};
|
||||
|
||||
/** Typically one uses the QUERY(...) macro to construct a Query object.
|
||||
Example: QUERY( "age" << 33 << "school" << "UCLA" )
|
||||
*/
|
||||
#define QUERY(x) mongo::Query( BSON(x) )
|
||||
|
||||
/**
|
||||
interface that handles communication with the db
|
||||
*/
|
||||
class DBConnector {
|
||||
public:
|
||||
virtual ~DBConnector() {}
|
||||
/** actualServer is set to the actual server where they call went if there was a choice (SlaveOk) */
|
||||
virtual bool call( Message &toSend, Message &response, bool assertOk=true , string * actualServer = 0 ) = 0;
|
||||
virtual void say( Message &toSend, bool isRetry = false ) = 0;
|
||||
virtual void sayPiggyBack( Message &toSend ) = 0;
|
||||
/* used by QueryOption_Exhaust. To use that your subclass must implement this. */
|
||||
virtual bool recv( Message& m ) { assert(false); return false; }
|
||||
// In general, for lazy queries, we'll need to say, recv, then checkResponse
|
||||
virtual void checkResponse( const char* data, int nReturned, bool* retry = NULL, string* targetHost = NULL ) {
|
||||
if( retry ) *retry = false; if( targetHost ) *targetHost = "";
|
||||
}
|
||||
virtual bool lazySupported() const = 0;
|
||||
};
|
||||
|
||||
/**
|
||||
The interface that any db connection should implement
|
||||
*/
|
||||
class DBClientInterface : boost::noncopyable {
|
||||
public:
|
||||
virtual auto_ptr<DBClientCursor> query(const string &ns, Query query, int nToReturn = 0, int nToSkip = 0,
|
||||
const BSONObj *fieldsToReturn = 0, int queryOptions = 0 , int batchSize = 0 ) = 0;
|
||||
|
||||
virtual void insert( const string &ns, BSONObj obj , int flags=0) = 0;
|
||||
|
||||
virtual void insert( const string &ns, const vector< BSONObj >& v , int flags=0) = 0;
|
||||
|
||||
virtual void remove( const string &ns , Query query, bool justOne = 0 ) = 0;
|
||||
|
||||
virtual void update( const string &ns , Query query , BSONObj obj , bool upsert = 0 , bool multi = 0 ) = 0;
|
||||
|
||||
virtual ~DBClientInterface() { }
|
||||
|
||||
/**
|
||||
@return a single object that matches the query. if none do, then the object is empty
|
||||
@throws AssertionException
|
||||
*/
|
||||
virtual BSONObj findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
|
||||
|
||||
/** query N objects from the database into an array. makes sense mostly when you want a small number of results. if a huge number, use
|
||||
query() and iterate the cursor.
|
||||
*/
|
||||
void findN(vector<BSONObj>& out, const string&ns, Query query, int nToReturn, int nToSkip = 0, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
|
||||
|
||||
virtual string getServerAddress() const = 0;
|
||||
|
||||
/** don't use this - called automatically by DBClientCursor for you */
|
||||
virtual auto_ptr<DBClientCursor> getMore( const string &ns, long long cursorId, int nToReturn = 0, int options = 0 ) = 0;
|
||||
};
|
||||
|
||||
/**
|
||||
DB "commands"
|
||||
Basically just invocations of connection.$cmd.findOne({...});
|
||||
*/
|
||||
class DBClientWithCommands : public DBClientInterface {
|
||||
set<string> _seenIndexes;
|
||||
public:
|
||||
/** controls how chatty the client is about network errors & such. See log.h */
|
||||
int _logLevel;
|
||||
|
||||
DBClientWithCommands() : _logLevel(0), _cachedAvailableOptions( (enum QueryOptions)0 ), _haveCachedAvailableOptions(false) { }
|
||||
|
||||
/** helper function. run a simple command where the command expression is simply
|
||||
{ command : 1 }
|
||||
@param info -- where to put result object. may be null if caller doesn't need that info
|
||||
@param command -- command name
|
||||
@return true if the command returned "ok".
|
||||
*/
|
||||
bool simpleCommand(const string &dbname, BSONObj *info, const string &command);
|
||||
|
||||
/** Run a database command. Database commands are represented as BSON objects. Common database
|
||||
commands have prebuilt helper functions -- see below. If a helper is not available you can
|
||||
directly call runCommand.
|
||||
|
||||
@param dbname database name. Use "admin" for global administrative commands.
|
||||
@param cmd the command object to execute. For example, { ismaster : 1 }
|
||||
@param info the result object the database returns. Typically has { ok : ..., errmsg : ... } fields
|
||||
set.
|
||||
@param options see enum QueryOptions - normally not needed to run a command
|
||||
@return true if the command returned "ok".
|
||||
*/
|
||||
virtual bool runCommand(const string &dbname, const BSONObj& cmd, BSONObj &info, int options=0);
|
||||
|
||||
/** Authorize access to a particular database.
|
||||
Authentication is separate for each database on the server -- you may authenticate for any
|
||||
number of databases on a single connection.
|
||||
The "admin" database is special and once authenticated provides access to all databases on the
|
||||
server.
|
||||
@param digestPassword if password is plain text, set this to true. otherwise assumed to be pre-digested
|
||||
@return true if successful
|
||||
*/
|
||||
virtual bool auth(const string &dbname, const string &username, const string &pwd, string& errmsg, bool digestPassword = true);
|
||||
|
||||
/** count number of objects in collection ns that match the query criteria specified
|
||||
throws UserAssertion if database returns an error
|
||||
*/
|
||||
virtual unsigned long long count(const string &ns, const BSONObj& query = BSONObj(), int options=0, int limit=0, int skip=0 );
        string createPasswordDigest( const string &username , const string &clearTextPassword );

        /** returns true in isMaster parm if this db is the current master
            of a replica pair.

            pass in info for more details e.g.:
              { "ismaster" : 1.0 , "msg" : "not paired" , "ok" : 1.0 }

            returns true if command invoked successfully.
        */
        virtual bool isMaster(bool& isMaster, BSONObj *info=0);

        /**
           Create a new collection in the database.  Normally, collection creation is automatic.  You would
           use this function if you wish to specify special options on creation.

           If the collection already exists, no action occurs.

           @param ns     fully qualified collection name
           @param size   desired initial extent size for the collection.
                         Must be <= 1000000000 for normal collections.
                         For fixed size (capped) collections, this size is the total/max size of the
                         collection.
           @param capped if true, this is a fixed size collection (where old data rolls out).
           @param max    maximum number of objects if capped (optional).

           returns true if successful.
        */
        bool createCollection(const string &ns, long long size = 0, bool capped = false, int max = 0, BSONObj *info = 0);
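        // Example (illustrative only): creating a 16MB capped collection holding at most
        // 1000 documents, with an assumed connected client "c".
        //
        //     BSONObj info;
        //     c.createCollection( "mydb.log" , 16 * 1024 * 1024 , true , 1000 , &info );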
        /** Get error result from the last write operation (insert/update/delete) on this connection.
            @return error message text, or empty string if no error.
        */
        string getLastError();

        /** Get error result from the last write operation (insert/update/delete) on this connection.
            @return full error object.
        */
        virtual BSONObj getLastErrorDetailed();

        /** Can be called with the returned value from getLastErrorDetailed to extract an error string.
            If all you need is the string, just call getLastError() instead.
        */
        static string getLastErrorString( const BSONObj& res );

        /** Return the last error which has occurred, even if not the very last operation.

            @return { err : <error message>, nPrev : <how_many_ops_back_occurred>, ok : 1 }

            result.err will be null if no error has occurred.
        */
        BSONObj getPrevError();

        /** Reset the previous error state for this connection (accessed via getLastError and
            getPrevError).  Useful when performing several operations at once and then checking
            for an error after attempting all operations.
        */
        bool resetError() { return simpleCommand("admin", 0, "reseterror"); }
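        // Example (illustrative only): the classic write-then-check pattern on an assumed
        // connected client "c".
        //
        //     c.insert( "mydb.people" , BSON( "name" << "eliot" ) );
        //     string err = c.getLastError();
        //     if ( !err.empty() )
        //         cout << "insert failed: " << err << endl;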
        /** Delete the specified collection. */
        virtual bool dropCollection( const string &ns ) {
            string db = nsGetDB( ns );
            string coll = nsGetCollection( ns );
            uassert( 10011 , "no collection name", coll.size() );

            BSONObj info;

            bool res = runCommand( db.c_str() , BSON( "drop" << coll ) , info );
            resetIndexCache();
            return res;
        }

        /** Perform a repair and compaction of the specified database.  May take a long time to run.  Disk space
            must be available equal to the size of the database while repairing.
        */
        bool repairDatabase(const string &dbname, BSONObj *info = 0) {
            return simpleCommand(dbname, info, "repairDatabase");
        }

        /** Copy database from one server or name to another server or name.

            Generally, you should dropDatabase() first as otherwise the copied information will MERGE
            into whatever data is already present in this database.

            For security reasons this function only works when you are authorized to access the "admin" db.  However,
            if you have access to said db, you can copy any database from one place to another.
            TODO: this needs enhancement to be more flexible in terms of security.

            This method provides a way to "rename" a database by copying it to a new db name and
            location.  The copy is "repaired" and compacted.

            fromdb   database name from which to copy.
            todb     database name to copy to.
            fromhost hostname of the database (and optionally, ":port") from which to
                     copy the data.  copies from self if "".

            returns true if successful
        */
        bool copyDatabase(const string &fromdb, const string &todb, const string &fromhost = "", BSONObj *info = 0);

        /** The Mongo database provides built-in performance profiling capabilities.  Use setDbProfilingLevel()
            to enable.  Profiling information is then written to the system.profile collection, which one can
            then query.
        */
        enum ProfilingLevel {
            ProfileOff = 0,
            ProfileSlow = 1, // log very slow (>100ms) operations
            ProfileAll = 2
        };
        bool setDbProfilingLevel(const string &dbname, ProfilingLevel level, BSONObj *info = 0);
        bool getDbProfilingLevel(const string &dbname, ProfilingLevel& level, BSONObj *info = 0);
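        // Example (illustrative only): turning on slow-operation profiling for a database and
        // reading back what was recorded; "c" is an assumed connected client.
        //
        //     c.setDbProfilingLevel( "mydb" , DBClientWithCommands::ProfileSlow );
        //     auto_ptr<DBClientCursor> cur = c.query( "mydb.system.profile" , Query() );
        //     while ( cur->more() )
        //         cout << cur->next().toString() << endl;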
        /** This implicitly converts from char*, string, and BSONObj to be an argument to mapreduce
            You shouldn't need to explicitly construct this
        */
        struct MROutput {
            MROutput(const char* collection) : out(BSON("replace" << collection)) {}
            MROutput(const string& collection) : out(BSON("replace" << collection)) {}
            MROutput(const BSONObj& obj) : out(obj) {}

            BSONObj out;
        };
        static MROutput MRInline;

        /** Run a map/reduce job on the server.

            See http://www.mongodb.org/display/DOCS/MapReduce

            ns         namespace (db+collection name) of input data
            jsmapf     javascript map function code
            jsreducef  javascript reduce function code.
            query      optional query filter for the input
            output     either a string collection name or an object representing output type
                       if not specified uses inline output type

            returns a result object which contains:
              { result : <collection_name>,
                numObjects : <number_of_objects_scanned>,
                timeMillis : <job_time>,
                ok : <1_if_ok>,
                [, err : <errmsg_if_error>]
              }

            For example one might call:
              result.getField("ok").trueValue()
            on the result to check if ok.
        */
        BSONObj mapreduce(const string &ns, const string &jsmapf, const string &jsreducef, BSONObj query = BSONObj(), MROutput output = MRInline);
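        // Example (illustrative only): tallying documents by "type" over an assumed
        // "mydb.events" collection, replacing the contents of "mydb.tally" with the output.
        //
        //     BSONObj res = c.mapreduce(
        //         "mydb.events" ,
        //         "function() { emit( this.type , 1 ); }" ,
        //         "function( k , vals ) { var n = 0; for ( var i = 0; i < vals.length; i++ ) n += vals[i]; return n; }" ,
        //         BSONObj() ,
        //         "tally" );
        //     if ( !res.getField( "ok" ).trueValue() )
        //         cout << "m/r failed: " << res.toString() << endl;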
        /** Run javascript code on the database server.
            dbname    database SavedContext in which the code runs.  The javascript variable 'db' will be assigned
                      to this database when the function is invoked.
            jscode    source code for a javascript function.
            info      the command object which contains any information on the invocation result including
                      the return value and other information.  If an error occurs running the jscode, error
                      information will be in info.  (try "out() << info.toString()")
            retValue  return value from the jscode function.
            args      args to pass to the jscode function.  when invoked, the 'args' variable will be defined
                      for use by the jscode.

            returns true if runs ok.

            See testDbEval() in dbclient.cpp for an example of usage.
        */
        bool eval(const string &dbname, const string &jscode, BSONObj& info, BSONElement& retValue, BSONObj *args = 0);
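        // Example (illustrative only): evaluating a small server-side function with one
        // argument, mirroring the testDbEval() pattern referenced above; "c" is assumed.
        //
        //     BSONObj info;
        //     BSONElement retValue;
        //     BSONObj args = BSON( "0" << 3 );
        //     if ( c.eval( "mydb" , "function( x ) { return x * 2; }" , info , retValue , &args ) )
        //         cout << "result: " << retValue.number() << endl;  // prints 6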
        /** validate a collection, checking for errors and reporting back statistics.
            this operation is slow and blocking.
        */
        bool validate( const string &ns , bool scandata=true ) {
            BSONObj cmd = BSON( "validate" << nsGetCollection( ns ) << "scandata" << scandata );
            BSONObj info;
            return runCommand( nsGetDB( ns ).c_str() , cmd , info );
        }

        /* The following helpers are simply more convenient forms of eval() for certain common cases */

        /* invocation with no return value of interest -- with or without one simple parameter */
        bool eval(const string &dbname, const string &jscode);
        template< class T >
        bool eval(const string &dbname, const string &jscode, T parm1) {
            BSONObj info;
            BSONElement retValue;
            BSONObjBuilder b;
            b.append("0", parm1);
            BSONObj args = b.done();
            return eval(dbname, jscode, info, retValue, &args);
        }

        /** eval invocation with one parm to server and one numeric field (either int or double) returned */
        template< class T, class NumType >
        bool eval(const string &dbname, const string &jscode, T parm1, NumType& ret) {
            BSONObj info;
            BSONElement retValue;
            BSONObjBuilder b;
            b.append("0", parm1);
            BSONObj args = b.done();
            if ( !eval(dbname, jscode, info, retValue, &args) )
                return false;
            ret = (NumType) retValue.number();
            return true;
        }

        /**
           get a list of all the current databases
           uses the { listDatabases : 1 } command.
           throws on error
        */
        list<string> getDatabaseNames();

        /**
           get a list of all the current collections in db
        */
        list<string> getCollectionNames( const string& db );

        bool exists( const string& ns );
        /** Create an index if it does not already exist.
            ensureIndex calls are remembered so it is safe/fast to call this function many
            times in your code.
            @param ns         collection to be indexed
            @param keys       the "key pattern" for the index.  e.g., { name : 1 }
            @param unique     if true, indicates that key uniqueness should be enforced for this index
            @param name       if not specified, it will be created from the keys automatically (which is recommended)
            @param cache      if set to false, the index cache for the connection won't remember this call
            @param background build index in the background (see mongodb docs/wiki for details)
            @param v          index version.  leave at default value.  (unit tests set this parameter.)
            @return whether or not a message was sent to the db.
                    should be true on first call, false on subsequent calls unless resetIndexCache was called
        */
        virtual bool ensureIndex( const string &ns , BSONObj keys , bool unique = false, const string &name = "",
                                  bool cache = true, bool background = false, int v = -1 );
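        // Example (illustrative only): ensuring a unique index on "name", with an assumed
        // client "c".  A repeated call is answered from the connection's index cache.
        //
        //     c.ensureIndex( "mydb.people" , BSON( "name" << 1 ) , /*unique=*/true );   // sends to db
        //     c.ensureIndex( "mydb.people" , BSON( "name" << 1 ) , true );              // cached no-op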
        /**
           clears the index cache, so the subsequent call to ensureIndex for any index will go to the server
        */
        virtual void resetIndexCache();

        virtual auto_ptr<DBClientCursor> getIndexes( const string &ns );

        virtual void dropIndex( const string& ns , BSONObj keys );
        virtual void dropIndex( const string& ns , const string& indexName );

        /**
           drops all indexes for the collection
        */
        virtual void dropIndexes( const string& ns );

        virtual void reIndex( const string& ns );

        string genIndexName( const BSONObj& keys );

        /** Erase / drop an entire database */
        virtual bool dropDatabase(const string &dbname, BSONObj *info = 0) {
            bool ret = simpleCommand(dbname, info, "dropDatabase");
            resetIndexCache();
            return ret;
        }

        virtual string toString() = 0;

        /** @return the database name portion of an ns string */
        string nsGetDB( const string &ns ) {
            string::size_type pos = ns.find( "." );
            if ( pos == string::npos )
                return ns;

            return ns.substr( 0 , pos );
        }

        /** @return the collection name portion of an ns string */
        string nsGetCollection( const string &ns ) {
            string::size_type pos = ns.find( "." );
            if ( pos == string::npos )
                return "";

            return ns.substr( pos + 1 );
        }

    protected:
        /** if the result of a command is ok */
        bool isOk(const BSONObj&);

        /** if the element contains a not master error */
        bool isNotMasterErrorString( const BSONElement& e );

        BSONObj _countCmd(const string &ns, const BSONObj& query, int options, int limit, int skip );

        enum QueryOptions availableOptions();

    private:
        enum QueryOptions _cachedAvailableOptions;
        bool _haveCachedAvailableOptions;
    };
    /**
       abstract class that implements the core db operations
    */
    class DBClientBase : public DBClientWithCommands, public DBConnector {
    protected:
        WriteConcern _writeConcern;

    public:
        DBClientBase() {
            _writeConcern = W_NORMAL;
        }

        WriteConcern getWriteConcern() const { return _writeConcern; }
        void setWriteConcern( WriteConcern w ) { _writeConcern = w; }

        /** send a query to the database.
            @param ns             namespace to query, format is <dbname>.<collectname>[.<collectname>]*
            @param query          query to perform on the collection.  this is a BSONObj (binary JSON)
                                  You may format as
                                    { query: { ... }, orderby: { ... } }
                                  to specify a sort order.
            @param nToReturn      n to return (i.e., limit).  0 = unlimited
            @param nToSkip        start with the nth item
            @param fieldsToReturn optional template of which fields to select.  if unspecified, returns all fields
            @param queryOptions   see options enum at top of this file

            @return cursor.  0 if error (connection failure)
            @throws AssertionException
        */
        virtual auto_ptr<DBClientCursor> query(const string &ns, Query query, int nToReturn = 0, int nToSkip = 0,
                                               const BSONObj *fieldsToReturn = 0, int queryOptions = 0 , int batchSize = 0 );
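        // Example (illustrative only): a sorted, projected query using the Query builder --
        // equivalent to the { query: ..., orderby: ... } form described above.  "c" is assumed.
        //
        //     BSONObj fields = BSON( "name" << 1 );
        //     auto_ptr<DBClientCursor> cur =
        //         c.query( "mydb.people" , Query( BSON( "age" << GT << 30 ) ).sort( "name" ) , 10 , 0 , &fields );
        //     while ( cur->more() )
        //         cout << cur->next()["name"].str() << endl;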
        /** don't use this - called automatically by DBClientCursor for you
            @param cursorId id of cursor to retrieve
            @return a handle to a previously allocated cursor
            @throws AssertionException
        */
        virtual auto_ptr<DBClientCursor> getMore( const string &ns, long long cursorId, int nToReturn = 0, int options = 0 );

        /**
           insert an object into the database
        */
        virtual void insert( const string &ns , BSONObj obj , int flags=0);

        /**
           insert a vector of objects into the database
        */
        virtual void insert( const string &ns, const vector< BSONObj >& v , int flags=0);

        /**
           remove matching objects from the database
           @param justOne if true, stop after removing the first matching object
        */
        virtual void remove( const string &ns , Query q , bool justOne = 0 );

        /**
           updates objects matching query
        */
        virtual void update( const string &ns , Query query , BSONObj obj , bool upsert = false , bool multi = false );

        virtual bool isFailed() const = 0;

        virtual void killCursor( long long cursorID ) = 0;

        virtual bool callRead( Message& toSend , Message& response ) = 0;
        // virtual bool callWrite( Message& toSend , Message& response ) = 0; // TODO: add this if needed

        virtual ConnectionString::ConnectionType type() const = 0;

        virtual double getSoTimeout() const = 0;

    }; // DBClientBase
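    // Example (illustrative only): the basic write operations against an assumed connected
    // client "c", with the upsert/multi/justOne flags spelled out.
    //
    //     c.insert( "mydb.people" , BSON( "name" << "sara" << "age" << 23 ) );
    //     c.update( "mydb.people" , QUERY( "name" << "sara" ) ,
    //               BSON( "$inc" << BSON( "age" << 1 ) ) , /*upsert=*/false , /*multi=*/false );
    //     c.remove( "mydb.people" , QUERY( "age" << LT << 18 ) , /*justOne=*/false );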
    class DBClientReplicaSet;

    class ConnectException : public UserException {
    public:
        ConnectException(string msg) : UserException(9000,msg) { }
    };

    /**
        A basic connection to the database.
        This is the main entry point for talking to a simple Mongo setup
    */
    class DBClientConnection : public DBClientBase {
    public:
        /**
           @param _autoReconnect if true, automatically reconnect on a connection failure
           @param cp used by DBClientReplicaSet.  You do not need to specify this parameter
           @param so_timeout tcp timeout in seconds - this is for read/write, not connect.
                  Connect timeout is fixed, but short, at 5 seconds.
         */
        DBClientConnection(bool _autoReconnect=false, DBClientReplicaSet* cp=0, double so_timeout=0) :
            clientSet(cp), _failed(false), autoReconnect(_autoReconnect), lastReconnectTry(0), _so_timeout(so_timeout) {
            _numConnections++;
        }

        virtual ~DBClientConnection() {
            _numConnections--;
        }

        /** Connect to a Mongo database server.

            If autoReconnect is true, you can try to use the DBClientConnection even when
            false was returned -- it will try to connect again.

            @param serverHostname host to connect to.  can include port number ( 127.0.0.1 , 127.0.0.1:5555 )
                                  If you use IPv6 you must add a port number ( ::1:27017 )
            @param errmsg any relevant error message will be appended to the string
            @deprecated please use HostAndPort
            @return false if fails to connect.
        */
        virtual bool connect(const char * hostname, string& errmsg) {
            // TODO: remove this method
            HostAndPort t( hostname );
            return connect( t , errmsg );
        }
        /** Connect to a Mongo database server.

            If autoReconnect is true, you can try to use the DBClientConnection even when
            false was returned -- it will try to connect again.

            @param server server to connect to.
            @param errmsg any relevant error message will be appended to the string
            @return false if fails to connect.
        */
        virtual bool connect(const HostAndPort& server, string& errmsg);

        /** Connect to a Mongo database server.  Exception throwing version.
            Throws a UserException if cannot connect.

            If autoReconnect is true, you can try to use the DBClientConnection even when
            false was returned -- it will try to connect again.

            @param serverHostname host to connect to.  can include port number ( 127.0.0.1 , 127.0.0.1:5555 )
        */
        void connect(const string& serverHostname) {
            string errmsg;
            if( !connect(HostAndPort(serverHostname), errmsg) )
                throw ConnectException(string("can't connect ") + errmsg);
        }
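        // Example (illustrative only): the typical connect-and-use sequence; the host is a
        // placeholder and auto-reconnect is enabled via the constructor flag.
        //
        //     DBClientConnection c( /*autoReconnect=*/true );
        //     try {
        //         c.connect( "localhost:27017" );  // throwing version
        //         cout << "connected to " << c.getServerAddress() << endl;
        //     }
        //     catch ( ConnectException& e ) {
        //         cout << e.what() << endl;
        //     }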
        virtual bool auth(const string &dbname, const string &username, const string &pwd, string& errmsg, bool digestPassword = true);

        virtual auto_ptr<DBClientCursor> query(const string &ns, Query query=Query(), int nToReturn = 0, int nToSkip = 0,
                                               const BSONObj *fieldsToReturn = 0, int queryOptions = 0 , int batchSize = 0 ) {
            checkConnection();
            return DBClientBase::query( ns, query, nToReturn, nToSkip, fieldsToReturn, queryOptions , batchSize );
        }

        /** Uses QueryOption_Exhaust
            Exhaust mode sends back all data queried as fast as possible, with no back-and-forth for OP_GETMORE.  If you are certain
            you will exhaust the query, it could be useful.

            Use the DBClientCursorBatchIterator version if you want to handle items in large blocks, perhaps to avoid granular locking and such.
        */
        unsigned long long query( boost::function<void(const BSONObj&)> f, const string& ns, Query query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
        unsigned long long query( boost::function<void(DBClientCursorBatchIterator&)> f, const string& ns, Query query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
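        // Example (illustrative only): streaming an entire collection through the exhaust
        // overload.  "printDoc" is a hypothetical free function; any callable matching the
        // boost::function signature works.
        //
        //     void printDoc( const BSONObj& o ) { cout << o.toString() << endl; }
        //     ...
        //     unsigned long long n = c.query( printDoc , "mydb.people" , Query() );
        //     cout << "streamed " << n << " documents" << endl;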
        virtual bool runCommand(const string &dbname, const BSONObj& cmd, BSONObj &info, int options=0);

        /**
           @return true if this connection is currently in a failed state.  When autoreconnect is on,
                   a connection will transition back to an ok state after reconnecting.
         */
        bool isFailed() const { return _failed; }

        MessagingPort& port() { assert(p); return *p; }

        string toStringLong() const {
            stringstream ss;
            ss << _serverString;
            if ( _failed ) ss << " failed";
            return ss.str();
        }

        /** Returns the address of the server */
        string toString() { return _serverString; }

        string getServerAddress() const { return _serverString; }

        virtual void killCursor( long long cursorID );
        virtual bool callRead( Message& toSend , Message& response ) { return call( toSend , response ); }
        virtual void say( Message &toSend, bool isRetry = false );
        virtual bool recv( Message& m );
        virtual void checkResponse( const char *data, int nReturned, bool* retry = NULL, string* host = NULL );
        virtual bool call( Message &toSend, Message &response, bool assertOk = true , string * actualServer = 0 );
        virtual ConnectionString::ConnectionType type() const { return ConnectionString::MASTER; }
        void setSoTimeout(double to) { _so_timeout = to; }
        double getSoTimeout() const { return _so_timeout; }

        virtual bool lazySupported() const { return true; }

        static int getNumConnections() {
            return _numConnections;
        }

        static void setLazyKillCursor( bool lazy ) { _lazyKillCursor = lazy; }
        static bool getLazyKillCursor() { return _lazyKillCursor; }

    protected:
        friend class SyncClusterConnection;
        virtual void sayPiggyBack( Message &toSend );

        DBClientReplicaSet *clientSet;
        boost::scoped_ptr<MessagingPort> p;
        boost::scoped_ptr<SockAddr> server;
        bool _failed;
        const bool autoReconnect;
        time_t lastReconnectTry;
        HostAndPort _server; // remember for reconnects
        string _serverString;
        void _checkConnection();

        // throws SocketException if in failed state and not reconnecting or if waiting to reconnect
        void checkConnection() { if( _failed ) _checkConnection(); }

        map< string, pair<string,string> > authCache;
        double _so_timeout;
        bool _connect( string& errmsg );

        static AtomicUInt _numConnections;
        static bool _lazyKillCursor; // lazy means we piggy back kill cursors on next op

#ifdef MONGO_SSL
        static SSLManager* sslManager();
        static SSLManager* _sslManager;
#endif
    };
    /** pings server to check if it's up */
    bool serverAlive( const string &uri );

    DBClientBase * createDirectClient();

    BSONElement getErrField( const BSONObj& result );
    bool hasErrField( const BSONObj& result );

} // namespace mongo

#include "dbclientcursor.h"
#include "dbclient_rs.h"
#include "undef_macros.h"
1097  client/dbclient_rs.cpp  Normal file  (file diff suppressed because it is too large)
384   client/dbclient_rs.h    Normal file
@@ -0,0 +1,384 @@
/** @file dbclient_rs.h Connect to a Replica Set, from C++ */

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "../pch.h"
#include "dbclient.h"

namespace mongo {

    class ReplicaSetMonitor;
    typedef shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorPtr;
    typedef pair<set<string>,set<int> > NodeDiff;

    /**
     * manages state about a replica set for client
     * keeps tabs on which node is master and which slaves are up
     * can hand a slave to someone for SLAVE_OK
     * one instance per process per replica set
     * TODO: we might be able to use a regular Node * to avoid _lock
     */
    class ReplicaSetMonitor {
    public:

        typedef boost::function1<void,const ReplicaSetMonitor*> ConfigChangeHook;

        /**
         * gets a cached Monitor per name, creating one if it doesn't exist
         */
        static ReplicaSetMonitorPtr get( const string& name , const vector<HostAndPort>& servers );
        /**
         * gets a cached Monitor per name, returning none if it doesn't exist
         */
        static ReplicaSetMonitorPtr get( const string& name );


        /**
         * checks all sets for current master and new secondaries
         * usually only called from a BackgroundJob
         */
        static void checkAll( bool checkAllSecondaries );

        /**
         * this is called whenever the config of any replica set changes
         * currently only 1 globally
         * asserts if one already exists
         * ownership passes to ReplicaSetMonitor and the hook will actually never be deleted
         */
        static void setConfigChangeHook( ConfigChangeHook hook );

        ~ReplicaSetMonitor();

        /** @return HostAndPort or throws an exception */
        HostAndPort getMaster();

        /**
         * notify the monitor that server has failed
         */
        void notifyFailure( const HostAndPort& server );

        /** @return prev if it's still ok, and if not returns a random slave that is ok for reads */
        HostAndPort getSlave( const HostAndPort& prev );

        /** @return a random slave that is ok for reads */
        HostAndPort getSlave();


        /**
         * notify the monitor that a slave has failed
         */
        void notifySlaveFailure( const HostAndPort& server );

        /**
         * checks for current master and new secondaries
         */
        void check( bool checkAllSecondaries );

        string getName() const { return _name; }

        string getServerAddress() const;

        bool contains( const string& server ) const;

        void appendInfo( BSONObjBuilder& b ) const;
    private:
        /**
         * This populates a list of hosts from the list of seeds (discarding the
         * seed list).
         * @param name set name
         * @param servers seeds
         */
        ReplicaSetMonitor( const string& name , const vector<HostAndPort>& servers );

        /**
         * Checks all connections from the host list and sets the current
         * master.
         *
         * @param checkAllSecondaries if set to false, stop immediately when
         *     the master is found or when _master is not -1.
         */
        void _check( bool checkAllSecondaries );

        /**
         * Use replSetGetStatus command to make sure hosts in host list are up
         * and readable.  Sets Node::ok appropriately.
         */
        void _checkStatus( const string& hostAddr );

        /**
         * Add array of hosts to host list.  Doesn't do anything if hosts are
         * already in host list.
         * @param hostList the list of hosts to add
         * @param changed if new hosts were added
         */
        void _checkHosts(const BSONObj& hostList, bool& changed);

        /**
         * Updates host list.
         * Invariant: if nodesOffset is >= 0, _nodes[nodesOffset].conn should be
         * equal to conn.
         *
         * @param conn the connection to check
         * @param maybePrimary OUT
         * @param verbose
         * @param nodesOffset - offset into _nodes array, -1 for not in it
         *
         * @return true if the connection is good or false if invariant
         *     is broken
         */
        bool _checkConnection( DBClientConnection* conn, string& maybePrimary,
                               bool verbose, int nodesOffset );

        string _getServerAddress_inlock() const;

        NodeDiff _getHostDiff_inlock( const BSONObj& hostList );
        bool _shouldChangeHosts( const BSONObj& hostList, bool inlock );

        /**
         * @return the index to _nodes corresponding to the server address.
         */
        int _find( const string& server ) const ;
        int _find_inlock( const string& server ) const ;

        /**
         * Checks whether the given connection matches the connection stored in _nodes.
         * Mainly used for sanity checking to confirm that nodeOffset still
         * refers to the right connection after releasing and reacquiring
         * a mutex.
         */
        bool _checkConnMatch_inlock( DBClientConnection* conn, size_t nodeOffset ) const;

        // protects _nodes and indices pointing to it (_master & _nextSlave)
        mutable mongo::mutex _lock;

        /**
         * "Synchronizes" the _checkConnection method.  Should ideally be one mutex per
         * connection object being used.  The purpose of this lock is to make sure that
         * the reply from the connection the lock holder got is the actual response
         * to what it sent.
         *
         * Deadlock WARNING: never acquire this while holding _lock
         */
        mutable mongo::mutex _checkConnectionLock;

        string _name;
        struct Node {
            Node( const HostAndPort& a , DBClientConnection* c )
                : addr( a ) , conn(c) , ok( c != NULL ),
                  ismaster(false), secondary( false ) , hidden( false ) , pingTimeMillis(0) {
            }

            bool okForSecondaryQueries() const {
                return ok && secondary && ! hidden;
            }

            BSONObj toBSON() const {
                return BSON( "addr" << addr.toString() <<
                             "isMaster" << ismaster <<
                             "secondary" << secondary <<
                             "hidden" << hidden <<
                             "ok" << ok );
            }

            string toString() const {
                return toBSON().toString();
            }

            HostAndPort addr;
            shared_ptr<DBClientConnection> conn;

            // if this node is in a failure state
            // used for slave routing
            // this is too simple, should make it better
            bool ok;

            // as reported by ismaster
            BSONObj lastIsMaster;

            bool ismaster;
            bool secondary;
            bool hidden;

            int pingTimeMillis;

        };

        /**
         * Host list.
         */
        vector<Node> _nodes;

        int _master; // which node is the current master.  -1 means no master is known
        int _nextSlave; // which node is the current slave

        static mongo::mutex _setsLock; // protects _sets
        static map<string,ReplicaSetMonitorPtr> _sets; // set name to Monitor

        static ConfigChangeHook _hook;
    };
    /** Use this class to connect to a replica set of servers.  The class will manage
        checking for which server in a replica set is master, and do failover automatically.

        This can also be used to connect to replica pairs since pairs are a subset of sets

        On a failover situation, expect at least one operation to return an error (throw
        an exception) before the failover is complete.  Operations are not retried.
    */
    class DBClientReplicaSet : public DBClientBase {

    public:
        /** Call connect() after constructing.  autoReconnect is always on for DBClientReplicaSet connections. */
        DBClientReplicaSet( const string& name , const vector<HostAndPort>& servers, double so_timeout=0 );
        virtual ~DBClientReplicaSet();

        /** Returns false if no member of the set was reachable or none is master.
         *  Even when false is returned you can still try to use this connection object --
         *  it will attempt reconnects.
         */
        bool connect();

        /** Authorize.  Authorizes all nodes as needed
        */
        virtual bool auth(const string &dbname, const string &username, const string &pwd, string& errmsg, bool digestPassword = true );
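        // Example (illustrative only): connecting to a three-member set; the set name and
        // seed hosts are placeholders.
        //
        //     vector<HostAndPort> seeds;
        //     seeds.push_back( HostAndPort( "host1:27017" ) );
        //     seeds.push_back( HostAndPort( "host2:27017" ) );
        //     seeds.push_back( HostAndPort( "host3:27017" ) );
        //     DBClientReplicaSet rs( "myset" , seeds );
        //     if ( !rs.connect() )
        //         cout << "no member of myset reachable" << endl;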
        // ----------- simple functions --------------

        /** throws userassertion "no master found" */
        virtual auto_ptr<DBClientCursor> query(const string &ns, Query query, int nToReturn = 0, int nToSkip = 0,
                                               const BSONObj *fieldsToReturn = 0, int queryOptions = 0 , int batchSize = 0 );

        /** throws userassertion "no master found" */
        virtual BSONObj findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);

        virtual void insert( const string &ns , BSONObj obj , int flags=0);

        /** insert multiple objects.  Note that single object insert is asynchronous, so this version
            is only nominally faster and not worth a special effort to try to use.  */
        virtual void insert( const string &ns, const vector< BSONObj >& v , int flags=0);

        virtual void remove( const string &ns , Query obj , bool justOne = 0 );

        virtual void update( const string &ns , Query query , BSONObj obj , bool upsert = 0 , bool multi = 0 );

        virtual void killCursor( long long cursorID );

        // ---- access raw connections ----

        DBClientConnection& masterConn();
        DBClientConnection& slaveConn();

        // ---- callback pieces -------

        virtual void say( Message &toSend, bool isRetry = false );
        virtual bool recv( Message &toRecv );
        virtual void checkResponse( const char* data, int nReturned, bool* retry = NULL, string* targetHost = NULL );

        /* this is the callback from our underlying connections to notify us that we got a "not master" error.
         */
        void isntMaster();

        /* this is used to indicate we got a "not master or secondary" error from a secondary.
         */
        void isntSecondary();

        // ----- status ------

        virtual bool isFailed() const { return ! _master || _master->isFailed(); }

        // ----- informational ----

        double getSoTimeout() const { return _so_timeout; }

        string toString() { return getServerAddress(); }

        string getServerAddress() const { return _monitor->getServerAddress(); }

        virtual ConnectionString::ConnectionType type() const { return ConnectionString::SET; }
        virtual bool lazySupported() const { return true; }

        // ---- low level ------

        virtual bool call( Message &toSend, Message &response, bool assertOk=true , string * actualServer = 0 );
        virtual bool callRead( Message& toSend , Message& response ) { return checkMaster()->callRead( toSend , response ); }


    protected:
        virtual void sayPiggyBack( Message &toSend ) { checkMaster()->say( toSend ); }

    private:

        // Used to simplify slave-handling logic on errors
        auto_ptr<DBClientCursor> checkSlaveQueryResult( auto_ptr<DBClientCursor> result );

        DBClientConnection * checkMaster();
        DBClientConnection * checkSlave();

        void _auth( DBClientConnection * conn );

        ReplicaSetMonitorPtr _monitor;

        HostAndPort _masterHost;
        scoped_ptr<DBClientConnection> _master;

        HostAndPort _slaveHost;
        scoped_ptr<DBClientConnection> _slave;

        double _so_timeout;

        /**
         * for storing authentication info
         * fields are exactly for DBClientConnection::auth
         */
        struct AuthInfo {
            AuthInfo( string d , string u , string p , bool di )
                : dbname( d ) , username( u ) , pwd( p ) , digestPassword( di ) {}
            string dbname;
            string username;
            string pwd;
            bool digestPassword;
        };

        // we need to store so that when we connect to a new node on failure
        // we can re-auth
        // this could be a security issue, as the password is stored in memory
        // not sure if/how we should handle
        list<AuthInfo> _auths;

    protected:

        /**
         * for storing (non-threadsafe) information between lazy calls
         */
        class LazyState {
        public:
            LazyState() : _lastClient( NULL ), _lastOp( -1 ), _slaveOk( false ), _retries( 0 ) {}
            DBClientConnection* _lastClient;
            int _lastOp;
            bool _slaveOk;
            int _retries;

        } _lazyState;

    };


}
319  client/dbclientcursor.cpp  Normal file
@@ -0,0 +1,319 @@
// dbclientcursor.cpp - connect to a Mongo database as a database, from C++

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "pch.h"
#include "dbclient.h"
#include "../db/dbmessage.h"
#include "../db/cmdline.h"
#include "connpool.h"
#include "../s/shard.h"

namespace mongo {

    void assembleRequest( const string &ns, BSONObj query, int nToReturn, int nToSkip, const BSONObj *fieldsToReturn, int queryOptions, Message &toSend );

    int DBClientCursor::nextBatchSize() {

        if ( nToReturn == 0 )
            return batchSize;

        if ( batchSize == 0 )
            return nToReturn;

        return batchSize < nToReturn ? batchSize : nToReturn;
    }
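    // Worked example (illustrative only) of the rule above -- the request size is whichever
    // of the caller's limit and the batch size is set, or the smaller of the two:
    //   nToReturn = 0,   batchSize = 0   ->   0  (server default)
    //   nToReturn = 0,   batchSize = 50  ->  50
    //   nToReturn = 100, batchSize = 0   -> 100
    //   nToReturn = 100, batchSize = 50  ->  50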
    void DBClientCursor::_assembleInit( Message& toSend ) {
        if ( !cursorId ) {
            assembleRequest( ns, query, nextBatchSize() , nToSkip, fieldsToReturn, opts, toSend );
        }
        else {
            BufBuilder b;
            b.appendNum( opts );
            b.appendStr( ns );
            b.appendNum( nToReturn );
            b.appendNum( cursorId );
            toSend.setData( dbGetMore, b.buf(), b.len() );
        }
    }

    bool DBClientCursor::init() {
        Message toSend;
        _assembleInit( toSend );

        if ( !_client->call( toSend, *b.m, false ) ) {
            // log msg temp?
            log() << "DBClientCursor::init call() failed" << endl;
            return false;
        }
        if ( b.m->empty() ) {
            // log msg temp?
            log() << "DBClientCursor::init message from call() was empty" << endl;
            return false;
        }
        dataReceived();
        return true;
    }
    void DBClientCursor::initLazy( bool isRetry ) {
        verify( 15875 , _client->lazySupported() );
        Message toSend;
        _assembleInit( toSend );
        _client->say( toSend, isRetry );
    }

    bool DBClientCursor::initLazyFinish( bool& retry ) {

        bool recvd = _client->recv( *b.m );

        // If we get a bad response, return false
        if ( ! recvd || b.m->empty() ) {

            if( !recvd )
                log() << "DBClientCursor::init lazy say() failed" << endl;
            if( b.m->empty() )
                log() << "DBClientCursor::init message from say() was empty" << endl;

            _client->checkResponse( NULL, -1, &retry, &_lazyHost );

            return false;

        }

        dataReceived( retry, _lazyHost );
        return ! retry;
    }

    void DBClientCursor::requestMore() {
        assert( cursorId && b.pos == b.nReturned );

        if (haveLimit) {
            nToReturn -= b.nReturned;
            assert(nToReturn > 0);
        }
        BufBuilder b;
        b.appendNum(opts);
        b.appendStr(ns);
        b.appendNum(nextBatchSize());
        b.appendNum(cursorId);

        Message toSend;
        toSend.setData(dbGetMore, b.buf(), b.len());
        auto_ptr<Message> response(new Message());

        if ( _client ) {
            _client->call( toSend, *response );
            this->b.m = response;
            dataReceived();
        }
        else {
            assert( _scopedHost.size() );
            ScopedDbConnection conn( _scopedHost );
            conn->call( toSend , *response );
            _client = conn.get();
            this->b.m = response;
            dataReceived();
            _client = 0;
            conn.done();
        }
    }
    /** with QueryOption_Exhaust, the server just blasts data at us (marked at end with cursorid==0). */
    void DBClientCursor::exhaustReceiveMore() {
        assert( cursorId && b.pos == b.nReturned );
        assert( !haveLimit );
        auto_ptr<Message> response(new Message());
        assert( _client );
        if ( _client->recv(*response) ) {
            b.m = response;
            dataReceived();
        }
    }

    void DBClientCursor::dataReceived( bool& retry, string& host ) {

        QueryResult *qr = (QueryResult *) b.m->singleData();
        resultFlags = qr->resultFlags();

        if ( qr->resultFlags() & ResultFlag_ErrSet ) {
            wasError = true;
        }

        if ( qr->resultFlags() & ResultFlag_CursorNotFound ) {
            // cursor id no longer valid at the server.
            assert( qr->cursorId == 0 );
            cursorId = 0; // 0 indicates no longer valid (dead)
            if ( ! ( opts & QueryOption_CursorTailable ) )
                throw UserException( 13127 , "getMore: cursor didn't exist on server, possible restart or timeout?" );
        }

        if ( cursorId == 0 || ! ( opts & QueryOption_CursorTailable ) ) {
            // only set initially: we don't want to kill it on end of data
            // if it's a tailable cursor
            cursorId = qr->cursorId;
        }

        b.nReturned = qr->nReturned;
        b.pos = 0;
        b.data = qr->data();

        _client->checkResponse( b.data, b.nReturned, &retry, &host ); // watches for "not master"

        /* this assert would fire the way we currently work:
            assert( nReturned || cursorId == 0 );
        */
    }
    /** If true, safe to call next().  Requests more from server if necessary. */
    bool DBClientCursor::more() {
        _assertIfNull();

        if ( !_putBack.empty() )
            return true;

        if (haveLimit && b.pos >= nToReturn)
            return false;

        if ( b.pos < b.nReturned )
            return true;

        if ( cursorId == 0 )
            return false;

        requestMore();
        return b.pos < b.nReturned;
    }

    BSONObj DBClientCursor::next() {
        DEV _assertIfNull();
        if ( !_putBack.empty() ) {
            BSONObj ret = _putBack.top();
            _putBack.pop();
            return ret;
        }

        uassert(13422, "DBClientCursor next() called but more() is false", b.pos < b.nReturned);

        b.pos++;
        BSONObj o(b.data);
        b.data += o.objsize();
        /* todo would be good to make data null at end of batch for safety */
        return o;
    }
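    // Example (illustrative only): the canonical more()/next() loop a caller drives against
    // this cursor; "c" is an assumed connected client.
    //
    //     auto_ptr<DBClientCursor> cur = c.query( "mydb.people" , Query() );
    //     while ( cur->more() ) {
    //         BSONObj o = cur->next();  // becomes invalid once the next batch is fetched
    //         cout << o.toString() << endl;
    //     }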
    void DBClientCursor::peek(vector<BSONObj>& v, int atMost) {
        int m = atMost;

        /*
        for( stack<BSONObj>::iterator i = _putBack.begin(); i != _putBack.end(); i++ ) {
            if( m == 0 )
                return;
            v.push_back(*i);
            m--;
            n++;
        }
        */

        int p = b.pos;
        const char *d = b.data;
        while( m && p < b.nReturned ) {
            BSONObj o(d);
            d += o.objsize();
            p++;
            m--;
            v.push_back(o);
        }
    }

    bool DBClientCursor::peekError(BSONObj* error){
        if( ! wasError ) return false;

        vector<BSONObj> v;
        peek(v, 1);

        assert( v.size() == 1 );
        assert( hasErrField( v[0] ) );

        if( error ) *error = v[0].getOwned();
        return true;
    }

    void DBClientCursor::attach( AScopedConnection * conn ) {
        assert( _scopedHost.size() == 0 );
        assert( conn );
        assert( conn->get() );

        if ( conn->get()->type() == ConnectionString::SET ||
             conn->get()->type() == ConnectionString::SYNC ) {
            if( _lazyHost.size() > 0 )
                _scopedHost = _lazyHost;
            else if( _client )
                _scopedHost = _client->getServerAddress();
            else
                massert(14821, "No client or lazy client specified, cannot store multi-host connection.", false);
        }
        else {
            _scopedHost = conn->getHost();
        }

        conn->done();
        _client = 0;
        _lazyHost = "";
    }
    DBClientCursor::~DBClientCursor() {
        if (!this)
            return;

        DESTRUCTOR_GUARD (

        if ( cursorId && _ownCursor && ! inShutdown() ) {
            BufBuilder b;
            b.appendNum( (int)0 ); // reserved
            b.appendNum( (int)1 ); // number
            b.appendNum( cursorId );

            Message m;
            m.setData( dbKillCursors , b.buf() , b.len() );

            if ( _client ) {

                // Kill the cursor the same way the connection itself would.  Usually, non-lazily
                if( DBClientConnection::getLazyKillCursor() )
                    _client->sayPiggyBack( m );
                else
                    _client->say( m );

            }
            else {
                assert( _scopedHost.size() );
                ScopedDbConnection conn( _scopedHost );

                if( DBClientConnection::getLazyKillCursor() )
                    conn->sayPiggyBack( m );
                else
                    conn->say( m );

                conn.done();
            }
        }

        );
    }


} // namespace mongo
client/dbclientcursor.h
@@ -17,28 +17,26 @@
#pragma once

#include "mongo/pch.h"

#include "../pch.h"
#include "../util/net/message.h"
#include "../db/jsobj.h"
#include "../db/json.h"
#include <stack>

#include "mongo/client/dbclientinterface.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
#include "mongo/util/net/message.h"

namespace mongo {

    class AScopedConnection;

    /** for mock purposes only -- do not create variants of DBClientCursor, nor hang code here
        @see DBClientMockCursor
    */
    class DBClientCursorInterface : boost::noncopyable {
    /** for mock purposes only -- do not create variants of DBClientCursor, nor hang code here */
    class DBClientCursorInterface {
    public:
        virtual ~DBClientCursorInterface() {}

        virtual bool more() = 0;
        virtual BSONObj next() = 0;

        // TODO bring more of the DBClientCursor interface to here

    protected:
        DBClientCursorInterface() {}
    };
@@ -54,7 +52,7 @@ namespace mongo {
            if you want to exhaust whatever data has been fetched to the client already but
            then perhaps stop.
        */
        int objsLeftInBatch() const { _assertIfNull(); return _putBack.size() + batch.nReturned - batch.pos; }
        int objsLeftInBatch() const { _assertIfNull(); return _putBack.size() + b.nReturned - b.pos; }
        bool moreInCurrentBatch() { return objsLeftInBatch() > 0; }

        /** next
@@ -62,9 +60,6 @@
            on an error at the remote server, you will get back:
              { $err: <string> }
            if you do not want to handle that yourself, call nextSafe().

            Warning: The returned BSONObj will become invalid after the next batch
                is fetched or when this cursor is destroyed.
        */
        BSONObj next();

@@ -92,9 +87,6 @@
        */
        void peek(vector<BSONObj>&, int atMost);

        // Peeks at first element, if exists
        BSONObj peekFirst();

        /**
         * peek ahead and see if an error occurred, and get the error if so.
         */
@@ -141,11 +133,9 @@
            fieldsToReturn(_fieldsToReturn),
            opts(queryOptions),
            batchSize(bs==1?2:bs),
            resultFlags(0),
            cursorId(),
            _ownCursor( true ),
            wasError( false ) {
            _finishConsInit();
        }

        DBClientCursor( DBClientBase* client, const string &_ns, long long _cursorId, int _nToReturn, int options ) :
@@ -153,15 +143,9 @@
            ns(_ns),
            nToReturn( _nToReturn ),
            haveLimit( _nToReturn > 0 && !(options & QueryOption_CursorTailable)),
            nToSkip(0),
            fieldsToReturn(0),
            opts( options ),
            batchSize(0),
            resultFlags(0),
            cursorId(_cursorId),
            _ownCursor(true),
            wasError(false) {
            _finishConsInit();
            _ownCursor( true ) {
        }

        virtual ~DBClientCursor();
@@ -175,22 +159,6 @@
        void attach( AScopedConnection * conn );

        string originalHost() const { return _originalHost; }

        string getns() const { return ns; }

        Message* getMessage(){ return batch.m.get(); }

        /**
         * Used mainly to run commands on connections that don't support lazy initialization
         * and do not support commands through the call interface.
         *
         * @param cmd The BSON representation of the command to send.
         *
         * @return true if command was sent successfully
         */
        bool initCommand();

        /**
         * actually does the query
         */
@@ -214,11 +182,9 @@
        friend class DBClientConnection;

        int nextBatchSize();
        void _finishConsInit();

        Batch batch;
        Batch b;
        DBClientBase* _client;
        string _originalHost;
        string ns;
        BSONObj query;
        int nToReturn;

@@ -270,3 +236,4 @@
} // namespace mongo

#include "undef_macros.h"
967  client/distlock.cpp  Normal file
@@ -0,0 +1,967 @@
// @file distlock.cpp

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "pch.h"
#include "dbclient.h"
#include "distlock.h"

namespace mongo {

    LabeledLevel DistributedLock::logLvl( 1 );
    DistributedLock::LastPings DistributedLock::lastPings;

    ThreadLocalValue<string> distLockIds("");

    /* ==================
     * Module initialization
     */

    boost::once_flag _init = BOOST_ONCE_INIT;
    static string* _cachedProcessString = NULL;

    static void initModule() {
        // cache process string
        stringstream ss;
        ss << getHostName() << ":" << cmdLine.port << ":" << time(0) << ":" << rand();
        _cachedProcessString = new string( ss.str() );
    }

    /* =================== */

    string getDistLockProcess() {
        boost::call_once( initModule, _init );
        assert( _cachedProcessString );
        return *_cachedProcessString;
    }

    string getDistLockId() {
        string s = distLockIds.get();
        if ( s.empty() ) {
            stringstream ss;
            ss << getDistLockProcess() << ":" << getThreadName() << ":" << rand();
            s = ss.str();
            distLockIds.set( s );
        }
        return s;
    }
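    // For illustration only -- with hypothetical values, the ids built above look roughly like:
    //   "host1:27017:1332358048:123456"             process string (host:port:startTime:rand)
    //   "host1:27017:1332358048:123456:conn4:789"   per-thread lock id (process:threadName:rand)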
    class DistributedLockPinger {
    public:

        DistributedLockPinger()
            : _mutex( "DistributedLockPinger" ) {
        }

        void _distLockPingThread( ConnectionString addr, string process, unsigned long long sleepTime ) {

            setThreadName( "LockPinger" );

            string pingId = pingThreadId( addr, process );

            log( DistributedLock::logLvl - 1 ) << "creating distributed lock ping thread for " << addr
                                               << " and process " << process
                                               << " (sleeping for " << sleepTime << "ms)" << endl;

            static int loops = 0;
            while( ! inShutdown() && ! shouldKill( addr, process ) ) {

                log( DistributedLock::logLvl + 2 ) << "distributed lock pinger '" << pingId << "' about to ping." << endl;

                Date_t pingTime;

                try {
                    ScopedDbConnection conn( addr, 30.0 );

                    pingTime = jsTime();

                    // refresh the entry corresponding to this process in the lockpings collection
                    conn->update( DistributedLock::lockPingNS ,
                                  BSON( "_id" << process ) ,
                                  BSON( "$set" << BSON( "ping" << pingTime ) ) ,
                                  true );

                    string err = conn->getLastError();
                    if ( ! err.empty() ) {
                        warning() << "pinging failed for distributed lock pinger '" << pingId << "'."
                                  << causedBy( err ) << endl;
                        conn.done();

                        // Sleep for normal ping time
                        sleepmillis(sleepTime);
                        continue;
                    }

                    // remove really old entries from the lockpings collection if they're not holding a lock
                    // (this may happen if an instance of a process was taken down and no new instance came up to
                    // replace it for quite a while)
                    // if the lock is taken, the take-over mechanism should handle the situation
                    auto_ptr<DBClientCursor> c = conn->query( DistributedLock::locksNS , BSONObj() );
                    // TODO: Would be good to make clear whether query throws or returns empty on errors
                    uassert( 16060, str::stream() << "cannot query locks collection on config server " << conn.getHost(), c.get() );

                    set<string> pids;
                    while ( c->more() ) {
                        BSONObj lock = c->next();
                        if ( ! lock["process"].eoo() ) {
                            pids.insert( lock["process"].valuestrsafe() );
                        }
                    }

                    Date_t fourDays = pingTime - ( 4 * 86400 * 1000 ); // 4 days
                    conn->remove( DistributedLock::lockPingNS , BSON( "_id" << BSON( "$nin" << pids ) << "ping" << LT << fourDays ) );
                    err = conn->getLastError();
                    if ( ! err.empty() ) {
                        warning() << "ping cleanup for distributed lock pinger '" << pingId << "' failed."
                                  << causedBy( err ) << endl;
                        conn.done();

                        // Sleep for normal ping time
                        sleepmillis(sleepTime);
                        continue;
                    }

                    // create index so remove is fast even with a lot of servers
                    if ( loops++ == 0 ) {
                        conn->ensureIndex( DistributedLock::lockPingNS , BSON( "ping" << 1 ) );
                    }

                    log( DistributedLock::logLvl - ( loops % 10 == 0 ? 1 : 0 ) ) << "cluster " << addr << " pinged successfully at " << pingTime
                            << " by distributed lock pinger '" << pingId
                            << "', sleeping for " << sleepTime << "ms" << endl;

                    // Remove old locks, if possible
                    // Make sure no one else is adding to this list at the same time
                    scoped_lock lk( _mutex );

                    int numOldLocks = _oldLockOIDs.size();
                    if( numOldLocks > 0 )
                        log( DistributedLock::logLvl - 1 ) << "trying to delete " << _oldLockOIDs.size() << " old lock entries for process " << process << endl;

                    bool removed = false;
                    for( list<OID>::iterator i = _oldLockOIDs.begin(); i != _oldLockOIDs.end();
                            i = ( removed ? _oldLockOIDs.erase( i ) : ++i ) ) {
                        removed = false;
                        try {
                            // Got OID from lock with id, so we don't need to specify id again
                            conn->update( DistributedLock::locksNS ,
                                          BSON( "ts" << *i ),
                                          BSON( "$set" << BSON( "state" << 0 ) ) );

                            // Either the update went through or it didn't, either way we're done trying to
                            // unlock
                            log( DistributedLock::logLvl - 1 ) << "handled late remove of old distributed lock with ts " << *i << endl;
                            removed = true;
                        }
                        catch( UpdateNotTheSame& ) {
                            log( DistributedLock::logLvl - 1 ) << "partially removed old distributed lock with ts " << *i << endl;
                            removed = true;
                        }
                        catch ( std::exception& e) {
                            warning() << "could not remove old distributed lock with ts " << *i
                                      << causedBy( e ) << endl;
                        }

                    }

                    if( numOldLocks > 0 && _oldLockOIDs.size() > 0 ){
                        log( DistributedLock::logLvl - 1 ) << "not all old lock entries could be removed for process " << process << endl;
                    }

                    conn.done();

                }
                catch ( std::exception& e ) {
                    warning() << "distributed lock pinger '" << pingId << "' detected an exception while pinging."
                              << causedBy( e ) << endl;
                }

                sleepmillis(sleepTime);
            }

            warning() << "removing distributed lock ping thread '" << pingId << "'" << endl;


            if( shouldKill( addr, process ) )
                finishKill( addr, process );

        }
|
||||
|
||||
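        // Sketch of the cleanup semantics above (illustrative shell form, not part of the
        // original source): the remove over config.lockpings is equivalent to
        //
        //     db.lockpings.remove( { _id : { $nin : [ <pids appearing in locks> ] },
        //                            ping : { $lt : <pingTime - 4 days> } } )
        //
        // so only processes that appear in no lock document and have not pinged for four
        // days are purged.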
        void distLockPingThread( ConnectionString addr, long long clockSkew, string processId, unsigned long long sleepTime ) {
            try {
                jsTimeVirtualThreadSkew( clockSkew );
                _distLockPingThread( addr, processId, sleepTime );
            }
            catch ( std::exception& e ) {
                error() << "unexpected error while running distributed lock pinger for " << addr << ", process " << processId << causedBy( e ) << endl;
            }
            catch ( ... ) {
                error() << "unknown error while running distributed lock pinger for " << addr << ", process " << processId << endl;
            }
        }
        string pingThreadId( const ConnectionString& conn, const string& processId ) {
            return conn.toString() + "/" + processId;
        }
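        // For a three-member config cluster the resulting id looks roughly like
        // "cfg1:27019,cfg2:27019,cfg3:27019/<processId>" (hostnames illustrative; the exact
        // prefix is whatever ConnectionString::toString() renders for the connection).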
        string got( DistributedLock& lock, unsigned long long sleepTime ) {

            // Make sure we don't start multiple threads for a process id
            scoped_lock lk( _mutex );

            const ConnectionString& conn = lock.getRemoteConnection();
            const string& processId = lock.getProcessId();
            string s = pingThreadId( conn, processId );

            // Ignore if we already have a pinging thread for this process.
            if ( _seen.count( s ) > 0 ) return s;

            // Check our clock skew
            try {
                if( lock.isRemoteTimeSkewed() ) {
                    throw LockException( str::stream() << "clock skew of the cluster " << conn.toString() << " is too far out of bounds to allow distributed locking." , 13650 );
                }
            }
            catch( LockException& e) {
                throw LockException( str::stream() << "error checking clock skew of cluster " << conn.toString() << causedBy( e ) , 13651);
            }

            boost::thread t( boost::bind( &DistributedLockPinger::distLockPingThread, this, conn, getJSTimeVirtualThreadSkew(), processId, sleepTime) );

            _seen.insert( s );

            return s;
        }
        void addUnlockOID( const OID& oid ) {
            // Modifying the lock from some other thread
            scoped_lock lk( _mutex );
            _oldLockOIDs.push_back( oid );
        }

        bool willUnlockOID( const OID& oid ) {
            scoped_lock lk( _mutex );
            return find( _oldLockOIDs.begin(), _oldLockOIDs.end(), oid ) != _oldLockOIDs.end();
        }

        void kill( const ConnectionString& conn, const string& processId ) {
            // Make sure we're in a consistent state before other threads can see us
            scoped_lock lk( _mutex );

            string pingId = pingThreadId( conn, processId );

            assert( _seen.count( pingId ) > 0 );
            _kill.insert( pingId );
        }

        bool shouldKill( const ConnectionString& conn, const string& processId ) {
            return _kill.count( pingThreadId( conn, processId ) ) > 0;
        }

        void finishKill( const ConnectionString& conn, const string& processId ) {
            // Make sure we're in a consistent state before other threads can see us
            scoped_lock lk( _mutex );

            string pingId = pingThreadId( conn, processId );

            _kill.erase( pingId );
            _seen.erase( pingId );
        }

        set<string> _kill;
        set<string> _seen;
        mongo::mutex _mutex;
        list<OID> _oldLockOIDs;

    } distLockPinger;
    const string DistributedLock::lockPingNS = "config.lockpings";
    const string DistributedLock::locksNS = "config.locks";

    /**
     * Create a new distributed lock, potentially with a custom sleep and takeover time. If a custom lock
     * timeout is specified, the ping interval (time between pings) and the allowed clock skew are derived
     * from it; otherwise the default LOCK_TIMEOUT applies.
     */
    DistributedLock::DistributedLock( const ConnectionString& conn , const string& name , unsigned long long lockTimeout, bool asProcess )
        : _conn(conn) , _name(name) , _id( BSON( "_id" << name ) ), _processId( asProcess ? getDistLockId() : getDistLockProcess() ),
          _lockTimeout( lockTimeout == 0 ? LOCK_TIMEOUT : lockTimeout ), _maxClockSkew( _lockTimeout / LOCK_SKEW_FACTOR ), _maxNetSkew( _maxClockSkew ), _lockPing( _maxClockSkew ),
          _mutex( "DistributedLock" )
    {
        log( logLvl ) << "created new distributed lock for " << name << " on " << conn
                      << " ( lock timeout : " << _lockTimeout
                      << ", ping interval : " << _lockPing << ", process : " << asProcess << " )"
                      << endl;
    }
    DistributedLock::PingData DistributedLock::LastPings::getLastPing( const ConnectionString& conn, const string& lockName ){
        scoped_lock lock( _mutex );
        return _lastPings[ std::pair< string, string >( conn.toString(), lockName ) ];
    }

    void DistributedLock::LastPings::setLastPing( const ConnectionString& conn, const string& lockName, const PingData& pd ){
        scoped_lock lock( _mutex );
        _lastPings[ std::pair< string, string >( conn.toString(), lockName ) ] = pd;
    }

    Date_t DistributedLock::getRemoteTime() {
        return DistributedLock::remoteTime( _conn, _maxNetSkew );
    }

    bool DistributedLock::isRemoteTimeSkewed() {
        return !DistributedLock::checkSkew( _conn, NUM_LOCK_SKEW_CHECKS, _maxClockSkew, _maxNetSkew );
    }

    const ConnectionString& DistributedLock::getRemoteConnection() {
        return _conn;
    }

    const string& DistributedLock::getProcessId() {
        return _processId;
    }
    /**
     * Returns the remote time as reported by the cluster or server. The maximum difference between the reported time
     * and the actual time on the remote server (at the completion of the function) is the maxNetSkew.
     */
    Date_t DistributedLock::remoteTime( const ConnectionString& cluster, unsigned long long maxNetSkew ) {

        ConnectionString server( *cluster.getServers().begin() );
        ScopedDbConnection conn( server );

        BSONObj result;
        long long delay;

        try {
            Date_t then = jsTime();
            bool success = conn->runCommand( string("admin"), BSON( "serverStatus" << 1 ), result );
            delay = jsTime() - then;

            if( !success )
                throw TimeNotFoundException( str::stream() << "could not get status from server "
                                             << server.toString() << " in cluster " << cluster.toString()
                                             << " to check time", 13647 );

            // Make sure that our delay is not more than 2x our maximum network skew, since this is the max our remote
            // time value can be off by if we assume a response in the middle of the delay.
            if( delay > (long long) (maxNetSkew * 2) )
                throw TimeNotFoundException( str::stream() << "server " << server.toString()
                                             << " in cluster " << cluster.toString()
                                             << " did not respond within max network delay of "
                                             << maxNetSkew << "ms", 13648 );
        }
        catch(...) {
            conn.done();
            throw;
        }

        conn.done();

        return result["localTime"].Date() - (delay / 2);
    }
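    // A numeric sketch of the midpoint estimate above (values illustrative, not from the
    // original source): if the serverStatus round trip measured delay = 40ms and the server
    // reported localTime = T, the remote clock "now" is approximated as
    //
    //     remoteNow = result["localTime"].Date() - ( delay / 2 )   // i.e. T - 20ms
    //
    // The approximation is off by at most delay / 2, and the delay > 2 * maxNetSkew check
    // above caps that error at maxNetSkew.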
    bool DistributedLock::checkSkew( const ConnectionString& cluster, unsigned skewChecks, unsigned long long maxClockSkew, unsigned long long maxNetSkew ) {

        vector<HostAndPort> servers = cluster.getServers();

        if(servers.size() < 1) return true;

        vector<long long> avgSkews;

        for(unsigned i = 0; i < skewChecks; i++) {

            // Find the average skew for each server
            unsigned s = 0;
            for(vector<HostAndPort>::iterator si = servers.begin(); si != servers.end(); ++si,s++) {

                if(i == 0) avgSkews.push_back(0);

                // Could check if this is self, but shouldn't matter since local network connection should be fast.
                ConnectionString server( *si );

                vector<long long> skew;

                BSONObj result;

                Date_t remote = remoteTime( server, maxNetSkew );
                Date_t local = jsTime();

                // Remote time can be delayed by at most MAX_NET_SKEW

                // Skew is how much time we'd have to add to local to get to remote
                avgSkews[s] += (long long) (remote - local);

                log( logLvl + 1 ) << "skew from remote server " << server << " found: " << (long long) (remote - local) << endl;
            }
        }

        // Analyze skews

        long long serverMaxSkew = 0;
        long long serverMinSkew = 0;

        for(unsigned s = 0; s < avgSkews.size(); s++) {

            long long avgSkew = (avgSkews[s] /= skewChecks);

            // Keep track of max and min skews
            if(s == 0) {
                serverMaxSkew = avgSkew;
                serverMinSkew = avgSkew;
            }
            else {
                if(avgSkew > serverMaxSkew)
                    serverMaxSkew = avgSkew;
                if(avgSkew < serverMinSkew)
                    serverMinSkew = avgSkew;
            }
        }

        long long totalSkew = serverMaxSkew - serverMinSkew;

        // Make sure our max skew is not more than our pre-set limit
        if(totalSkew > (long long) maxClockSkew) {
            log( logLvl + 1 ) << "total clock skew of " << totalSkew << "ms for servers " << cluster << " is out of " << maxClockSkew << "ms bounds." << endl;
            return false;
        }

        log( logLvl + 1 ) << "total clock skew of " << totalSkew << "ms for servers " << cluster << " is in " << maxClockSkew << "ms bounds." << endl;
        return true;
    }
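    // Worked example of the skew analysis above (numbers illustrative): with average skews
    // of +20ms, -15ms and +5ms across three config servers, totalSkew is 20 - (-15) = 35ms,
    // so the cluster passes the check only if maxClockSkew >= 35ms.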
    // For use in testing only; in practice the ping thread should run indefinitely.
    bool DistributedLock::killPinger( DistributedLock& lock ) {
        if( lock._threadId == "") return false;

        distLockPinger.kill( lock._conn, lock._processId );
        return true;
    }
    // Semantics of this method are basically that if the lock cannot be acquired, it returns false and can be retried.
    // If the lock should not be tried again (some unexpected error), a LockException is thrown.
    // If we are only trying to re-enter a currently held lock, reenter should be true.
    // Note: reenter doesn't actually make this lock re-entrant in the normal sense, since it can still only
    // be unlocked once; instead it is used to verify that the lock is already held.
    bool DistributedLock::lock_try( const string& why , bool reenter, BSONObj * other ) {

        // TODO: Start pinging only when we actually get the lock?
        // If we don't have a thread pinger, make sure we shouldn't have one
        if( _threadId == "" ){
            scoped_lock lk( _mutex );
            _threadId = distLockPinger.got( *this, _lockPing );
        }

        // This should always be true; if not, we are using the lock incorrectly.
        assert( _name != "" );

        log( logLvl ) << "trying to acquire new distributed lock for " << _name << " on " << _conn
                      << " ( lock timeout : " << _lockTimeout
                      << ", ping interval : " << _lockPing << ", process : " << _processId << " )"
                      << endl;

        // write to dummy if 'other' is null
        BSONObj dummyOther;
        if ( other == NULL )
            other = &dummyOther;

        ScopedDbConnection conn( _conn );

        BSONObjBuilder queryBuilder;
        queryBuilder.appendElements( _id );
        queryBuilder.append( "state" , 0 );

        {
            // make sure it's there so we can use simple update logic below
            BSONObj o = conn->findOne( locksNS , _id ).getOwned();

            // Case 1: No locks
            if ( o.isEmpty() ) {
                try {
                    log( logLvl ) << "inserting initial doc in " << locksNS << " for lock " << _name << endl;
                    conn->insert( locksNS , BSON( "_id" << _name << "state" << 0 << "who" << "" ) );
                }
                catch ( UserException& e ) {
                    warning() << "could not insert initial doc for distributed lock " << _name << causedBy( e ) << endl;
                }
            }

            // Case 2: A set lock that we might be able to force
            else if ( o["state"].numberInt() > 0 ) {

                string lockName = o["_id"].String() + string("/") + o["process"].String();

                bool canReenter = reenter && o["process"].String() == _processId && ! distLockPinger.willUnlockOID( o["ts"].OID() ) && o["state"].numberInt() == 2;
                if( reenter && ! canReenter ) {
                    log( logLvl - 1 ) << "not re-entering distributed lock " << lockName;
                    if( o["process"].String() != _processId ) log( logLvl - 1 ) << ", different process " << _processId << endl;
                    else if( o["state"].numberInt() != 2 ) log( logLvl - 1 ) << ", state not finalized" << endl;
                    else log( logLvl - 1 ) << ", ts " << o["ts"].OID() << " scheduled for late unlock" << endl;

                    // reset since we've been bounced by a previous lock not being where we thought it was,
                    // and should go through full forcing process if required.
                    // (in theory we should never see a ping here if used correctly)
                    *other = o; other->getOwned(); conn.done(); resetLastPing();
                    return false;
                }
                BSONObj lastPing = conn->findOne( lockPingNS , o["process"].wrap( "_id" ) );
                if ( lastPing.isEmpty() ) {
                    log( logLvl ) << "empty ping found for process in lock '" << lockName << "'" << endl;
                    // TODO: Using 0 as a "no time found" value will fail if dates roll over, but then, so will a lot.
                    lastPing = BSON( "_id" << o["process"].String() << "ping" << (Date_t) 0 );
                }

                unsigned long long elapsed = 0;
                unsigned long long takeover = _lockTimeout;
                PingData _lastPingCheck = getLastPing();

                log( logLvl ) << "checking last ping for lock '" << lockName << "'" << " against process " << _lastPingCheck.get<0>() << " and ping " << _lastPingCheck.get<1>() << endl;

                try {

                    Date_t remote = remoteTime( _conn );

                    // Time out the elapsed time using comparisons of the remote clock.
                    // For non-finalized locks, time out 15 minutes since last seen (ts).
                    // For finalized locks, time out 15 minutes since last ping.
                    bool recPingChange = o["state"].numberInt() == 2 && ( _lastPingCheck.get<0>() != lastPing["_id"].String() || _lastPingCheck.get<1>() != lastPing["ping"].Date() );
                    bool recTSChange = _lastPingCheck.get<3>() != o["ts"].OID();

                    if( recPingChange || recTSChange ) {
                        // If the ping has changed since we last checked, mark the current date and time
                        setLastPing( PingData( lastPing["_id"].String().c_str(), lastPing["ping"].Date(), remote, o["ts"].OID() ) );
                    }
                    else {

                        // GOTCHA! Due to network issues, it is possible that the current time
                        // is less than the remote time. We *have* to check this here, otherwise
                        // we overflow and our lock breaks.
                        if(_lastPingCheck.get<2>() >= remote)
                            elapsed = 0;
                        else
                            elapsed = remote - _lastPingCheck.get<2>();
                    }
                }
                catch( LockException& e ) {

                    // Remote server cannot be found / is not responsive
                    warning() << "Could not get remote time from " << _conn << causedBy( e );
                    // If our config server is having issues, forget all the pings until we can see it again
                    resetLastPing();
                }
                if ( elapsed <= takeover && ! canReenter ) {
                    log( logLvl ) << "could not force lock '" << lockName << "' because elapsed time " << elapsed << " <= takeover time " << takeover << endl;
                    *other = o; other->getOwned(); conn.done();
                    return false;
                }
                else if( elapsed > takeover && canReenter ) {
                    log( logLvl - 1 ) << "not re-entering distributed lock '" << lockName << "' because elapsed time " << elapsed << " > takeover time " << takeover << endl;
                    *other = o; other->getOwned(); conn.done();
                    return false;
                }

                log( logLvl - 1 ) << ( canReenter ? "re-entering" : "forcing" ) << " lock '" << lockName << "' because "
                                  << ( canReenter ? "re-entering is allowed, " : "" )
                                  << "elapsed time " << elapsed << " > takeover time " << takeover << endl;
                if( elapsed > takeover ) {

                    // Lock may be forced; reset our timer whether it succeeds or fails.
                    // Ensures that another timeout must happen if something borks up here, and resets our pristine
                    // ping state if acquired.
                    resetLastPing();

                    try {

                        // Check the clock skew again. If we check this before we get a lock
                        // and after the lock times out, we can be pretty sure the time is
                        // increasing at the same rate on all servers and therefore our
                        // timeout is accurate
                        uassert( 14023, str::stream() << "remote time in cluster " << _conn.toString() << " is now skewed, cannot force lock.", !isRemoteTimeSkewed() );

                        // Make sure we break the lock with the correct "ts" (OID) value, otherwise
                        // we can overwrite a new lock inserted in the meantime.
                        conn->update( locksNS , BSON( "_id" << _id["_id"].String() << "state" << o["state"].numberInt() << "ts" << o["ts"] ),
                                      BSON( "$set" << BSON( "state" << 0 ) ) );

                        BSONObj err = conn->getLastErrorDetailed();
                        string errMsg = DBClientWithCommands::getLastErrorString(err);

                        // TODO: Clean up all the extra code to exit this method, probably with a refactor
                        if ( !errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1 ) {
                            ( errMsg.empty() ? log( logLvl - 1 ) : warning() ) << "Could not force lock '" << lockName << "' "
                                    << ( !errMsg.empty() ? causedBy(errMsg) : string("(another force won)") ) << endl;
                            *other = o; other->getOwned(); conn.done();
                            return false;
                        }

                    }
                    catch( UpdateNotTheSame& ) {
                        // Ok to continue since we know we forced at least one lock document, and all lock docs
                        // are required for a lock to be held.
                        warning() << "lock forcing " << lockName << " inconsistent" << endl;
                    }
                    catch( std::exception& e ) {
                        conn.done();
                        throw LockException( str::stream() << "exception forcing distributed lock "
                                             << lockName << causedBy( e ), 13660);
                    }

                }
                else {

                    assert( canReenter );

                    // Lock may be re-entered; reset our timer whether it succeeds or fails.
                    // Not strictly necessary, but helpful for small timeouts where thread scheduling is significant.
                    // This ensures that two attempts are still required for a force if not acquired, and resets our
                    // state if we are acquired.
                    resetLastPing();

                    // Test that the lock is held by trying to update the finalized state of the lock to the same state.
                    // If it does not update, or does not update on all servers, we can't re-enter.
                    try {

                        // Test the lock with the correct "ts" (OID) value
                        conn->update( locksNS , BSON( "_id" << _id["_id"].String() << "state" << 2 << "ts" << o["ts"] ),
                                      BSON( "$set" << BSON( "state" << 2 ) ) );

                        BSONObj err = conn->getLastErrorDetailed();
                        string errMsg = DBClientWithCommands::getLastErrorString(err);

                        // TODO: Clean up all the extra code to exit this method, probably with a refactor
                        if ( ! errMsg.empty() || ! err["n"].type() || err["n"].numberInt() < 1 ) {
                            ( errMsg.empty() ? log( logLvl - 1 ) : warning() ) << "Could not re-enter lock '" << lockName << "' "
                                    << ( !errMsg.empty() ? causedBy(errMsg) : string("(not sure lock is held)") )
                                    << " gle: " << err
                                    << endl;
                            *other = o; other->getOwned(); conn.done();
                            return false;
                        }

                    }
                    catch( UpdateNotTheSame& ) {
                        // NOT ok to continue since our lock isn't held by all servers, so isn't valid.
                        warning() << "inconsistent state re-entering lock, lock " << lockName << " not held" << endl;
                        *other = o; other->getOwned(); conn.done();
                        return false;
                    }
                    catch( std::exception& e ) {
                        conn.done();
                        throw LockException( str::stream() << "exception re-entering distributed lock "
                                             << lockName << causedBy( e ), 13660);
                    }

                    log( logLvl - 1 ) << "re-entered distributed lock '" << lockName << "'" << endl;
                    *other = o; other->getOwned(); conn.done();
                    return true;

                }

                log( logLvl - 1 ) << "lock '" << lockName << "' successfully forced" << endl;

                // We don't need the ts value in the query, since we will only ever replace locks with state=0.
            }
            // Case 3: We have an expired lock
            else if ( o["ts"].type() ) {
                queryBuilder.append( o["ts"] );
            }
        }

        // Always reset our ping if we're trying to get a lock, since getting a lock implies the lock state is open
        // and no locks need to be forced. If anything goes wrong, we don't want to remember an old lock.
        resetLastPing();

        bool gotLock = false;
        BSONObj currLock;

        BSONObj lockDetails = BSON( "state" << 1 << "who" << getDistLockId() << "process" << _processId <<
                                    "when" << jsTime() << "why" << why << "ts" << OID::gen() );
        BSONObj whatIWant = BSON( "$set" << lockDetails );

        BSONObj query = queryBuilder.obj();

        string lockName = _name + string("/") + _processId;
        try {

            // Main codepath to acquire lock

            log( logLvl ) << "about to acquire distributed lock '" << lockName << "':\n"
                          << lockDetails.jsonString(Strict, true) << "\n"
                          << query.jsonString(Strict, true) << endl;

            conn->update( locksNS , query , whatIWant );

            BSONObj err = conn->getLastErrorDetailed();
            string errMsg = DBClientWithCommands::getLastErrorString(err);

            currLock = conn->findOne( locksNS , _id );

            if ( !errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1 ) {
                ( errMsg.empty() ? log( logLvl - 1 ) : warning() ) << "could not acquire lock '" << lockName << "' "
                        << ( !errMsg.empty() ? causedBy( errMsg ) : string("(another update won)") ) << endl;
                *other = currLock;
                other->getOwned();
                gotLock = false;
            }
            else {
                gotLock = true;
            }

        }
        catch ( UpdateNotTheSame& up ) {

            // this means our update got through on some servers, but not others
            warning() << "distributed lock '" << lockName << "' did not propagate properly." << causedBy( up ) << endl;

            // Overall protection derives from:
            // All unlocking updates use the ts value when setting state to 0
            // This ensures that during locking, we can override all smaller ts locks with
            // our own safe ts value and not be unlocked afterward.
            for ( unsigned i = 0; i < up.size(); i++ ) {

                ScopedDbConnection indDB( up[i].first );
                BSONObj indUpdate;

                try {

                    indUpdate = indDB->findOne( locksNS , _id );

                    // If we override this lock in any way, grab and protect it.
                    // We assume/ensure that if a process does not have all lock documents, it is no longer
                    // holding the lock.
                    // Note - finalized locks may compete too, but we know they've won already if competing
                    // in this round. Cleanup of crashes during finalizing may take a few tries.
                    if( indUpdate["ts"] < lockDetails["ts"] || indUpdate["state"].numberInt() == 0 ) {

                        BSONObj grabQuery = BSON( "_id" << _id["_id"].String() << "ts" << indUpdate["ts"].OID() );

                        // Change ts so we won't be forced, state so we won't be relocked
                        BSONObj grabChanges = BSON( "ts" << lockDetails["ts"].OID() << "state" << 1 );

                        // Either our update will succeed, and we'll grab the lock, or it will fail b/c some other
                        // process grabbed the lock (which will change the ts), but the lock will be set until forcing
                        indDB->update( locksNS, grabQuery, BSON( "$set" << grabChanges ) );

                        indUpdate = indDB->findOne( locksNS, _id );

                        // Our lock should now be set until forcing.
                        assert( indUpdate["state"].numberInt() == 1 );

                    }
                    // else our lock is the same, in which case we're safe, or it's a bigger lock,
                    // in which case we won't need to protect anything since we won't have the lock.

                }
                catch( std::exception& e ) {
                    conn.done();
                    throw LockException( str::stream() << "distributed lock " << lockName
                                         << " had errors communicating with individual server "
                                         << up[i].first << causedBy( e ), 13661 );
                }

                assert( !indUpdate.isEmpty() );

                // Find max TS value
                if ( currLock.isEmpty() || currLock["ts"] < indUpdate["ts"] ) {
                    currLock = indUpdate.getOwned();
                }

                indDB.done();

            }

            // Locks on all servers are now set and safe until forcing

            if ( currLock["ts"] == lockDetails["ts"] ) {
                log( logLvl - 1 ) << "lock update won, completing lock propagation for '" << lockName << "'" << endl;
                gotLock = true;
            }
            else {
                log( logLvl - 1 ) << "lock update lost, lock '" << lockName << "' not propagated." << endl;

                // Register the lock for deletion, to speed up failover
                // Not strictly necessary, but helpful
                distLockPinger.addUnlockOID( lockDetails["ts"].OID() );

                gotLock = false;
            }
        }
        catch( std::exception& e ) {
            conn.done();
            throw LockException( str::stream() << "exception creating distributed lock "
                                 << lockName << causedBy( e ), 13663 );
        }
        // Complete lock propagation
        if( gotLock ) {

            // This is now safe, since we know that no new locks will be placed on top of the ones we've checked for at
            // least 15 minutes. Sets the state = 2, so that future clients can determine that the lock is truly set.
            // The invariant for rollbacks is that we will never force locks with state = 2 and active pings, since that
            // indicates the lock is active, but this means the process creating/destroying them must explicitly poll
            // when something goes wrong.
            try {

                BSONObjBuilder finalLockDetails;
                BSONObjIterator bi( lockDetails );
                while( bi.more() ) {
                    BSONElement el = bi.next();
                    if( (string) ( el.fieldName() ) == "state" )
                        finalLockDetails.append( "state", 2 );
                    else finalLockDetails.append( el );
                }

                conn->update( locksNS , _id , BSON( "$set" << finalLockDetails.obj() ) );

                BSONObj err = conn->getLastErrorDetailed();
                string errMsg = DBClientWithCommands::getLastErrorString(err);

                currLock = conn->findOne( locksNS , _id );

                if ( !errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1 ) {
                    warning() << "could not finalize winning lock " << lockName
                              << ( !errMsg.empty() ? causedBy( errMsg ) : " (did not update lock) " ) << endl;
                    gotLock = false;
                }
                else {
                    // SUCCESS!
                    gotLock = true;
                }

            }
            catch( std::exception& e ) {
                conn.done();

                // Register the bad final lock for deletion, in case it exists
                distLockPinger.addUnlockOID( lockDetails["ts"].OID() );

                throw LockException( str::stream() << "exception finalizing winning lock"
                                     << causedBy( e ), 13662 );
            }

        }

        *other = currLock;
        other->getOwned();

        // Log our lock results
        if(gotLock)
            log( logLvl - 1 ) << "distributed lock '" << lockName << "' acquired, ts : " << currLock["ts"].OID() << endl;
        else
            log( logLvl - 1 ) << "distributed lock '" << lockName << "' was not acquired." << endl;

        conn.done();

        return gotLock;
    }
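    // Shape of a fully acquired lock document in config.locks, as assembled from lockDetails
    // above (field values illustrative):
    //
    //     { _id : "balancer", state : 2, who : "<distLockId>", process : "<processId>",
    //       when : ISODate("..."), why : "...", ts : ObjectId("...") }
    //
    // where state 0 = unlocked, 1 = taken but not yet propagated/finalized, 2 = held.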
    // Unlock now takes an optional pointer to the lock, so you can be specific about which
    // particular lock you want to unlock. This is required when the config server is down,
    // and so cannot tell you what lock ts you should try later.
    void DistributedLock::unlock( BSONObj* oldLockPtr ) {

        assert( _name != "" );

        string lockName = _name + string("/") + _processId;

        const int maxAttempts = 3;
        int attempted = 0;

        BSONObj oldLock;
        if( oldLockPtr ) oldLock = *oldLockPtr;

        while ( ++attempted <= maxAttempts ) {

            ScopedDbConnection conn( _conn );

            try {

                if( oldLock.isEmpty() )
                    oldLock = conn->findOne( locksNS, _id );

                if( oldLock["state"].eoo() || oldLock["state"].numberInt() != 2 || oldLock["ts"].eoo() ) {
                    warning() << "cannot unlock invalid distributed lock " << oldLock << endl;
                    conn.done();
                    break;
                }

                // Use ts when updating lock, so that new locks can be sure they won't get trampled.
                conn->update( locksNS ,
                              BSON( "_id" << _id["_id"].String() << "ts" << oldLock["ts"].OID() ),
                              BSON( "$set" << BSON( "state" << 0 ) ) );

                // Check that the lock was actually unlocked... if not, try again
                BSONObj err = conn->getLastErrorDetailed();
                string errMsg = DBClientWithCommands::getLastErrorString(err);

                if ( !errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1 ){
                    warning() << "distributed lock unlock update failed, retrying "
                              << ( errMsg.empty() ? causedBy( "( update not registered )" ) : causedBy( errMsg ) ) << endl;
                    conn.done();
                    continue;
                }

                log( logLvl - 1 ) << "distributed lock '" << lockName << "' unlocked. " << endl;
                conn.done();
                return;
            }
            catch( UpdateNotTheSame& ) {
                log( logLvl - 1 ) << "distributed lock '" << lockName << "' unlocked (messily). " << endl;
                conn.done();
                break;
            }
            catch ( std::exception& e) {
                warning() << "distributed lock '" << lockName << "' failed unlock attempt."
                          << causedBy( e ) << endl;

                conn.done();
                // TODO: If our lock timeout is small, sleeping this long may be unsafe.
                if( attempted != maxAttempts) sleepsecs(1 << attempted);
            }
        }

        if( attempted > maxAttempts && ! oldLock.isEmpty() && ! oldLock["ts"].eoo() ) {

            log( logLvl - 1 ) << "could not unlock distributed lock with ts " << oldLock["ts"].OID()
                              << ", will attempt again later" << endl;

            // We couldn't unlock the lock at all, so try again later in the pinging thread...
            distLockPinger.addUnlockOID( oldLock["ts"].OID() );
        }
        else if( attempted > maxAttempts ) {
            warning() << "could not unlock untracked distributed lock, a manual force may be required" << endl;
        }

        warning() << "distributed lock '" << lockName << "' couldn't consummate unlock request. "
                  << "lock may be taken over after " << ( _lockTimeout / (60 * 1000) )
                  << " minutes timeout." << endl;
    }
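    // Retry pacing of the loop above: with maxAttempts = 3, an attempt that fails with an
    // exception sleeps 1 << attempted seconds before the next try (2s after the first
    // failure, 4s after the second); the final attempt does not sleep.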

}
244 client/distlock.h Normal file
@@ -0,0 +1,244 @@
// distlock.h

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "../pch.h"
#include "dbclient.h"
#include "connpool.h"
#include "redef_macros.h"
#include "syncclusterconnection.h"
#define LOCK_TIMEOUT (15 * 60 * 1000)
#define LOCK_SKEW_FACTOR (30)
#define LOCK_PING (LOCK_TIMEOUT / LOCK_SKEW_FACTOR)
#define MAX_LOCK_NET_SKEW (LOCK_TIMEOUT / LOCK_SKEW_FACTOR)
#define MAX_LOCK_CLOCK_SKEW (LOCK_TIMEOUT / LOCK_SKEW_FACTOR)
#define NUM_LOCK_SKEW_CHECKS (3)

// The maximum clock skew we need to handle between config servers is
// 2 * MAX_LOCK_NET_SKEW + MAX_LOCK_CLOCK_SKEW.

// Net effect of *this* clock being slow is effectively a multiplier on the max net skew
// and a linear increase or decrease of the max clock skew.
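// With the defaults above, the derived values work out to:
//   LOCK_TIMEOUT        = 900000 ms (15 minutes until a lock may be taken over)
//   LOCK_PING           =  30000 ms (30 seconds between pings)
//   MAX_LOCK_NET_SKEW   =  30000 ms
//   MAX_LOCK_CLOCK_SKEW =  30000 ms
// so the maximum handled config-server skew, 2 * MAX_LOCK_NET_SKEW + MAX_LOCK_CLOCK_SKEW,
// comes to 90 seconds.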
namespace mongo {

    /**
     * Exception class to encapsulate exceptions while managing distributed locks
     */
    class LockException : public DBException {
    public:
        LockException( const char * msg , int code ) : DBException( msg, code ) {}
        LockException( const string& msg, int code ) : DBException( msg, code ) {}
        virtual ~LockException() throw() { }
    };

    /**
     * Indicates an error in retrieving time values from remote servers.
     */
    class TimeNotFoundException : public LockException {
    public:
        TimeNotFoundException( const char * msg , int code ) : LockException( msg, code ) {}
        TimeNotFoundException( const string& msg, int code ) : LockException( msg, code ) {}
        virtual ~TimeNotFoundException() throw() { }
    };
    /**
     * The distributed lock is a configdb backed way of synchronizing system-wide tasks. A task must be identified by a
     * unique name across the system (e.g., "balancer"). A lock is taken by writing a document in the configdb's locks
     * collection with that name.
     *
     * To be maintained, each taken lock needs to be revalidated ("pinged") within a pre-established amount of time. This
     * class does this maintenance automatically once a DistributedLock object is constructed.
     */
    class DistributedLock {
    public:

        static LabeledLevel logLvl;

        typedef boost::tuple<string, Date_t, Date_t, OID> PingData;
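        // Tuple fields, as read back via get<N>() in lock_try(): get<0>() = pinging process
        // id, get<1>() = last ping time seen, get<2>() = remote time when the ping was
        // recorded, get<3>() = the lock's "ts" OID.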

        class LastPings {
        public:
            LastPings() : _mutex( "DistributedLock::LastPings" ) {}
            ~LastPings(){}

            PingData getLastPing( const ConnectionString& conn, const string& lockName );
            void setLastPing( const ConnectionString& conn, const string& lockName, const PingData& pd );

            mongo::mutex _mutex;
            map< std::pair<string, string>, PingData > _lastPings;
        };

        static LastPings lastPings;
        /**
         * The constructor does not connect to the configdb yet and constructing does not mean the lock was acquired.
         * Construction does trigger a lock "pinging" mechanism, though.
         *
         * @param conn address of config(s) server(s)
         * @param name identifier for the lock
         * @param lockTimeout how long the lock can go "unpinged" before a new attempt to lock steals it (in milliseconds)
         * @param asProcess identify the lock holder by the connection-specific id (getDistLockId()) rather than the
         * process-wide id (getDistLockProcess())
         */
        DistributedLock( const ConnectionString& conn , const string& name , unsigned long long lockTimeout = 0, bool asProcess = false );
        ~DistributedLock(){};
        /**
         * Attempts to acquire 'this' lock, checking if it could or should be stolen from the previous holder. Please
         * consider using the dist_lock_try construct to acquire this lock in an exception safe way.
         *
         * @param why human readable description of why the lock is being taken (used to log)
         * @param reenter whether this is a lock re-entry or a new lock
         * @param other configdb's lock document that is currently holding the lock, if lock is taken, or our own lock
         * details if not
         * @return true if it managed to grab the lock
         */
        bool lock_try( const string& why , bool reenter = false, BSONObj * other = 0 );
        /**
         * Releases a previously taken lock.
         */
        void unlock( BSONObj* oldLockPtr = NULL );

        Date_t getRemoteTime();

        bool isRemoteTimeSkewed();

        const string& getProcessId();

        const ConnectionString& getRemoteConnection();

        /**
         * Check the skew between a cluster of servers
         */
        static bool checkSkew( const ConnectionString& cluster, unsigned skewChecks = NUM_LOCK_SKEW_CHECKS, unsigned long long maxClockSkew = MAX_LOCK_CLOCK_SKEW, unsigned long long maxNetSkew = MAX_LOCK_NET_SKEW );

        /**
         * Get the remote time from a server or cluster
         */
        static Date_t remoteTime( const ConnectionString& cluster, unsigned long long maxNetSkew = MAX_LOCK_NET_SKEW );

        static bool killPinger( DistributedLock& lock );

        /**
         * Namespace for lock pings
         */
        static const string lockPingNS;

        /**
         * Namespace for locks
         */
        static const string locksNS;

        const ConnectionString _conn;
        const string _name;
        const BSONObj _id;
        const string _processId;

        // Timeout for lock, usually LOCK_TIMEOUT
        const unsigned long long _lockTimeout;
        const unsigned long long _maxClockSkew;
        const unsigned long long _maxNetSkew;
        const unsigned long long _lockPing;

    private:

        void resetLastPing(){ lastPings.setLastPing( _conn, _name, PingData() ); }
        void setLastPing( const PingData& pd ){ lastPings.setLastPing( _conn, _name, pd ); }
        PingData getLastPing(){ return lastPings.getLastPing( _conn, _name ); }

        // May or may not exist, depending on startup
        mongo::mutex _mutex;
        string _threadId;

    };
    class dist_lock_try {
    public:

        dist_lock_try() : _lock(NULL), _got(false) {}

        dist_lock_try( const dist_lock_try& that ) : _lock(that._lock), _got(that._got), _other(that._other) {
            _other.getOwned();

            // Make sure the lock ownership passes to this object,
            // so we only unlock once.
            ((dist_lock_try&) that)._got = false;
            ((dist_lock_try&) that)._lock = NULL;
            ((dist_lock_try&) that)._other = BSONObj();
        }

        // Needed so we can handle lock exceptions in context of lock try.
        dist_lock_try& operator=( const dist_lock_try& that ){

            if( this == &that ) return *this;

            _lock = that._lock;
            _got = that._got;
            _other = that._other;
            _other.getOwned();
            _why = that._why;

            // Make sure the lock ownership passes to this object,
            // so we only unlock once.
            ((dist_lock_try&) that)._got = false;
            ((dist_lock_try&) that)._lock = NULL;
            ((dist_lock_try&) that)._other = BSONObj();

            return *this;
        }

        dist_lock_try( DistributedLock * lock , string why )
            : _lock(lock), _why(why) {
            _got = _lock->lock_try( why , false , &_other );
        }

        ~dist_lock_try() {
            if ( _got ) {
                assert( ! _other.isEmpty() );
                _lock->unlock( &_other );
            }
        }

        bool reestablish(){
            return retry();
        }

        bool retry() {
            assert( _lock );
            assert( _got );
            assert( ! _other.isEmpty() );

            return _got = _lock->lock_try( _why , true, &_other );
        }

        bool got() const { return _got; }
        BSONObj other() const { return _other; }

    private:
        DistributedLock * _lock;
        bool _got;
        BSONObj _other;
        string _why;
    };

}
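// A minimal usage sketch (hostname, error handling, and task name are illustrative, not
// part of this header):
//
//     string errMsg;
//     ConnectionString config = ConnectionString::parse( "cfg1:27019", errMsg );
//     DistributedLock balancerLock( config, "balancer" );
//
//     dist_lock_try lk( &balancerLock, "illustrative balance round" );
//     if ( lk.got() ) {
//         // ... perform the system-wide task; the background pinger keeps the lock alive
//     }   // ~dist_lock_try() unlocks automatically if the lock was acquired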
@@ -1,4 +1,4 @@
// distlock_test.cpp
// distlock_test.h

/* Copyright 2009 10gen Inc.
 *
@@ -15,21 +15,12 @@
 * limitations under the License.
 */

#include "pch.h"

#include "distlock.h"

#include <vector>
#include <iostream>
#include <boost/thread/thread.hpp>

#include "mongo/base/init.h"
#include "mongo/db/auth/action_set.h"
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/privilege.h"
#include "mongo/db/commands.h"
#include "mongo/util/bson_util.h"
#include "mongo/util/timer.h"
#include "../pch.h"
#include "dbclient.h"
#include "distlock.h"
#include "../db/commands.h"
#include "../util/bson_util.h"

// Modify some config options for the RNG, since they cause MSVC to fail
#include <boost/config.hpp>
@@ -76,11 +67,7 @@ namespace mongo {
        virtual LockType locktype() const {
            return NONE;
        }
        // No auth needed because it only works when enabled via command line.
        virtual bool requiresAuth() { return false; }
        virtual void addRequiredPrivileges(const std::string& dbname,
                                           const BSONObj& cmdObj,
                                           std::vector<Privilege>* out) {}

        static void runThread() {
            while (keepGoing) {
                if (current->lock_try( "test" )) {
@@ -143,14 +130,7 @@

        static bool keepGoing;

    };
    MONGO_INITIALIZER(RegisterDistLockWithSyncCmd)(InitializerContext* context) {
        if (Command::testCommandsEnabled) {
            // Leaked intentionally: a Command registers itself when constructed.
            new TestDistLockWithSync();
        }
        return Status::OK();
    }
    } testDistLockWithSyncCmd;

    DistributedLock * TestDistLockWithSync::current;
    AtomicUInt TestDistLockWithSync::count;
@@ -181,11 +161,6 @@
        virtual LockType locktype() const {
            return NONE;
        }
        // No auth needed because it only works when enabled via command line.
        virtual bool requiresAuth() { return false; }
        virtual void addRequiredPrivileges(const std::string& dbname,
                                           const BSONObj& cmdObj,
                                           std::vector<Privilege>* out) {}

        void runThread(ConnectionString& hostConn, unsigned threadId, unsigned seed,
                       BSONObj& cmdObj, BSONObjBuilder& result) {
@@ -288,7 +263,7 @@
            }
            else {
                log() << "**** Not unlocking for thread " << threadId << endl;
                verify( DistributedLock::killPinger( *myLock ) );
                assert( DistributedLock::killPinger( *myLock ) );
                // We're simulating a crashed process...
                break;
            }
@@ -387,36 +362,32 @@
                bsonArrToNumVector<long long>(cmdObj["skewHosts"], skew);
            }
            else {
                LOG( logLvl ) << "No host clocks to skew." << endl;
                log( logLvl ) << "No host clocks to skew." << endl;
                return;
            }

            LOG( logLvl ) << "Skewing clocks of hosts " << cluster << endl;
            log( logLvl ) << "Skewing clocks of hosts " << cluster << endl;

            unsigned s = 0;
            for(vector<long long>::iterator i = skew.begin(); i != skew.end(); ++i,s++) {

                ConnectionString server( cluster.getServers()[s] );
                scoped_ptr<ScopedDbConnection> conn(
                        ScopedDbConnection::getInternalScopedDbConnection( server.toString() ) );
                ScopedDbConnection conn( server );

                BSONObj result;
                try {
                    bool success = conn->get()->runCommand( string("admin"),
                                                            BSON( "_skewClockCommand" << 1
                                                                  << "skew" << *i ),
                                                            result );
                    bool success = conn->runCommand( string("admin"), BSON( "_skewClockCommand" << 1 << "skew" << *i ), result );

                    uassert(13678, str::stream() << "Could not communicate with server " << server.toString() << " in cluster " << cluster.toString() << " to change skew by " << *i, success );

                    LOG( logLvl + 1 ) << " Skewed host " << server << " clock by " << *i << endl;
                    log( logLvl + 1 ) << " Skewed host " << server << " clock by " << *i << endl;
                }
                catch(...) {
                    conn->done();
                    conn.done();
                    throw;
                }

                conn->done();
                conn.done();

            }
@@ -427,14 +398,8 @@
        AtomicUInt count;
        bool keepGoing;

    };
    MONGO_INITIALIZER(RegisterDistLockWithSkewCmd)(InitializerContext* context) {
        if (Command::testCommandsEnabled) {
            // Leaked intentionally: a Command registers itself when constructed.
            new TestDistLockWithSkew();
        }
        return Status::OK();
    }
    } testDistLockWithSkewCmd;


    /**
     * Utility command to virtually skew the clock of a mongo server a particular amount.
@@ -458,11 +423,6 @@
        virtual LockType locktype() const {
            return NONE;
        }
        // No auth needed because it only works when enabled via command line.
        virtual bool requiresAuth() { return false; }
        virtual void addRequiredPrivileges(const std::string& dbname,
                                           const BSONObj& cmdObj,
                                           std::vector<Privilege>* out) {}

        bool run(const string&, BSONObj& cmdObj, int, string& errmsg,
                 BSONObjBuilder& result, bool) {
@@ -479,13 +439,7 @@

        }

    };
    MONGO_INITIALIZER(RegisterSkewClockCmd)(InitializerContext* context) {
        if (Command::testCommandsEnabled) {
            // Leaked intentionally: a Command registers itself when constructed.
            new SkewClockCommand();
        }
        return Status::OK();
    }
    } testSkewClockCommand;

}
54 client/examples/authTest.cpp Normal file
@@ -0,0 +1,54 @@
// authTest.cpp

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <iostream>

#include "client/dbclient.h"

using namespace mongo;

int main( int argc, const char **argv ) {

    const char *port = "27017";
    if ( argc != 1 ) {
        if ( argc != 3 )
            throw -12;
        port = argv[ 2 ];
    }

    DBClientConnection conn;
    string errmsg;
    if ( ! conn.connect( string( "127.0.0.1:" ) + port , errmsg ) ) {
        cout << "couldn't connect : " << errmsg << endl;
        throw -11;
    }

    {
        // clean up old data from any previous tests
        conn.remove( "test.system.users" , BSONObj() );
    }

    conn.insert( "test.system.users" , BSON( "user" << "eliot" << "pwd" << conn.createPasswordDigest( "eliot" , "bar" ) ) );

    errmsg.clear();
    bool ok = conn.auth( "test" , "eliot" , "bar" , errmsg );
    if ( ! ok )
        cout << errmsg << endl;
    MONGO_assert( ok );

    MONGO_assert( ! conn.auth( "test" , "eliot" , "bars" , errmsg ) );
}
279 client/examples/clientTest.cpp Normal file
@@ -0,0 +1,279 @@
// clientTest.cpp

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * a simple test for the c++ driver
 */

// this header should be first to ensure that it includes cleanly in any context
#include "client/dbclient.h"

#include <iostream>

#ifndef assert
#  define assert(x) MONGO_assert(x)
#endif

using namespace std;
using namespace mongo;

int main( int argc, const char **argv ) {

    const char *port = "27017";
    if ( argc != 1 ) {
        if ( argc != 3 )
            throw -12;
        port = argv[ 2 ];
    }

    DBClientConnection conn;
    string errmsg;
    if ( ! conn.connect( string( "127.0.0.1:" ) + port , errmsg ) ) {
        cout << "couldn't connect : " << errmsg << endl;
        throw -11;
    }

    const char * ns = "test.test1";

    conn.dropCollection(ns);

    // clean up old data from any previous tests
    conn.remove( ns, BSONObj() );
    assert( conn.findOne( ns , BSONObj() ).isEmpty() );

    // test insert
    conn.insert( ns ,BSON( "name" << "eliot" << "num" << 1 ) );
    assert( ! conn.findOne( ns , BSONObj() ).isEmpty() );

    // test remove
    conn.remove( ns, BSONObj() );
    assert( conn.findOne( ns , BSONObj() ).isEmpty() );


    // insert, findOne testing
    conn.insert( ns , BSON( "name" << "eliot" << "num" << 1 ) );
    {
        BSONObj res = conn.findOne( ns , BSONObj() );
        assert( strstr( res.getStringField( "name" ) , "eliot" ) );
        assert( ! strstr( res.getStringField( "name2" ) , "eliot" ) );
        assert( 1 == res.getIntField( "num" ) );
    }


    // cursor
    conn.insert( ns ,BSON( "name" << "sara" << "num" << 2 ) );
    {
        auto_ptr<DBClientCursor> cursor = conn.query( ns , BSONObj() );
        int count = 0;
        while ( cursor->more() ) {
            count++;
            BSONObj obj = cursor->next();
        }
        assert( count == 2 );
    }

    {
        auto_ptr<DBClientCursor> cursor = conn.query( ns , BSON( "num" << 1 ) );
        int count = 0;
        while ( cursor->more() ) {
            count++;
            BSONObj obj = cursor->next();
        }
        assert( count == 1 );
    }

    {
        auto_ptr<DBClientCursor> cursor = conn.query( ns , BSON( "num" << 3 ) );
        int count = 0;
        while ( cursor->more() ) {
            count++;
            BSONObj obj = cursor->next();
        }
        assert( count == 0 );
    }
    // update
    {
        BSONObj res = conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() );
        assert( ! strstr( res.getStringField( "name2" ) , "eliot" ) );

        BSONObj after = BSONObjBuilder().appendElements( res ).append( "name2" , "h" ).obj();

        conn.update( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() , after );
        res = conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() );
        assert( ! strstr( res.getStringField( "name2" ) , "eliot" ) );
        assert( conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() ).isEmpty() );

        conn.update( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() , after );
        res = conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() );
        assert( strstr( res.getStringField( "name" ) , "eliot" ) );
        assert( strstr( res.getStringField( "name2" ) , "h" ) );
        assert( conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() ).isEmpty() );

        // upsert
        conn.update( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() , after , 1 );
        assert( ! conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() ).isEmpty() );

    }

    {
        // ensure index
        assert( conn.ensureIndex( ns , BSON( "name" << 1 ) ) );
        assert( ! conn.ensureIndex( ns , BSON( "name" << 1 ) ) );
    }

    {
        // hint related tests
        assert( conn.findOne(ns, "{}")["name"].str() == "sara" );

        assert( conn.findOne(ns, "{ name : 'eliot' }")["name"].str() == "eliot" );
        assert( conn.getLastError() == "" );

        // nonexistent index test
        bool asserted = false;
        try {
            conn.findOne(ns, Query("{name:\"eliot\"}").hint("{foo:1}"));
        }
        catch ( ... ) {
            asserted = true;
        }
        assert( asserted );

        //existing index
        assert( conn.findOne(ns, Query("{name:'eliot'}").hint("{name:1}")).hasElement("name") );

        // run validate
        assert( conn.validate( ns ) );
    }

    {
        // timestamp test

        const char * tsns = "test.tstest1";
        conn.dropCollection( tsns );

        {
            mongo::BSONObjBuilder b;
            b.appendTimestamp( "ts" );
            conn.insert( tsns , b.obj() );
        }

        mongo::BSONObj out = conn.findOne( tsns , mongo::BSONObj() );
        Date_t oldTime = out["ts"].timestampTime();
        unsigned int oldInc = out["ts"].timestampInc();

        {
            mongo::BSONObjBuilder b1;
            b1.append( out["_id"] );

            mongo::BSONObjBuilder b2;
            b2.append( out["_id"] );
            b2.appendTimestamp( "ts" );

            conn.update( tsns , b1.obj() , b2.obj() );
        }

        BSONObj found = conn.findOne( tsns , mongo::BSONObj() );
        cout << "old: " << out << "\nnew: " << found << endl;
        assert( ( oldTime < found["ts"].timestampTime() ) ||
                ( oldTime == found["ts"].timestampTime() && oldInc < found["ts"].timestampInc() ) );

    }

    {
        // check that killcursors doesn't affect last error
        assert( conn.getLastError().empty() );

        BufBuilder b;
        b.appendNum( (int)0 ); // reserved
        b.appendNum( (int)-1 ); // invalid # of cursors triggers exception
        b.appendNum( (int)-1 ); // bogus cursor id

        Message m;
        m.setData( dbKillCursors, b.buf(), b.len() );

        // say() is protected in DBClientConnection, so get superclass
        static_cast< DBConnector* >( &conn )->say( m );

        assert( conn.getLastError().empty() );
    }

    {
        list<string> l = conn.getDatabaseNames();
        for ( list<string>::iterator i = l.begin(); i != l.end(); i++ ) {
            cout << "db name : " << *i << endl;
        }

        l = conn.getCollectionNames( "test" );
        for ( list<string>::iterator i = l.begin(); i != l.end(); i++ ) {
            cout << "coll name : " << *i << endl;
        }
    }
{
|
||||
//Map Reduce (this mostly just tests that it compiles with all output types)
|
||||
const string ns = "test.mr";
|
||||
conn.insert(ns, BSON("a" << 1));
|
||||
conn.insert(ns, BSON("a" << 1));
|
||||
|
||||
const char* map = "function() { emit(this.a, 1); }";
|
||||
const char* reduce = "function(key, values) { return Array.sum(values); }";
|
||||
|
||||
const string outcoll = ns + ".out";
|
||||
|
||||
BSONObj out;
|
||||
out = conn.mapreduce(ns, map, reduce, BSONObj()); // default to inline
|
||||
//MONGO_PRINT(out);
|
||||
out = conn.mapreduce(ns, map, reduce, BSONObj(), outcoll);
|
||||
//MONGO_PRINT(out);
|
||||
out = conn.mapreduce(ns, map, reduce, BSONObj(), outcoll.c_str());
|
||||
//MONGO_PRINT(out);
|
||||
out = conn.mapreduce(ns, map, reduce, BSONObj(), BSON("reduce" << outcoll));
|
||||
//MONGO_PRINT(out);
|
||||
}
|
||||
|
||||
{
|
||||
// test timeouts
|
||||
|
||||
DBClientConnection conn( true , 0 , 2 );
|
||||
if ( ! conn.connect( string( "127.0.0.1:" ) + port , errmsg ) ) {
|
||||
cout << "couldn't connect : " << errmsg << endl;
|
||||
throw -11;
|
||||
}
|
||||
conn.insert( "test.totest" , BSON( "x" << 1 ) );
|
||||
BSONObj res;
|
||||
|
||||
bool gotError = false;
|
||||
assert( conn.eval( "test" , "return db.totest.findOne().x" , res ) );
|
||||
try {
|
||||
conn.eval( "test" , "sleep(5000); return db.totest.findOne().x" , res );
|
||||
}
|
||||
catch ( std::exception& e ) {
|
||||
gotError = true;
|
||||
log() << e.what() << endl;
|
||||
}
|
||||
assert( gotError );
|
||||
// sleep so the server isn't locked anymore
|
||||
sleepsecs( 4 );
|
||||
|
||||
assert( conn.eval( "test" , "return db.totest.findOne().x" , res ) );
|
||||
|
||||
|
||||
}
|
||||
|
||||
cout << "client test finished!" << endl;
|
||||
}
|
||||
|
|
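The timestamp assertion above relies on BSON timestamp ordering: compare the time component first, then the increment counter. A minimal standalone sketch of that comparator (the pair layout here is illustrative, not the driver's internal representation):

#include <utility>

// (time component, increment) — time wins, the counter breaks ties.
typedef std::pair<unsigned long long, unsigned int> TsPair;

inline bool tsLess( const TsPair& a , const TsPair& b ) {
    if ( a.first != b.first )
        return a.first < b.first;   // compare the time component first
    return a.second < b.second;     // tie broken by the increment
}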
@@ -20,9 +20,8 @@
*/

#include <iostream>
#include <cstdlib>

#include "mongo/client/dbclient.h"
#include "client/dbclient.h"

using namespace std;

@@ -31,21 +30,14 @@ void insert( mongo::DBClientConnection & conn , const char * name , int num ) {
    obj.append( "name" , name );
    obj.append( "num" , num );
    conn.insert( "test.people" , obj.obj() );
    std::string e = conn.getLastError();
    if( !e.empty() ) {
        cout << "insert failed: " << e << endl;
        exit(EXIT_FAILURE);
    }
}

int main( int argc, const char **argv ) {

    const char *port = "27017";
    if ( argc != 1 ) {
        if ( argc != 3 ) {
            cout << "need to pass port as second param" << endl;
            return EXIT_FAILURE;
        }
        if ( argc != 3 )
            throw -12;
        port = argv[ 2 ];
    }

@@ -53,7 +45,7 @@ int main( int argc, const char **argv ) {
    string errmsg;
    if ( ! conn.connect( string( "127.0.0.1:" ) + port , errmsg ) ) {
        cout << "couldn't connect : " << errmsg << endl;
        return EXIT_FAILURE;
        throw -11;
    }

    {

@@ -68,11 +60,6 @@ int main( int argc, const char **argv ) {
    {
        mongo::BSONObjBuilder query;
        auto_ptr<mongo::DBClientCursor> cursor = conn.query( "test.people" , query.obj() );
        if (!cursor.get()) {
            cout << "query failure" << endl;
            return EXIT_FAILURE;
        }

        cout << "using cursor" << endl;
        while ( cursor->more() ) {
            mongo::BSONObj obj = cursor->next();

@@ -95,4 +82,5 @@ int main( int argc, const char **argv ) {
        cout << res.isEmpty() << "\t" << res.jsonString() << endl;
    }

}
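The lines dropped above are the safe-write idiom: insert, then poll getLastError and treat a non-empty string as failure. A minimal sketch of that pattern on its own (the helper name is ours, not part of the driver):

#include <stdexcept>
#include <string>
#include "client/dbclient.h"

// Insert one document and surface any write error as an exception.
void safeInsert( mongo::DBClientConnection& conn , const char* ns , const mongo::BSONObj& doc ) {
    conn.insert( ns , doc );
    std::string e = conn.getLastError();   // empty string means the write succeeded
    if ( !e.empty() )
        throw std::runtime_error( "insert failed: " + e );
}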
@@ -17,7 +17,7 @@

#include <iostream>

#include "mongo/client/dbclient.h"
#include "client/dbclient.h"
#include "util/net/httpclient.h"

using namespace mongo;

@@ -27,10 +27,10 @@ void play( string url ) {

    HttpClient c;
    HttpClient::Result r;
    MONGO_verify( c.get( url , &r ) == 200 );
    MONGO_assert( c.get( url , &r ) == 200 );

    HttpClient::Headers h = r.getHeaders();
    MONGO_verify( h["Content-Type"].find( "text/html" ) == 0 );
    MONGO_assert( h["Content-Type"].find( "text/html" ) == 0 );

    cout << "\tHeaders" << endl;
    for ( HttpClient::Headers::iterator i = h.begin() ; i != h.end(); ++i ) {
47 client/examples/insert_demo.cpp Normal file

@@ -0,0 +1,47 @@
/*
    C++ client program which inserts documents in a MongoDB database.

    How to build and run:

    Using mongo_client_lib.cpp:
        g++ -I .. -I ../.. insert_demo.cpp ../mongo_client_lib.cpp -lboost_thread-mt -lboost_filesystem
        ./a.out
*/

#include <iostream>
#include "dbclient.h" // the mongo c++ driver

using namespace std;
using namespace mongo;
using namespace bson;

int main() {
    try {
        cout << "connecting to localhost..." << endl;
        DBClientConnection c;
        c.connect("localhost");
        cout << "connected ok" << endl;

        bo o = BSON( "hello" << "world" );

        cout << "inserting..." << endl;

        time_t start = time(0);
        for( unsigned i = 0; i < 1000000; i++ ) {
            c.insert("test.foo", o);
        }

        // wait until all operations applied
        cout << "getlasterror returns: \"" << c.getLastError() << '"' << endl;

        time_t done = time(0);
        time_t dt = done-start;
        cout << dt << " seconds " << 1000000/dt << " per second" << endl;
    }
    catch(DBException& e) {
        cout << "caught DBException " << e.toString() << endl;
        return 1;
    }

    return 0;
}
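One caveat in the demo's timing arithmetic: if the batch finishes in under a second, 1000000/dt divides by zero. A guarded variant of that rate calculation (illustrative helper, not part of the example):

#include <ctime>
#include <iostream>

// Print an operation rate, clamping the elapsed time to at least one second.
void printRate( std::time_t start , std::time_t done , unsigned long n ) {
    std::time_t dt = done - start;
    if ( dt == 0 )
        dt = 1;   // insert_demo.cpp would divide by zero here on a fast run
    std::cout << dt << " seconds " << n / dt << " per second" << std::endl;
}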
118 client/examples/rs.cpp Normal file

@@ -0,0 +1,118 @@
// rs.cpp

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * example of using replica sets from c++
 */

#include "client/dbclient.h"
#include <iostream>
#include <vector>

using namespace mongo;
using namespace std;

void workerThread( string collName , bool print , DBClientReplicaSet * conn ) {

    while ( true ) {
        try {
            conn->update( collName , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) , true );

            BSONObj x = conn->findOne( collName , BSONObj() );

            if ( print ) {
                cout << x << endl;
            }

            BSONObj a = conn->slaveConn().findOne( collName , BSONObj() , 0 , QueryOption_SlaveOk );
            BSONObj b = conn->findOne( collName , BSONObj() , 0 , QueryOption_SlaveOk );

            if ( print ) {
                cout << "\t A " << a << endl;
                cout << "\t B " << b << endl;
            }
        }
        catch ( std::exception& e ) {
            cout << "ERROR: " << e.what() << endl;
        }
        sleepmillis( 10 );
    }
}

int main( int argc , const char ** argv ) {

    unsigned nThreads = 1;
    bool print = false;
    bool testTimeout = false;

    for ( int i=1; i<argc; i++ ) {
        if ( mongoutils::str::equals( "--threads" , argv[i] ) ) {
            nThreads = atoi( argv[++i] );
        }
        else if ( mongoutils::str::equals( "--print" , argv[i] ) ) {
            print = true;
        }
        // Run a special mode to demonstrate the DBClientReplicaSet so_timeout option.
        else if ( mongoutils::str::equals( "--testTimeout" , argv[i] ) ) {
            testTimeout = true;
        }
        else {
            cerr << "unknown option: " << argv[i] << endl;
            return 1;
        }

    }

    string errmsg;
    ConnectionString cs = ConnectionString::parse( "foo/127.0.0.1" , errmsg );
    if ( ! cs.isValid() ) {
        cout << "error parsing url: " << errmsg << endl;
        return 1;
    }

    DBClientReplicaSet * conn = dynamic_cast<DBClientReplicaSet*>(cs.connect( errmsg, testTimeout ? 10 : 0 ));
    if ( ! conn ) {
        cout << "error connecting: " << errmsg << endl;
        return 2;
    }

    string collName = "test.rs1";

    conn->dropCollection( collName );

    if ( testTimeout ) {
        conn->insert( collName, BSONObj() );
        try {
            conn->count( collName, BSON( "$where" << "sleep(40000)" ) );
        } catch( DBException& ) {
            return 0;
        }
        cout << "expected socket exception" << endl;
        return 1;
    }

    vector<boost::shared_ptr<boost::thread> > threads;
    for ( unsigned i=0; i<nThreads; i++ ) {
        string errmsg;
        threads.push_back( boost::shared_ptr<boost::thread>( new boost::thread( boost::bind( workerThread , collName , print , (DBClientReplicaSet*)cs.connect(errmsg) ) ) ) );
    }

    for ( unsigned i=0; i<threads.size(); i++ ) {
        threads[i]->join();
    }

}
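rs.cpp buries the connection setup inside main; pulled out, the seed-string flow looks like the sketch below ("setname/host1,host2" is the format ConnectionString::parse accepts for replica sets; the helper name is ours):

#include <iostream>
#include <string>
#include "client/dbclient.h"

// Parse a replica-set seed string and connect, or return NULL on failure.
mongo::DBClientReplicaSet* connectRS( const std::string& seed ) {
    std::string errmsg;
    mongo::ConnectionString cs = mongo::ConnectionString::parse( seed , errmsg );
    if ( ! cs.isValid() ) {
        std::cerr << "error parsing url: " << errmsg << std::endl;
        return NULL;
    }
    return dynamic_cast<mongo::DBClientReplicaSet*>( cs.connect( errmsg ) );
}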
56 client/examples/second.cpp Normal file

@@ -0,0 +1,56 @@
// second.cpp

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <iostream>

#include "client/dbclient.h"

using namespace std;
using namespace mongo;

int main( int argc, const char **argv ) {

    const char *port = "27017";
    if ( argc != 1 ) {
        if ( argc != 3 )
            throw -12;
        port = argv[ 2 ];
    }

    DBClientConnection conn;
    string errmsg;
    if ( ! conn.connect( string( "127.0.0.1:" ) + port , errmsg ) ) {
        cout << "couldn't connect : " << errmsg << endl;
        throw -11;
    }

    const char * ns = "test.second";

    conn.remove( ns , BSONObj() );

    conn.insert( ns , BSON( "name" << "eliot" << "num" << 17 ) );
    conn.insert( ns , BSON( "name" << "sara" << "num" << 24 ) );

    auto_ptr<DBClientCursor> cursor = conn.query( ns , BSONObj() );
    cout << "using cursor" << endl;
    while ( cursor->more() ) {
        BSONObj obj = cursor->next();
        cout << "\t" << obj.jsonString() << endl;
    }

    conn.ensureIndex( ns , BSON( "name" << 1 << "num" << -1 ) );
}
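The compound index second.cpp ends with orders by name ascending, then num descending; a query can ask for that same order explicitly. A small sketch (helper name ours):

#include "client/dbclient.h"

// Return a cursor over ns in the order served by the { name: 1, num: -1 } index.
std::auto_ptr<mongo::DBClientCursor> sortedPeople( mongo::DBClientConnection& conn , const char* ns ) {
    return conn.query( ns , mongo::Query().sort( BSON( "name" << 1 << "num" << -1 ) ) );
}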
92 client/examples/simple_client_demo.vcxproj Executable file

@@ -0,0 +1,92 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <ItemGroup Label="ProjectConfigurations">
    <ProjectConfiguration Include="Debug|Win32">
      <Configuration>Debug</Configuration>
      <Platform>Win32</Platform>
    </ProjectConfiguration>
    <ProjectConfiguration Include="Release|Win32">
      <Configuration>Release</Configuration>
      <Platform>Win32</Platform>
    </ProjectConfiguration>
  </ItemGroup>
  <PropertyGroup Label="Globals">
    <ProjectGuid>{89C30BC3-2874-4F2C-B4DA-EB04E9782236}</ProjectGuid>
    <Keyword>Win32Proj</Keyword>
    <RootNamespace>simple_client_demo</RootNamespace>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
    <ConfigurationType>Application</ConfigurationType>
    <UseDebugLibraries>true</UseDebugLibraries>
    <CharacterSet>Unicode</CharacterSet>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
    <ConfigurationType>Application</ConfigurationType>
    <UseDebugLibraries>false</UseDebugLibraries>
    <WholeProgramOptimization>true</WholeProgramOptimization>
    <CharacterSet>Unicode</CharacterSet>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
  <ImportGroup Label="ExtensionSettings">
  </ImportGroup>
  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <PropertyGroup Label="UserMacros" />
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
    <LinkIncremental>true</LinkIncremental>
    <IncludePath>..\..;..\..\pcre-7.4;$(IncludePath)</IncludePath>
    <LibraryPath>\boost\lib\vs2010_32;$(LibraryPath)</LibraryPath>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
    <LinkIncremental>false</LinkIncremental>
    <IncludePath>..\..;..\..\pcre-7.4;$(IncludePath)</IncludePath>
    <LibraryPath>\boost\lib\vs2010_32;$(LibraryPath)</LibraryPath>
  </PropertyGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
    <ClCompile>
      <PrecompiledHeader>
      </PrecompiledHeader>
      <WarningLevel>Level3</WarningLevel>
      <Optimization>Disabled</Optimization>
      <PreprocessorDefinitions> _CRT_SECURE_NO_WARNINGS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
    </ClCompile>
    <Link>
      <SubSystem>Console</SubSystem>
      <GenerateDebugInformation>true</GenerateDebugInformation>
      <AdditionalDependencies>ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
    <ClCompile>
      <WarningLevel>Level3</WarningLevel>
      <PrecompiledHeader>
      </PrecompiledHeader>
      <Optimization>MaxSpeed</Optimization>
      <FunctionLevelLinking>true</FunctionLevelLinking>
      <IntrinsicFunctions>true</IntrinsicFunctions>
      <PreprocessorDefinitions> _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
      <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
    </ClCompile>
    <Link>
      <SubSystem>Console</SubSystem>
      <GenerateDebugInformation>true</GenerateDebugInformation>
      <EnableCOMDATFolding>true</EnableCOMDATFolding>
      <OptimizeReferences>true</OptimizeReferences>
      <AdditionalDependencies>ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
    </Link>
  </ItemDefinitionGroup>
  <ItemGroup>
    <ClCompile Include="..\mongo_client_lib.cpp" />
    <ClCompile Include="..\simple_client_demo.cpp" />
  </ItemGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
  <ImportGroup Label="ExtensionTargets">
  </ImportGroup>
</Project>
21 client/examples/simple_client_demo.vcxproj.filters Executable file

@@ -0,0 +1,21 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <ItemGroup>
    <Filter Include="Source Files">
      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
    </Filter>
    <Filter Include="Header Files">
      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
      <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>
    </Filter>
  </ItemGroup>
  <ItemGroup>
    <ClCompile Include="..\simple_client_demo.cpp">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="..\mongo_client_lib.cpp">
      <Filter>Source Files</Filter>
    </ClCompile>
  </ItemGroup>
</Project>
@@ -16,10 +16,9 @@
*/

#include <iostream>
#include "mongo/client/dbclient.h"
#include "../../client/dbclient.h"

// g++ src/mongo/client/examples/tutorial.cpp -pthread -Isrc -Isrc/mongo -lmongoclient -lboost_thread-mt -lboost_system -lboost_filesystem -L[path to libmongoclient.a] -o tutorial
//g++ tutorial.cpp -L[mongo directory] -L/opt/local/lib -lmongoclient -lboost_thread-mt -lboost_filesystem -lboost_system -I/opt/local/include -o tutorial
// g++ tutorial.cpp -lmongoclient -lboost_thread -lboost_filesystem -o tutorial

using namespace mongo;

@@ -41,8 +40,6 @@ void run() {
    c.insert("tutorial.persons", p);
    p = BSON( "name" << "Abe" << "age" << 33 );
    c.insert("tutorial.persons", p);
    p = BSON( "name" << "Methuselah" << "age" << BSONNULL);
    c.insert("tutorial.persons", p);
    p = BSON( "name" << "Samantha" << "age" << 21 << "city" << "Los Angeles" << "state" << "CA" );
    c.insert("tutorial.persons", p);
@@ -1,5 +1,5 @@
// @file whereExample.cpp
// @see http://dochub.mongodb.org/core/serversidecodeexecution
// @see http://www.mongodb.org/display/DOCS/Server-side+Code+Execution

/* Copyright 2009 10gen Inc.
 *

@@ -18,7 +18,7 @@

#include <iostream>

#include "mongo/client/dbclient.h"
#include "client/dbclient.h"

using namespace std;
using namespace mongo;

@@ -65,5 +65,5 @@ int main( int argc, const char **argv ) {
        cout << "\t" << obj.jsonString() << endl;
        num++;
    }
    MONGO_verify( num == 1 );
    MONGO_assert( num == 1 );
}
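whereExample.cpp exercises server-side code execution; the same $where predicate can be attached through Query::where. A minimal sketch (slow by design, since the JavaScript runs against every candidate document):

#include "client/dbclient.h"

// Query ns with a JavaScript predicate evaluated on the server.
std::auto_ptr<mongo::DBClientCursor> whereQuery( mongo::DBClientConnection& conn , const char* ns ) {
    mongo::Query q = mongo::Query( "{}" ).where( "this.name == 'eliot'" );
    return conn.query( ns , q );
}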
@@ -16,14 +16,11 @@
*/

#include "pch.h"

#include <boost/smart_ptr.hpp>
#include <fcntl.h>
#include <fstream>
#include <utility>

#include "mongo/client/gridfs.h"
#include "mongo/client/dbclientcursor.h"
#include "gridfs.h"
#include <boost/smart_ptr.hpp>

#if defined(_WIN32)
#include <io.h>

@@ -33,8 +30,6 @@
#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
#endif

#include <boost/filesystem/operations.hpp>

namespace mongo {

    const unsigned DEFAULT_CHUNK_SIZE = 256 * 1024;

@@ -58,7 +53,7 @@ namespace mongo {
        _chunkSize = DEFAULT_CHUNK_SIZE;

        client.ensureIndex( _filesNS , BSON( "filename" << 1 ) );
        client.ensureIndex( _chunksNS , BSON( "files_id" << 1 << "n" << 1 ) , /*unique=*/true );
        client.ensureIndex( _chunksNS , BSON( "files_id" << 1 << "n" << 1 ) );
    }

    GridFS::~GridFS() {

@@ -66,14 +61,10 @@ namespace mongo {
    }

    void GridFS::setChunkSize(unsigned int size) {
        massert( 13296 , "invalid chunk size is specified", (size != 0 ));
        massert( 13296 , "invalid chunk size is specified", (size == 0));
        _chunkSize = size;
    }

    unsigned int GridFS::getChunkSize() const {
        return _chunkSize;
    }

    BSONObj GridFS::storeFile( const char* data , size_t length , const string& remoteName , const string& contentType) {
        char const * const end = data + length;

@@ -121,7 +112,7 @@ namespace mongo {
                chunkLen += readLen;
                bufPos += readLen;

                verify(chunkLen <= _chunkSize);
                assert(chunkLen <= _chunkSize);
            }

            GridFSChunk c(idObj, chunkNumber, buf, chunkLen);

@@ -139,12 +130,6 @@ namespace mongo {
    }

    BSONObj GridFS::insertFile(const string& name, const OID& id, gridfs_offset length, const string& contentType) {
        // Wait for any pending writebacks to finish
        BSONObj errObj = _client.getLastErrorDetailed();
        uassert( 16428,
                 str::stream() << "Error storing GridFS chunk for file: " << name
                               << ", error: " << errObj,
                 DBClientWithCommands::getLastErrorString(errObj) == "" );

        BSONObj res;
        if ( ! _client.runCommand( _dbName.c_str() , BSON( "filemd5" << id << "root" << _prefix ) , res ) )

@@ -184,29 +169,29 @@ namespace mongo {
        }
    }

    GridFile::GridFile(const GridFS * grid , BSONObj obj ) {
    GridFile::GridFile( GridFS * grid , BSONObj obj ) {
        _grid = grid;
        _obj = obj;
    }

    GridFile GridFS::findFile( const string& fileName ) const {
    GridFile GridFS::findFile( const string& fileName ) {
        return findFile( BSON( "filename" << fileName ) );
    };

    GridFile GridFS::findFile( BSONObj query ) const {
    GridFile GridFS::findFile( BSONObj query ) {
        query = BSON("query" << query << "orderby" << BSON("uploadDate" << -1));
        return GridFile( this , _client.findOne( _filesNS.c_str() , query ) );
    }

    auto_ptr<DBClientCursor> GridFS::list() const {
    auto_ptr<DBClientCursor> GridFS::list() {
        return _client.query( _filesNS.c_str() , BSONObj() );
    }

    auto_ptr<DBClientCursor> GridFS::list( BSONObj o ) const {
    auto_ptr<DBClientCursor> GridFS::list( BSONObj o ) {
        return _client.query( _filesNS.c_str() , o );
    }

    BSONObj GridFile::getMetadata() const {
    BSONObj GridFile::getMetadata() {
        BSONElement meta_element = _obj["metadata"];
        if( meta_element.eoo() ) {
            return BSONObj();

@@ -215,7 +200,7 @@ namespace mongo {
        return meta_element.embeddedObject();
    }

    GridFSChunk GridFile::getChunk( int n ) const {
    GridFSChunk GridFile::getChunk( int n ) {
        _exists();
        BSONObjBuilder b;
        b.appendAs( _obj["_id"] , "files_id" );

@@ -226,7 +211,7 @@ namespace mongo {
        return GridFSChunk(o);
    }

    gridfs_offset GridFile::write( ostream & out ) const {
    gridfs_offset GridFile::write( ostream & out ) {
        _exists();

        const int num = getNumChunks();

@@ -242,7 +227,7 @@ namespace mongo {
        return getContentLength();
    }

    gridfs_offset GridFile::write( const string& where ) const {
    gridfs_offset GridFile::write( const string& where ) {
        if (where == "-") {
            return write( cout );
        }

@@ -253,7 +238,7 @@ namespace mongo {
        }
    }

    void GridFile::_exists() const {
    void GridFile::_exists() {
        uassert( 10015 , "doesn't exists" , exists() );
    }
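Taken together, the GridFS methods touched above make a short round trip; here is a sketch against the signatures on the patch-queue side (non-const accessors), with names of our choosing:

#include <iostream>
#include "client/dbclient.h"
#include "client/gridfs.h"

// Store a buffer as a file, then look it up by name and stream it back out.
void gridfsRoundTrip( mongo::DBClientConnection& conn ) {
    mongo::GridFS gfs( conn , "test" );                 // chunks land in test.fs.*
    const char data[] = "hello gridfs";
    gfs.storeFile( data , sizeof(data) - 1 , "greeting.txt" , "text/plain" );

    mongo::GridFile f = gfs.findFile( "greeting.txt" );
    if ( f.exists() )
        f.write( std::cout );                           // writes every chunk in order
}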
@@ -17,9 +17,8 @@

#pragma once

#include "mongo/bson/bsonelement.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/client/dbclientinterface.h"
#include "dbclient.h"
#include "redef_macros.h"

namespace mongo {

@@ -33,13 +32,13 @@ namespace mongo {
        GridFSChunk( BSONObj data );
        GridFSChunk( BSONObj fileId , int chunkNumber , const char * data , int len );

        int len() const {
        int len() {
            int len;
            _data["data"].binDataClean( len );
            return len;
        }

        const char * data( int & len ) const {
        const char * data( int & len ) {
            return _data["data"].binDataClean( len );
        }

@@ -51,7 +50,7 @@ namespace mongo {

    /**
      GridFS is for storing large file-style objects in MongoDB.
      @see http://dochub.mongodb.org/core/gridfsspec
      @see http://www.mongodb.org/display/DOCS/GridFS+Specification
     */
    class GridFS {
    public:

@@ -68,8 +67,6 @@ namespace mongo {
         */
        void setChunkSize(unsigned int size);

        unsigned int getChunkSize() const;

        /**
         * puts the file reference by fileName into the db
         * @param fileName local filename relative to process

@@ -103,22 +100,22 @@ namespace mongo {
        /**
         * returns a file object matching the query
         */
        GridFile findFile( BSONObj query ) const;
        GridFile findFile( BSONObj query );

        /**
         * equiv to findFile( { filename : filename } )
         */
        GridFile findFile( const string& fileName ) const;
        GridFile findFile( const string& fileName );

        /**
         * convenience method to get all the files
         */
        auto_ptr<DBClientCursor> list() const;
        auto_ptr<DBClientCursor> list();

        /**
         * convenience method to get all the files with a filter
         */
        auto_ptr<DBClientCursor> list( BSONObj query ) const;
        auto_ptr<DBClientCursor> list( BSONObj query );

    private:
        DBClientBase& _client;

@@ -143,64 +140,66 @@ namespace mongo {
         * @return whether or not this file exists
         * findFile will always return a GriFile, so need to check this
         */
        bool exists() const {
        bool exists() {
            return ! _obj.isEmpty();
        }

        string getFilename() const {
        string getFilename() {
            return _obj["filename"].str();
        }

        int getChunkSize() const {
        int getChunkSize() {
            return (int)(_obj["chunkSize"].number());
        }

        gridfs_offset getContentLength() const {
        gridfs_offset getContentLength() {
            return (gridfs_offset)(_obj["length"].number());
        }

        string getContentType() const {
        string getContentType() {
            return _obj["contentType"].valuestr();
        }

        Date_t getUploadDate() const {
        Date_t getUploadDate() {
            return _obj["uploadDate"].date();
        }

        string getMD5() const {
        string getMD5() {
            return _obj["md5"].str();
        }

        BSONElement getFileField( const string& name ) const {
        BSONElement getFileField( const string& name ) {
            return _obj[name];
        }

        BSONObj getMetadata() const;
        BSONObj getMetadata();

        int getNumChunks() const {
        int getNumChunks() {
            return (int) ceil( (double)getContentLength() / (double)getChunkSize() );
        }

        GridFSChunk getChunk( int n ) const;
        GridFSChunk getChunk( int n );

        /**
           write the file to the output stream
         */
        gridfs_offset write( ostream & out ) const;
        gridfs_offset write( ostream & out );

        /**
           write the file to this filename
         */
        gridfs_offset write( const string& where ) const;
        gridfs_offset write( const string& where );

    private:
        GridFile(const GridFS * grid , BSONObj obj );
        GridFile( GridFS * grid , BSONObj obj );

        void _exists() const;
        void _exists();

        const GridFS * _grid;
        BSONObj _obj;
        GridFS * _grid;
        BSONObj _obj;

        friend class GridFS;
    };
}

#include "undef_macros.h"
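getNumChunks above is just a ceiling division; spelled out with numbers, a 600 KB file at the 256 KB default chunk size needs three chunks. The arithmetic in isolation:

#include <cmath>

// ceil(length / chunkSize): e.g. numChunks(600*1024, 256*1024) == 3.
int numChunks( unsigned long long length , unsigned int chunkSize ) {
    return (int) ceil( (double)length / (double)chunkSize );
}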
@@ -22,11 +22,10 @@
namespace mongo {

    bool Model::load(BSONObj& query) {
        scoped_ptr<ScopedDbConnection> conn(
                ScopedDbConnection::getScopedDbConnection (modelServer() ) );
        ScopedDbConnection conn( modelServer() );

        BSONObj b = conn->get()->findOne(getNS(), query);
        conn->done();
        BSONObj b = conn->findOne(getNS(), query);
        conn.done();

        if ( b.isEmpty() )
            return false;

@@ -39,23 +38,21 @@ namespace mongo {
    void Model::remove( bool safe ) {
        uassert( 10016 , "_id isn't set - needed for remove()" , _id["_id"].type() );

        scoped_ptr<ScopedDbConnection> conn(
                ScopedDbConnection::getScopedDbConnection (modelServer() ) );
        conn->get()->remove( getNS() , _id );
        ScopedDbConnection conn( modelServer() );
        conn->remove( getNS() , _id );

        string errmsg = "";
        if ( safe )
            errmsg = conn->get()->getLastError();
            errmsg = conn->getLastError();

        conn->done();
        conn.done();

        if ( safe && errmsg.size() )
            throw UserException( 9002 , (string)"error on Model::remove: " + errmsg );
    }

    void Model::save( bool safe ) {
        scoped_ptr<ScopedDbConnection> conn(
                ScopedDbConnection::getScopedDbConnection (modelServer() ) );
        ScopedDbConnection conn( modelServer() );

        BSONObjBuilder b;
        serialize( b );

@@ -91,10 +88,10 @@ namespace mongo {
            b.appendOID( "_id" , &oid );

            BSONObj o = b.obj();
            conn->get()->insert( getNS() , o );
            conn->insert( getNS() , o );
            _id = o["_id"].wrap().getOwned();

            LOG(4) << "inserted new model " << getNS() << " " << o << endl;
            log(4) << "inserted new model " << getNS() << " " << o << endl;
        }
        else {
            if ( myId.eoo() ) {

@@ -102,7 +99,7 @@ namespace mongo {
                b.append( myId );
            }

            verify( ! myId.eoo() );
            assert( ! myId.eoo() );

            BSONObjBuilder qb;
            qb.append( myId );

@@ -110,17 +107,17 @@ namespace mongo {
            BSONObj q = qb.obj();
            BSONObj o = b.obj();

            LOG(4) << "updated model" << getNS() << " " << q << " " << o << endl;
            log(4) << "updated model" << getNS() << " " << q << " " << o << endl;

            conn->get()->update( getNS() , q , o , true );
            conn->update( getNS() , q , o , true );

        }

        string errmsg = "";
        if ( safe )
            errmsg = conn->get()->getLastError();
            errmsg = conn->getLastError();

        conn->done();
        conn.done();

        if ( safe && errmsg.size() )
            throw UserException( 9003 , (string)"error on Model::save: " + errmsg );
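Both sides of the model.cpp hunks follow the same pooled-connection discipline: take a connection scoped to modelServer(), use it, and call done() so the socket returns to the pool. A sketch of the older (patch-queue) style in isolation (helper name ours):

#include "client/connpool.h"

// Fetch one document through a pooled connection and hand the socket back.
mongo::BSONObj loadOne( const std::string& server , const std::string& ns , const mongo::BSONObj& query ) {
    mongo::ScopedDbConnection conn( server );
    mongo::BSONObj b = conn->findOne( ns , query );
    conn.done();   // skipping this makes the pool discard the socket instead of reusing it
    return b;
}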
@@ -17,8 +17,8 @@

#pragma once

#include "mongo/bson/bsonelement.h"
#include "mongo/bson/bsonobj.h"
#include "dbclient.h"
#include "redef_macros.h"

namespace mongo {

@@ -59,3 +59,4 @@ namespace mongo {

} // namespace mongo

#include "undef_macros.h"
@@ -1,21 +1,4 @@
/* @file client_lib.cpp */

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
/* @file client_lib.cpp

   MongoDB C++ Driver

@@ -40,6 +23,21 @@
   - Linker.Input.Additional Dependencies - add ws2_32.lib for the Winsock library.
*/

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#if defined(_WIN32)
// C4800 forcing value to bool 'true' or 'false' (performance warning)
#pragma warning( disable : 4800 )

@@ -48,7 +46,6 @@
#include "../util/md5main.cpp"

#define MONGO_EXPOSE_MACROS

#include "../pch.h"

#include "../util/assert_util.cpp"

@@ -60,49 +57,33 @@
#include "../util/log.cpp"
#include "../util/password.cpp"
#include "../util/net/message_port.cpp"

#include "../util/concurrency/thread_pool.cpp"
#include "../util/concurrency/vars.cpp"
#include "../util/concurrency/task.cpp"
#include "../util/concurrency/spin_lock.cpp"

// in client/ directory:
#include "authentication_table.cpp"
#include "connpool.cpp"
#include "syncclusterconnection.cpp"
#include "dbclient.cpp"
#include "clientOnly.cpp"
#include "gridfs.cpp"
#include "dbclientcursor.cpp"
#include "dbclient_rs.cpp"

#include "../util/text.cpp"
#include "dbclient_rs.cpp"
#include "../bson/oid.cpp"

#include "../db/lasterror.cpp"
#include "../db/json.cpp"
#include "../db/jsobj.cpp"
#include "../db/dbmessage.cpp"
//#include "../db/common.cpp"
#include "../db/nonce.cpp"
#include "../db/commands.cpp"

#include "../pch.cpp"

extern "C" {
#include "../util/md5.cpp"
#include "../util/md5.c"
}

// in client/ directory:
#include "clientAndShell.cpp"
#include "connection_factory.cpp"

#include "../util/time_support.cpp"
#include "../util/timer.cpp"
#include "../util/concurrency/mutexdebugger.cpp"
#include "../util/stringutils.cpp"

/* these should probably be in clientOnly.cpp. however as a first step putting here so that there
   is no risk we break the LIB build of cpp client. so this is interim.
*/
namespace mongo {
    void printStackTrace(class std::basic_ostream<char,struct std::char_traits<char> > &) { }
    void mongo_breakpoint() { }
    void printStackAndExit( int signalNum ) {
        ::_exit( EXIT_ABRUPT );
    }
}
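mongo_client_lib.cpp is a single-translation-unit build of the driver: an application compiles it alongside its own sources instead of linking a prebuilt libmongoclient. Following the build comments in the examples above, a typical invocation looks like the sketch below (paths and boost library names vary by platform):

// From inside client/examples/, mirroring insert_demo.cpp's build line:
//
//   g++ -I .. -I ../.. myapp.cpp ../mongo_client_lib.cpp \
//       -lboost_thread-mt -lboost_filesystem -o myapp
//
// myapp.cpp (a hypothetical application file) then only needs
// #include "dbclient.h" to use the driver.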
804 client/parallel.cpp Normal file

@@ -0,0 +1,804 @@
// parallel.cpp
/*
 * Copyright 2010 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "pch.h"
#include "parallel.h"
#include "connpool.h"
#include "../db/queryutil.h"
#include "../db/dbmessage.h"
#include "../s/util.h"
#include "../s/shard.h"

namespace mongo {

    // -------- ClusteredCursor -----------

    ClusteredCursor::ClusteredCursor( QueryMessage& q ) {
        _ns = q.ns;
        _query = q.query.copy();
        _options = q.queryOptions;
        _fields = q.fields.copy();
        _batchSize = q.ntoreturn;
        if ( _batchSize == 1 )
            _batchSize = 2;

        _done = false;
        _didInit = false;
    }

    ClusteredCursor::ClusteredCursor( const string& ns , const BSONObj& q , int options , const BSONObj& fields ) {
        _ns = ns;
        _query = q.getOwned();
        _options = options;
        _fields = fields.getOwned();
        _batchSize = 0;

        _done = false;
        _didInit = false;
    }

    ClusteredCursor::~ClusteredCursor() {
        _done = true; // just in case
    }

    void ClusteredCursor::init() {
        if ( _didInit )
            return;
        _didInit = true;
        _init();
    }

    void ClusteredCursor::_checkCursor( DBClientCursor * cursor ) {
        assert( cursor );

        if ( cursor->hasResultFlag( ResultFlag_ShardConfigStale ) ) {
            throw StaleConfigException( _ns , "ClusteredCursor::_checkCursor" );
        }

        if ( cursor->hasResultFlag( ResultFlag_ErrSet ) ) {
            BSONObj o = cursor->next();
            throw UserException( o["code"].numberInt() , o["$err"].String() );
        }
    }

    auto_ptr<DBClientCursor> ClusteredCursor::query( const string& server , int num , BSONObj extra , int skipLeft , bool lazy ) {
        uassert( 10017 , "cursor already done" , ! _done );
        assert( _didInit );

        BSONObj q = _query;
        if ( ! extra.isEmpty() ) {
            q = concatQuery( q , extra );
        }

        try {
            ShardConnection conn( server , _ns );

            if ( conn.setVersion() ) {
                conn.done();
                throw StaleConfigException( _ns , "ClusteredCursor::query" , true );
            }

            LOG(5) << "ClusteredCursor::query (" << type() << ") server:" << server
                   << " ns:" << _ns << " query:" << q << " num:" << num
                   << " _fields:" << _fields << " options: " << _options << endl;

            auto_ptr<DBClientCursor> cursor =
                conn->query( _ns , q , num , 0 , ( _fields.isEmpty() ? 0 : &_fields ) , _options , _batchSize == 0 ? 0 : _batchSize + skipLeft );

            if ( ! cursor.get() && _options & QueryOption_PartialResults ) {
                _done = true;
                conn.done();
                return cursor;
            }

            massert( 13633 , str::stream() << "error querying server: " << server , cursor.get() );

            cursor->attach( &conn ); // this calls done on conn
            assert( ! conn.ok() );
            _checkCursor( cursor.get() );
            return cursor;
        }
        catch ( SocketException& e ) {
            if ( ! ( _options & QueryOption_PartialResults ) )
                throw e;
            _done = true;
            return auto_ptr<DBClientCursor>();
        }
    }

    BSONObj ClusteredCursor::explain( const string& server , BSONObj extra ) {
        BSONObj q = _query;
        if ( ! extra.isEmpty() ) {
            q = concatQuery( q , extra );
        }

        BSONObj o;

        ShardConnection conn( server , _ns );
        auto_ptr<DBClientCursor> cursor = conn->query( _ns , Query( q ).explain() , abs( _batchSize ) * -1 , 0 , _fields.isEmpty() ? 0 : &_fields );
        if ( cursor.get() && cursor->more() )
            o = cursor->next().getOwned();
        conn.done();
        return o;
    }

    BSONObj ClusteredCursor::concatQuery( const BSONObj& query , const BSONObj& extraFilter ) {
        if ( ! query.hasField( "query" ) )
            return _concatFilter( query , extraFilter );

        BSONObjBuilder b;
        BSONObjIterator i( query );
        while ( i.more() ) {
            BSONElement e = i.next();

            if ( strcmp( e.fieldName() , "query" ) ) {
                b.append( e );
                continue;
            }

            b.append( "query" , _concatFilter( e.embeddedObjectUserCheck() , extraFilter ) );
        }
        return b.obj();
    }

    BSONObj ClusteredCursor::_concatFilter( const BSONObj& filter , const BSONObj& extra ) {
        BSONObjBuilder b;
        b.appendElements( filter );
        b.appendElements( extra );
        return b.obj();
        // TODO: should do some simplification here if possibl ideally
    }

    BSONObj ClusteredCursor::explain() {
        // Note: by default we filter out allPlans and oldPlan in the shell's
        // explain() function. If you add any recursive structures, make sure to
        // edit the JS to make sure everything gets filtered.

        BSONObjBuilder b;
        b.append( "clusteredType" , type() );

        long long millis = 0;
        double numExplains = 0;

        map<string,long long> counters;

        map<string,list<BSONObj> > out;
        {
            _explain( out );

            BSONObjBuilder x( b.subobjStart( "shards" ) );
            for ( map<string,list<BSONObj> >::iterator i=out.begin(); i!=out.end(); ++i ) {
                string shard = i->first;
                list<BSONObj> l = i->second;
                BSONArrayBuilder y( x.subarrayStart( shard ) );
                for ( list<BSONObj>::iterator j=l.begin(); j!=l.end(); ++j ) {
                    BSONObj temp = *j;
                    y.append( temp );

                    BSONObjIterator k( temp );
                    while ( k.more() ) {
                        BSONElement z = k.next();
                        if ( z.fieldName()[0] != 'n' )
                            continue;
                        long long& c = counters[z.fieldName()];
                        c += z.numberLong();
                    }

                    millis += temp["millis"].numberLong();
                    numExplains++;
                }
                y.done();
            }
            x.done();
        }

        for ( map<string,long long>::iterator i=counters.begin(); i!=counters.end(); ++i )
            b.appendNumber( i->first , i->second );

        b.appendNumber( "millisTotal" , millis );
        b.append( "millisAvg" , (int)((double)millis / numExplains ) );
        b.append( "numQueries" , (int)numExplains );
        b.append( "numShards" , (int)out.size() );

        return b.obj();
    }
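concatQuery/_concatFilter above merge an extra filter into a query, descending into the "query" wrapper when one is present. A worked example of the flat case, using only the BSON builders from the driver:

#include <iostream>
#include "client/dbclient.h"

// { x: 1 } merged with { y: { $gt: 5 } } yields { x: 1, y: { $gt: 5 } }.
int main() {
    mongo::BSONObjBuilder b;
    b.appendElements( BSON( "x" << 1 ) );                   // original filter
    b.appendElements( BSON( "y" << BSON( "$gt" << 5 ) ) );  // extra filter
    std::cout << b.obj() << std::endl;
    return 0;
}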
    // -------- FilteringClientCursor -----------
    FilteringClientCursor::FilteringClientCursor( const BSONObj filter )
        : _matcher( filter ) , _done( true ) {
    }

    FilteringClientCursor::FilteringClientCursor( auto_ptr<DBClientCursor> cursor , const BSONObj filter )
        : _matcher( filter ) , _cursor( cursor ) , _done( cursor.get() == 0 ) {
    }

    FilteringClientCursor::FilteringClientCursor( DBClientCursor* cursor , const BSONObj filter )
        : _matcher( filter ) , _cursor( cursor ) , _done( cursor == 0 ) {
    }


    FilteringClientCursor::~FilteringClientCursor() {
    }

    void FilteringClientCursor::reset( auto_ptr<DBClientCursor> cursor ) {
        _cursor = cursor;
        _next = BSONObj();
        _done = _cursor.get() == 0;
    }

    void FilteringClientCursor::reset( DBClientCursor* cursor ) {
        _cursor.reset( cursor );
        _next = BSONObj();
        _done = cursor == 0;
    }


    bool FilteringClientCursor::more() {
        if ( ! _next.isEmpty() )
            return true;

        if ( _done )
            return false;

        _advance();
        return ! _next.isEmpty();
    }

    BSONObj FilteringClientCursor::next() {
        assert( ! _next.isEmpty() );
        assert( ! _done );

        BSONObj ret = _next;
        _next = BSONObj();
        _advance();
        return ret;
    }

    BSONObj FilteringClientCursor::peek() {
        if ( _next.isEmpty() )
            _advance();
        return _next;
    }

    void FilteringClientCursor::_advance() {
        assert( _next.isEmpty() );
        if ( ! _cursor.get() || _done )
            return;

        while ( _cursor->more() ) {
            _next = _cursor->next();
            if ( _matcher.matches( _next ) ) {
                if ( ! _cursor->moreInCurrentBatch() )
                    _next = _next.getOwned();
                return;
            }
            _next = BSONObj();
        }
        _done = true;
    }
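FilteringClientCursor's contract is that peek() is idempotent and next() consumes; _advance() does the work of skipping documents the matcher rejects. The same pattern in a generic, driver-free sketch (names and types are ours):

#include <vector>

// Generic peek/next cursor that skips items a predicate rejects.
// As with the original, callers should check more() before next().
template <typename T, typename Pred>
class FilteringCursor {
public:
    FilteringCursor( const std::vector<T>& src , Pred pred )
        : _src(src) , _pred(pred) , _pos(0) , _has(false) {}

    bool more() { _advance(); return _has; }
    const T& peek() { _advance(); return _next; }          // does not consume
    T next() { _advance(); _has = false; return _next; }   // consumes

private:
    void _advance() {
        while ( !_has && _pos < _src.size() ) {            // cache the first match
            if ( _pred( _src[_pos] ) ) { _next = _src[_pos]; _has = true; }
            _pos++;
        }
    }
    std::vector<T> _src; Pred _pred; size_t _pos; T _next; bool _has;
};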
    // -------- SerialServerClusteredCursor -----------

    SerialServerClusteredCursor::SerialServerClusteredCursor( const set<ServerAndQuery>& servers , QueryMessage& q , int sortOrder) : ClusteredCursor( q ) {
        for ( set<ServerAndQuery>::const_iterator i = servers.begin(); i!=servers.end(); i++ )
            _servers.push_back( *i );

        if ( sortOrder > 0 )
            sort( _servers.begin() , _servers.end() );
        else if ( sortOrder < 0 )
            sort( _servers.rbegin() , _servers.rend() );

        _serverIndex = 0;

        _needToSkip = q.ntoskip;
    }

    bool SerialServerClusteredCursor::more() {

        // TODO: optimize this by sending on first query and then back counting
        // tricky in case where 1st server doesn't have any after
        // need it to send n skipped
        while ( _needToSkip > 0 && _current.more() ) {
            _current.next();
            _needToSkip--;
        }

        if ( _current.more() )
            return true;

        if ( _serverIndex >= _servers.size() ) {
            return false;
        }

        ServerAndQuery& sq = _servers[_serverIndex++];

        _current.reset( query( sq._server , 0 , sq._extra ) );
        return more();
    }

    BSONObj SerialServerClusteredCursor::next() {
        uassert( 10018 , "no more items" , more() );
        return _current.next();
    }

    void SerialServerClusteredCursor::_explain( map< string,list<BSONObj> >& out ) {
        for ( unsigned i=0; i<_servers.size(); i++ ) {
            ServerAndQuery& sq = _servers[i];
            list<BSONObj> & l = out[sq._server];
            l.push_back( explain( sq._server , sq._extra ) );
        }
    }
    // -------- ParallelSortClusteredCursor -----------

    ParallelSortClusteredCursor::ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , QueryMessage& q ,
            const BSONObj& sortKey )
        : ClusteredCursor( q ) , _servers( servers ) {
        _sortKey = sortKey.getOwned();
        _needToSkip = q.ntoskip;
        _finishCons();
    }

    ParallelSortClusteredCursor::ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , const string& ns ,
            const Query& q ,
            int options , const BSONObj& fields )
        : ClusteredCursor( ns , q.obj , options , fields ) , _servers( servers ) {
        _sortKey = q.getSort().copy();
        _needToSkip = 0;
        _finishCons();
    }

    void ParallelSortClusteredCursor::_finishCons() {
        _numServers = _servers.size();
        _lastFrom = 0;
        _cursors = 0;

        if ( ! _sortKey.isEmpty() && ! _fields.isEmpty() ) {
            // we need to make sure the sort key is in the projection

            set<string> sortKeyFields;
            _sortKey.getFieldNames(sortKeyFields);

            BSONObjBuilder b;
            bool isNegative = false;
            {
                BSONObjIterator i( _fields );
                while ( i.more() ) {
                    BSONElement e = i.next();
                    b.append( e );

                    string fieldName = e.fieldName();

                    // exact field
                    bool found = sortKeyFields.erase(fieldName);

                    // subfields
                    set<string>::const_iterator begin = sortKeyFields.lower_bound(fieldName + ".\x00");
                    set<string>::const_iterator end = sortKeyFields.lower_bound(fieldName + ".\xFF");
                    sortKeyFields.erase(begin, end);

                    if ( ! e.trueValue() ) {
                        uassert( 13431 , "have to have sort key in projection and removing it" , !found && begin == end );
                    }
                    else if (!e.isABSONObj()) {
                        isNegative = true;
                    }
                }
            }

            if (isNegative) {
                for (set<string>::const_iterator it(sortKeyFields.begin()), end(sortKeyFields.end()); it != end; ++it) {
                    b.append(*it, 1);
                }
            }

            _fields = b.obj();
        }
    }
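The subfield handling in _finishCons leans on set ordering: every dotted subfield of a field "a" sorts at or after "a." and before "a." followed by 0xFF, so a single range erase removes them all. That trick in isolation:

#include <iostream>
#include <set>
#include <string>

// One erase(range) call drops every subfield of "a" and nothing else.
int main() {
    std::set<std::string> keys;
    keys.insert( "a.b" ); keys.insert( "a.c" ); keys.insert( "ab" ); keys.insert( "z" );

    std::set<std::string>::iterator begin = keys.lower_bound( "a." );
    std::set<std::string>::iterator end   = keys.lower_bound( std::string( "a." ) + '\xFF' );
    keys.erase( begin , end );   // removes a.b and a.c; "ab" and "z" survive

    for ( std::set<std::string>::iterator i = keys.begin(); i != keys.end(); ++i )
        std::cout << *i << std::endl;   // prints: ab  then  z
    return 0;
}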
    // TODO: Merge with futures API? We do a lot of error checking here that would be useful elsewhere.
    void ParallelSortClusteredCursor::_init() {

        // log() << "Starting parallel search..." << endl;

        // make sure we're not already initialized
        assert( ! _cursors );
        _cursors = new FilteringClientCursor[_numServers];

        bool returnPartial = ( _options & QueryOption_PartialResults );

        vector<ServerAndQuery> queries( _servers.begin(), _servers.end() );
        set<int> retryQueries;
        int finishedQueries = 0;

        vector< shared_ptr<ShardConnection> > conns;
        vector<string> servers;

        // Since we may get all sorts of errors, record them all as they come and throw them later if necessary
        vector<string> staleConfigExs;
        vector<string> socketExs;
        vector<string> otherExs;
        bool allConfigStale = false;

        int retries = -1;

        // Loop through all the queries until we've finished or gotten a socket exception on all of them
        // We break early for non-socket exceptions, and socket exceptions if we aren't returning partial results
        do {
            retries++;

            bool firstPass = retryQueries.size() == 0;

            if( ! firstPass ){
                log() << "retrying " << ( returnPartial ? "(partial) " : "" ) << "parallel connection to ";
                for( set<int>::iterator it = retryQueries.begin(); it != retryQueries.end(); ++it ){
                    log() << queries[*it]._server << ", ";
                }
                log() << finishedQueries << " finished queries." << endl;
            }

            size_t num = 0;
            for ( vector<ServerAndQuery>::iterator it = queries.begin(); it != queries.end(); ++it ) {
                size_t i = num++;

                const ServerAndQuery& sq = *it;

                // If we're not retrying this cursor on later passes, continue
                if( ! firstPass && retryQueries.find( i ) == retryQueries.end() ) continue;

                // log() << "Querying " << _query << " from " << _ns << " for " << sq._server << endl;

                BSONObj q = _query;
                if ( ! sq._extra.isEmpty() ) {
                    q = concatQuery( q , sq._extra );
                }

                string errLoc = " @ " + sq._server;

                if( firstPass ){

                    // This may be the first time connecting to this shard, if so we can get an error here
                    try {
                        conns.push_back( shared_ptr<ShardConnection>( new ShardConnection( sq._server , _ns ) ) );
                    }
                    catch( std::exception& e ){
                        socketExs.push_back( e.what() + errLoc );
                        if( ! returnPartial ){
                            num--;
                            break;
                        }
                        conns.push_back( shared_ptr<ShardConnection>() );
                        continue;
                    }

                    servers.push_back( sq._server );
                }

                if ( conns[i]->setVersion() ) {
                    conns[i]->done();
                    staleConfigExs.push_back( (string)"stale config detected for " + StaleConfigException( _ns , "ParallelCursor::_init" , true ).what() + errLoc );
                    break;
                }

                LOG(5) << "ParallelSortClusteredCursor::init server:" << sq._server << " ns:" << _ns
                       << " query:" << q << " _fields:" << _fields << " options: " << _options << endl;

                if( ! _cursors[i].raw() )
                    _cursors[i].reset( new DBClientCursor( conns[i]->get() , _ns , q ,
                                                           0 , // nToReturn
                                                           0 , // nToSkip
                                                           _fields.isEmpty() ? 0 : &_fields , // fieldsToReturn
                                                           _options ,
                                                           // NtoReturn is weird.
                                                           // If zero, it means use default size, so we do that for all cursors
                                                           // If positive, it's the batch size (we don't want this cursor limiting results), that's
                                                           // done at a higher level
                                                           // If negative, it's the batch size, but we don't create a cursor - so we don't want
                                                           // to create a child cursor either.
                                                           // Either way, if non-zero, we want to pull back the batch size + the skip amount as
                                                           // quickly as possible. Potentially, for a cursor on a single shard or if we keep better track of
                                                           // chunks, we can actually add the skip value into the cursor and/or make some assumptions about the
                                                           // return value size ( (batch size + skip amount) / num_servers ).
                                                           _batchSize == 0 ? 0 :
                                                               ( _batchSize > 0 ? _batchSize + _needToSkip :
                                                                                  _batchSize - _needToSkip ) // batchSize
                                                         ) );

                try{
                    _cursors[i].raw()->initLazy( ! firstPass );
                }
                catch( SocketException& e ){
                    socketExs.push_back( e.what() + errLoc );
                    _cursors[i].reset( NULL );
                    conns[i]->done();
                    if( ! returnPartial ) break;
                }
                catch( std::exception& e){
                    otherExs.push_back( e.what() + errLoc );
                    _cursors[i].reset( NULL );
                    conns[i]->done();
                    break;
                }

            }

            // Go through all the potentially started cursors and finish initializing them or log any errors and
            // potentially retry
            // TODO: Better error classification would make this easier, errors are indicated in all sorts of ways
            // here that we need to trap.
            for ( size_t i = 0; i < num; i++ ) {

                // log() << "Finishing query for " << cons[i].get()->getHost() << endl;
                string errLoc = " @ " + queries[i]._server;

                if( ! _cursors[i].raw() || ( ! firstPass && retryQueries.find( i ) == retryQueries.end() ) ){
                    if( conns[i] ) conns[i].get()->done();
                    continue;
                }

                assert( conns[i] );
                retryQueries.erase( i );

                bool retry = false;

                try {

                    if( ! _cursors[i].raw()->initLazyFinish( retry ) ) {

                        warning() << "invalid result from " << conns[i]->getHost() << ( retry ? ", retrying" : "" ) << endl;
                        _cursors[i].reset( NULL );

                        if( ! retry ){
                            socketExs.push_back( str::stream() << "error querying server: " << servers[i] );
                            conns[i]->done();
                        }
                        else {
                            retryQueries.insert( i );
                        }

                        continue;
                    }
                }
                catch ( MsgAssertionException& e ){
                    socketExs.push_back( e.what() + errLoc );
                    _cursors[i].reset( NULL );
                    conns[i]->done();
                    continue;
                }
                catch ( SocketException& e ) {
                    socketExs.push_back( e.what() + errLoc );
                    _cursors[i].reset( NULL );
                    conns[i]->done();
                    continue;
                }
                catch( std::exception& e ){
                    otherExs.push_back( e.what() + errLoc );
                    _cursors[i].reset( NULL );
                    conns[i]->done();
                    continue;
                }

                try {
                    _cursors[i].raw()->attach( conns[i].get() ); // this calls done on conn
                    _checkCursor( _cursors[i].raw() );

                    finishedQueries++;
                }
                catch ( StaleConfigException& e ){

                    // Our stored configuration data is actually stale, we need to reload it
                    // when we throw our exception
                    allConfigStale = true;

                    staleConfigExs.push_back( (string)"stale config detected for " + e.what() + errLoc );
                    _cursors[i].reset( NULL );
                    conns[i]->done();
                    continue;
                }
                catch( std::exception& e ){
                    otherExs.push_back( e.what() + errLoc );
                    _cursors[i].reset( NULL );
                    conns[i]->done();
                    continue;
                }
            }

            // Don't exceed our max retries, should not happen
            assert( retries < 5 );
        }
        while( retryQueries.size() > 0 /* something to retry */ &&
               ( socketExs.size() == 0 || returnPartial ) /* no conn issues */ &&
               staleConfigExs.size() == 0 /* no config issues */ &&
               otherExs.size() == 0 /* no other issues */);

        // Assert that our conns are all closed!
        for( vector< shared_ptr<ShardConnection> >::iterator i = conns.begin(); i < conns.end(); ++i ){
            assert( ! (*i) || ! (*i)->ok() );
        }

        // Handle errors we got during initialization.
        // If we're returning partial results, we can ignore socketExs, but nothing else
        // Log a warning in any case, so we don't lose these messages
        bool throwException = ( socketExs.size() > 0 && ! returnPartial ) || staleConfigExs.size() > 0 || otherExs.size() > 0;

        if( socketExs.size() > 0 || staleConfigExs.size() > 0 || otherExs.size() > 0 ) {

            vector<string> errMsgs;

            errMsgs.insert( errMsgs.end(), staleConfigExs.begin(), staleConfigExs.end() );
            errMsgs.insert( errMsgs.end(), otherExs.begin(), otherExs.end() );
            errMsgs.insert( errMsgs.end(), socketExs.begin(), socketExs.end() );

            stringstream errMsg;
            errMsg << "could not initialize cursor across all shards because : ";
            for( vector<string>::iterator i = errMsgs.begin(); i != errMsgs.end(); i++ ){
                if( i != errMsgs.begin() ) errMsg << " :: and :: ";
                errMsg << *i;
            }

            if( throwException && staleConfigExs.size() > 0 )
                throw StaleConfigException( _ns , errMsg.str() , ! allConfigStale );
            else if( throwException )
                throw DBException( errMsg.str(), 14827 );
            else
                warning() << errMsg.str() << endl;
        }

        if( retries > 0 )
            log() << "successfully finished parallel query after " << retries << " retries" << endl;

    }
ParallelSortClusteredCursor::~ParallelSortClusteredCursor() {
|
||||
delete [] _cursors;
|
||||
_cursors = 0;
|
||||
}
|
||||
|
||||
bool ParallelSortClusteredCursor::more() {
|
||||
|
||||
if ( _needToSkip > 0 ) {
|
||||
int n = _needToSkip;
|
||||
_needToSkip = 0;
|
||||
|
||||
while ( n > 0 && more() ) {
|
||||
BSONObj x = next();
|
||||
n--;
|
||||
}
|
||||
|
||||
_needToSkip = n;
|
||||
}
|
||||
|
||||
for ( int i=0; i<_numServers; i++ ) {
|
||||
if ( _cursors[i].more() )
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
BSONObj ParallelSortClusteredCursor::next() {
|
||||
BSONObj best = BSONObj();
|
||||
int bestFrom = -1;
|
||||
|
||||
for( int j = 0; j < _numServers; j++ ){
|
||||
|
||||
// Iterate _numServers times, starting one past the last server we used.
|
||||
// This means we actually start at server #1, not #0, but shouldn't matter
|
||||
|
||||
int i = ( j + _lastFrom + 1 ) % _numServers;
|
||||
|
||||
if ( ! _cursors[i].more() )
|
||||
continue;
|
||||
|
||||
BSONObj me = _cursors[i].peek();
|
||||
|
||||
if ( best.isEmpty() ) {
|
||||
best = me;
|
||||
bestFrom = i;
|
||||
if( _sortKey.isEmpty() ) break;
|
||||
continue;
|
||||
}
|
||||
|
||||
int comp = best.woSortOrder( me , _sortKey , true );
|
||||
if ( comp < 0 )
|
||||
continue;
|
||||
|
||||
best = me;
|
||||
bestFrom = i;
|
||||
}
|
||||
|
||||
_lastFrom = bestFrom;
|
||||
|
||||
uassert( 10019 , "no more elements" , ! best.isEmpty() );
|
||||
_cursors[bestFrom].next();
|
||||
|
||||
return best;
|
||||
}
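The loop above is a small k-way merge: peek at every cursor that still has data, keep the document that sorts first, and start each scan one past the previous winner so ties rotate fairly across servers. A self-contained toy analogue over ascending integer sources (illustrative only, not part of this diff):

    #include <vector>
    #include <cstddef>

    // Precondition: at least one source still has data (next() uasserts 10019).
    int mergeNext( const std::vector< std::vector<int> >& srcs ,
                   std::vector<size_t>& pos , int& lastFrom ) {
        int n = (int)srcs.size();
        int best = 0;
        int bestFrom = -1;
        for ( int j = 0; j < n; j++ ) {
            int i = ( j + lastFrom + 1 ) % n;     // rotate the starting point
            if ( pos[i] >= srcs[i].size() )
                continue;                         // this source is exhausted
            int me = srcs[i][ pos[i] ];           // peek, like _cursors[i].peek()
            if ( bestFrom == -1 || me <= best ) { // <= so ties rotate, as above
                best = me;
                bestFrom = i;
            }
        }
        lastFrom = bestFrom;                      // remember the winner
        pos[bestFrom]++;                          // consume, like _cursors[bestFrom].next()
        return best;
    }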
void ParallelSortClusteredCursor::_explain( map< string,list<BSONObj> >& out ) {
    for ( set<ServerAndQuery>::iterator i=_servers.begin(); i!=_servers.end(); ++i ) {
        const ServerAndQuery& sq = *i;
        list<BSONObj> & l = out[sq._server];
        l.push_back( explain( sq._server , sq._extra ) );
    }

}

// -----------------
// ---- Future -----
// -----------------

Future::CommandResult::CommandResult( const string& server , const string& db , const BSONObj& cmd , int options , DBClientBase * conn )
    :_server(server) ,_db(db) , _options(options), _cmd(cmd) ,_conn(conn) ,_done(false)
{
    try {
        if ( ! _conn ){
            _connHolder.reset( new ScopedDbConnection( _server ) );
            _conn = _connHolder->get();
        }

        if ( _conn->lazySupported() ) {
            _cursor.reset( new DBClientCursor(_conn, _db + ".$cmd", _cmd, -1/*limit*/, 0, NULL, _options, 0));
            _cursor->initLazy();
        }
        else {
            _done = true; // we set _done first because even if there is an error we're done
            _ok = _conn->runCommand( db , cmd , _res , options );
        }
    }
    catch ( std::exception& e ) {
        error() << "Future::spawnCommand (part 1) exception: " << e.what() << endl;
        _ok = false;
        _done = true;
    }
}

bool Future::CommandResult::join() {
    if (_done)
        return _ok;

    try {
        // TODO: Allow retries?
        bool retry = false;
        bool finished = _cursor->initLazyFinish( retry );

        // Shouldn't need to communicate with server any more
        if ( _connHolder )
            _connHolder->done();

        uassert(14812, str::stream() << "Error running command on server: " << _server, finished);
        massert(14813, "Command returned nothing", _cursor->more());

        _res = _cursor->nextSafe();
        _ok = _res["ok"].trueValue();

    }
    catch ( std::exception& e ) {
        error() << "Future::spawnCommand (part 2) exception: " << e.what() << endl;
        _ok = false;
    }

    _done = true;
    return _ok;
}

shared_ptr<Future::CommandResult> Future::spawnCommand( const string& server , const string& db , const BSONObj& cmd , int options , DBClientBase * conn ) {
    shared_ptr<Future::CommandResult> res (new Future::CommandResult( server , db , cmd , options , conn ));
    return res;
}

}
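A caller-side sketch of the Future API defined above; the host name and command below are hypothetical placeholders, with error handling reduced to the essentials:

    shared_ptr<Future::CommandResult> res =
        Future::spawnCommand( "shard1.example.net:27018" ,  // hypothetical host
                              "admin" ,                     // db to run against
                              BSON( "serverStatus" << 1 ) , // any command object
                              0 /* options */ );

    // ... other work can proceed here while the command is in flight ...

    if ( res->join() ) {                 // blocks until the command completes
        BSONObj status = res->result(); // full command response
    }
    else {
        warning() << "command failed on " << res->getServer() << endl;
    }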
client/parallel.h (new file, 315 lines)
@@ -0,0 +1,315 @@
// parallel.h

/* Copyright 2009 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
   tools for working in parallel/sharded/clustered environment
 */

#include "../pch.h"
#include "dbclient.h"
#include "redef_macros.h"
#include "../db/dbmessage.h"
#include "../db/matcher.h"
#include "../util/concurrency/mvar.h"

namespace mongo {

    /**
     * holder for a server address and a query to run
     */
    class ServerAndQuery {
    public:
        ServerAndQuery( const string& server , BSONObj extra = BSONObj() , BSONObj orderObject = BSONObj() ) :
            _server( server ) , _extra( extra.getOwned() ) , _orderObject( orderObject.getOwned() ) {
        }

        bool operator<( const ServerAndQuery& other ) const {
            if ( ! _orderObject.isEmpty() )
                return _orderObject.woCompare( other._orderObject ) < 0;

            if ( _server < other._server )
                return true;
            if ( other._server > _server )
                return false;
            return _extra.woCompare( other._extra ) < 0;
        }

        string toString() const {
            StringBuilder ss;
            ss << "server:" << _server << " _extra:" << _extra.toString() << " _orderObject:" << _orderObject.toString();
            return ss.str();
        }

        operator string() const {
            return toString();
        }

        string _server;
        BSONObj _extra;
        BSONObj _orderObject;
    };

    /**
     * this is a cursor that works over a set of servers
     * can be used in serial/parallel as controlled by sub classes
     */
    class ClusteredCursor {
    public:
        ClusteredCursor( QueryMessage& q );
        ClusteredCursor( const string& ns , const BSONObj& q , int options=0 , const BSONObj& fields=BSONObj() );
        virtual ~ClusteredCursor();

        /** call before using */
        void init();

        virtual bool more() = 0;
        virtual BSONObj next() = 0;

        static BSONObj concatQuery( const BSONObj& query , const BSONObj& extraFilter );

        virtual string type() const = 0;

        virtual BSONObj explain();

    protected:

        virtual void _init() = 0;

        auto_ptr<DBClientCursor> query( const string& server , int num = 0 , BSONObj extraFilter = BSONObj() , int skipLeft = 0 , bool lazy=false );
        BSONObj explain( const string& server , BSONObj extraFilter = BSONObj() );

        /**
         * checks the cursor for any errors
         * will throw an exception if an error is encountered
         */
        void _checkCursor( DBClientCursor * cursor );

        static BSONObj _concatFilter( const BSONObj& filter , const BSONObj& extraFilter );

        virtual void _explain( map< string,list<BSONObj> >& out ) = 0;

        string _ns;
        BSONObj _query;
        int _options;
        BSONObj _fields;
        int _batchSize;

        bool _didInit;

        bool _done;
    };


    class FilteringClientCursor {
    public:
        FilteringClientCursor( const BSONObj filter = BSONObj() );
        FilteringClientCursor( DBClientCursor* cursor , const BSONObj filter = BSONObj() );
        FilteringClientCursor( auto_ptr<DBClientCursor> cursor , const BSONObj filter = BSONObj() );
        ~FilteringClientCursor();

        void reset( auto_ptr<DBClientCursor> cursor );
        void reset( DBClientCursor* cursor );

        bool more();
        BSONObj next();

        BSONObj peek();

        DBClientCursor* raw() { return _cursor.get(); }

    private:
        void _advance();

        Matcher _matcher;
        auto_ptr<DBClientCursor> _cursor;

        BSONObj _next;
        bool _done;
    };


    class Servers {
    public:
        Servers() {
        }

        void add( const ServerAndQuery& s ) {
            add( s._server , s._extra );
        }

        void add( const string& server , const BSONObj& filter ) {
            vector<BSONObj>& mine = _filters[server];
            mine.push_back( filter.getOwned() );
        }

        // TODO: pick a less horrible name
        class View {
            View( const Servers* s ) {
                for ( map<string, vector<BSONObj> >::const_iterator i=s->_filters.begin(); i!=s->_filters.end(); ++i ) {
                    _servers.push_back( i->first );
                    _filters.push_back( i->second );
                }
            }
        public:
            int size() const {
                return _servers.size();
            }

            string getServer( int n ) const {
                return _servers[n];
            }

            vector<BSONObj> getFilter( int n ) const {
                return _filters[ n ];
            }

        private:
            vector<string> _servers;
            vector< vector<BSONObj> > _filters;

            friend class Servers;
        };

        View view() const {
            return View( this );
        }


    private:
        map<string, vector<BSONObj> > _filters;

        friend class View;
    };


    /**
     * runs a query in serial across any number of servers
     * returns all results from 1 server, then the next, etc...
     */
    class SerialServerClusteredCursor : public ClusteredCursor {
    public:
        SerialServerClusteredCursor( const set<ServerAndQuery>& servers , QueryMessage& q , int sortOrder=0);
        virtual bool more();
        virtual BSONObj next();
        virtual string type() const { return "SerialServer"; }

    protected:
        virtual void _explain( map< string,list<BSONObj> >& out );

        void _init() {}

        vector<ServerAndQuery> _servers;
        unsigned _serverIndex;

        FilteringClientCursor _current;

        int _needToSkip;
    };


    /**
     * runs a query in parallel across N servers
     * and merge-sorts the results
     */
    class ParallelSortClusteredCursor : public ClusteredCursor {
    public:
        ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , QueryMessage& q , const BSONObj& sortKey );
        ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , const string& ns ,
                                     const Query& q , int options=0, const BSONObj& fields=BSONObj() );
        virtual ~ParallelSortClusteredCursor();
        virtual bool more();
        virtual BSONObj next();
        virtual string type() const { return "ParallelSort"; }
    protected:
        void _finishCons();
        void _init();

        virtual void _explain( map< string,list<BSONObj> >& out );

        int _numServers;
        int _lastFrom;
        set<ServerAndQuery> _servers;
        BSONObj _sortKey;

        FilteringClientCursor * _cursors;
        int _needToSkip;
    };

    /**
     * tools for doing asynchronous operations
     * right now uses underlying sync network ops and uses another thread
     * should be changed to use non-blocking io
     */
    class Future {
    public:
        class CommandResult {
        public:

            string getServer() const { return _server; }

            bool isDone() const { return _done; }

            bool ok() const {
                assert( _done );
                return _ok;
            }

            BSONObj result() const {
                assert( _done );
                return _res;
            }

            /**
               blocks until command is done
               returns ok()
             */
            bool join();

        private:

            CommandResult( const string& server , const string& db , const BSONObj& cmd , int options , DBClientBase * conn );

            string _server;
            string _db;
            int _options;
            BSONObj _cmd;
            DBClientBase * _conn;
            scoped_ptr<ScopedDbConnection> _connHolder; // used if not provided a connection

            scoped_ptr<DBClientCursor> _cursor;

            BSONObj _res;
            bool _ok;
            bool _done;

            friend class Future;
        };


        /**
         * @param server server name
         * @param db db name
         * @param cmd cmd to exec
         * @param conn optional connection to use. will use standard pooled if not specified
         */
        static shared_ptr<CommandResult> spawnCommand( const string& server , const string& db , const BSONObj& cmd , int options , DBClientBase * conn = 0 );
    };


}

#include "undef_macros.h"
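A minimal usage sketch for ParallelSortClusteredCursor as declared above, assuming two shards that hold the same sharded collection; the addresses and namespace are invented for illustration, and the sort key is assumed to be carried by the Query object:

    set<ServerAndQuery> servers;
    servers.insert( ServerAndQuery( "shardA.example.net:27018" ) );
    servers.insert( ServerAndQuery( "shardB.example.net:27018" ) );

    // query test.foo on both shards, merge-sorted by { x : 1 }
    ParallelSortClusteredCursor cursor( servers , "test.foo" ,
                                        Query().sort( BSON( "x" << 1 ) ) );
    cursor.init();                       // ClusteredCursor: "call before using"
    while ( cursor.more() ) {
        BSONObj doc = cursor.next();     // globally ordered across shards
    }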
@@ -23,16 +23,17 @@
#define MONGO_MACROS_PUSHED 1

// util/allocator.h
#ifdef MONGO_MALLOC
#pragma push_macro("malloc")
#undef malloc
#define malloc MONGO_malloc
#pragma push_macro("realloc")
#undef realloc
#define realloc MONGO_realloc
#endif

// util/assert_util.h
#pragma push_macro("assert")
#undef assert
#define assert MONGO_assert
#pragma push_macro("verify")
#undef verify
#define verify MONGO_verify

@@ -48,9 +49,7 @@
#pragma push_macro("uassert")
#undef uassert
#define uassert MONGO_uassert
#pragma push_macro("uassertStatusOK")
#undef uassertStatusOK
#define uassertStatusOK MONGO_uassertStatusOK
#define BOOST_CHECK_EXCEPTION MONGO_BOOST_CHECK_EXCEPTION
#pragma push_macro("DESTRUCTOR_GUARD")
#undef DESTRUCTOR_GUARD
#define DESTRUCTOR_GUARD MONGO_DESTRUCTOR_GUARD

@@ -62,6 +61,18 @@
#pragma push_macro("PRINTFL")
#undef PRINTFL
#define PRINTFL MONGO_PRINTFL
#pragma push_macro("asctime")
#undef asctime
#define asctime MONGO_asctime
#pragma push_macro("gmtime")
#undef gmtime
#define gmtime MONGO_gmtime
#pragma push_macro("localtime")
#undef localtime
#define localtime MONGO_localtime
#pragma push_macro("ctime")
#undef ctime
#define ctime MONGO_ctime

// util/debug_util.h
#pragma push_macro("DEV")
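A standalone illustration of the push_macro/pop_macro dance these hunks perform: redef_macros.h saves whatever macro the including program had defined and substitutes the library's, and undef_macros.h later restores the original. The my_project_assert name below is invented for the example:

    // before including the driver: the program's own macro
    #define assert(x) my_project_assert(x)

    #pragma push_macro("assert")   // redef_macros.h: save the caller's macro
    #undef assert
    #define assert MONGO_assert    // the library's definition takes over

    // ... driver headers are compiled with MONGO_assert here ...

    #undef assert
    #pragma pop_macro("assert")    // undef_macros.h: restore the caller's macro
    // 'assert' expands to my_project_assert(x) again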
client/simple_client_demo.cpp (new file, 54 lines)
@@ -0,0 +1,54 @@
/* simple_client_demo.cpp

   See also : http://www.mongodb.org/pages/viewpage.action?pageId=133415

   How to build and run:

   (1) Using the mongoclient:
       g++ simple_client_demo.cpp -lmongoclient -lboost_thread-mt -lboost_filesystem -lboost_program_options
       ./a.out

   (2) using client_lib.cpp:
       g++ -I .. simple_client_demo.cpp mongo_client_lib.cpp -lboost_thread-mt -lboost_filesystem
       ./a.out
*/

#include <iostream>
#include "dbclient.h" // the mongo c++ driver

using namespace std;
using namespace mongo;
using namespace bson;

int main() {
    try {
        cout << "connecting to localhost..." << endl;
        DBClientConnection c;
        c.connect("localhost");
        cout << "connected ok" << endl;
        unsigned long long count = c.count("test.foo");
        cout << "count of existing documents in collection test.foo : " << count << endl;

        bo o = BSON( "hello" << "world" );
        c.insert("test.foo", o);

        string e = c.getLastError();
        if( !e.empty() ) {
            cout << "insert #1 failed: " << e << endl;
        }

        // make an index with a unique key constraint
        c.ensureIndex("test.foo", BSON("hello"<<1), /*unique*/true);

        c.insert("test.foo", o); // will cause a dup key error on "hello" field
        cout << "we expect a dup key error here:" << endl;
        cout << "  " << c.getLastErrorDetailed().toString() << endl;
    }
    catch(DBException& e) {
        cout << "caught DBException " << e.toString() << endl;
        return 1;
    }

    return 0;
}
client/syncclusterconnection.cpp (new file, 407 lines)
@@ -0,0 +1,407 @@
// syncclusterconnection.cpp
/*
 *    Copyright 2010 10gen Inc.
 *
 *    Licensed under the Apache License, Version 2.0 (the "License");
 *    you may not use this file except in compliance with the License.
 *    You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing, software
 *    distributed under the License is distributed on an "AS IS" BASIS,
 *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *    See the License for the specific language governing permissions and
 *    limitations under the License.
 */


#include "pch.h"
#include "syncclusterconnection.h"
#include "../db/dbmessage.h"

// error codes 8000-8009

namespace mongo {

    SyncClusterConnection::SyncClusterConnection( const list<HostAndPort> & L, double socketTimeout) : _mutex("SyncClusterConnection"), _socketTimeout( socketTimeout ) {
        {
            stringstream s;
            int n=0;
            for( list<HostAndPort>::const_iterator i = L.begin(); i != L.end(); i++ ) {
                if( ++n > 1 ) s << ',';
                s << i->toString();
            }
            _address = s.str();
        }
        for( list<HostAndPort>::const_iterator i = L.begin(); i != L.end(); i++ )
            _connect( i->toString() );
    }

    SyncClusterConnection::SyncClusterConnection( string commaSeperated, double socketTimeout) : _mutex("SyncClusterConnection"), _socketTimeout( socketTimeout ) {
        _address = commaSeperated;
        string::size_type idx;
        while ( ( idx = commaSeperated.find( ',' ) ) != string::npos ) {
            string h = commaSeperated.substr( 0 , idx );
            commaSeperated = commaSeperated.substr( idx + 1 );
            _connect( h );
        }
        _connect( commaSeperated );
        uassert( 8004 , "SyncClusterConnection needs 3 servers" , _conns.size() == 3 );
    }

    SyncClusterConnection::SyncClusterConnection( string a , string b , string c, double socketTimeout) : _mutex("SyncClusterConnection"), _socketTimeout( socketTimeout ) {
        _address = a + "," + b + "," + c;
        // connect to all even if not working
        _connect( a );
        _connect( b );
        _connect( c );
    }

    SyncClusterConnection::SyncClusterConnection( SyncClusterConnection& prev, double socketTimeout) : _mutex("SyncClusterConnection"), _socketTimeout( socketTimeout ) {
        assert(0);
    }

    SyncClusterConnection::~SyncClusterConnection() {
        for ( size_t i=0; i<_conns.size(); i++ )
            delete _conns[i];
        _conns.clear();
    }

    bool SyncClusterConnection::prepare( string& errmsg ) {
        _lastErrors.clear();
        return fsync( errmsg );
    }

    bool SyncClusterConnection::fsync( string& errmsg ) {
        bool ok = true;
        errmsg = "";
        for ( size_t i=0; i<_conns.size(); i++ ) {
            BSONObj res;
            try {
                if ( _conns[i]->simpleCommand( "admin" , &res , "fsync" ) )
                    continue;
            }
            catch ( DBException& e ) {
                errmsg += e.toString();
            }
            catch ( std::exception& e ) {
                errmsg += e.what();
            }
            catch ( ... ) {
            }
            ok = false;
            errmsg += " " + _conns[i]->toString() + ":" + res.toString();
        }
        return ok;
    }

    void SyncClusterConnection::_checkLast() {
        _lastErrors.clear();
        vector<string> errors;

        for ( size_t i=0; i<_conns.size(); i++ ) {
            BSONObj res;
            string err;
            try {
                if ( ! _conns[i]->runCommand( "admin" , BSON( "getlasterror" << 1 << "fsync" << 1 ) , res ) )
                    err = "cmd failed: ";
            }
            catch ( std::exception& e ) {
                err += e.what();
            }
            catch ( ... ) {
                err += "unknown failure";
            }
            _lastErrors.push_back( res.getOwned() );
            errors.push_back( err );
        }

        assert( _lastErrors.size() == errors.size() && _lastErrors.size() == _conns.size() );

        stringstream err;
        bool ok = true;

        for ( size_t i = 0; i<_conns.size(); i++ ) {
            BSONObj res = _lastErrors[i];
            if ( res["ok"].trueValue() && (res["fsyncFiles"].numberInt() > 0 || res.hasElement("waited")))
                continue;
            ok = false;
            err << _conns[i]->toString() << ": " << res << " " << errors[i];
        }

        if ( ok )
            return;
        throw UserException( 8001 , (string)"SyncClusterConnection write op failed: " + err.str() );
    }

    BSONObj SyncClusterConnection::getLastErrorDetailed() {
        if ( _lastErrors.size() )
            return _lastErrors[0];
        return DBClientBase::getLastErrorDetailed();
    }

    void SyncClusterConnection::_connect( string host ) {
        log() << "SyncClusterConnection connecting to [" << host << "]" << endl;
        DBClientConnection * c = new DBClientConnection( true );
        c->setSoTimeout( _socketTimeout );
        string errmsg;
        if ( ! c->connect( host , errmsg ) )
            log() << "SyncClusterConnection connect fail to: " << host << " errmsg: " << errmsg << endl;
        _connAddresses.push_back( host );
        _conns.push_back( c );
    }

    bool SyncClusterConnection::callRead( Message& toSend , Message& response ) {
        // TODO: need to save state of which one to go back to somehow...
        return _conns[0]->callRead( toSend , response );
    }

    BSONObj SyncClusterConnection::findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn, int queryOptions) {

        if ( ns.find( ".$cmd" ) != string::npos ) {
            string cmdName = query.obj.firstElementFieldName();

            int lockType = _lockType( cmdName );

            if ( lockType > 0 ) { // write $cmd
                string errmsg;
                if ( ! prepare( errmsg ) )
                    throw UserException( 13104 , (string)"SyncClusterConnection::findOne prepare failed: " + errmsg );

                vector<BSONObj> all;
                for ( size_t i=0; i<_conns.size(); i++ ) {
                    all.push_back( _conns[i]->findOne( ns , query , 0 , queryOptions ).getOwned() );
                }

                _checkLast();

                for ( size_t i=0; i<all.size(); i++ ) {
                    BSONObj temp = all[i];
                    if ( isOk( temp ) )
                        continue;
                    stringstream ss;
                    ss << "write $cmd failed on a node: " << temp.jsonString();
                    ss << " " << _conns[i]->toString();
                    ss << " ns: " << ns;
                    ss << " cmd: " << query.toString();
                    throw UserException( 13105 , ss.str() );
                }

                return all[0];
            }
        }

        return DBClientBase::findOne( ns , query , fieldsToReturn , queryOptions );
    }

    bool SyncClusterConnection::auth(const string &dbname, const string &username, const string &password_text, string& errmsg, bool digestPassword) {
        for (vector<DBClientConnection*>::iterator it = _conns.begin(); it < _conns.end(); it++) {
            massert( 15848, "sync cluster of sync clusters?", (*it)->type() != ConnectionString::SYNC);

            if (!(*it)->auth(dbname, username, password_text, errmsg, digestPassword)) {
                return false;
            }
        }
        return true;
    }

    auto_ptr<DBClientCursor> SyncClusterConnection::query(const string &ns, Query query, int nToReturn, int nToSkip,
            const BSONObj *fieldsToReturn, int queryOptions, int batchSize ) {
        _lastErrors.clear();
        if ( ns.find( ".$cmd" ) != string::npos ) {
            string cmdName = query.obj.firstElementFieldName();
            int lockType = _lockType( cmdName );
            uassert( 13054 , (string)"write $cmd not supported in SyncClusterConnection::query for:" + cmdName , lockType <= 0 );
        }

        return _queryOnActive( ns , query , nToReturn , nToSkip , fieldsToReturn , queryOptions , batchSize );
    }

    bool SyncClusterConnection::_commandOnActive(const string &dbname, const BSONObj& cmd, BSONObj &info, int options ) {
        auto_ptr<DBClientCursor> cursor = _queryOnActive( dbname + ".$cmd" , cmd , 1 , 0 , 0 , options , 0 );
        if ( cursor->more() )
            info = cursor->next().copy();
        else
            info = BSONObj();
        return isOk( info );
    }

    auto_ptr<DBClientCursor> SyncClusterConnection::_queryOnActive(const string &ns, Query query, int nToReturn, int nToSkip,
            const BSONObj *fieldsToReturn, int queryOptions, int batchSize ) {

        for ( size_t i=0; i<_conns.size(); i++ ) {
            try {
                auto_ptr<DBClientCursor> cursor =
                    _conns[i]->query( ns , query , nToReturn , nToSkip , fieldsToReturn , queryOptions , batchSize );
                if ( cursor.get() )
                    return cursor;
                log() << "query failed to: " << _conns[i]->toString() << " no data" << endl;
            }
            catch ( ... ) {
                log() << "query failed to: " << _conns[i]->toString() << " exception" << endl;
            }
        }
        throw UserException( 8002 , "all servers down!" );
    }

    auto_ptr<DBClientCursor> SyncClusterConnection::getMore( const string &ns, long long cursorId, int nToReturn, int options ) {
        uassert( 10022 , "SyncClusterConnection::getMore not supported yet" , 0);
        auto_ptr<DBClientCursor> c;
        return c;
    }

    void SyncClusterConnection::insert( const string &ns, BSONObj obj , int flags) {

        uassert( 13119 , (string)"SyncClusterConnection::insert obj has to have an _id: " + obj.jsonString() ,
                 ns.find( ".system.indexes" ) != string::npos || obj["_id"].type() );

        string errmsg;
        if ( ! prepare( errmsg ) )
            throw UserException( 8003 , (string)"SyncClusterConnection::insert prepare failed: " + errmsg );

        for ( size_t i=0; i<_conns.size(); i++ ) {
            _conns[i]->insert( ns , obj , flags);
        }

        _checkLast();
    }

    void SyncClusterConnection::insert( const string &ns, const vector< BSONObj >& v , int flags) {
        uassert( 10023 , "SyncClusterConnection bulk insert not implemented" , 0);
    }

    void SyncClusterConnection::remove( const string &ns , Query query, bool justOne ) {
        string errmsg;
        if ( ! prepare( errmsg ) )
            throw UserException( 8020 , (string)"SyncClusterConnection::remove prepare failed: " + errmsg );

        for ( size_t i=0; i<_conns.size(); i++ ) {
            _conns[i]->remove( ns , query , justOne );
        }

        _checkLast();
    }

    void SyncClusterConnection::update( const string &ns , Query query , BSONObj obj , bool upsert , bool multi ) {

        if ( upsert ) {
            uassert( 13120 , "SyncClusterConnection::update upsert query needs _id" , query.obj["_id"].type() );
        }

        if ( _writeConcern ) {
            string errmsg;
            if ( ! prepare( errmsg ) )
                throw UserException( 8005 , (string)"SyncClusterConnection::update prepare failed: " + errmsg );
        }

        for ( size_t i = 0; i < _conns.size(); i++ ) {
            try {
                _conns[i]->update( ns , query , obj , upsert , multi );
            }
            catch ( std::exception& e ) {
                if ( _writeConcern )
                    throw e;
            }
        }

        if ( _writeConcern ) {
            _checkLast();
            assert( _lastErrors.size() > 1 );

            int a = _lastErrors[0]["n"].numberInt();
            for ( unsigned i=1; i<_lastErrors.size(); i++ ) {
                int b = _lastErrors[i]["n"].numberInt();
                if ( a == b )
                    continue;

                throw UpdateNotTheSame( 8017 ,
                                        str::stream()
                                        << "update not consistent "
                                        << " ns: " << ns
                                        << " query: " << query.toString()
                                        << " update: " << obj
                                        << " gle1: " << _lastErrors[0]
                                        << " gle2: " << _lastErrors[i] ,
                                        _connAddresses , _lastErrors );
            }
        }
    }

    string SyncClusterConnection::_toString() const {
        stringstream ss;
        ss << "SyncClusterConnection [" << _address << "]";
        return ss.str();
    }

    bool SyncClusterConnection::call( Message &toSend, Message &response, bool assertOk , string * actualServer ) {
        uassert( 8006 , "SyncClusterConnection::call can only be used directly for dbQuery" ,
                 toSend.operation() == dbQuery );

        DbMessage d( toSend );
        uassert( 8007 , "SyncClusterConnection::call can't handle $cmd" , strstr( d.getns(), "$cmd" ) == 0 );

        for ( size_t i=0; i<_conns.size(); i++ ) {
            try {
                bool ok = _conns[i]->call( toSend , response , assertOk );
                if ( ok ) {
                    if ( actualServer )
                        *actualServer = _connAddresses[i];
                    return ok;
                }
                log() << "call failed to: " << _conns[i]->toString() << " no data" << endl;
            }
            catch ( ... ) {
                log() << "call failed to: " << _conns[i]->toString() << " exception" << endl;
            }
        }
        throw UserException( 8008 , "all servers down!" );
    }

    void SyncClusterConnection::say( Message &toSend, bool isRetry ) {
        string errmsg;
        if ( ! prepare( errmsg ) )
            throw UserException( 13397 , (string)"SyncClusterConnection::say prepare failed: " + errmsg );

        for ( size_t i=0; i<_conns.size(); i++ ) {
            _conns[i]->say( toSend );
        }

        _checkLast();
    }

    void SyncClusterConnection::sayPiggyBack( Message &toSend ) {
        assert(0);
    }

    int SyncClusterConnection::_lockType( const string& name ) {
        {
            scoped_lock lk(_mutex);
            map<string,int>::iterator i = _lockTypes.find( name );
            if ( i != _lockTypes.end() )
                return i->second;
        }

        BSONObj info;
        uassert( 13053 , str::stream() << "help failed: " << info , _commandOnActive( "admin" , BSON( name << "1" << "help" << 1 ) , info ) );

        int lockType = info["lockType"].numberInt();

        scoped_lock lk(_mutex);
        _lockTypes[name] = lockType;
        return lockType;
    }

    void SyncClusterConnection::killCursor( long long cursorID ) {
        // should never need to do this
        assert(0);
    }

    void SyncClusterConnection::setAllSoTimeouts( double socketTimeout ){
        _socketTimeout = socketTimeout;
        for ( size_t i=0; i<_conns.size(); i++ )
            if( _conns[i] ) _conns[i]->setSoTimeout( socketTimeout );
    }

}
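End to end, the write path above is a three-step protocol. A hedged caller-side sketch, with placeholder addresses standing in for what would normally be three config servers:

    SyncClusterConnection conn( "cfg1.example.net:27019" ,
                                "cfg2.example.net:27019" ,
                                "cfg3.example.net:27019" );

    // insert() runs the same three steps shown in the implementation:
    //   1. prepare(): fsync every member and clear prior error state
    //   2. apply the write on every connection
    //   3. _checkLast(): getlasterror+fsync on each member, throwing
    //      UserException 8001 if any member failed to apply the write
    conn.insert( "config.example" , BSON( "_id" << 1 << "v" << 42 ) );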
@@ -18,10 +18,9 @@

#pragma once


#include "mongo/bson/bsonelement.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/client/dbclientinterface.h"
#include "../pch.h"
#include "dbclient.h"
#include "redef_macros.h"

namespace mongo {


@@ -41,20 +40,12 @@ namespace mongo {
     */
    class SyncClusterConnection : public DBClientBase {
    public:

        using DBClientBase::query;
        using DBClientBase::update;
        using DBClientBase::remove;

        /**
         * @param commaSeparated should be 3 hosts comma separated
         */
        SyncClusterConnection( const list<HostAndPort> &, double socketTimeout = 0);
        SyncClusterConnection( string commaSeparated, double socketTimeout = 0);
        SyncClusterConnection( const std::string& a,
                               const std::string& b,
                               const std::string& c,
                               double socketTimeout = 0 );
        SyncClusterConnection( string a , string b , string c, double socketTimeout = 0 );
        ~SyncClusterConnection();

        /**

@@ -80,12 +71,12 @@ namespace mongo {

        virtual void insert( const string &ns, const vector< BSONObj >& v, int flags=0);

        virtual void remove( const string &ns , Query query, int flags );
        virtual void remove( const string &ns , Query query, bool justOne );

        virtual void update( const string &ns , Query query , BSONObj obj , int flags );
        virtual void update( const string &ns , Query query , BSONObj obj , bool upsert , bool multi );

        virtual bool call( Message &toSend, Message &response, bool assertOk , string * actualServer );
        virtual void say( Message &toSend, bool isRetry = false , string * actualServer = 0 );
        virtual void say( Message &toSend, bool isRetry = false );
        virtual void sayPiggyBack( Message &toSend );

        virtual void killCursor( long long cursorID );

@@ -94,12 +85,7 @@ namespace mongo {
        virtual bool isFailed() const { return false; }
        virtual string toString() { return _toString(); }

        virtual BSONObj getLastErrorDetailed(const std::string& db,
                                             bool fsync=false,
                                             bool j=false,
                                             int w=0,
                                             int wtimeout=0);
        virtual BSONObj getLastErrorDetailed(bool fsync=false, bool j=false, int w=0, int wtimeout=0);
        virtual BSONObj getLastErrorDetailed();

        virtual bool callRead( Message& toSend , Message& response );


@@ -108,12 +94,9 @@ namespace mongo {
        void setAllSoTimeouts( double socketTimeout );
        double getSoTimeout() const { return _socketTimeout; }

        virtual bool auth(const string &dbname, const string &username, const string &password_text, string& errmsg, bool digestPassword);

        virtual bool lazySupported() const { return false; }

    protected:
        virtual void _auth(const BSONObj& params);

    private:
        SyncClusterConnection( SyncClusterConnection& prev, double socketTimeout = 0 );
        string _toString() const;

@@ -122,7 +105,7 @@ namespace mongo {
                const BSONObj *fieldsToReturn, int queryOptions, int batchSize );
        int _lockType( const string& name );
        void _checkLast();
        void _connect( const std::string& host );
        void _connect( string host );

        string _address;
        vector<string> _connAddresses;

@@ -139,7 +122,7 @@ namespace mongo {
    public:
        UpdateNotTheSame( int code , const string& msg , const vector<string>& addrs , const vector<BSONObj>& lastErrors )
            : UserException( code , msg ) , _addrs( addrs ) , _lastErrors( lastErrors ) {
            verify( _addrs.size() == _lastErrors.size() );
            assert( _addrs.size() == _lastErrors.size() );
        }

        virtual ~UpdateNotTheSame() throw() {

@@ -160,3 +143,5 @@ namespace mongo {
    };

};

#include "undef_macros.h"
@@ -19,17 +19,19 @@

// #pragma once // this file is intended to be processed multiple times

#if !defined (MONGO_EXPOSE_MACROS)

#ifdef MONGO_MACROS_PUSHED

// util/allocator.h
#ifdef MONGO_MALLOC
#undef malloc
#pragma pop_macro("malloc")
#undef realloc
#pragma pop_macro("realloc")
#endif

// util/assert_util.h
#undef assert
#pragma pop_macro("assert")
#undef dassert
#pragma pop_macro("dassert")
#undef wassert

@@ -38,8 +40,7 @@
#pragma pop_macro("massert")
#undef uassert
#pragma pop_macro("uassert")
#undef uassertStatusOK
#pragma pop_macro("uassertStatusOK")
#undef BOOST_CHECK_EXCEPTION
#undef verify
#pragma pop_macro("verify")
#undef DESTRUCTOR_GUARD

@@ -50,6 +51,14 @@
#pragma pop_macro("PRINT")
#undef PRINTFL
#pragma pop_macro("PRINTFL")
#undef asctime
#pragma pop_macro("asctime")
#undef gmtime
#pragma pop_macro("gmtime")
#undef localtime
#pragma pop_macro("localtime")
#undef ctime
#pragma pop_macro("ctime")

// util/debug_util.h
#undef DEV

@@ -71,3 +80,4 @@

#undef MONGO_MACROS_PUSHED
#endif
#endif
db/background.h (new file, 56 lines)
@@ -0,0 +1,56 @@
/**
 *    Copyright (C) 2010 10gen Inc.
 *
 *    This program is free software: you can redistribute it and/or modify
 *    it under the terms of the GNU Affero General Public License, version 3,
 *    as published by the Free Software Foundation.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *    GNU Affero General Public License for more details.
 *
 *    You should have received a copy of the GNU Affero General Public License
 *    along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/* background.h

   Concurrency coordination for administrative operations.
*/

#pragma once

namespace mongo {

    /* These are administrative operations / jobs for a namespace running in
       the background; only one at a time per namespace is permitted, and while
       one is in progress other major NamespaceDetails manipulations (such as
       dropping the ns or db) are disallowed even in the foreground and must
       uassert instead.

       It's assumed this is not for super-high RPS things, so we don't do
       anything special in the implementation here to be fast.
    */
    class BackgroundOperation : public boost::noncopyable {
    public:
        static bool inProgForDb(const char *db);
        static bool inProgForNs(const char *ns);
        static void assertNoBgOpInProgForDb(const char *db);
        static void assertNoBgOpInProgForNs(const char *ns);
        static void dump(stringstream&);

        /* check for in progress before instantiating */
        BackgroundOperation(const char *ns);

        virtual ~BackgroundOperation();

    private:
        NamespaceString _ns;
        static map<string, unsigned> dbsInProg;
        static set<string> nsInProg;
    };

} // namespace mongo
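A hypothetical sketch of the intended RAII usage, e.g. around a background index build; the namespace is illustrative:

    void buildIndexInBackground() {
        // registers "test.foo" as busy; a concurrent dropCollection/dropDatabase
        // would hit assertNoBgOpInProgForNs()/ForDb() and uassert
        BackgroundOperation guard( "test.foo" );

        // ... long-running work ...
    }   // destructor de-registers the namespace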
@@ -16,40 +16,25 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "mongo/pch.h"

#include "mongo/db/btree.h"
#include "mongo/db/btree_stats.h"
#include "mongo/db/btreebuilder.h"
#include "mongo/db/client.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/curop-inl.h"
#include "mongo/db/db.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/dur_commitjob.h"
#include "mongo/db/index_insertion_continuation.h"
#include "mongo/db/json.h"
#include "mongo/db/kill_current_op.h"
#include "mongo/db/pdfile.h"
#include "mongo/db/stats/counters.h"
#include "mongo/server.h"
#include "mongo/util/startup_test.h"
#include "pch.h"
#include "db.h"
#include "btree.h"
#include "pdfile.h"
#include "json.h"
#include "clientcursor.h"
#include "client.h"
#include "dbhelpers.h"
#include "curop-inl.h"
#include "stats/counters.h"
#include "dur_commitjob.h"
#include "btreebuilder.h"
#include "../util/unittest.h"

namespace mongo {

    BOOST_STATIC_ASSERT( Record::HeaderSize == 16 );
    BOOST_STATIC_ASSERT( Record::HeaderSize + BtreeData_V1::BucketSize == 8192 );

    NOINLINE_DECL void checkFailed(unsigned line) {
        static time_t last;
        if( time(0) - last >= 10 ) {
            msgasserted(15898, str::stream() << "error in index possibly corruption consider repairing " << line);
        }
    }

    /** data check. like assert, but gives a reasonable error message to the user. */
#define check(expr) if(!(expr) ) { checkFailed(__LINE__); }

#define VERIFYTHISLOC dassert( thisLoc.btree<V>() == this );

    template< class Loc >

@@ -93,7 +78,7 @@ namespace mongo {
    template< class V >
    void BucketBasics<V>::assertWritable() {
        if( cmdLine.dur )
            dur::assertAlreadyDeclared(this, V::BucketSize);
        dur::assertAlreadyDeclared(this, V::BucketSize);
    }

    template< class V >

@@ -102,7 +87,7 @@ namespace mongo {
        ss << " Bucket info:" << endl;
        ss << " n: " << this->n << endl;
        ss << " parent: " << this->parent.toString() << endl;
        ss << " nextChild: " << this->nextChild.toString() << endl;
        ss << " nextChild: " << this->parent.toString() << endl;
        ss << " flags:" << this->flags << endl;
        ss << " emptySize: " << this->emptySize << " topSize: " << this->topSize << endl;
        return ss.str();

@@ -116,7 +101,7 @@ namespace mongo {
    template< class V >
    void BucketBasics<V>::_shape(int level, stringstream& ss) const {
        for ( int i = 0; i < level; i++ ) ss << ' ';
        ss << "*[" << this->n << "]\n";
        ss << "*\n";
        for ( int i = 0; i < this->n; i++ ) {
            if ( !k(i).prevChildBucket.isNull() ) {
                DiskLoc ll = k(i).prevChildBucket;

@@ -141,6 +126,12 @@ namespace mongo {

    template< class V >
    long long BtreeBucket<V>::fullValidate(const DiskLoc& thisLoc, const BSONObj &order, long long *unusedCount, bool strict, unsigned depth) const {
        {
            bool f = false;
            assert( f = true );
            massert( 10281 , "assert is misdefined", f);
        }

        killCurrentOp.checkForInterrupt();
        this->assertValid(order, true);

@@ -167,7 +158,7 @@ namespace mongo {
                DiskLoc left = kn.prevChildBucket;
                const BtreeBucket *b = left.btree<V>();
                if ( strict ) {
                    verify( b->parent == thisLoc );
                    assert( b->parent == thisLoc );
                }
                else {
                    wassert( b->parent == thisLoc );

@@ -176,10 +167,10 @@ namespace mongo {
            }
        }
        if ( !this->nextChild.isNull() ) {
            DiskLoc ll = this->nextChild;
            DiskLoc ll = this->nextChild;
            const BtreeBucket *b = ll.btree<V>();
            if ( strict ) {
                verify( b->parent == thisLoc );
                assert( b->parent == thisLoc );
            }
            else {
                wassert( b->parent == thisLoc );

@@ -250,7 +241,7 @@ namespace mongo {
                ONCE {
                    ((BtreeBucket<V> *) this)->dump();
                }
                verify(false);
                assert(false);
            }
        }
    }

@@ -258,7 +249,7 @@ namespace mongo {

    template< class V >
    inline void BucketBasics<V>::markUnused(int keypos) {
        verify( keypos >= 0 && keypos < this->n );
        assert( keypos >= 0 && keypos < this->n );
        k(keypos).setUnused();
    }

@@ -291,21 +282,21 @@ namespace mongo {
     */
    template< class V >
    inline int BucketBasics<V>::_alloc(int bytes) {
        verify( this->emptySize >= bytes );
        assert( this->emptySize >= bytes );
        this->topSize += bytes;
        this->emptySize -= bytes;
        int ofs = totalDataSize() - this->topSize;
        verify( ofs > 0 );
        assert( ofs > 0 );
        return ofs;
    }

    template< class V >
    void BucketBasics<V>::_delKeyAtPos(int keypos, bool mayEmpty) {
        // TODO This should be keypos < n
        verify( keypos >= 0 && keypos <= this->n );
        verify( childForPos(keypos).isNull() );
        assert( keypos >= 0 && keypos <= this->n );
        assert( childForPos(keypos).isNull() );
        // TODO audit cases where nextChild is null
        verify( ( mayEmpty && this->n > 0 ) || this->n > 1 || this->nextChild.isNull() );
        assert( ( mayEmpty && this->n > 0 ) || this->n > 1 || this->nextChild.isNull() );
        this->emptySize += sizeof(_KeyNode);
        this->n--;
        for ( int j = keypos; j < this->n; j++ )

@@ -320,7 +311,7 @@ namespace mongo {
    template< class V >
    void BucketBasics<V>::popBack(DiskLoc& recLoc, Key &key) {
        massert( 10282 , "n==0 in btree popBack()", this->n > 0 );
        verify( k(this->n-1).isUsed() ); // no unused skipping in this function at this point - btreebuilder doesn't require that
        assert( k(this->n-1).isUsed() ); // no unused skipping in this function at this point - btreebuilder doesn't require that
        KeyNode kn = keyNode(this->n-1);
        recLoc = kn.recordLoc;
        key.assign(kn.key);

@@ -345,7 +336,7 @@ namespace mongo {
        int bytesNeeded = key.dataSize() + sizeof(_KeyNode);
        if ( bytesNeeded > this->emptySize )
            return false;
        verify( bytesNeeded <= this->emptySize );
        assert( bytesNeeded <= this->emptySize );
        if( this->n ) {
            const KeyNode klast = keyNode(this->n-1);
            if( klast.key.woCompare(key, order) > 0 ) {

@@ -353,7 +344,7 @@ namespace mongo {
                log() << " klast: " << keyNode(this->n-1).key.toString() << endl;
                log() << " key: " << key.toString() << endl;
                DEV klast.key.woCompare(key, order);
                verify(false);
                assert(false);
            }
        }
        this->emptySize -= sizeof(_KeyNode);

@@ -378,8 +369,7 @@ namespace mongo {
     */
    template< class V >
    bool BucketBasics<V>::basicInsert(const DiskLoc thisLoc, int &keypos, const DiskLoc recordLoc, const Key& key, const Ordering &order) const {
        check( this->n < 1024 );
        check( keypos >= 0 && keypos <= this->n );
        assert( keypos >= 0 && keypos <= this->n );
        int bytesNeeded = key.dataSize() + sizeof(_KeyNode);
        if ( bytesNeeded > this->emptySize ) {
            _pack(thisLoc, order, keypos);

@@ -431,7 +421,7 @@ namespace mongo {
    template< class V >
    int BucketBasics<V>::packedDataSize( int refPos ) const {
        if ( this->flags & Packed ) {
            return V::BucketSize - this->emptySize - headerSize();
            return V::BucketSize - this->emptySize - headerSize();
        }
        int size = 0;
        for( int j = 0; j < this->n; ++j ) {

@@ -506,7 +496,7 @@ namespace mongo {
        this->emptySize = tdz - dataUsed - this->n * sizeof(_KeyNode);
        {
            int foo = this->emptySize;
            verify( foo >= 0 );
            assert( foo >= 0 );
        }

        setPacked();

@@ -516,8 +506,9 @@ namespace mongo {

    template< class V >
    inline void BucketBasics<V>::truncateTo(int N, const Ordering &order, int &refPos) {
        verify( Lock::somethingWriteLocked() );
        dbMutex.assertWriteLocked();
        assertWritable();

        this->n = N;
        setNotPacked();
        _packReadyForMod( order, refPos );

@@ -542,7 +533,7 @@ namespace mongo {
     */
    template< class V >
    int BucketBasics<V>::splitPos( int keypos ) const {
        verify( this->n > 2 );
        assert( this->n > 2 );
        int split = 0;
        int rightSize = 0;
        // when splitting a btree node, if the new key is greater than all the other keys, we should not do an even split, but a 90/10 split.
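The comment above states the whole policy; as a toy illustration of the two cases (the real splitPos() works on byte sizes, not key counts, so this is a simplification):

    // returns the index at which to split a bucket of n keys when inserting
    // at position keypos
    int chooseSplit( int n , int keypos ) {
        if ( keypos == n )          // appending at the far right (increasing
            return ( n * 9 ) / 10;  // keys): keep ~90% in the left bucket
        return n / 2;               // ordinary case: split evenly
    }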
@@ -569,7 +560,7 @@ namespace mongo {

    template< class V >
    void BucketBasics<V>::reserveKeysFront( int nAdd ) {
        verify( this->emptySize >= int( sizeof( _KeyNode ) * nAdd ) );
        assert( this->emptySize >= int( sizeof( _KeyNode ) * nAdd ) );
        this->emptySize -= sizeof( _KeyNode ) * nAdd;
        for( int i = this->n - 1; i > -1; --i ) {
            k( i + nAdd ) = k( i );

@@ -611,7 +602,7 @@ namespace mongo {
            continue;
        }

        verify(b->n>0);
        assert(b->n>0);
        largestLoc = loc;
        largestKey = b->n-1;

@@ -632,7 +623,6 @@ namespace mongo {
     * jstests/index_check6.js
     * https://jira.mongodb.org/browse/SERVER-371
     */
    /* static */
    template< class V >
    int BtreeBucket<V>::customBSONCmp( const BSONObj &l, const BSONObj &rBegin, int rBeginLen, bool rSup, const vector< const BSONElement * > &rEnd, const vector< bool > &rEndInclusive, const Ordering &o, int direction ) {
        BSONObjIterator ll( l );

@@ -738,11 +728,9 @@ namespace mongo {
     * returns n if it goes after the last existing key.
     * note result might be an Unused location!
     */

    bool guessIncreasing = false;
    template< class V >
    bool BtreeBucket<V>::find(const IndexDetails& idx, const Key& key, const DiskLoc &rl,
                              const Ordering &order, int& pos, bool assertIfDup) const {
                              const Ordering &order, int& pos, bool assertIfDup) const {
        Loc recordLoc;
        recordLoc = rl;
        globalIndexCounters.btree( (char*)this );

@@ -751,11 +739,8 @@ namespace mongo {
        bool dupsChecked = false;
        int l=0;
        int h=this->n-1;
        int m = (l+h)/2;
        if( guessIncreasing ) {
            m = h;
        }
        while ( l <= h ) {
            int m = (l+h)/2;
            KeyNode M = this->keyNode(m);
            int x = key.woCompare(M.key, order);
            if ( x == 0 ) {

@@ -795,7 +780,6 @@ namespace mongo {
                pos = m;
                return true;
            }
            m = (l+h)/2;
        }
        // not found
        pos = l;
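The search loop above is a standard binary search with the contract that on a miss, pos is left at the insertion point. A self-contained sketch of the same contract over a sorted vector (illustrative only):

    #include <vector>

    // returns true and sets pos on an exact match; otherwise returns false
    // with pos at the first index whose key is greater (n if past the end)
    bool findPos( const std::vector<int>& keys , int key , int& pos ) {
        int l = 0 , h = (int)keys.size() - 1;
        while ( l <= h ) {
            int m = ( l + h ) / 2;
            if ( keys[m] == key ) { pos = m; return true; }
            if ( keys[m] < key )
                l = m + 1;
            else
                h = m - 1;
        }
        pos = l;
        return false;
    }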
|
||||
|
|
@ -819,9 +803,9 @@ namespace mongo {
|
|||
template< class V >
|
||||
void BtreeBucket<V>::delBucket(const DiskLoc thisLoc, const IndexDetails& id) {
|
||||
ClientCursor::informAboutToDeleteBucket(thisLoc); // slow...
|
||||
verify( !isHead() );
|
||||
assert( !isHead() );
|
||||
|
||||
DiskLoc ll = this->parent;
|
||||
DiskLoc ll = this->parent;
|
||||
const BtreeBucket *p = ll.btree<V>();
|
||||
int parentIdx = indexInParent( thisLoc );
|
||||
p->childForPos( parentIdx ).writing().Null();
|
||||
|
|
@ -835,19 +819,18 @@ namespace mongo {
|
|||
// it (meaning it is ineligible for reuse).
|
||||
memset(this, 0, Size());
|
||||
#else
|
||||
// Mark the bucket as deallocated, see SERVER-4575.
|
||||
this->n = this->INVALID_N_SENTINEL;
|
||||
// defensive:
|
||||
this->n = -1;
|
||||
this->parent.Null();
|
||||
string ns = id.indexNamespace();
|
||||
theDataFileMgr._deleteRecord(nsdetails(ns), ns.c_str(), thisLoc.rec(), thisLoc);
|
||||
theDataFileMgr._deleteRecord(nsdetails(ns.c_str()), ns.c_str(), thisLoc.rec(), thisLoc);
|
||||
#endif
|
||||
}
|
||||
|
||||
/** note: may delete the entire bucket! this invalid upon return sometimes. */
|
||||
template< class V >
|
||||
void BtreeBucket<V>::delKeyAtPos( const DiskLoc thisLoc, IndexDetails& id, int p, const Ordering &order) {
|
||||
verify(this->n>0);
|
||||
assert(this->n>0);
|
||||
DiskLoc left = this->childForPos(p);
|
||||
|
||||
if ( this->n == 1 ) {
|
||||
|
|
@ -894,11 +877,6 @@ namespace mongo {
|
|||
* This function is only needed in cases where k has a left or right child;
|
||||
* in other cases a simpler key removal implementation is possible.
|
||||
*
|
||||
* NOTE on noncompliant BtreeBuilder btrees:
|
||||
* It is possible (though likely rare) for btrees created by BtreeBuilder to
|
||||
* have k' that is not a leaf, see SERVER-2732. These cases are handled in
|
||||
* the same manner as described in the "legacy btree structures" note below.
|
||||
*
|
||||
* NOTE on legacy btree structures:
|
||||
* In legacy btrees, k' can be a nonleaf. In such a case we 'delete' k by
|
||||
* marking it as an unused node rather than replacing it with k'. Also, k'
|
||||
|
|
@ -910,7 +888,7 @@ namespace mongo {
|
|||
void BtreeBucket<V>::deleteInternalKey( const DiskLoc thisLoc, int keypos, IndexDetails &id, const Ordering &order ) {
|
||||
DiskLoc lchild = this->childForPos( keypos );
|
||||
DiskLoc rchild = this->childForPos( keypos + 1 );
|
||||
verify( !lchild.isNull() || !rchild.isNull() );
|
||||
assert( !lchild.isNull() || !rchild.isNull() );
|
||||
int advanceDirection = lchild.isNull() ? 1 : -1;
|
||||
int advanceKeyOfs = keypos;
|
||||
DiskLoc advanceLoc = advance( thisLoc, advanceKeyOfs, advanceDirection, __FUNCTION__ );
|
||||
|
|
@ -940,23 +918,26 @@ namespace mongo {
|
|||
|
||||
template< class V >
|
||||
void BtreeBucket<V>::replaceWithNextChild( const DiskLoc thisLoc, IndexDetails &id ) {
|
||||
verify( this->n == 0 && !this->nextChild.isNull() );
|
||||
assert( this->n == 0 && !this->nextChild.isNull() );
|
||||
if ( this->parent.isNull() ) {
|
||||
verify( id.head == thisLoc );
|
||||
assert( id.head == thisLoc );
|
||||
id.head.writing() = this->nextChild;
|
||||
}
|
||||
else {
|
||||
DiskLoc ll = this->parent;
|
||||
DiskLoc ll = this->parent;
|
||||
ll.btree<V>()->childForPos( indexInParent( thisLoc ) ).writing() = this->nextChild;
|
||||
}
|
||||
BTREE(this->nextChild)->parent.writing() = this->parent;
|
||||
|
||||
BTREE(this->nextChild)->parent.writing() = this->parent;
|
||||
//(static_cast<DiskLoc>(this->nextChild).btree<V>())->parent.writing() = this->parent;
|
||||
ClientCursor::informAboutToDeleteBucket( thisLoc );
|
||||
deallocBucket( thisLoc, id );
|
||||
}
|
||||
|
||||
template< class V >
|
||||
bool BtreeBucket<V>::canMergeChildren( const DiskLoc &thisLoc, int leftIndex ) const {
|
||||
verify( leftIndex >= 0 && leftIndex < this->n );
|
||||
assert( leftIndex >= 0 && leftIndex < this->n );
|
||||
DiskLoc leftNodeLoc = this->childForPos( leftIndex );
|
||||
DiskLoc rightNodeLoc = this->childForPos( leftIndex + 1 );
|
||||
if ( leftNodeLoc.isNull() || rightNodeLoc.isNull() ) {
|
||||
|
|
@ -989,7 +970,7 @@ namespace mongo {
|
|||
int rightSizeLimit = ( l->topSize + l->n * KNS + keyNode( leftIndex ).key.dataSize() + KNS + r->topSize + r->n * KNS ) / 2;
|
||||
// This constraint should be ensured by only calling this function
|
||||
// if we go below the low water mark.
|
||||
verify( rightSizeLimit < BtreeBucket<V>::bodySize() );
|
||||
assert( rightSizeLimit < BtreeBucket<V>::bodySize() );
|
||||
for( int i = r->n - 1; i > -1; --i ) {
|
||||
rightSize += r->keyNode( i ).key.dataSize() + KNS;
|
||||
if ( rightSize > rightSizeLimit ) {
|
||||
|
|
@ -1064,7 +1045,7 @@ namespace mongo {
|
|||
|
||||
template< class V >
|
||||
int BtreeBucket<V>::indexInParent( const DiskLoc &thisLoc ) const {
|
||||
verify( !this->parent.isNull() );
|
||||
assert( !this->parent.isNull() );
|
||||
const BtreeBucket *p = BTREE(this->parent);
|
||||
if ( p->nextChild == thisLoc ) {
|
||||
return p->n;
|
||||
|
|
@ -1081,7 +1062,7 @@ namespace mongo {
|
|||
dump();
|
||||
out() << "Parent: " << this->parent << "\n";
|
||||
p->dump();
|
||||
verify(false);
|
||||
assert(false);
|
||||
return -1; // just to compile
|
||||
}
|
||||
|
||||
|
|
@ -1121,7 +1102,7 @@ namespace mongo {
|
|||
KeyNode kn = l->keyNode( split );
|
||||
l->nextChild = kn.prevChildBucket;
|
||||
// Because lchild is a descendant of thisLoc, updating thisLoc will
|
||||
// not affect packing or keys of lchild and kn will be stable
|
||||
// not not affect packing or keys of lchild and kn will be stable
|
||||
// during the following setInternalKey()
|
||||
setInternalKey( thisLoc, leftIndex, kn.recordLoc, kn.key, order, lchild, rchild, id );
|
||||
}
|
||||
|
|
@ -1178,7 +1159,7 @@ namespace mongo {
|
|||
|
||||
// By definition, if we are below the low water mark and cannot merge
|
||||
// then we must actively balance.
|
||||
verify( split != l->n );
|
||||
assert( split != l->n );
|
||||
if ( split < l->n ) {
|
||||
doBalanceLeftToRight( thisLoc, leftIndex, split, l, lchild, r, rchild, id, order );
|
||||
}
|
||||
|
|
@ -1246,6 +1227,13 @@ namespace mongo {
|
|||
return false;
|
||||
}
|
||||
|
||||
template< class V >
|
||||
BtreeBucket<V> * BtreeBucket<V>::allocTemp() {
|
||||
BtreeBucket *b = (BtreeBucket*) malloc(V::BucketSize);
|
||||
b->init();
|
||||
return b;
|
||||
}
|
||||
|
||||
template< class V >
|
||||
inline void BtreeBucket<V>::fix(const DiskLoc thisLoc, const DiskLoc child) {
|
||||
if ( !child.isNull() ) {
|
||||
|
|
@@ -1283,7 +1271,7 @@ namespace mongo {
         this->_delKeyAtPos( keypos, true );

         // Ensure we do not orphan neighbor's old child.
-        verify( this->childForPos( keypos ) == rchild );
+        assert( this->childForPos( keypos ) == rchild );

         // Just set temporarily - required to pass validation in insertHere()
         this->childForPos( keypos ) = lchild;
@@ -1326,13 +1314,13 @@ namespace mongo {
             out() << " recordLoc: " << recordLoc.toString() << " rchild: " << rchild.toString() << endl;
             out() << " key: " << key.toString() << endl;
             dump();
-            verify(false);
+            assert(false);
         }
         kn->prevChildBucket = this->nextChild;
-        verify( kn->prevChildBucket == lchild );
+        assert( kn->prevChildBucket == lchild );
         this->nextChild.writing() = rchild;
         if ( !rchild.isNull() )
             BTREE(rchild)->parent.writing() = thisLoc;
     }
     else {
         kn->prevChildBucket = lchild;
@@ -1344,7 +1332,7 @@ namespace mongo {
             out() << " recordLoc: " << recordLoc.toString() << " rchild: " << rchild.toString() << endl;
             out() << " key: " << key.toString() << endl;
             dump();
-            verify(false);
+            assert(false);
         }
         const Loc *pc = &k(keypos+1).prevChildBucket;
         *getDur().alreadyDeclared( const_cast<Loc*>(pc) ) = rchild; // declared in basicInsert()
@@ -1366,7 +1354,7 @@ namespace mongo {
         DiskLoc rLoc = addBucket(idx);
         BtreeBucket *r = rLoc.btreemod<V>();
         if ( split_debug )
-            out() << " split:" << split << ' ' << keyNode(split).key.toString() << " n:" << this->n << endl;
+            out() << " split:" << split << ' ' << keyNode(split).key.toString() << " this->n:" << this->n << endl;
         for ( int i = split+1; i < this->n; i++ ) {
             KeyNode kn = keyNode(i);
             r->pushBack(kn.recordLoc, kn.key, order, kn.prevChildBucket);
@@ -1375,7 +1363,7 @@ namespace mongo {
         r->assertValid( order );

         if ( split_debug )
-            out() << " new rLoc:" << rLoc.toString() << endl;
+            out() << " this->new rLoc:" << rLoc.toString() << endl;
         r = 0;
         rLoc.btree<V>()->fixParentPtrs(rLoc);

@@ -1392,7 +1380,7 @@ namespace mongo {

         // promote splitkey to a parent this->node
         if ( this->parent.isNull() ) {
-            // make a new parent if we were the root
+            // make a this->new this->parent if we were the root
             DiskLoc L = addBucket(idx);
             BtreeBucket *p = L.btreemod<V>();
             p->pushBack(splitkey.recordLoc, splitkey.key, order, thisLoc);
@@ -1400,7 +1388,7 @@ namespace mongo {
             p->assertValid( order );
             this->parent = idx.head.writing() = L;
             if ( split_debug )
-                out() << " we were root, making new root:" << hex << this->parent.getOfs() << dec << endl;
+                out() << " we were root, making this->new root:" << hex << this->parent.getOfs() << dec << endl;
             rLoc.btree<V>()->parent.writing() = this->parent;
         }
         else {
@@ -1420,12 +1408,12 @@ namespace mongo {
         {
             if ( keypos <= split ) {
                 if ( split_debug )
-                    out() << " keypos<split, insertHere() the new key" << endl;
+                    out() << " keypos<split, insertHere() the this->new key" << endl;
                 insertHere(thisLoc, newpos, recordLoc, key, order, lchild, rchild, idx);
             }
             else {
                 int kp = keypos-split-1;
-                verify(kp>=0);
+                assert(kp>=0);
                 BTREE(rLoc)->insertHere(rLoc, kp, recordLoc, key, order, lchild, rchild, idx);
             }
         }
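A note on the routing logic in the hunk above: after a bucket is split at index split, the split key moves up to the parent, a pending insert at keypos stays in the left (original) bucket when keypos <= split, and otherwise lands in the new right bucket at keypos - split - 1. A standalone sketch of just that rule (the left-hand position is simplified here; the real code passes an adjusted newpos to insertHere()):

    #include <cassert>

    struct Target { bool right; int pos; };

    Target routeAfterSplit(int keypos, int split) {
        if (keypos <= split)
            return Target{false, keypos};    // stays in the left bucket
        int kp = keypos - split - 1;
        assert(kp >= 0);                     // same invariant the code asserts
        return Target{true, kp};             // goes to the new right bucket
    }

    int main() {
        assert(!routeAfterSplit(3, 5).right);    // lands left
        assert(routeAfterSplit(6, 5).pos == 0);  // first slot of right bucket
        assert(routeAfterSplit(9, 5).pos == 3);
        return 0;
    }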
@@ -1434,16 +1422,20 @@ namespace mongo {
             out() << " split end " << hex << thisLoc.getOfs() << dec << endl;
     }

-    /** start a new index off, empty */
+    /** start a this->new index off, empty */
     template< class V >
     DiskLoc BtreeBucket<V>::addBucket(const IndexDetails& id) {
         string ns = id.indexNamespace();
-        DiskLoc loc = theDataFileMgr.insert(ns.c_str(), 0, V::BucketSize, false, true);
+        DiskLoc loc = theDataFileMgr.insert(ns.c_str(), 0, V::BucketSize, true);
         BtreeBucket *b = BTREEMOD(loc);
         b->init();
         return loc;
     }

+    void renameIndexNamespace(const char *oldNs, const char *newNs) {
+        renameNamespace( oldNs, newNs );
+    }
+
     template< class V >
     const DiskLoc BtreeBucket<V>::getHead(const DiskLoc& thisLoc) const {
         DiskLoc p = thisLoc;
@@ -1457,17 +1449,17 @@ namespace mongo {
         if ( keyOfs < 0 || keyOfs >= this->n ) {
             out() << "ASSERT failure BtreeBucket<V>::advance, caller: " << caller << endl;
             out() << " thisLoc: " << thisLoc.toString() << endl;
-            out() << " keyOfs: " << keyOfs << " n:" << this->n << " direction: " << direction << endl;
+            out() << " keyOfs: " << keyOfs << " this->n:" << this->n << " direction: " << direction << endl;
             out() << bucketSummary() << endl;
-            verify(false);
+            assert(false);
         }
         int adj = direction < 0 ? 1 : 0;
         int ko = keyOfs + direction;
         DiskLoc nextDown = this->childForPos(ko+adj);
         if ( !nextDown.isNull() ) {
             while ( 1 ) {
                 keyOfs = direction>0 ? 0 : BTREE(nextDown)->n - 1;
                 DiskLoc loc = BTREE(nextDown)->childForPos(keyOfs + adj);
                 if ( loc.isNull() )
                     break;
                 nextDown = loc;
@@ -1493,8 +1485,8 @@ namespace mongo {
                 return ancestor;
             }
         }
-        verify( direction<0 || an->nextChild == childLoc );
-        // parent exhausted also, keep going up
+        assert( direction<0 || an->nextChild == childLoc );
+        // this->parent exhausted also, keep going up
         childLoc = ancestor;
         ancestor = an->parent;
     }
@@ -1533,12 +1525,11 @@ namespace mongo {
     }

     template< class V >
-    bool BtreeBucket<V>::customFind( int l, int h, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction, DiskLoc &thisLoc, int &keyOfs, pair< DiskLoc, int > &bestParent ) {
-        const BtreeBucket<V> * bucket = BTREE(thisLoc);
+    bool BtreeBucket<V>::customFind( int l, int h, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction, DiskLoc &thisLoc, int &keyOfs, pair< DiskLoc, int > &bestParent ) const {
         while( 1 ) {
             if ( l + 1 == h ) {
                 keyOfs = ( direction > 0 ) ? h : l;
-                DiskLoc next = bucket->k( h ).prevChildBucket;
+                DiskLoc next = BTREE(thisLoc)->k( h ).prevChildBucket;
                 if ( !next.isNull() ) {
                     bestParent = make_pair( thisLoc, keyOfs );
                     thisLoc = next;
@@ -1549,7 +1540,7 @@ namespace mongo {
             }
         }
         int m = l + ( h - l ) / 2;
-        int cmp = customBSONCmp( bucket->keyNode( m ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction );
+        int cmp = customBSONCmp( BTREE(thisLoc)->keyNode( m ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction );
         if ( cmp < 0 ) {
             l = m;
         }
@@ -1594,16 +1585,16 @@ namespace mongo {
                 }
             }
             else {
-                // go up parents until rightmost/leftmost node is >=/<= target or at top
+                // go up this->parents until rightmost/leftmost node is >=/<= target or at top
                 while( !BTREE(thisLoc)->parent.isNull() ) {
                     thisLoc = BTREE(thisLoc)->parent;
                     if ( direction > 0 ) {
                         if ( customBSONCmp( BTREE(thisLoc)->keyNode( BTREE(thisLoc)->n - 1 ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) >= 0 ) {
                             break;
                         }
                     }
                     else {
                         if ( customBSONCmp( BTREE(thisLoc)->keyNode( 0 ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) <= 0 ) {
                             break;
                         }
                     }
@@ -1612,136 +1603,75 @@ namespace mongo {
         customLocate( thisLoc, keyOfs, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction, bestParent );
     }

-    /** @param thisLoc in/out param. perhaps thisLoc isn't the best name given that.
-        Ut is used by advanceTo, which skips
-        from one key to another key without necessarily checking all the keys
-        between them in the btree (it can skip to different btree buckets).
-        The advanceTo function can get called a lot, and for different targets
-        we want to advance to, don't want to create a bson obj in a new
-        buffer each time we call that function. The
-        customLocate function necessary for advanceTo, and does the same thing
-        as normal locate function but takes basically the same arguments
-        as advanceTo.
-    */
     template< class V >
-    void BtreeBucket<V>::customLocate(DiskLoc &locInOut, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey,
-                                      const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive,
-                                      const Ordering &order, int direction, pair< DiskLoc, int > &bestParent ) {
-        dassert( direction == 1 || direction == -1 );
-        const BtreeBucket<V> *bucket = BTREE(locInOut);
-        if ( bucket->n == 0 ) {
-            locInOut = DiskLoc();
+    void BtreeBucket<V>::customLocate(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction, pair< DiskLoc, int > &bestParent ) const {
+        if ( BTREE(thisLoc)->n == 0 ) {
+            thisLoc = DiskLoc();
             return;
         }
         // go down until find smallest/biggest >=/<= target
         while( 1 ) {
             int l = 0;
-            int h = bucket->n - 1;
-
-            // +direction: 0, -direction: h
-            int z = (1-direction)/2*h;
-
+            int h = BTREE(thisLoc)->n - 1;
             // leftmost/rightmost key may possibly be >=/<= search key
-            int res = customBSONCmp( bucket->keyNode( z ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction );
-            bool firstCheck = direction*res >= 0;
-
+            bool firstCheck;
+            if ( direction > 0 ) {
+                firstCheck = ( customBSONCmp( BTREE(thisLoc)->keyNode( 0 ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) >= 0 );
+            }
+            else {
+                firstCheck = ( customBSONCmp( BTREE(thisLoc)->keyNode( h ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) <= 0 );
+            }
             if ( firstCheck ) {
                 DiskLoc next;
-                keyOfs = z;
                 if ( direction > 0 ) {
-                    dassert( z == 0 );
-                    next = bucket->k( 0 ).prevChildBucket;
+                    next = BTREE(thisLoc)->k( 0 ).prevChildBucket;
+                    keyOfs = 0;
                 }
                 else {
-                    next = bucket->nextChild;
+                    next = BTREE(thisLoc)->nextChild;
+                    keyOfs = h;
                 }
                 if ( !next.isNull() ) {
-                    bestParent = pair< DiskLoc, int >( locInOut, keyOfs );
-                    locInOut = next;
-                    bucket = BTREE(locInOut);
+                    bestParent = pair< DiskLoc, int >( thisLoc, keyOfs );
+                    thisLoc = next;
                     continue;
                 }
                 else {
                     return;
                 }
             }
-
-            res = customBSONCmp( bucket->keyNode( h-z ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction );
-            bool secondCheck = direction*res < 0;
-
+            bool secondCheck;
+            if ( direction > 0 ) {
+                secondCheck = ( customBSONCmp( BTREE(thisLoc)->keyNode( h ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) < 0 );
+            }
+            else {
+                secondCheck = ( customBSONCmp( BTREE(thisLoc)->keyNode( 0 ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) > 0 );
+            }
             if ( secondCheck ) {
                 DiskLoc next;
                 if ( direction > 0 ) {
-                    next = bucket->nextChild;
+                    next = BTREE(thisLoc)->nextChild;
                 }
                 else {
-                    next = bucket->k( 0 ).prevChildBucket;
+                    next = BTREE(thisLoc)->k( 0 ).prevChildBucket;
                 }
                 if ( next.isNull() ) {
-                    // if bestParent is null, we've hit the end and locInOut gets set to DiskLoc()
-                    locInOut = bestParent.first;
+                    // if bestParent is this->null, we've hit the end and thisLoc gets set to DiskLoc()
+                    thisLoc = bestParent.first;
                     keyOfs = bestParent.second;
                     return;
                 }
                 else {
-                    locInOut = next;
-                    bucket = BTREE(locInOut);
+                    thisLoc = next;
                     continue;
                 }
            }

-            if ( !customFind( l, h, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction, locInOut, keyOfs, bestParent ) ) {
+            if ( !customFind( l, h, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction, thisLoc, keyOfs, bestParent ) ) {
                 return;
             }
-            bucket = BTREE(locInOut);
         }
     }

-    /** @thisLoc disk location of *this */
-    template< class V >
-    void BtreeBucket<V>::insertStepOne(DiskLoc thisLoc,
-                                       IndexInsertionContinuationImpl<V>& c,
-                                       bool dupsAllowed) const {
-        dassert( c.key.dataSize() <= this->KeyMax );
-        verify( c.key.dataSize() > 0 );
-
-        int pos;
-        bool found = find(c.idx, c.key, c.recordLoc, c.order, pos, !dupsAllowed);
-
-        if ( found ) {
-            const _KeyNode& kn = k(pos);
-            if ( kn.isUnused() ) {
-                LOG(4) << "btree _insert: reusing unused key" << endl;
-                c.b = this;
-                c.pos = pos;
-                c.op = IndexInsertionContinuation::SetUsed;
-                return;
-            }
-
-            DEV {
-                log() << "_insert(): key already exists in index (ok for background:true)\n";
-                log() << " " << c.idx.indexNamespace() << " thisLoc:" << thisLoc.toString() << '\n';
-                log() << " " << c.key.toString() << '\n';
-                log() << " " << "recordLoc:" << c.recordLoc.toString() << " pos:" << pos << endl;
-                log() << " old l r: " << this->childForPos(pos).toString() << ' ' << this->childForPos(pos+1).toString() << endl;
-            }
-            alreadyInIndex();
-        }
-
-        Loc ch = this->childForPos(pos);
-        DiskLoc child = ch;
-
-        if ( child.isNull() ) {
-            // A this->new key will be inserted at the same tree height as an adjacent existing key.
-            c.bLoc = thisLoc;
-            c.b = this;
-            c.pos = pos;
-            c.op = IndexInsertionContinuation::InsertHere;
-            return;
-        }
-
-        child.btree<V>()->insertStepOne(child, c, dupsAllowed);
-    }
-
     /** @thisLoc disk location of *this */
     template< class V >
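For context on the branch-free arithmetic the left-hand side of this hunk uses: with direction restricted to +1 or -1, z = (1-direction)/2*h evaluates to 0 for a forward scan and h for a reverse scan, and direction*res >= 0 folds the ">= 0" and "<= 0" comparisons into one expression. A standalone sketch verifying that equivalence exhaustively over the comparison signs:

    #include <cassert>

    int main() {
        for (int direction = -1; direction <= 1; direction += 2) {
            int h = 7;                               // hypothetical last key index
            int z = (1 - direction) / 2 * h;
            assert(z == (direction > 0 ? 0 : h));    // picks the scan endpoint
            for (int res = -1; res <= 1; ++res) {    // sign of a key comparison
                bool folded = direction * res >= 0;
                bool branchy = direction > 0 ? res >= 0 : res <= 0;
                assert(folded == branchy);           // both forms agree
            }
        }
        return 0;
    }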
@@ -1752,7 +1682,7 @@ namespace mongo {
            problem() << "ERROR: key too large len:" << key.dataSize() << " max:" << this->KeyMax << ' ' << key.dataSize() << ' ' << idx.indexNamespace() << endl;
            return 2;
        }
-        verify( key.dataSize() > 0 );
+        assert( key.dataSize() > 0 );

        int pos;
        bool found = find(idx, key, recordLoc, order, pos, !dupsAllowed);
@@ -1760,15 +1690,15 @@ namespace mongo {
            out() << " " << thisLoc.toString() << '.' << "_insert " <<
                key.toString() << '/' << recordLoc.toString() <<
                " l:" << lChild.toString() << " r:" << rChild.toString() << endl;
-            out() << " found:" << found << " pos:" << pos << " n:" << this->n << endl;
+            out() << " found:" << found << " pos:" << pos << " this->n:" << this->n << endl;
        }

        if ( found ) {
            const _KeyNode& kn = k(pos);
            if ( kn.isUnused() ) {
-                LOG(4) << "btree _insert: reusing unused key" << endl;
-                massert( 10285 , "_insert: reuse key but lchild is not null", lChild.isNull());
-                massert( 10286 , "_insert: reuse key but rchild is not null", rChild.isNull());
+                log(4) << "btree _insert: reusing unused key" << endl;
+                massert( 10285 , "_insert: reuse key but lchild is not this->null", lChild.isNull());
+                massert( 10286 , "_insert: reuse key but rchild is not this->null", rChild.isNull());
                kn.writing().setUsed();
                return 0;
            }
@@ -1779,7 +1709,7 @@ namespace mongo {
                log() << " " << key.toString() << '\n';
                log() << " " << "recordLoc:" << recordLoc.toString() << " pos:" << pos << endl;
                log() << " old l r: " << this->childForPos(pos).toString() << ' ' << this->childForPos(pos+1).toString() << endl;
-                log() << " new l r: " << lChild.toString() << ' ' << rChild.toString() << endl;
+                log() << " this->new l r: " << lChild.toString() << ' ' << rChild.toString() << endl;
            }
            alreadyInIndex();
        }
@@ -1789,11 +1719,11 @@ namespace mongo {
        DiskLoc child = ch;
        if ( insert_debug )
            out() << " getChild(" << pos << "): " << child.toString() << endl;
-        // In current usage, rChild isNull() for a new key and false when we are
+        // In current usage, rChild isNull() for a this->new key and false when we are
        // promoting a split key. These are the only two cases where _insert()
        // is called currently.
        if ( child.isNull() || !rChild.isNull() ) {
-            // A new key will be inserted at the same tree height as an adjacent existing key.
+            // A this->new key will be inserted at the same tree height as an adjacent existing key.
            insertHere(thisLoc, pos, recordLoc, key, order, lChild, rChild, idx);
            return 0;
        }
@@ -1818,28 +1748,14 @@ namespace mongo {
        _log() << "\n" << indent << " " << hex << this->nextChild.getOfs() << dec << endl;
    }

-    template< class V >
-    void BtreeBucket<V>::twoStepInsert(DiskLoc thisLoc, IndexInsertionContinuationImpl<V> &c,
-                                       bool dupsAllowed) const
-    {
-
-        if ( c.key.dataSize() > this->KeyMax ) {
-            problem() << "ERROR: key too large len:" << c.key.dataSize() << " max:" << this->KeyMax << ' ' << c.key.dataSize() << ' ' << c.idx.indexNamespace() << endl;
-            return; // op=Nothing
-        }
-        insertStepOne(thisLoc, c, dupsAllowed);
-    }
-
    /** todo: meaning of return code unclear clean up */
    template< class V >
    int BtreeBucket<V>::bt_insert(const DiskLoc thisLoc, const DiskLoc recordLoc,
                                  const BSONObj& _key, const Ordering &order, bool dupsAllowed,
                                  IndexDetails& idx, bool toplevel) const
    {
-        guessIncreasing = _key.firstElementType() == jstOID && idx.isIdIndex();
        KeyOwned key(_key);

-        dassert(toplevel);
        if ( toplevel ) {
            if ( key.dataSize() > this->KeyMax ) {
                problem() << "Btree::insert: key too large to index, skipping " << idx.indexNamespace() << ' ' << key.dataSize() << ' ' << key.toString() << endl;
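For context on the twoStepInsert/insertStepOne machinery this hunk removes: step one only reads the tree and records what the eventual write should be, so the page faults and most of the computation happen under a weaker lock, and the saved "continuation" applies the small write afterwards. A standalone sketch of that pattern (toy types, not the real IndexInsertionContinuation):

    #include <cassert>

    struct Continuation {
        enum Op { Nothing, SetUsed, InsertHere } op = Nothing;
        int pos = -1;                        // where step two should act
    };

    void insertStepOne(const int* keys, int n, int key, Continuation& c) {
        int pos = 0;                         // read-only probe: find insert slot
        while (pos < n && keys[pos] < key) ++pos;
        c.op = Continuation::InsertHere;     // record the decision, write nothing
        c.pos = pos;
    }

    int main() {
        int keys[] = {10, 20, 30};
        Continuation c;
        insertStepOne(keys, 3, 25, c);       // step 1: compute under a weak lock
        assert(c.op == Continuation::InsertHere && c.pos == 2);
        // step 2 would perform the actual write using c.op / c.pos,
        // possibly after upgrading the lock
        return 0;
    }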
@@ -1847,16 +1763,9 @@ namespace mongo {
            }
        }

-        int x;
-        try {
-            x = _insert(thisLoc, recordLoc, key, order, dupsAllowed, DiskLoc(), DiskLoc(), idx);
-            this->assertValid( order );
-        }
-        catch( ... ) {
-            guessIncreasing = false;
-            throw;
-        }
-        guessIncreasing = false;
+        int x = _insert(thisLoc, recordLoc, key, order, dupsAllowed, DiskLoc(), DiskLoc(), idx);
+        this->assertValid( order );

        return x;
    }

@@ -1875,7 +1784,7 @@ namespace mongo {
        int pos;
        bool found;
        // TODO: is it really ok here that the order is a default?
-        // for findById() use, yes. for checkNoIndexConflicts, no?
+        // for findById() use, yes. for checkNoIndexConflicts, this->no?
        Ordering o = Ordering::make(BSONObj());
        DiskLoc bucket = locate( indexdetails , indexdetails.head , key , o , pos , found , minDiskLoc );
        if ( bucket.isNull() )
@@ -1897,7 +1806,7 @@ namespace mongo {
        return kn.recordLoc;
    }

-} // namespace mongo
+} // this->namespace mongo

#include "db.h"
#include "dbhelpers.h"
@@ -1926,7 +1835,7 @@ namespace mongo {
        A.GETOFS() += 2;
        b->bt_insert(id.head, A, key, order, true, id);
        A.GETOFS() += 2;
-        verify( b->k(0).isUsed() );
+        assert( b->k(0).isUsed() );
        // b->k(0).setUnused();
        b->k(1).setUnused();
        b->k(2).setUnused();
@@ -1951,32 +1860,28 @@ namespace mongo {
    template class BucketBasics<V1>;
    template class BtreeBucket<V0>;
    template class BtreeBucket<V1>;
    template struct __KeyNode<DiskLoc>;
    template struct __KeyNode<DiskLoc56Bit>;

-    struct BTUnitTest : public StartupTest {
+    struct BTUnitTest : public UnitTest {
        void run() {
            DiskLoc big(0xf12312, 0x70001234);
            DiskLoc56Bit bigl;
            {
                bigl = big;
-                verify( big == bigl );
+                assert( big == bigl );
                DiskLoc e = bigl;
-                verify( big == e );
+                assert( big == e );
            }
            {
                DiskLoc d;
-                verify( d.isNull() );
+                assert( d.isNull() );
                DiskLoc56Bit l;
                l = d;
-                verify( l.isNull() );
+                assert( l.isNull() );
                d = l;
-                verify( d.isNull() );
-                verify( l < bigl );
+                assert( d.isNull() );
+                assert( l < bigl );
            }
        }
    } btunittest;

-
-    IndexInsertionContinuation::~IndexInsertionContinuation() {}
}
@@ -18,12 +18,11 @@

#pragma once

-#include "mongo/pch.h"
-
-#include "mongo/db/diskloc.h"
-#include "mongo/db/dur.h"
-#include "mongo/db/jsobj.h"
-#include "mongo/db/key.h"
+#include "../pch.h"
+#include "jsobj.h"
+#include "diskloc.h"
+#include "pdfile.h"
+#include "key.h"

namespace mongo {

@@ -91,12 +90,12 @@ namespace mongo {
        unsigned short _kdo;
        void setKeyDataOfs(short s) {
            _kdo = s;
-            verify(s>=0);
+            assert(s>=0);
        }
        /** Seems to be redundant. */
        void setKeyDataOfsSavingUse(short s) {
            _kdo = s;
-            verify(s>=0);
+            assert(s>=0);
        }
        /**
         * Unused keys are not returned by read operations. Keys may be marked
@@ -181,8 +180,6 @@ namespace mongo {

        // largest key size we allow. note we very much need to support bigger keys (somehow) in the future.
        static const int KeyMax = OldBucketSize / 10;
-        // A sentinel value sometimes used to identify a deallocated bucket.
-        static const int INVALID_N_SENTINEL = -1;
    };

    // a a a ofs ofs ofs ofs
@@ -206,7 +203,7 @@ namespace mongo {
        BtreeBucket<V> * btreemod() const {
            return DiskLoc(*this).btreemod<V>();
        }
-        operator const DiskLoc() const {
+        operator DiskLoc() const {
            // endian
            if( isNull() ) return DiskLoc();
            unsigned a = *((unsigned *) (_a-1));
@@ -241,16 +238,14 @@ namespace mongo {
        void operator=(const DiskLoc& loc) {
            ofs = loc.getOfs();
            int la = loc.a();
-            verify( la <= 0xffffff ); // must fit in 3 bytes
+            assert( la <= 0xffffff ); // must fit in 3 bytes
            if( la < 0 ) {
-                if ( la != -1 ) {
-                    log() << "btree diskloc isn't negative 1: " << la << endl;
-                    verify ( la == -1 );
-                }
+                assert( la == -1 );
                la = 0;
                ofs = OurNullOfs;
            }
            memcpy(_a, &la, 3); // endian
            dassert( ofs != 0 );
        }
        DiskLoc56Bit& writing() const {
            return *((DiskLoc56Bit*) getDur().writingPtr((void*)this, 7));
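For context on the packing in the hunk above: DiskLoc56Bit squeezes a (file number, offset) pair into 7 bytes, keeping 4 bytes of offset plus the low 3 bytes of the file number, which is why the code asserts la <= 0xffffff. A standalone sketch of that round trip (little-endian layout is assumed here, matching the "// endian" notes in the header; the real class also special-cases the null value):

    #include <cassert>
    #include <cstring>

    struct Loc56 {
        int ofs;                                         // 4-byte offset
        unsigned char a[3];                              // low 3 bytes of file number
        void set(int fileNo, int offset) {
            assert( fileNo >= 0 && fileNo <= 0xffffff ); // must fit in 3 bytes
            ofs = offset;
            std::memcpy(a, &fileNo, 3);                  // low 3 bytes, LE only
        }
        int fileNo() const {
            int la = 0;
            std::memcpy(&la, a, 3);                      // upper byte stays zero
            return la;
        }
    };

    int main() {
        Loc56 l;
        l.set(0xf123, 0x70001234);
        assert(l.fileNo() == 0xf123 && l.ofs == 0x70001234);
        return 0;
    }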
|
@ -267,8 +262,6 @@ namespace mongo {
|
|||
enum { BucketSize = 8192-16 }; // leave room for Record header
|
||||
// largest key size we allow. note we very much need to support bigger keys (somehow) in the future.
|
||||
static const int KeyMax = 1024;
|
||||
// A sentinel value sometimes used to identify a deallocated bucket.
|
||||
static const unsigned short INVALID_N_SENTINEL = 0xffff;
|
||||
protected:
|
||||
/** Parent bucket of this bucket, which isNull() for the root bucket. */
|
||||
Loc parent;
|
||||
|
|
@@ -377,14 +370,6 @@ namespace mongo {
        int nKeys() const { return this->n; }
        const DiskLoc getNextChild() const { return this->nextChild; }

-        // for tree inspection and statistical analysis
-        // NOTE: topSize and emptySize have different types in BtreeData_V0 and BtreeData_V1
-
-        /** Size used for bson storage, including storage of old keys. */
-        unsigned int getTopSize() const { return static_cast<unsigned int>(this->topSize); }
-        /** Size of the empty region. */
-        unsigned int getEmptySize() const { return static_cast<unsigned int>(this->emptySize); }
-
    protected:
        char * dataAt(short ofs) { return this->data + ofs; }

@@ -412,7 +397,7 @@ namespace mongo {
        /**
         * Preconditions:
         *  - key / recordLoc are > all existing keys
-         *  - The keys in prevChild and their descendants are between all existing
+         *  - The keys in prevChild and their descendents are between all existing
         *    keys and 'key'.
         * Postconditions:
         *  - If there is space for key without packing, it is inserted as the
@@ -423,7 +408,7 @@ namespace mongo {
        bool _pushBack(const DiskLoc recordLoc, const Key& key, const Ordering &order, const DiskLoc prevChild);
        void pushBack(const DiskLoc recordLoc, const Key& key, const Ordering &order, const DiskLoc prevChild) {
            bool ok = _pushBack( recordLoc , key , order , prevChild );
-            verify(ok);
+            assert(ok);
        }

        /**
@@ -587,11 +572,6 @@ namespace mongo {
        void setKey( int i, const DiskLoc recordLoc, const Key& key, const DiskLoc prevChildBucket );
    };

-    class IndexDetails;
-    class IndexInsertionContinuation;
-    template< class V>
-    struct IndexInsertionContinuationImpl;
-
    /**
     * This class adds functionality for manipulating buckets that are assembled
     * in a tree. The requirements for const and non const functions and
@@ -615,7 +595,7 @@ namespace mongo {
     * so assignment of const is sometimes nonideal.
     *
     * TODO There are several cases in which the 'this' pointer is invalidated
-     * as a result of deallocation. A separate class representing a btree would
+     * as a result of deallocation. A seperate class representing a btree would
     * alleviate some fragile cases where the implementation must currently
     * behave correctly if the 'this' pointer is suddenly invalidated by a
     * callee.
@@ -623,14 +603,13 @@ namespace mongo {
    template< class V >
    class BtreeBucket : public BucketBasics<V> {
        friend class BtreeCursor;
-        friend struct IndexInsertionContinuationImpl<V>;
    public:
        // make compiler happy:
        typedef typename V::Key Key;
        typedef typename V::KeyOwned KeyOwned;
        typedef typename BucketBasics<V>::KeyNode KeyNode;
        typedef typename BucketBasics<V>::_KeyNode _KeyNode;
        typedef typename BucketBasics<V>::Loc Loc;
        const _KeyNode& k(int i) const { return static_cast< const BucketBasics<V> * >(this)->k(i); }
    protected:
        _KeyNode& k(int i) { return static_cast< BucketBasics<V> * >(this)->_k(i); }
@@ -700,11 +679,6 @@ namespace mongo {
                      const BSONObj& key, const Ordering &order, bool dupsAllowed,
                      IndexDetails& idx, bool toplevel = true) const;

-        /** does the insert in two steps - can then use an upgradable lock for step 1, which
-            is the part which may have page faults.  also that step is most of the computational work.
-        */
-        void twoStepInsert(DiskLoc thisLoc, IndexInsertionContinuationImpl<V> &c, bool dupsAllowed) const;
-
        /**
         * Preconditions:
         *  - 'key' has a valid schema for this index, and may have objsize() > KeyMax.
@@ -747,7 +721,7 @@ namespace mongo {
        void advanceTo(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction ) const;

        /** Locate a key with fields comprised of a combination of keyBegin fields and keyEnd fields. */
-        static void customLocate(DiskLoc &locInOut, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction, pair< DiskLoc, int > &bestParent ) ;
+        void customLocate(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction, pair< DiskLoc, int > &bestParent ) const;

        /** @return head of the btree by traversing from current bucket. */
        const DiskLoc getHead(const DiskLoc& thisLoc) const;
@@ -915,6 +889,12 @@ namespace mongo {
        }
    protected:

+        /**
+         * Allocate a temporary btree bucket in ram rather than in memory mapped
+         * storage. The caller must release this bucket with free().
+         */
+        static BtreeBucket<V> * allocTemp();
+
        /**
         * Preconditions:
         *  - This bucket is packed.
@@ -946,7 +926,7 @@ namespace mongo {
         *  - The bucket may be packed or split, invalidating the specified value
         *    of keypos.
         * This function will always modify thisLoc, but it's marked const because
-         * it commonly relies on the specialized writ]e intent mechanism of basicInsert().
+         * it commonly relies on the specialized write intent mechanism of basicInsert().
         */
        void insertHere(const DiskLoc thisLoc, int keypos,
                        const DiskLoc recordLoc, const Key& key, const Ordering &order,
@@ -957,11 +937,8 @@ namespace mongo {
                    const Key& key, const Ordering &order, bool dupsAllowed,
                    const DiskLoc lChild, const DiskLoc rChild, IndexDetails &idx) const;

-        void insertStepOne(
-                DiskLoc thisLoc, IndexInsertionContinuationImpl<V>& c, bool dupsAllowed) const;
-
        bool find(const IndexDetails& idx, const Key& key, const DiskLoc &recordLoc, const Ordering &order, int& pos, bool assertIfDup) const;
-        static bool customFind( int l, int h, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction, DiskLoc &thisLoc, int &keyOfs, pair< DiskLoc, int > &bestParent ) ;
+        bool customFind( int l, int h, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction, DiskLoc &thisLoc, int &keyOfs, pair< DiskLoc, int > &bestParent ) const;
        static void findLargestKey(const DiskLoc& thisLoc, DiskLoc& largestLoc, int& largestKey);
        static int customBSONCmp( const BSONObj &l, const BSONObj &rBegin, int rBeginLen, bool rSup, const vector< const BSONElement * > &rEnd, const vector< bool > &rEndInclusive, const Ordering &o, int direction );

@@ -1006,13 +983,147 @@ namespace mongo {
    };
#pragma pack()

+    class FieldRangeVector;
+    class FieldRangeVectorIterator;
+
+    class BtreeCursor : public Cursor {
+    protected:
+        BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails&, const BSONObj &startKey, const BSONObj &endKey, bool endKeyInclusive, int direction );
+        BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails& _id, const shared_ptr< FieldRangeVector > &_bounds, int _direction );
+    public:
+        virtual ~BtreeCursor();
+        /** makes an appropriate subclass depending on the index version */
+        static BtreeCursor* make( NamespaceDetails *_d, int _idxNo, const IndexDetails&, const BSONObj &startKey, const BSONObj &endKey, bool endKeyInclusive, int direction );
+        static BtreeCursor* make( NamespaceDetails *_d, int _idxNo, const IndexDetails& _id, const shared_ptr< FieldRangeVector > &_bounds, int _direction );
+
+        virtual bool ok() { return !bucket.isNull(); }
+        virtual bool advance();
+        virtual void noteLocation(); // updates keyAtKeyOfs...
+        virtual void checkLocation() = 0;
+        virtual bool supportGetMore() { return true; }
+        virtual bool supportYields() { return true; }
+
+        /**
+         * used for multikey index traversal to avoid sending back dups. see Matcher::matches().
+         * if a multikey index traversal:
+         *   if loc has already been sent, returns true.
+         *   otherwise, marks loc as sent.
+         * @return false if the loc has not been seen
+         */
+        virtual bool getsetdup(DiskLoc loc) {
+            if( _multikey ) {
+                pair<set<DiskLoc>::iterator, bool> p = _dups.insert(loc);
+                return !p.second;
+            }
+            return false;
+        }
+
+        virtual bool modifiedKeys() const { return _multikey; }
+        virtual bool isMultiKey() const { return _multikey; }
+
+        /*const _KeyNode& _currKeyNode() const {
+            assert( !bucket.isNull() );
+            const _KeyNode& kn = keyNode(keyOfs);
+            assert( kn.isUsed() );
+            return kn;
+        }*/
+
+        /** returns BSONObj() if ofs is out of range */
+        virtual BSONObj keyAt(int ofs) const = 0;
+
+        virtual BSONObj currKey() const = 0;
+        virtual BSONObj indexKeyPattern() { return indexDetails.keyPattern(); }
+
+        virtual void aboutToDeleteBucket(const DiskLoc& b) {
+            if ( bucket == b )
+                keyOfs = -1;
+        }
+
+        virtual DiskLoc currLoc() = 0; // { return !bucket.isNull() ? _currKeyNode().recordLoc : DiskLoc(); }
+        virtual DiskLoc refLoc() { return currLoc(); }
+        virtual Record* _current() { return currLoc().rec(); }
+        virtual BSONObj current() { return BSONObj(_current()); }
+        virtual string toString();
+
+        BSONObj prettyKey( const BSONObj &key ) const {
+            return key.replaceFieldNames( indexDetails.keyPattern() ).clientReadable();
+        }
+
+        virtual BSONObj prettyIndexBounds() const;
+
+        void forgetEndKey() { endKey = BSONObj(); }
+
+        virtual CoveredIndexMatcher *matcher() const { return _matcher.get(); }
+        virtual shared_ptr< CoveredIndexMatcher > matcherPtr() const { return _matcher; }
+
+        virtual void setMatcher( shared_ptr< CoveredIndexMatcher > matcher ) { _matcher = matcher; }
+
+        virtual long long nscanned() { return _nscanned; }
+
+        /** for debugging only */
+        const DiskLoc getBucket() const { return bucket; }
+
+        // just for unit tests
+        virtual bool curKeyHasChild() = 0;
+
+    protected:
+        /**
+         * Our btrees may (rarely) have "unused" keys when items are deleted.
+         * Skip past them.
+         */
+        virtual bool skipUnusedKeys() = 0;
+
+        bool skipOutOfRangeKeysAndCheckEnd();
+        void skipAndCheck();
+        void checkEnd();
+
+        /** selective audits on construction */
+        void audit();
+
+        virtual void _audit() = 0;
+        virtual DiskLoc _locate(const BSONObj& key, const DiskLoc& loc) = 0;
+        virtual DiskLoc _advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) = 0;
+        virtual void _advanceTo(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction ) = 0;
+
+        /** set initial bucket */
+        void init();
+
+        /** if afterKey is true, we want the first key with values of the keyBegin fields greater than keyBegin */
+        void advanceTo( const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive );
+
+        set<DiskLoc> _dups;
+        NamespaceDetails * const d;
+        const int idxNo;
+        BSONObj startKey;
+        BSONObj endKey;
+        bool _endKeyInclusive;
+        bool _multikey; // this must be updated every getmore batch in case someone added a multikey
+        const IndexDetails& indexDetails;
+        const BSONObj _order;
+        const Ordering _ordering;
+        DiskLoc bucket;
+        int keyOfs;
+        const int _direction; // 1=fwd,-1=reverse
+        BSONObj keyAtKeyOfs; // so we can tell if things moved around on us between the query and the getMore call
+        DiskLoc locAtKeyOfs;
+        const shared_ptr< FieldRangeVector > _bounds;
+        auto_ptr< FieldRangeVectorIterator > _boundsIterator;
+        const IndexSpec& _spec;
+        shared_ptr< CoveredIndexMatcher > _matcher;
+        bool _independentFieldRanges;
+        long long _nscanned;
+    };
+
    /** Renames the index namespace for this btree's index. */
    void renameIndexNamespace(const char *oldNs, const char *newNs);

    /**
     * give us a writable version of the btree bucket (declares write intent).
     * note it is likely more efficient to declare write intent on something smaller when you can.
     */
    template< class V >
    BtreeBucket<V> * DiskLoc::btreemod() const {
-        verify( _a != -1 );
+        assert( _a != -1 );
        BtreeBucket<V> *b = const_cast< BtreeBucket<V> * >( btree<V>() );
        return static_cast< BtreeBucket<V>* >( getDur().writingPtr( b, V::BucketSize ) );
    }
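For context on the getsetdup() contract in the BtreeCursor class this hunk restores: with a multikey index a single document can appear under several index keys, so the cursor remembers which record locations it has already returned and reports repeats as dups, exactly as its doc comment describes. A standalone sketch of that mechanism (toy types; a long long stands in for DiskLoc):

    #include <cassert>
    #include <set>

    struct ToyCursor {
        bool multikey = true;
        std::set<long long> sent;            // stand-in for set<DiskLoc> _dups
        bool getsetdup(long long loc) {
            if (!multikey) return false;     // unique traversal: nothing to skip
            return !sent.insert(loc).second; // true => already returned once
        }
    };

    int main() {
        ToyCursor c;
        assert(!c.getsetdup(42));            // first visit: not a dup, now marked
        assert(c.getsetdup(42));             // second visit: reported as dup
        assert(!c.getsetdup(7));
        return 0;
    }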