    def setUpClass(cls):
        """ Initializes the cluster
        """
        os.system('rm %s; touch %s' % (CONFIG, CONFIG))
        use_mongos = True
        if PORTS_ONE['MONGOS'] != "27217":
            use_mongos = False

        cls.flag = start_cluster(use_mongos=use_mongos)
        if cls.flag:
            cls.conn = Connection('%s:%s' % (HOSTNAME, PORTS_ONE['MONGOS']),
                                  replicaSet="demo-repl")
            timer = Timer(60, abort_test)
            cls.connector = Connector(address="%s:%s" %
                                      (HOSTNAME, PORTS_ONE["MONGOS"]),
                                      oplog_checkpoint=CONFIG,
                                      target_url=None,
                                      ns_set=['test.test'],
                                      u_key='_id',
                                      auth_key=None)
            cls.synchronizer = cls.connector.doc_managers[0]
            timer.start()
            cls.connector.start()
            while len(cls.connector.shard_set) == 0:
                pass
            timer.cancel()
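    # A matching tearDownClass is not shown in these examples; a minimal
    # sketch, assuming Connector is a threading.Thread subclass whose join()
    # stops the connector, and that kill_all() is a hypothetical helper that
    # shuts down the test mongod/mongos processes started above:
    @classmethod
    def tearDownClass(cls):
        """ Stops the connector and tears down the cluster
        """
        if cls.flag:
            cls.connector.join()  # stop and join the connector thread
        kill_all()  # hypothetical: stop all cluster processes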
    def setUpClass(cls):
        """ Initializes the cluster
        """

        os.system('rm %s; touch %s' % (CONFIG, CONFIG))
        use_mongos = True
        if MAIN_ADDRESS.split(":")[1] != "27217":
            use_mongos = False
        cls.flag = start_cluster(use_mongos=use_mongos)
    def setUpClass(cls):
        os.system('rm %s; touch %s' % (CONFIG, CONFIG))
        start_single_mongod_instance("30000", "/MC", "MC_log")
        cls.mongo_doc = DocManager("localhost:30000")
        cls.mongo_doc._remove()
        cls.flag = start_cluster()
        if cls.flag:
            cls.conn = Connection("%s:%s" % (HOSTNAME, PORTS_ONE['MONGOS']),
                                  replicaSet="demo-repl")
    def setUp(self):
        # Create a new oplog progress file
        try:
            os.unlink("config.txt")
        except OSError:
            pass
        open("config.txt", "w").close()

        # Start a replica set
        start_cluster(sharded=False, use_mongos=False)
        # Connection to the replica set as a whole
        self.main_conn = Connection("localhost:%s" % PORTS_ONE["PRIMARY"],
                                    replicaSet="demo-repl")
        # Connection to the primary specifically
        self.primary_conn = Connection("localhost:%s" % PORTS_ONE["PRIMARY"])
        # Connection to the secondary specifically
        self.secondary_conn = Connection(
            "localhost:%s" % PORTS_ONE["SECONDARY"],
            read_preference=ReadPreference.SECONDARY_PREFERRED
        )

        # Wipe any test data
        self.main_conn["test"]["mc"].drop()

        # Oplog thread
        doc_manager = DocManager()
        oplog_progress = LockingDict()
        self.opman = OplogThread(
            primary_conn=self.main_conn,
            main_address="localhost:%s" % PORTS_ONE["PRIMARY"],
            oplog_coll=self.main_conn["local"]["oplog.rs"],
            is_sharded=False,
            doc_manager=doc_manager,
            oplog_progress_dict=oplog_progress,
            namespace_set=["test.mc"],
            auth_key=None,
            auth_username=None,
            repl_set="demo-repl"
        )
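    # A rough sketch (not part of the original fixture) of how a test built on
    # this setUp might drive the oplog thread; it assumes OplogThread is a
    # thread started with start() and stopped with join(), and reuses the
    # simulator DocManager._search() and assert_soon() seen in other examples:
    def test_oplog_picks_up_insert(self):
        self.opman.start()
        # write through the replica-set connection; the oplog thread should
        # hand the document to its doc manager
        self.main_conn["test"]["mc"].insert({"i": 0})
        assert_soon(lambda: len(self.opman.doc_manager._search()) == 1)
        self.opman.join()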
    def setUpClass(cls):
        """ Starts the cluster
        """
        os.system('rm %s; touch %s' % (CONFIG, CONFIG))
        cls.elastic_doc = DocManager('localhost:9200')
        cls.elastic_doc._remove()
        cls.flag = start_cluster()
        if cls.flag:
            cls.conn = Connection('%s:%s' % (HOSTNAME, PORTS_ONE['MONGOS']),
                                  replicaSet="demo-repl")

        import logging
        logger = logging.getLogger()
        loglevel = logging.INFO
        logger.setLevel(loglevel)
    def setUpClass(cls):
        os.system('rm %s; touch %s' % (CONFIG, CONFIG))
        cls.flag = start_cluster()
        if cls.flag:
            cls.conn = Connection('%s:%s' % (HOSTNAME, PORTS_ONE['MAIN']),
                                  replicaSet="demo-repl")
            # Creating a Solr object with an invalid URL doesn't raise an
            # exception, so verify the connection explicitly
            cls.solr_conn = Solr('http://localhost:8983/solr')
            try:
                cls.solr_conn.commit()
            except (SolrError, MissingSchema):
                cls.err_msg = "Cannot connect to Solr!"
                cls.flag = False
            if cls.flag:
                cls.solr_conn.delete(q='*:*')
        else:
            cls.err_msg = "Shards cannot be added to mongos"
    def setUpClass(cls):
        """ Initializes the cluster
        """
        os.system('rm %s; touch %s' % (CONFIG, CONFIG))
        cls.AUTH_KEY = None
        cls.flag = True
        if AUTH_FILE:
            # We want to get the credentials from this file
            try:
                with open(AUTH_FILE) as key_file:
                    key = key_file.read()
                # strip any whitespace, including the trailing newline
                cls.AUTH_KEY = re.sub(r'\s', '', key)
            except IOError:
                print('Could not parse authentication file!')
                cls.flag = False
                cls.err_msg = "Could not read key file!"

        if not start_cluster(key_file=AUTH_FILE):
            cls.flag = False
            cls.err_msg = "Shards cannot be added to mongos"
    def setUp(self):
        """ Initialize the cluster:

        Clean out the databases used by the tests
        Make connections to mongos, mongods
        Create and shard test collections
        Create OplogThreads
        """
        # Start the cluster with a mongos on port 27217
        self.mongos_p = start_cluster()

        # Connection to mongos
        mongos_address = '%s:%d' % (mongo_host, self.mongos_p)
        self.mongos_conn = MongoClient(mongos_address)

        # Connections to the shards
        shard1_ports = get_shard(self.mongos_p, 0)
        shard2_ports = get_shard(self.mongos_p, 1)
        self.shard1_prim_p = shard1_ports['primary']
        self.shard1_scnd_p = shard1_ports['secondaries'][0]
        self.shard2_prim_p = shard2_ports['primary']
        self.shard2_scnd_p = shard2_ports['secondaries'][0]
        self.shard1_conn = MongoClient('%s:%d'
                                       % (mongo_host, self.shard1_prim_p),
                                       replicaSet="demo-set-0")
        self.shard2_conn = MongoClient('%s:%d'
                                       % (mongo_host, self.shard2_prim_p),
                                       replicaSet="demo-set-1")
        self.shard1_secondary_conn = MongoClient(
            '%s:%d' % (mongo_host, self.shard1_scnd_p),
            read_preference=ReadPreference.SECONDARY_PREFERRED
        )
        self.shard2_secondary_conn = MongoClient(
            '%s:%d' % (mongo_host, self.shard2_scnd_p),
            read_preference=ReadPreference.SECONDARY_PREFERRED
        )

        # Wipe any test data
        self.mongos_conn["test"]["mcsharded"].drop()

        # Create and shard the collection test.mcsharded on the "i" field
        self.mongos_conn["test"]["mcsharded"].ensure_index("i")
        self.mongos_conn.admin.command("enableSharding", "test")
        self.mongos_conn.admin.command("shardCollection",
                                       "test.mcsharded",
                                       key={"i": 1})

        # Pre-split the collection so that:
        # i < 1000            lives on shard1
        # i >= 1000           lives on shard2
        self.mongos_conn.admin.command(bson.SON([
            ("split", "test.mcsharded"),
            ("middle", {"i": 1000})
        ]))

        # disable the balancer
        self.mongos_conn.config.settings.update(
            {"_id": "balancer"},
            {"$set": {"stopped": True}},
            upsert=True
        )

        # Move chunks to their proper places
        try:
            self.mongos_conn["admin"].command(
                "moveChunk",
                "test.mcsharded",
                find={"i": 1},
                to="demo-set-0"
            )
        except pymongo.errors.OperationFailure:
            pass        # chunk may already be on the correct shard
        try:
            self.mongos_conn["admin"].command(
                "moveChunk",
                "test.mcsharded",
                find={"i": 1000},
                to="demo-set-1"
            )
        except pymongo.errors.OperationFailure:
            pass        # chunk may already be on the correct shard

        # Make sure chunks are distributed correctly
        self.mongos_conn["test"]["mcsharded"].insert({"i": 1})
        self.mongos_conn["test"]["mcsharded"].insert({"i": 1000})

        def chunks_moved():
            doc1 = self.shard1_conn.test.mcsharded.find_one()
            doc2 = self.shard2_conn.test.mcsharded.find_one()
            if None in (doc1, doc2):
                return False
            return doc1['i'] == 1 and doc2['i'] == 1000
        assert_soon(chunks_moved)
        self.mongos_conn.test.mcsharded.remove()

        # create a new oplog progress file
        try:
            os.unlink("config.txt")
        except OSError:
            pass
        open("config.txt", "w").close()

        # Oplog threads (oplog manager) for each shard
        doc_manager = DocManager()
        oplog_progress = LockingDict()
        self.opman1 = OplogThread(
            primary_conn=self.shard1_conn,
            main_address='%s:%d' % (mongo_host, self.mongos_p),
            oplog_coll=self.shard1_conn["local"]["oplog.rs"],
            is_sharded=True,
            doc_manager=doc_manager,
            oplog_progress_dict=oplog_progress,
            namespace_set=["test.mcsharded", "test.mcunsharded"],
            auth_key=None,
            auth_username=None
        )
        self.opman2 = OplogThread(
            primary_conn=self.shard2_conn,
            main_address='%s:%d' % (mongo_host, self.mongos_p),
            oplog_coll=self.shard2_conn["local"]["oplog.rs"],
            is_sharded=True,
            doc_manager=doc_manager,
            oplog_progress_dict=oplog_progress,
            namespace_set=["test.mcsharded", "test.mcunsharded"],
            auth_key=None,
            auth_username=None
        )
    def test_rollback(self):
        """Test rollback in oplog_manager. Assertion failure if it doesn't pass
            We force a rollback by inserting a doc, killing primary, inserting
            another doc, killing the new primary, and then restarting both
            servers.
        """

        os.system('rm %s; touch %s' % (CONFIG, CONFIG))
        if not start_cluster(sharded=True):
            self.fail("Shards cannot be added to mongos")

        test_oplog, primary_conn, solr, mongos = self.get_new_oplog()

        solr = DocManager()
        test_oplog.doc_manager = solr
        solr._delete()  # equivalent to solr.delete(q='*:*')

        safe_mongo_op(mongos['alpha']['foo'].remove, {})
        safe_mongo_op(mongos['alpha']['foo'].insert, {
            '_id': ObjectId('4ff74db3f646462b38000001'),
            'name': 'paulie'
        })
        cutoff_ts = test_oplog.get_last_oplog_timestamp()

        obj2 = ObjectId('4ff74db3f646462b38000002')
        first_doc = {
            'name': 'paulie',
            '_ts': bson_ts_to_long(cutoff_ts),
            'ns': 'alpha.foo',
            '_id': ObjectId('4ff74db3f646462b38000001')
        }

        # try kill one, try restarting
        kill_mongo_proc(primary_conn.host, PORTS_ONE['PRIMARY'])

        new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
        admin_db = new_primary_conn['admin']
        while admin_db.command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(5)
        count = 0
        while True:
            try:
                mongos['alpha']['foo'].insert({'_id': obj2, 'name': 'paul'})
                break
            except OperationFailure:
                time.sleep(1)
                count += 1
                if count > 60:
                    self.fail('Insert failed too many times in rollback')
                continue

        kill_mongo_proc(primary_conn.host, PORTS_ONE['SECONDARY'])

        start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                         "/replset1a.log", None)

        # wait for master to be established
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)

        start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                         "/replset1b.log", None)

        # wait for secondary to be established
        admin_db = new_primary_conn['admin']
        while admin_db.command("replSetGetStatus")['myState'] != 2:
            time.sleep(1)

        while retry_until_ok(mongos['alpha']['foo'].find().count) != 1:
            time.sleep(1)

        self.assertEqual(str(new_primary_conn.port), PORTS_ONE['SECONDARY'])
        self.assertEqual(str(primary_conn.port), PORTS_ONE['PRIMARY'])

        last_ts = test_oplog.get_last_oplog_timestamp()
        second_doc = {
            'name': 'paul',
            '_ts': bson_ts_to_long(last_ts),
            'ns': 'alpha.foo',
            '_id': obj2
        }

        test_oplog.doc_manager.upsert(first_doc)
        test_oplog.doc_manager.upsert(second_doc)
        test_oplog.rollback()
        test_oplog.doc_manager.commit()
        results = solr._search()

        self.assertEqual(len(results), 1)

        results_doc = results[0]
        self.assertEqual(results_doc['name'], 'paulie')
        self.assertTrue(results_doc['_ts'] <= bson_ts_to_long(cutoff_ts))
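# The raw polling loops above (on isMaster, replSetGetStatus, and document
# counts) follow the same wait-until pattern as the assert_soon() helper used
# in the sharded setUp example. A minimal sketch of such a helper (an
# assumption, not the original implementation), polling once per second with
# a 60-second timeout:
import time

def assert_soon(condition, timeout=60):
    """Poll condition until it returns True or the timeout expires."""
    for _ in range(timeout):
        if condition():
            return
        time.sleep(1)
    raise AssertionError("condition not satisfied within %d seconds" % timeout)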
    def test_rollback(self):
        """Test rollback in oplog_manager. Assertion failure if it doesn't pass
            We force a rollback by inserting a doc, killing the primary,
            inserting another doc, killing the new primary, and then restarting
            both.
        """
        os.system('rm config.txt; touch config.txt')
        test_oplog, primary_conn, mongos, solr = self.get_new_oplog()

        if not start_cluster():
            self.fail('Cluster could not be started successfully!')

        solr = DocManager()
        test_oplog.doc_manager = solr
        solr._delete()          # equivalent to solr.delete(q='*:*')

        mongos['test']['test'].remove({})
        mongos['test']['test'].insert(
            {'_id': ObjectId('4ff74db3f646462b38000001'),
             'name': 'paulie'},
            safe=True)
        while (mongos['test']['test'].find().count() != 1):
            time.sleep(1)
        cutoff_ts = test_oplog.get_last_oplog_timestamp()

        first_doc = {'name': 'paulie', '_ts': bson_ts_to_long(cutoff_ts),
                     'ns': 'test.test',
                     '_id': ObjectId('4ff74db3f646462b38000001')}

        # try kill one, try restarting
        kill_mongo_proc(primary_conn.host, PORTS_ONE['PRIMARY'])

        new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
        admin = new_primary_conn['admin']
        while admin.command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(5)
        count = 0
        while True:
            try:
                mongos['test']['test'].insert(
                    {'_id': ObjectId('4ff74db3f646462b38000002'),
                     'name': 'paul'},
                    safe=True)
                break
            except OperationFailure:
                count += 1
                if count > 60:
                    self.fail('Call to insert doc failed too many times')
                time.sleep(1)
                continue
        while (mongos['test']['test'].find().count() != 2):
            time.sleep(1)
        kill_mongo_proc(primary_conn.host, PORTS_ONE['SECONDARY'])
        start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                         "/replset1a.log", None)

        # wait for master to be established
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)

        start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                         "/replset1b.log", None)

        # wait for secondary to be established
        admin = new_primary_conn['admin']
        while admin.command("replSetGetStatus")['myState'] != 2:
            time.sleep(1)
        while retry_until_ok(mongos['test']['test'].find().count) != 1:
            time.sleep(1)

        self.assertEqual(str(new_primary_conn.port), PORTS_ONE['SECONDARY'])
        self.assertEqual(str(primary_conn.port), PORTS_ONE['PRIMARY'])

        last_ts = test_oplog.get_last_oplog_timestamp()
        second_doc = {'name': 'paul', '_ts': bson_ts_to_long(last_ts),
                      'ns': 'test.test', 
                      '_id': ObjectId('4ff74db3f646462b38000002')}

        test_oplog.doc_manager.upsert(first_doc)
        test_oplog.doc_manager.upsert(second_doc)

        test_oplog.rollback()
        test_oplog.doc_manager.commit()
        results = solr._search()

        self.assertEqual(len(results), 1)

        self.assertEqual(results[0]['name'], 'paulie')
        self.assertTrue(results[0]['_ts'] <= bson_ts_to_long(cutoff_ts))