def test_create_collection(self):
     # Creating a collection on the primary should cause the oplog thread
     # to forward a 'create' command to the doc manager.
     self.initOplogThread()
     pymongo.collection.Collection(
         self.primary_conn['test'], 'test', create=True)
     # Poll until the doc manager has received at least one command.
     assert_soon(lambda: self.docman.commands)
     command = self.docman.commands[0]
     self.assertEqual(command['create'], 'test')
# Example #2
 def check_renamed_insert(self, target_coll):
     """Assert exactly one doc with renamed=1 reached *target_coll*.

     *target_coll* is a full namespace string, e.g. "db.collection".
     """
     target_db, target_coll = target_coll.split('.', 1)
     mongo_target = self.mongo_conn[target_db][target_coll]
     # Wait until replication has delivered at least one document.
     assert_soon(lambda: len(list(mongo_target.find({}))))
     target_docs = list(mongo_target.find({}))
     self.assertEqual(len(target_docs), 1)
     self.assertEqual(target_docs[0]["renamed"], 1)
    def test_many_targets(self):
        """Test that one OplogThread is capable of replicating to more than
        one target.
        """
        doc_managers = [DocManager(), DocManager(), DocManager()]
        self.opman.doc_managers = doc_managers

        # start replicating
        self.opman.start()
        self.primary_conn["test"]["test"].insert_one(
            {"name": "kermit", "color": "green"}
        )
        self.primary_conn["test"]["test"].insert_one(
            {"name": "elmo", "color": "firetruck red"}
        )

        # 2 documents x 3 doc managers = 6 replicated copies expected.
        assert_soon(
            lambda: sum(len(d._search()) for d in doc_managers) == 6,
            "OplogThread should be able to replicate to multiple targets",
        )

        self.primary_conn["test"]["test"].delete_one({"name": "elmo"})

        # After the delete: 1 document x 3 doc managers = 3 copies remain.
        assert_soon(
            lambda: sum(len(d._search()) for d in doc_managers) == 3,
            "OplogThread should be able to replicate to multiple targets",
        )
        for d in doc_managers:
            self.assertEqual(d._search()[0]["name"], "kermit")
    def test_skipped_oplog_entry_updates_checkpoint(self):
        """The checkpoint advances for every oplog entry, even entries on
        namespaces outside the configured namespace set.
        """
        repl_set = ReplicaSetSingle().start()
        conn = repl_set.client()
        opman = OplogThread(
            primary_client=conn,
            doc_managers=(DocManager(),),
            oplog_progress_dict=LockingDict(),
            # Only test.test is replicated; everything else is skipped.
            namespace_config=NamespaceConfig(namespace_set=["test.test"]),
        )
        opman.start()

        # Insert a document into an included collection
        conn["test"]["test"].insert_one({"test": 1})
        last_ts = opman.get_last_oplog_timestamp()
        assert_soon(
            lambda: last_ts == opman.checkpoint,
            "OplogThread never updated checkpoint to non-skipped " "entry.",
        )
        self.assertEqual(len(opman.doc_managers[0]._search()), 1)

        # Make sure that the oplog thread updates its checkpoint on every
        # oplog entry.
        conn["test"]["ignored"].insert_one({"test": 1})
        last_ts = opman.get_last_oplog_timestamp()
        assert_soon(
            lambda: last_ts == opman.checkpoint,
            "OplogThread never updated checkpoint to skipped entry.",
        )
        opman.join()
        conn.close()
        repl_set.stop()
 def test_remove(self):
     """Deleting a document from MongoDB removes it from Solr."""
     collection = self.conn['test']['test']

     def solr_count():
         # Count every document currently indexed in Solr.
         return sum(1 for _ in self.solr_conn.search("*:*"))

     collection.insert_one({'name': 'paulie'})
     assert_soon(lambda: solr_count() == 1)
     collection.delete_one({'name': 'paulie'})
     assert_soon(lambda: solr_count() == 0)
 def test_remove(self):
     """Deleting the only document brings the target count back to zero."""
     collection = self.conn['test']['test']
     collection.insert_one({'name': 'paulie'})
     assert_soon(lambda: self._count() == 1)
     collection.delete_one({'name': 'paulie'})
     # Wait until the count changes from 1, then assert it reached 0.
     assert_soon(lambda: self._count() != 1)
     self.assertEqual(self._count(), 0)
    def test_filter_fields(self):
        """Replicated documents contain only the configured include fields."""
        docman = self.opman.doc_managers[0]
        conn = self.opman.primary_client

        include_fields = ["a", "b", "c"]
        exclude_fields = ["d", "e", "f"]

        # Set fields to care about
        self.opman.fields = include_fields
        # Documents have more than just these fields
        doc = {
            "a": 1, "b": 2, "c": 3,
            "d": 4, "e": 5, "f": 6,
            "_id": 1
        }
        db = conn['test']['test']
        db.insert_one(doc)
        # NOTE(review): Collection.count() is deprecated in newer PyMongo;
        # kept as-is, presumably to match the version this suite pins.
        assert_soon(lambda: db.count() == 1)
        self.opman.dump_collection()

        result = docman._search()[0]
        keys = result.keys()
        # zip pairs the two lists positionally; only membership is checked.
        for inc, exc in zip(include_fields, exclude_fields):
            self.assertIn(inc, keys)
            self.assertNotIn(exc, keys)
 def test_remove(self):
     """Tests remove operations."""
     self.conn["test"]["test"].insert_one({"name": "paulie"})
     assert_soon(lambda: self._count() == 1)
     self.conn["test"]["test"].delete_one({"name": "paulie"})
     # Wait until the count changes from 1, then confirm the store is empty.
     assert_soon(lambda: self._count() != 1)
     self.assertEqual(self._count(), 0)
 def test_drop_collection(self):
     """Dropping a collection forwards a 'drop' command downstream."""
     self.initOplogThread()
     coll = pymongo.collection.Collection(
         self.primary_conn['test'], 'test', create=True)
     coll.drop()
     # Two commands expected: the 'create' followed by the 'drop'.
     assert_soon(lambda: len(self.docman.commands) == 2)
     self.assertEqual(self.docman.commands[1], {'drop': 'test'})
 def test_drop_database(self):
     """Dropping a database forwards a 'dropDatabase' command downstream."""
     self.initOplogThread()
     pymongo.collection.Collection(
         self.primary_conn['test'], 'test', create=True)
     self.primary_conn.drop_database('test')
     # Two commands expected: the 'create' followed by the 'dropDatabase'.
     assert_soon(lambda: len(self.docman.commands) == 2)
     self.assertEqual(self.docman.commands[1], {'dropDatabase': 1})
    def test_connector_minimum_privileges(self):
        """Test the Connector works with a user with minimum privileges."""
        if not (db_user and db_password):
            raise SkipTest("Need to set a user/password to test this.")
        client = self.repl_set.client()
        # NOTE(review): this value looks redacted by the source scraper
        # ("******") — confirm the intended username against the upstream
        # test suite before relying on this fragment.
        minimum_user = "******"
        minimum_pwd = "password"
        # Read-only roles on the replicated dbs plus 'local' (oplog access).
        client.admin.add_user(
            minimum_user,
            minimum_pwd,
            roles=[
                {"role": "read", "db": "test"},
                {"role": "read", "db": "wildcard"},
                {"role": "read", "db": "local"},
            ],
        )

        # Only test.test and wildcard.* appear in the namespace options below;
        # the other inserts should not be replicated.
        client.test.test.insert_one({"replicated": 1})
        client.test.ignored.insert_one({"replicated": 0})
        client.ignored.ignored.insert_one({"replicated": 0})
        client.wildcard.test.insert_one({"replicated": 1})
        conn = Connector(
            mongo_address=self.repl_set.primary.uri,
            auth_username=minimum_user,
            auth_key=minimum_pwd,
            namespace_options={"test.test": True, "wildcard.*": True},
        )
        conn.start()
        try:
            # Passes once the doc manager has received any document.
            assert_soon(conn.doc_managers[0]._search)
        finally:
            conn.join()
 def test_drop_collection(self):
     """Dropping a collection forwards a 'drop' command downstream."""
     self.initOplogThread()
     database = self.primary_conn["test"]
     collection = pymongo.collection.Collection(database, "test", create=True)
     collection.drop()
     # Two commands expected: the 'create' followed by the 'drop'.
     assert_soon(lambda: len(self.docman.commands) == 2)
     drop_command = self.docman.commands[1]
     self.assertEqual(drop_command, {"drop": "test"})
# Example #13
 def test_drop_database_renamed(self):
     """Test the dropDatabase command on a renamed database."""
     # Both source collections are mapped into different target databases.
     self.create_renamed_collection("rename.me", "new.target")
     self.create_renamed_collection("rename.me2", "new2.target2")
     # test that drop database removes target databases
     self.conn.drop_database("rename")
     assert_soon(lambda: "new" not in self.mongo_conn.database_names())
     assert_soon(lambda: "new2" not in self.mongo_conn.database_names())
 def test_remove_file(self):
     """Tests removing a gridfs file
     """
     fs = GridFS(self.conn['test'], 'test')
     # put() returns the file's _id, used below to delete it.
     id = fs.put("test file", filename="test.txt", encoding='utf8')
     assert_soon(lambda: sum(1 for _ in self.solr_conn.search("*:*")) == 1)
     fs.delete(id)
     assert_soon(lambda: sum(1 for _ in self.solr_conn.search("*:*")) == 0)
 def test_rename_collection(self):
     """Renaming a collection forwards a 'renameCollection' command."""
     self.initOplogThread()
     coll = pymongo.collection.Collection(
         self.primary_conn["test"], "test", create=True
     )
     coll.rename("test2")
     # Two commands expected: 'create' then 'renameCollection'.
     assert_soon(lambda: len(self.docman.commands) == 2)
     self.assertEqual(self.docman.commands[1].get("renameCollection"), "test.test")
     self.assertEqual(self.docman.commands[1].get("to"), "test.test2")
 def test_rename_collection_renamed(self):
     """Test the renameCollection command on a renamed collection to a
     renamed collection.
     """
     # 'rename.me' is replicated to 'new.target' in the destination.
     self.create_renamed_collection("rename.me", "new.target")
     self.conn.admin.command("renameCollection", "rename.me", to="rename.me2")
     # In the target, 'new.target' should be renamed to 'new2.target2'
     assert_soon(lambda: "target" not in self.mongo_conn.new.collection_names())
     self.check_renamed_insert("new2.target2")
 def test_bad_int_value(self):
     """Non-finite floats (inf/NaN) are dropped; other fields survive."""
     doc = {"inf": float("inf"), "nan": float("nan"), "still_exists": True}
     self.conn.test.test.insert_one(doc)
     assert_soon(lambda: self._count() > 0)
     for replicated in self._search():
         self.assertNotIn("inf", replicated)
         self.assertNotIn("nan", replicated)
         self.assertTrue(replicated["still_exists"])
 def check_update(update_spec):
     """Apply *update_spec* with findAndModify and assert it replicates."""
     # 'new': True makes findAndModify return the post-update document.
     updated = self.conn.test.command(
         SON([('findAndModify', 'test'),
              ('query', {"a": 0}),
              ('update', update_spec),
              ('new', True)]))['value']
     # Stringify _id to match what will be retrieved from ES
     updated['_id'] = str(updated['_id'])
     assert_soon(lambda: next(self._search()) == updated)
 def test_drop_collection_renamed(self):
     """Test the drop collection command on a renamed collection."""
     self.create_renamed_collection("rename.me", "new.target")
     self.create_renamed_collection("rename.me2", "new2.target2")
     # test that drop collection removes target collection
     self.conn.rename.drop_collection("me")
     assert_soon(lambda: "target" not in self.mongo_conn.new.collection_names())
     # Dropping the second source removes its renamed target as well.
     self.conn.rename.drop_collection("me2")
     assert_soon(lambda: "target2" not in self.mongo_conn.new2.collection_names())
    def test_remove(self):
        """Removing the only document empties the target system."""

        def doc_count():
            # Count documents currently visible in the target.
            return sum(1 for _ in self._search())

        self.conn["test"]["test"].insert_one({"name": "paulie"})
        assert_soon(lambda: doc_count() == 1)
        self.conn["test"]["test"].delete_one({"name": "paulie"})
        assert_soon(lambda: doc_count() != 1)
        self.assertEqual(doc_count(), 0)
    def test_remove(self):
        """Tests remove
        """

        self.conn['test']['test'].insert_one({'name': 'paulie'})
        assert_soon(lambda: sum(1 for _ in self._search()) == 1)
        self.conn['test']['test'].delete_one({'name': 'paulie'})
        # Wait until the count changes from 1, then assert it reached zero.
        assert_soon(lambda: sum(1 for _ in self._search()) != 1)
        self.assertEqual(sum(1 for _ in self._search()), 0)
 def test_bad_int_value(self):
     """inf/NaN fields are dropped during replication; others survive."""
     self.conn.test.test.insert_one({
         'inf': float('inf'), 'nan': float('nan'),
         'still_exists': True})
     assert_soon(lambda: self._count() > 0)
     for doc in self._search():
         self.assertNotIn('inf', doc)
         self.assertNotIn('nan', doc)
         self.assertTrue(doc['still_exists'])
 def test_insert(self):
     """An inserted document is replicated with a stringified _id."""
     collection = self.conn['test']['test']
     collection.insert_one({'name': 'paulie'})
     assert_soon(lambda: self._count() > 0)
     replicated = list(self._search())
     self.assertEqual(len(replicated), 1)
     source_doc = collection.find_one()
     for item in replicated:
         # The target stores _id as a string (compare against str(...)).
         self.assertEqual(item['_id'], str(source_doc['_id']))
         self.assertEqual(item['name'], source_doc['name'])
 def test_insert(self):
     """Test insert operations."""
     self.conn["test"]["test"].insert_one({"name": "paulie"})
     assert_soon(lambda: self._count() > 0)
     # list() materializes the results so they can be counted and iterated.
     result_set_1 = list(self._search())
     self.assertEqual(len(result_set_1), 1)
     result_set_2 = self.conn["test"]["test"].find_one()
     for item in result_set_1:
         # The target stores _id as a string (compare against str(...)).
         self.assertEqual(item["_id"], str(result_set_2["_id"]))
         self.assertEqual(item["name"], result_set_2["name"])
    def test_create_collection_skipped(self):
        """Only commands on namespaces in the include set are replicated."""
        self.initOplogThread(["test.test"])

        # test2.test2 is outside the namespace set and should be skipped.
        pymongo.collection.Collection(self.primary_conn["test2"], "test2", create=True)
        pymongo.collection.Collection(self.primary_conn["test"], "test", create=True)

        assert_soon(lambda: self.docman.commands)
        self.assertEqual(len(self.docman.commands), 1)
        command = self.docman.commands[0]
        self.assertEqual(command["create"], "test")
    def test_update(self):
        """Test update operations."""
        # Insert
        self.conn.test.test.insert_one({"a": 0})
        assert_soon(lambda: sum(1 for _ in self._search()) == 1)

        def check_update(update_spec):
            # findAndModify with 'new': True returns the post-update
            # document, which the replicated copy must eventually equal.
            updated = self.conn.test.command(
                SON(
                    [
                        ("findAndModify", "test"),
                        ("query", {"a": 0}),
                        ("update", update_spec),
                        ("new", True),
                    ]
                )
            )["value"]

            def update_worked():
                replicated = self.mongo_doc.mongo.test.test.find_one({"a": 0})
                return replicated == updated

            # Allow some time for update to propagate
            assert_soon(update_worked)

        # Update by adding a field
        check_update({"$set": {"b": [{"c": 10}, {"d": 11}]}})

        # Update by setting an attribute of a sub-document beyond end of array.
        check_update({"$set": {"b.10.c": 42}})

        # Update by un-setting an array element.
        check_update({"$unset": {"b.10": True}})

        # Update by un-setting a non-existent attribute.
        check_update({"$unset": {"not-present": True}})

        # Update by changing a value within a sub-document (contains array)
        check_update({"$inc": {"b.0.c": 1}})

        # Update by changing the value within an array
        check_update({"$inc": {"b.1.f": 12}})

        # Update by adding new bucket to list
        check_update({"$push": {"b": {"e": 12}}})

        # Update by changing an entire sub-document
        check_update({"$set": {"b.0": {"e": 4}}})

        # Update by adding a sub-document
        check_update({"$set": {"b": {"0": {"c": 100}}}})

        # Update whole document
        check_update({"a": 0, "b": {"1": {"d": 10000}}})
# Example #27
 def test_bad_int_value(self):
     """inf/NaN fields are dropped during replication; others survive."""
     self.conn.test.test.insert_one({
         "inf": float("inf"),
         "nan": float("nan"),
         "still_exists": True
     })
     assert_soon(lambda: self._count() > 0)
     for doc in self._search():
         self.assertNotIn("inf", doc)
         self.assertNotIn("nan", doc)
         self.assertTrue(doc["still_exists"])
    def test_create_collection_skipped(self):
        """Commands on namespaces outside the include set are skipped."""
        self.initOplogThread(['test.test'])

        # test2.test2 is not in the namespace set and should be ignored.
        pymongo.collection.Collection(
            self.primary_conn['test2'], 'test2', create=True)
        pymongo.collection.Collection(
            self.primary_conn['test'], 'test', create=True)

        assert_soon(lambda: self.docman.commands)
        self.assertEqual(len(self.docman.commands), 1)
        self.assertEqual(self.docman.commands[0], {'create': 'test'})
 def test_bad_int_value(self):
     """inf/NaN fields are dropped during replication; others survive."""
     self.conn.test.test.insert_one({
         'inf': float('inf'),
         'nan': float('nan'),
         'still_exists': True
     })
     assert_soon(lambda: self._count() > 0)
     for doc in self._search():
         self.assertNotIn('inf', doc)
         self.assertNotIn('nan', doc)
         self.assertTrue(doc['still_exists'])
# Example #30
 def test_drop_collection_renamed(self):
     """Test the drop collection command on a renamed collection."""
     self.create_renamed_collection("rename.me", "new.target")
     self.create_renamed_collection("rename.me2", "new2.target2")
     # test that drop collection removes target collection
     self.conn.rename.drop_collection("me")
     assert_soon(
         lambda: "target" not in self.mongo_conn.new.collection_names())
     # Dropping the second source removes its renamed target as well.
     self.conn.rename.drop_collection("me2")
     assert_soon(
         lambda: "target2" not in self.mongo_conn.new2.collection_names())
# Example #31
    def test_insert(self):
        """Tests insert
        """

        self.conn['test']['test'].insert_one({'name': 'paulie'})
        assert_soon(lambda: sum(1 for _ in self._search()) == 1)
        # Materialize the results: if _search() returns a one-shot iterator,
        # counting it would exhaust it and the verification loop below would
        # silently never run.
        result_set_1 = list(self._search())
        self.assertEqual(len(result_set_1), 1)
        result_set_2 = self.conn['test']['test'].find_one()
        for item in result_set_1:
            self.assertEqual(item['_id'], result_set_2['_id'])
            self.assertEqual(item['name'], result_set_2['name'])
    def test_with_chunk_migration(self):
        """Test that DocManagers have proper state after both a successful
        and an unsuccessful chunk migration
        """

        # Start replicating to dummy doc managers
        self.opman1.start()
        self.opman2.start()

        collection = self.mongos_conn["test"]["mcsharded"]
        for i in range(1000):
            collection.insert_one({"i": i + 500})
        # Assert current state of the mongoverse
        self.assertEqual(self.shard1_conn["test"]["mcsharded"].find().count(),
                         500)
        self.assertEqual(self.shard2_conn["test"]["mcsharded"].find().count(),
                         500)
        assert_soon(lambda: len(self.opman1.doc_managers[0]._search()) == 1000)

        # Test successful chunk move from shard 1 to shard 2
        self.mongos_conn["admin"].command(
            "moveChunk",
            "test.mcsharded",
            find={"i": 1},
            to="demo-set-1"
        )

        # doc manager should still have all docs
        all_docs = self.opman1.doc_managers[0]._search()
        self.assertEqual(len(all_docs), 1000)
        # Documents were inserted with i in [500, 1500).
        for i, doc in enumerate(sorted(all_docs, key=lambda x: x["i"])):
            self.assertEqual(doc["i"], i + 500)

        # Mark the collection as "dropped". This will cause migration to fail.
        self.mongos_conn["config"]["collections"].update_one(
            {"_id": "test.mcsharded"},
            {"$set": {"dropped": True}}
        )

        # Test unsuccessful chunk move from shard 2 to shard 1
        def fail_to_move_chunk():
            self.mongos_conn["admin"].command(
                "moveChunk",
                "test.mcsharded",
                find={"i": 1},
                to="demo-set-0"
            )
        self.assertRaises(pymongo.errors.OperationFailure, fail_to_move_chunk)
        # doc manager should still have all docs
        all_docs = self.opman1.doc_managers[0]._search()
        self.assertEqual(len(all_docs), 1000)
        for i, doc in enumerate(sorted(all_docs, key=lambda x: x["i"])):
            self.assertEqual(doc["i"], i + 500)
    def test_insert(self):
        """Tests insert
        """

        self.conn['test']['test'].insert_one({'name': 'paulie'})
        assert_soon(lambda: sum(1 for _ in self._search()) == 1)
        # Materialize the results: if _search() returns a one-shot iterator,
        # counting it would exhaust it and the verification loop below would
        # silently never run.
        result_set_1 = list(self._search())
        self.assertEqual(len(result_set_1), 1)
        result_set_2 = self.conn['test']['test'].find_one()
        for item in result_set_1:
            self.assertEqual(item['_id'], result_set_2['_id'])
            self.assertEqual(item['name'], result_set_2['name'])
 def test_rename_collection(self):
     """Renaming a collection forwards a 'renameCollection' command."""
     self.initOplogThread()
     coll = pymongo.collection.Collection(
         self.primary_conn['test'], 'test', create=True)
     coll.rename('test2')
     # Two commands expected: 'create' then 'renameCollection'.
     assert_soon(lambda: len(self.docman.commands) == 2)
     self.assertEqual(
         self.docman.commands[1].get('renameCollection'),
         'test.test')
     self.assertEqual(
         self.docman.commands[1].get('to'),
         'test.test2')
# Example #35
        def check_update(update_spec):
            # findAndModify with 'new': True returns the post-update
            # document, which the replicated copy must eventually equal.
            updated = self.conn.test.command(
                SON([('findAndModify', 'test'), ('query', {
                    "a": 0
                }), ('update', update_spec), ('new', True)]))['value']

            def update_worked():
                replicated = self.mongo_doc.mongo.test.test.find_one({"a": 0})
                return replicated == updated

            # Allow some time for update to propagate
            assert_soon(update_worked)
# Example #36
    def test_update(self):
        """Test update operations."""
        # Insert
        self.conn.test.test.insert_one({"a": 0})
        assert_soon(lambda: sum(1 for _ in self._search()) == 1)

        def check_update(update_spec):
            # findAndModify with 'new': True returns the post-update
            # document, which the replicated copy must eventually equal.
            updated = self.conn.test.command(
                SON([
                    ("findAndModify", "test"),
                    ("query", {
                        "a": 0
                    }),
                    ("update", update_spec),
                    ("new", True),
                ]))["value"]

            def update_worked():
                replicated = self.mongo_doc.mongo.test.test.find_one({"a": 0})
                return replicated == updated

            # Allow some time for update to propagate
            assert_soon(update_worked)

        # Update by adding a field
        check_update({"$set": {"b": [{"c": 10}, {"d": 11}]}})

        # Update by setting an attribute of a sub-document beyond end of array.
        check_update({"$set": {"b.10.c": 42}})

        # Update by un-setting an array element.
        check_update({"$unset": {"b.10": True}})

        # Update by un-setting a non-existent attribute.
        check_update({"$unset": {"not-present": True}})

        # Update by changing a value within a sub-document (contains array)
        check_update({"$inc": {"b.0.c": 1}})

        # Update by changing the value within an array
        check_update({"$inc": {"b.1.f": 12}})

        # Update by adding new bucket to list
        check_update({"$push": {"b": {"e": 12}}})

        # Update by changing an entire sub-document
        check_update({"$set": {"b.0": {"e": 4}}})

        # Update by adding a sub-document
        check_update({"$set": {"b": {"0": {"c": 100}}}})

        # Update whole document
        check_update({"a": 0, "b": {"1": {"d": 10000}}})
# Example #37
 def test_rename_collection_renamed(self):
     """Test the renameCollection command on a renamed collection to a
     renamed collection.
     """
     # 'rename.me' is replicated to 'new.target' in the destination.
     self.create_renamed_collection("rename.me", "new.target")
     self.conn.admin.command("renameCollection",
                             "rename.me",
                             to="rename.me2")
     # In the target, 'new.target' should be renamed to 'new2.target2'
     assert_soon(
         lambda: "target" not in self.mongo_conn.new.collection_names())
     self.check_renamed_insert("new2.target2")
    def test_with_chunk_migration(self):
        """Test that DocManagers have proper state after both a successful
        and an unsuccessful chunk migration
        """

        # Start replicating to dummy doc managers
        self.opman1.start()
        self.opman2.start()

        collection = self.mongos_conn["test"]["mcsharded"]
        for i in range(1000):
            collection.insert_one({"i": i + 500})
        # Assert current state of the mongoverse
        self.assertEqual(self.shard1_conn["test"]["mcsharded"].find().count(),
                         500)
        self.assertEqual(self.shard2_conn["test"]["mcsharded"].find().count(),
                         500)
        assert_soon(lambda: len(self.opman1.doc_managers[0]._search()) == 1000)

        # Test successful chunk move from shard 1 to shard 2
        self.mongos_conn["admin"].command(
            "moveChunk",
            "test.mcsharded",
            find={"i": 1},
            to="demo-set-1"
        )

        # doc manager should still have all docs
        all_docs = self.opman1.doc_managers[0]._search()
        self.assertEqual(len(all_docs), 1000)
        # Documents were inserted with i in [500, 1500).
        for i, doc in enumerate(sorted(all_docs, key=lambda x: x["i"])):
            self.assertEqual(doc["i"], i + 500)

        # Mark the collection as "dropped". This will cause migration to fail.
        self.mongos_conn["config"]["collections"].update_one(
            {"_id": "test.mcsharded"},
            {"$set": {"dropped": True}}
        )

        # Test unsuccessful chunk move from shard 2 to shard 1
        def fail_to_move_chunk():
            self.mongos_conn["admin"].command(
                "moveChunk",
                "test.mcsharded",
                find={"i": 1},
                to="demo-set-0"
            )
        self.assertRaises(pymongo.errors.OperationFailure, fail_to_move_chunk)
        # doc manager should still have all docs
        all_docs = self.opman1.doc_managers[0]._search()
        self.assertEqual(len(all_docs), 1000)
        for i, doc in enumerate(sorted(all_docs, key=lambda x: x["i"])):
            self.assertEqual(doc["i"], i + 500)
# Example #39
    def test_insert(self):
        """Tests insert
        """

        self.conn['test']['test'].insert_one({'name': 'paulie'})
        assert_soon(lambda: sum(1 for _ in self.solr_conn.search('*:*')) > 0)
        result_set_1 = list(self.solr_conn.search('name:paulie'))
        self.assertEqual(len(result_set_1), 1)
        result_set_2 = self.conn['test']['test'].find_one()
        for item in result_set_1:
            # Solr stores _id as a string (compare against str(...)).
            self.assertEqual(item['_id'], str(result_set_2['_id']))
            self.assertEqual(item['name'], result_set_2['name'])
# Example #40
 def test_drop_database_renamed(self):
     """Test the dropDatabase command on a renamed database."""
     # NOTE(review): 'desination_version' (sic) presumably matches the
     # attribute name defined by the fixture — verify before renaming.
     # Skip on destination versions affected by SERVER-13212.
     if not self.desination_version.at_least(3, 0, 7) or (
             self.desination_version.at_least(3, 1)
             and not self.desination_version.at_least(3, 1, 9)):
         raise SkipTest("This test fails often because of SERVER-13212")
     self.create_renamed_collection("rename.me", "new.target")
     self.create_renamed_collection("rename.me2", "new2.target2")
     # test that drop database removes target databases
     self.conn.drop_database("rename")
     assert_soon(lambda: "new" not in self.mongo_conn.database_names())
     assert_soon(lambda: "new2" not in self.mongo_conn.database_names())
# Example #41
    def test_insert(self):
        """Tests insert
        """

        self.conn["test"]["test"].insert_one({"name": "paulie"})
        assert_soon(lambda: sum(1 for _ in self._search()) == 1)
        # Materialize the results: if _search() returns a one-shot iterator,
        # counting it would exhaust it and the verification loop below would
        # silently never run.
        result_set_1 = list(self._search())
        self.assertEqual(len(result_set_1), 1)
        result_set_2 = self.conn["test"]["test"].find_one()
        for item in result_set_1:
            self.assertEqual(item["_id"], result_set_2["_id"])
            self.assertEqual(item["name"], result_set_2["name"])
# Example #42
    def test_create_collection_skipped(self):
        """Commands on namespaces outside the include set are skipped."""
        self.initOplogThread(['test.test'])

        # test2.test2 is not in the namespace set and should be ignored.
        pymongo.collection.Collection(self.primary_conn['test2'],
                                      'test2',
                                      create=True)
        pymongo.collection.Collection(self.primary_conn['test'],
                                      'test',
                                      create=True)

        assert_soon(lambda: self.docman.commands)
        self.assertEqual(len(self.docman.commands), 1)
        self.assertEqual(self.docman.commands[0], {'create': 'test'})
# Example #43
 def check_update(update_spec):
     """Apply *update_spec* with findAndModify and assert it replicates."""
     # 'new': True makes findAndModify return the post-update document.
     updated = self.conn.test.command(
         SON([
             ("findAndModify", "test"),
             ("query", {
                 "a": 0
             }),
             ("update", update_spec),
             ("new", True),
         ]))["value"]
     # Stringify _id to match what will be retrieved from ES
     updated["_id"] = str(updated["_id"])
     assert_soon(lambda: next(self._search()) == updated)
# Example #44
 def setUpClass(cls):
     """Start a sharded cluster and a Connector replicating from it.

     NOTE(review): presumably decorated with @classmethod outside this
     view — confirm against the enclosing class.
     """
     if db_user and db_password:
         auth_args = dict(auth_username=db_user, auth_key=db_password)
     else:
         auth_args = {}
     cls.cluster = ShardedClusterSingle().start()
     # primaryPreferred lets reads continue while a primary steps down.
     cls.main_uri = cls.cluster.uri + '/?readPreference=primaryPreferred'
     cls.dm = DocManager()
     cls.connector = Connector(mongo_address=cls.main_uri,
                               doc_managers=[cls.dm],
                               **auth_args)
     cls.connector.start()
     assert_soon(lambda: len(cls.connector.shard_set) == 2,
                 message='connector failed to find both shards!')
# Example #45
    def test_insert_file(self):
        """Tests inserting a gridfs file
        """
        fs = GridFS(self.conn["test"], "test")
        test_data = b"test_insert_file test file"
        # put() returns the file's _id.
        id = fs.put(test_data, filename="test.txt", encoding="utf8")
        assert_soon(lambda: sum(1 for _ in self._search()) > 0)

        res = list(self._search())
        self.assertEqual(len(res), 1)
        doc = res[0]
        self.assertEqual(doc["filename"], "test.txt")
        self.assertEqual(doc["_id"], id)
        self.assertEqual(doc["content"], test_data)
# Example #46
    def test_insert_file(self):
        """Tests inserting a gridfs file
        """
        fs = GridFS(self.conn['test'], 'test')
        test_data = b"test_insert_file test file"
        # put() returns the file's _id.
        id = fs.put(test_data, filename="test.txt", encoding='utf8')
        assert_soon(lambda: sum(1 for _ in self._search()) > 0)

        res = list(self._search())
        self.assertEqual(len(res), 1)
        doc = res[0]
        self.assertEqual(doc['filename'], 'test.txt')
        self.assertEqual(doc['_id'], id)
        self.assertEqual(doc['content'], test_data)
    def test_create_collection_skipped(self):
        """Only commands on namespaces in the include set are replicated."""
        self.initOplogThread(["test.test"])

        # test2.test2 is outside the namespace set and should be skipped.
        pymongo.collection.Collection(self.primary_conn["test2"],
                                      "test2",
                                      create=True)
        pymongo.collection.Collection(self.primary_conn["test"],
                                      "test",
                                      create=True)

        assert_soon(lambda: self.docman.commands)
        self.assertEqual(len(self.docman.commands), 1)
        command = self.docman.commands[0]
        self.assertEqual(command["create"], "test")
# Example #48
    def test_nested_fields(self):
        """Test indexing fields that are sub-documents in MongoDB

        The following fields are defined in the provided schema.xml:

        <field name="billing.address.street" type="string" ... />
        <field name="billing.address.state" type="string" ... />
        <dynamicField name="numbers.*" type="string" ... />
        <dynamicField name="characters.*" type="string" ... />

        """

        # Connector is already running
        self.conn["test"]["test"].insert_one({
            "name": "Jeb",
            "billing": {
                "address": {
                    "street": "12345 Mariposa Street",
                    "state": "California"
                }
            }
        })
        self.conn["test"]["test"].insert_one({
            "numbers": ["one", "two", "three"],
            "characters": [{
                "name": "Big Bird",
                "color": "yellow"
            }, {
                "name": "Elmo",
                "color": "red"
            }, "Cookie Monster"]
        })

        assert_soon(lambda: sum(1 for _ in self.solr_conn.search("*:*")) > 0,
                    "documents should have been replicated to Solr")

        # Search for first document.  Raw strings keep the backslash-escaped
        # spaces Solr requires without triggering Python's invalid-escape
        # warning ("\ " is not a recognized escape sequence); the runtime
        # value is unchanged.
        results = self.solr_conn.search(
            r"billing.address.street:12345\ Mariposa\ Street")
        self.assertEqual(len(results), 1)
        self.assertEqual(
            next(iter(results))["billing.address.state"], "California")

        # Search for second document
        results = self.solr_conn.search("characters.1.color:red")
        self.assertEqual(len(results), 1)
        self.assertEqual(next(iter(results))["numbers.2"], "three")
        results = self.solr_conn.search(r"characters.2:Cookie\ Monster")
        self.assertEqual(len(results), 1)
# Example #49
    def test_insert_file(self):
        """Tests inserting a gridfs file
        """
        fs = GridFS(self.conn["test"], "test")
        test_data = b"test_insert_file test file"
        # put() returns the file's _id.
        id = fs.put(test_data, filename="test.txt", encoding="utf8")
        assert_soon(lambda: self._count() > 0)

        # Full-text match against the extracted attachment content.
        query = {"match": {"attachment.content": "test_insert_file"}}
        res = list(self._search(query))
        self.assertEqual(len(res), 1)
        doc = res[0]
        self.assertEqual(doc["filename"], "test.txt")
        # The target stores _id as a string and content base64-encoded,
        # per the assertions below.
        self.assertEqual(doc["_id"], str(id))
        self.assertEqual(base64.b64decode(doc["content"]), test_data)
Exemple #50
0
    def test_start_with_auth(self):
        """The connector should replicate from an auth-enabled cluster
        without raising an authentication failure.
        """
        doc_manager = DocManager()
        connector = Connector(
            mongo_address=self.cluster.uri,
            doc_managers=[doc_manager],
            auth_username=db_user,
            auth_key=db_password,
        )
        connector.start()

        # Documents inserted into the sharded cluster should reach the
        # DocManager; an auth failure would prevent replication.
        self.cluster.client().test.test.insert_one({'auth_failure': False})
        assert_soon(lambda: len(doc_manager._search()) > 0)

        connector.join()
    def test_insert_file(self):
        """Tests inserting a gridfs file.

        The file's metadata and base64-encoded content should be
        replicated and searchable via the ``_all`` field.
        """
        fs = GridFS(self.conn['test'], 'test')
        test_data = b"test_insert_file test file"
        # ``file_id`` instead of ``id`` to avoid shadowing the builtin.
        file_id = fs.put(test_data, filename="test.txt", encoding='utf8')
        assert_soon(lambda: self._count() > 0)

        query = {"match": {"_all": "test_insert_file"}}
        res = list(self._search(query))
        self.assertEqual(len(res), 1)
        doc = res[0]
        self.assertEqual(doc['filename'], 'test.txt')
        self.assertEqual(doc['_id'], str(file_id))
        self.assertEqual(base64.b64decode(doc['content']), test_data)
    def test_rollback(self):
        """Test behavior during a MongoDB rollback.

        We force a rollback by adding a doc, killing the primary,
        adding another doc, killing the new primary, and then
        restarting both.
        """
        primary_conn = self.repl_set.primary.client()

        # This doc can be picked up in the collection dump
        self.conn['test']['test'].insert_one({'name': 'paul'})

        # PEP 8 (E731): use named functions rather than assigning lambdas.
        def paul_in_mongo():
            return self.conn['test']['test'].find(
                {'name': 'paul'}).count() == 1

        def paul_in_target():
            return self._count() == 1

        assert_soon(paul_in_mongo)
        assert_soon(paul_in_target)

        # This doc is definitely not picked up by collection dump
        self.conn['test']['test'].insert_one({'name': 'pauly'})

        self.repl_set.primary.stop(destroy=False)

        new_primary_conn = self.repl_set.secondary.client()

        # Wait for the secondary to be elected primary.
        admin = new_primary_conn['admin']
        assert_soon(lambda: admin.command("isMaster")['ismaster'])
        time.sleep(5)
        retry_until_ok(self.conn.test.test.insert_one,
                       {'name': 'pauline'})
        assert_soon(lambda: self._count() == 3)
        result_set_1 = list(self._search())
        result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
        self.assertEqual(len(result_set_1), 3)
        # make sure pauline is there
        for item in result_set_1:
            if item['name'] == 'pauline':
                self.assertEqual(item['_id'], str(result_set_2['_id']))
        self.repl_set.secondary.stop(destroy=False)

        self.repl_set.primary.start()
        # Wait for the original primary to be re-elected.
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)

        self.repl_set.secondary.start()

        # Allow the rollback to propagate to the target system.
        time.sleep(2)
        result_set_1 = list(self._search())
        self.assertEqual(len(result_set_1), 2)

        if result_set_1[0]['name'] == 'paul':
            self.assertEqual(result_set_1[1]['name'], 'pauly')
        elif result_set_1[0]['name'] == 'pauly':
            self.assertEqual(result_set_1[1]['name'], 'paul')
        else:
            # fail() states intent directly; assertTrue(0, msg) was an
            # indirect way to force a failure.
            self.fail('Unknown document retrieved')

        find_cursor = retry_until_ok(self.conn['test']['test'].find)
        self.assertEqual(retry_until_ok(find_cursor.count), 2)
Exemple #53
0
    def test_valid_fields(self):
        """ Tests documents with field definitions
        """
        # Insert a doc, then set a second declared field via $set.
        doc_id = self.conn['test']['test'].insert_one(
            {'name': 'test_valid'}).inserted_id
        self.conn['test']['test'].update_one(
            {'_id': doc_id}, {'$set': {'popularity': 1}})

        docman = self.connector.doc_managers[0]
        assert_soon(lambda: sum(1 for _ in self._search("*:*")) > 0)
        last_doc = docman.get_last_doc()
        self.assertIn('popularity', last_doc)
        self.assertEqual(sum(1 for _ in self._search("name:test_valid")), 1)
Exemple #54
0
    def test_connector(self):
        """Test whether the connector initiates properly
        """
        connector = Connector(mongo_address=self.repl_set.uri,
                              **connector_opts)
        connector.start()
        assert_soon(lambda: bool(connector.shard_set))

        # get_mininum_mongodb_version should report the running server's
        # version once the connector has started.
        server_version = Version.from_client(self.repl_set.client())
        self.assertEqual(server_version, get_mininum_mongodb_version())

        connector.join()

        # After join() the connector and each of its oplog threads
        # must be shut down.
        self.assertFalse(connector.can_run)
        for oplog_thread in connector.shard_set.values():
            self.assertFalse(oplog_thread.running)
Exemple #55
0
    def setUp(self):
        # Remove any leftover checkpoint file so the connector starts
        # from a clean oplog position; ignore "file not found".
        try:
            os.unlink("oplog.timestamp")
        except OSError:
            pass
        self._remove()
        # Replicate only the 'test.test' namespace, including its GridFS
        # files, into the single mongo doc manager.
        self.connector = Connector(mongo_address=self.repl_set.uri,
                                   ns_set=['test.test'],
                                   doc_managers=(self.mongo_doc, ),
                                   gridfs_set=['test.test'],
                                   **connector_opts)

        self.conn.drop_database('test')

        self.connector.start()
        # Wait until the connector has spawned its oplog thread(s) and
        # the target starts out empty.
        assert_soon(lambda: len(self.connector.shard_set) > 0)
        assert_soon(lambda: sum(1 for _ in self._search()) == 0)
    def setUpClass(cls):
        """ Initializes the cluster
        """
        # Start from a fresh (empty) oplog checkpoint file; ignore a
        # missing file on unlink.
        try:
            os.unlink("oplog.timestamp")
        except OSError:
            pass
        open("oplog.timestamp", "w").close()

        # Bring up a replica set and point the connector at it,
        # replicating only the 'test.test' namespace.
        cls.repl_set = ReplicaSet().start()
        cls.conn = cls.repl_set.client()
        cls.connector = Connector(mongo_address=cls.repl_set.uri,
                                  ns_set=['test.test'],
                                  **connector_opts)
        cls.synchronizer = cls.connector.doc_managers[0]
        cls.connector.start()
        # Wait until the connector has created its oplog thread(s).
        assert_soon(lambda: len(cls.connector.shard_set) != 0)
Exemple #57
0
        def check_update(update_spec):
            """Apply ``update_spec`` with findAndModify and wait until the
            replicated copy matches the freshly updated document.
            """
            # findAndModify with new=True returns the post-update document.
            updated = self.conn.test.command(
                SON([
                    ("findAndModify", "test"),
                    ("query", {
                        "a": 0
                    }),
                    ("update", update_spec),
                    ("new", True),
                ]))["value"]

            def update_worked():
                # True once the target's copy equals the updated source doc.
                replicated = self.mongo_doc.mongo.test.test.find_one({"a": 0})
                return replicated == updated

            # Allow some time for update to propagate
            assert_soon(update_worked)
Exemple #58
0
    def test_rollback(self):
        """Tests rollback. We force a rollback by adding a doc, killing the
        primary, adding another doc, killing the new primary, and then
        restarting both.
        """
        primary_conn = self.repl_set.primary.client()
        self.conn["test"]["test"].insert_one({"name": "paul"})

        # PEP 8 (E731): use a named function instead of assigning a lambda.
        def paul_inserted():
            return self.conn["test"]["test"].find_one(
                {"name": "paul"}) is not None

        assert_soon(paul_inserted)
        assert_soon(lambda: sum(1 for _ in self._search()) == 1)

        self.repl_set.primary.stop(destroy=False)
        new_primary_conn = self.repl_set.secondary.client()
        admin = new_primary_conn["admin"]

        # Distinct name: the original reused ``condition`` for two
        # different predicates, shadowing the first.
        def secondary_elected():
            return admin.command("isMaster")["ismaster"]

        assert_soon(lambda: retry_until_ok(secondary_elected))

        retry_until_ok(self.conn.test.test.insert_one, {"name": "pauline"})
        assert_soon(lambda: sum(1 for _ in self._search()) == 2)
        result_set_1 = list(self._search())
        result_set_2 = self.conn["test"]["test"].find_one({"name": "pauline"})
        self.assertEqual(len(result_set_1), 2)
        # make sure pauline is there
        for item in result_set_1:
            if item["name"] == "pauline":
                self.assertEqual(item["_id"], result_set_2["_id"])
        self.repl_set.secondary.stop(destroy=False)

        self.repl_set.primary.start()
        # Wait for the original primary to be re-elected.
        assert_soon(
            lambda: primary_conn["admin"].command("isMaster")["ismaster"])

        self.repl_set.secondary.start()

        # Allow the rollback to propagate to the target: only 'paul'
        # should survive.
        time.sleep(2)
        result_set_1 = list(self._search())
        self.assertEqual(len(result_set_1), 1)
        for item in result_set_1:
            self.assertEqual(item["name"], "paul")
        find_cursor = retry_until_ok(self.conn["test"]["test"].find)
        self.assertEqual(retry_until_ok(find_cursor.count), 1)
Exemple #59
0
    def test_insert_file(self):
        """Tests inserting a gridfs file.

        The file content should be extracted and searchable in Solr.
        """
        fs = GridFS(self.conn['test'], 'test')
        test_data = "test_insert_file test file"
        # ``file_id`` instead of ``id`` to avoid shadowing the builtin.
        file_id = fs.put(test_data, filename="test.txt", encoding='utf8')
        assert_soon(lambda: sum(1 for _ in self.solr_conn.search('*:*')) > 0)

        # The extracted text may be indexed under 'content' or '_text_'
        # depending on the Solr schema; try both.
        res = list(self.solr_conn.search('content:*test_insert_file*'))
        if not res:
            res = list(self.solr_conn.search('_text_:*test_insert_file*'))
        self.assertEqual(len(res), 1)
        doc = res[0]
        self.assertEqual(doc['filename'], "test.txt")
        self.assertEqual(doc['_id'], str(file_id))
        content = doc.get('content', doc.get('_text_', None))
        self.assertTrue(content)
        self.assertIn(test_data.strip(), content[0].strip())
Exemple #60
0
 def setUp(self):
     self._remove()
     try:
         os.unlink("oplog.timestamp")
     except OSError:
         pass
     open("oplog.timestamp", "w").close()
     docman = DocManager(solr_url, auto_commit_interval=0)
     self.connector = Connector(mongo_address=self.repl_set.uri,
                                ns_set=['test.test'],
                                doc_managers=(docman, ),
                                gridfs_set=['test.test'])
     retry_until_ok(self.conn.test.test.drop)
     retry_until_ok(self.conn.test.test.files.drop)
     retry_until_ok(self.conn.test.test.chunks.drop)
     self._remove()
     self.connector.start()
     assert_soon(lambda: len(self.connector.shard_set) > 0)