Example 1
 def _enable_abort(self, donor_primary_client, donor_primary_port,
                   donor_primary_rs_name):
     # Configure the failpoint to make the migration abort after it has been
     # blocking reads and writes for a randomly generated number of milliseconds
     # (< MAX_BLOCK_TIME_MILLISECS). _disable_abort must be called at the start
     # and end of each test so that each test uses its own randomly generated
     # block time.
     try:
         donor_primary_client.admin.command(
             bson.SON([
                 ("configureFailPoint",
                  "abortTenantMigrationAfterBlockingStarts"),
                 ("mode", "alwaysOn"),
                 ("data",
                  bson.SON([
                      ("blockTimeMS",
                       random.uniform(
                           0,
                           _TenantMigrationThread.MAX_BLOCK_TIME_MILLISECS))
                  ]))
             ]))
     except pymongo.errors.OperationFailure as err:
         self.logger.exception(
             "Unable to enable the failpoint to make migrations abort on donor primary on port "
             + "%d of replica set '%s'.", donor_primary_port,
             donor_primary_rs_name)
         raise errors.ServerFailure(
             "Unable to enable the failpoint to make migrations abort on donor primary on port "
             + "{} of replica set '{}': {}".format(
                 donor_primary_port, donor_primary_rs_name, err.args[0]))
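
A minimal sketch of the matching _disable_abort (assumed here, not part of the example): turning a failpoint off uses configureFailPoint with mode "off".

 def _disable_abort(self, donor_primary_client, donor_primary_port,
                    donor_primary_rs_name):
     # Assumption: mirrors _enable_abort above; mode "off" disables the
     # failpoint so the next test can generate a fresh block time.
     try:
         donor_primary_client.admin.command(
             bson.SON([
                 ("configureFailPoint",
                  "abortTenantMigrationAfterBlockingStarts"),
                 ("mode", "off"),
             ]))
     except pymongo.errors.OperationFailure as err:
         raise errors.ServerFailure(
             "Unable to disable the failpoint on donor primary on port "
             "{} of replica set '{}': {}".format(
                 donor_primary_port, donor_primary_rs_name, err.args[0]))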
Example 2
    def test_incorrect_sub_dtype4(self):
        # Sub document not a document
        bad_doc = bson.SON([("x", bson.SON([("y", 0), ("z", 0)])), ("q", 10)])

        bad_raw_docs = self.raw_docs[:9]
        bad_raw_docs.append(
            bson._dict_to_bson(bad_doc, False, bson.DEFAULT_CODEC_OPTIONS))

        with self.assertRaisesPattern(
                bsonnumpy.error,
                "invalid document: expected subdoc from dtype,"
                " got other type"):
            bsonnumpy.sequence_to_ndarray(bad_raw_docs, self.dtype_sub, 10)

        bad_doc = bson.SON([("x", bson.SON([("y", 0), ("z", 0)])),
                            ("q", [10, 11, 12])])

        bad_raw_docs = self.raw_docs[:9]
        bad_raw_docs.append(
            bson._dict_to_bson(bad_doc, False, bson.DEFAULT_CODEC_OPTIONS))

        with self.assertRaisesPattern(
                bsonnumpy.error,
                "invalid document: expected subdoc from dtype,"
                " got other type"):
            bsonnumpy.sequence_to_ndarray(bad_raw_docs, self.dtype_sub, 10)
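
These tests use a fixture built elsewhere; a hedged sketch of what that setUp might look like (the dtype and documents are assumptions inferred from the assertions, with numpy imported as np):

    def setUp(self):
        # Assumed fixture: ten documents whose 'x' and 'q' fields are both
        # sub-documents, matching a nested dtype.
        sub = np.dtype([('y', 'int32'), ('z', 'int32')])
        self.dtype_sub = np.dtype([('x', sub), ('q', sub)])
        docs = [bson.SON([('x', bson.SON([('y', i), ('z', i)])),
                          ('q', bson.SON([('y', -i), ('z', -i)]))])
                for i in range(10)]
        self.raw_docs = [
            bson._dict_to_bson(doc, False, bson.DEFAULT_CODEC_OPTIONS)
            for doc in docs]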
Example 3
    def test_array_scalar_load2(self):
        # Test sub arrays with documents as elements
        son_docs = [
            bson.SON(
                [('x', [
                    [
                        bson.SON([('a', i), ('b', i)]),
                        bson.SON([('a', -i), ('b', -i)])
                    ],
                    [
                        bson.SON([('c', i), ('d', i)]),
                        bson.SON([('c', -i), ('d', -i)])
                    ],

                ])]) for i in range(2, 4)]
        raw_docs = [bson._dict_to_bson(
            doc, False, bson.DEFAULT_CODEC_OPTIONS) for doc in son_docs]
        sub_sub_dtype = np.dtype(([('a', 'int32'), ('b', 'int32')], 2))
        sub_dtype = np.dtype((sub_sub_dtype, 2))
        dtype = np.dtype([('x', sub_dtype)])

        ndarray = np.array(
            [[([(i, i), (-i, -i)],),
              ([(i, i), (-i, -i)],)] for i in range(2, 4)], dtype)

        # The dtype matches the documents, but this nesting is not yet
        # supported, so conversion still fails.
        with self.assertRaisesPattern(bsonnumpy.error,
                                      r'unsupported BSON type: unknown'):
            bsonnumpy.sequence_to_ndarray(raw_docs, dtype, 2)
Example 4
    def _after_test_impl(self, test, test_report, description):
        sync_node = self.fixture.get_initial_sync_node()
        sync_node_conn = utils.new_mongo_client(port=sync_node.port)

        if self.use_resync:
            self.hook_test_case.logger.info(
                "Calling resync on initial sync node...")
            cmd = bson.SON([("resync", 1)])
            sync_node_conn.admin.command(cmd)
        else:
            if not sync_node.teardown():
                raise errors.ServerFailure("%s did not exit cleanly" %
                                           (sync_node))

            self.hook_test_case.logger.info(
                "Starting the initial sync node back up again...")
            sync_node.setup()
            sync_node.await_ready()

        # Do initial sync round.
        self.hook_test_case.logger.info(
            "Waiting for initial sync node to go into SECONDARY state")
        cmd = bson.SON([("replSetTest", 1),
                        ("waitForMemberState", 2),  # 2 = SECONDARY
                        ("timeoutMillis", 20 * 60 * 1000)])
        sync_node_conn.admin.command(cmd)

        # Run data validation and dbhash checking.
        self.hook_test_case.run_test()
Example 5
    def test_array_scalar_load4(self):
        # Test documents with multiple levels of sub documents
        son_docs = [
            bson.SON(
                [('x', [
                    [
                        bson.SON([('a', i), ('b', i)]),
                        bson.SON([('a', -i), ('b', -i)])
                    ],
                    [
                        bson.SON([('c', i), ('d', i)]),
                        bson.SON([('c', -i), ('d', -i)])
                    ],

                ])]) for i in range(10)]
        raw_docs = [bson._dict_to_bson(
            doc, False, bson.DEFAULT_CODEC_OPTIONS) for doc in son_docs]
        sub_sub_sub_dtype = np.dtype([('q', 'int32')])
        sub_sub_dtype = np.dtype(
            ([('a', sub_sub_sub_dtype), ('b', sub_sub_sub_dtype)], 2))
        sub_dtype = np.dtype((sub_sub_dtype, 2))
        dtype = np.dtype([('x', sub_dtype)])

        # The dtype matches the documents, but this nesting is not yet
        # supported, so conversion still fails.
        with self.assertRaisesPattern(bsonnumpy.error,
                                      r'unsupported BSON type: unknown'):
            bsonnumpy.sequence_to_ndarray(raw_docs, dtype, 4)
Example 6
    def _dataset_to_meta(self, x: Union[xarray.DataArray,
                                        xarray.Dataset]) -> dict:
        """Helper function of put().
        Convert a DataArray or Dataset into the dict to insert into the 'meta'
        collection.
        """
        meta = {
            "attrs": bson.SON(x.attrs),
            "coords": bson.SON(),
            "data_vars": bson.SON(),
            "chunkSize": self.chunk_size_bytes,
        }
        if isinstance(x, xarray.DataArray):
            if x.name:
                meta["name"] = x.name
            x = x.to_dataset(name="__DataArray__")

        for k, v in x.variables.items():
            subdoc = meta["coords"] if k in x.coords else meta["data_vars"]
            subdoc[k] = {
                "dims": v.dims,
                "dtype": v.dtype.str,
                "shape": v.shape,
                "chunks": v.chunks,
                "type": "ndarray",
            }
            if isinstance(v.data, Quantity):
                subdoc[k]["units"] = str(v.data.units)

        return meta
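
A hedged usage sketch (the store instance and the exact dtype string are assumptions):

import xarray

arr = xarray.DataArray([1.0, 2.0, 3.0], dims=['t'], name='signal')
meta = store._dataset_to_meta(arr)
# meta['name'] == 'signal'; the DataArray is stored under the reserved
# variable name '__DataArray__':
# meta['data_vars']['__DataArray__'] == {'dims': ('t',), 'dtype': '<f8',
#                                        'shape': (3,), 'chunks': None,
#                                        'type': 'ndarray'}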
Example 7
    def test_incorrect_sub_dtype3(self):
        # Sub document missing key
        bad_doc = bson.SON([("x", bson.SON([("bad", 0), ("z", 0)])),
                            ("q", bson.SON([("y", 0), ("z", 0)]))])

        bad_raw_docs = self.raw_docs[:9]
        bad_raw_docs.append(
            bson._dict_to_bson(bad_doc, False, bson.DEFAULT_CODEC_OPTIONS))

        with self.assertRaisesPattern(bsonnumpy.error,
                                      "document does not match dtype"):
            bsonnumpy.sequence_to_ndarray(bad_raw_docs, self.dtype_sub, 10)
Example 8
    def run_test(self):
        """Execute test hook."""
        sync_node = self.fixture.get_initial_sync_node()
        sync_node_conn = sync_node.mongo_client()

        sync_node.teardown()

        self.logger.info("Starting the initial sync node back up again...")
        sync_node.setup()
        sync_node.await_ready()

        # Do initial sync round.
        self.logger.info(
            "Waiting for initial sync node to go into SECONDARY state")
        cmd = bson.SON([
            ("replSetTest", 1), ("waitForMemberState", 2),
            ("timeoutMillis",
             fixture_interface.ReplFixture.AWAIT_REPL_TIMEOUT_FOREVER_MINS *
             60 * 1000)
        ])
        while True:
            try:
                sync_node_conn.admin.command(cmd)
                break
            except pymongo.errors.OperationFailure as err:
                if (err.code != self.INTERRUPTED_DUE_TO_REPL_STATE_CHANGE and
                        err.code != self.INTERRUPTED_DUE_TO_STORAGE_CHANGE):
                    raise
                msg = (
                    "Interrupted while waiting for node to reach secondary state, retrying: {}"
                ).format(err)
                self.logger.error(msg)

        # Run data validation and dbhash checking.
        self._js_test_case.run_test()
Example 9
    def _await_secondary_state(self, secondary):
        client = secondary.mongo_client()
        while True:
            try:
                client.admin.command(
                    bson.SON([
                        ("replSetTest", 1),
                        ("waitForMemberState", 2),  # 2 = SECONDARY
                        ("timeoutMillis",
                         fixture.ReplFixture.AWAIT_REPL_TIMEOUT_FOREVER_MINS *
                         60 * 1000)
                    ]))
                break
            except pymongo.errors.OperationFailure as err:
                if (err.code != self.INTERRUPTED_DUE_TO_REPL_STATE_CHANGE and
                        err.code != self.INTERRUPTED_DUE_TO_STORAGE_CHANGE):
                    self.logger.exception(
                        "mongod on port %d failed to reach state SECONDARY after %d seconds",
                        secondary.port,
                        fixture.ReplFixture.AWAIT_REPL_TIMEOUT_FOREVER_MINS *
                        60)
                    raise errors.ServerFailure(
                        "mongod on port {} failed to reach state SECONDARY after {} seconds: {}"
                        .format(
                            secondary.port,
                            fixture.ReplFixture.AWAIT_REPL_TIMEOUT_FOREVER_MINS
                            * 60, err.args[0]))

                msg = (
                    "Interrupted while waiting for node to reach secondary state, retrying: {}"
                ).format(err)
                self.logger.error(msg)
Example 10
 def command(self, db, cmd, value=1, **kwargs):
     p = Promise()
     cmdson = bson.SON([(cmd, value)])
     cmdson.update(kwargs)
     reqid = self._sonquery('{0}.$cmd'.format(db), cmdson, limit=1)
     self.pqs[reqid] = (p, '{0}.$cmd'.format(db), [])
     return p
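
A hedged usage sketch (the surrounding client object, here called conn, is assumed):

 # Builds bson.SON([('ismaster', 1)]) and sends it to 'admin.$cmd' with
 # limit=1; the returned Promise resolves with the command reply.
 p = conn.command('admin', 'ismaster')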
Example 11
    def after_test(self, test, test_report):
        self.tests_run += 1
        sync_node = self.fixture.get_initial_sync_node()
        sync_node_conn = utils.new_mongo_client(port=sync_node.port)

        if self.tests_run >= self.n:
            self.tests_run = 0
            teardown_success = sync_node.teardown()

            self.logger.info("Starting the initial sync node back up again...")
            sync_node.setup()
            sync_node.await_ready()

            # Do initial sync round.
            self.logger.info("Waiting for initial sync node to go into SECONDARY state")
            cmd = bson.SON([("replSetTest", 1),
                            ("waitForMemberState", 2),
                            ("timeoutMillis", 20 * 60 * 1000)])
            description = "{0} after running '{1}'".format(
                self.description, test.short_name())
            try:
                sync_node_conn.admin.command(cmd)
            except pymongo.errors.OperationFailure as err:
                self.logger.exception("{0} failed".format(description))
                test_report.addFailure(self.hook_test_case, sys.exc_info())
                raise errors.TestFailure(err.args[0])

            # Run data validation and dbhash checking.
            JsCustomBehavior.after_test(self, test, test_report)

            if not teardown_success:
                raise errors.TestFailure("%s did not exit cleanly" % (sync_node))
Example 12
def parse_router_list_search_query(args):
    query_usr = bson.SON()
    if "q" in args:
        for word in args["q"].strip().split(" "):
            if ':' not in word:
                key = "hostname"
                value = word
            else:
                key, value = word.split(':', 1)
            if key in allowed_filters:
                query_usr[key] = query_usr.get(key, "") + value
    query = {}
    for key, value in query_usr.items():
        if value == "EXISTS":
            query[key] = {"$exists": True}
        elif value == "EXISTS_NOT":
            query[key] = {"$exists": False}
        elif key == 'netifs.mac':
            query[key] = value.lower()
        elif key == 'hostname':
            query[key] = {"$regex": value.replace('.', '\.'), "$options": 'i'}
        elif key == 'hardware.name':
            query[key] = {
                "$regex": value.replace('.', '\.').replace('_', ' '),
                "$options": 'i'
            }
        elif key == 'netmon_id':
            query[key] = int(value)
        else:
            query[key] = value
    return (query, format_query(query_usr))
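
A hedged usage sketch (allowed_filters and format_query are defined elsewhere in the module; their contents here are made up):

allowed_filters = {"hostname", "netmon_id", "netifs.mac"}  # assumed

query, query_str = parse_router_list_search_query({"q": "node1 netmon_id:42"})
# query == {"hostname": {"$regex": "node1", "$options": "i"},
#           "netmon_id": 42}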
Example 13
async def search_proposed_tasks(db,
                                sort_by='id',
                                sort_order='asc',
                                offset=None,
                                limit=None):
    if sort_by == 'id':
        sort_by = '_id'
    if sort_order == 'asc':
        sort_order = pymongo.ASCENDING
    else:
        sort_order = pymongo.DESCENDING
    pipeline = [
        {
            '$project': {
                'title': '$title',
                'difficulty': '$difficulty',
                'solved_by': '$solved_by'
            }
        },
        {
            '$sort': bson.SON([(sort_by, sort_order)])
        },
    ]
    if offset:
        pipeline.append({'$skip': offset})
    if limit:
        pipeline.append({'$limit': limit})
    cursor = db.proposed_tasks.aggregate(pipeline)
    tasks = []
    async for task in cursor:
        tasks.append(task)
    return {'status': 'ok', 'tasks': tasks}
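
With a single sort key the SON wrapper is mostly defensive, since Python 3.7+ dicts preserve insertion order anyway; it matters more when sorting on several keys, where key order defines sort priority. A sketch:

sort_stage = {'$sort': bson.SON([('difficulty', pymongo.DESCENDING),
                                 ('_id', pymongo.ASCENDING)])}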
Example 14
def len_val_fn(value):
    """ This creates a SON pair of the type {len:len(value), val:value}, with the len first so lexicographic ordering works.
        WATCH OUT however as later manipulations of the database are likely to mess up this ordering if not careful.
        For this, use order_values below.
        Later we should implement SON_manipulators that insert and save safely.

        Detailed explanation: This is kind of a hack for mongodb:
        Mongo uses lexicographic(?) ordering on strings, which is not convenient when 
        strings are used to represent integers (necessary because of large integers).
        For instance, it would not compare properly a generic 2 character/digit
        integer and a 10 character/digit one. This means we lose the ability to
        perform some range queries easily with mongo syntax.
        The solution we are using is to set up a SON ordered dict for this:
        If we had one of the field in our document called "Conductor":"342353223525",
        we replace that with "Conductor_plus":{"len": int(12), "value": "342353223525"}
        (12 is the length of that string)
        This SON object is ordered, so the "len" entry comes first.
        When comparing ordered dicts (=SON), mongo uses a recursive algorithm.
        At the ordered dict stage it uses lexicographic ordering on the keys.
        Inside each key,value pair it compares based on the default ordering of the value type.
        For "Conductor_plus", it will first compare on the length, and if those are equal
        compare on the strings. 
    """
    import bson
    return bson.SON([("len", len(value)), ("val", value)])
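
A short sketch of the effect (values made up):

a = len_val_fn("9")    # SON([("len", 1), ("val", "9")])
b = len_val_fn("10")   # SON([("len", 2), ("val", "10")])
# As plain strings, "9" sorts after "10" lexicographically; wrapped, Mongo
# compares the "len" fields first (1 < 2), restoring numeric order.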
Example 15
    def _after_test_impl(self, test, test_report, description):
        self.tests_run += 1
        sync_node = self.fixture.get_initial_sync_node()
        sync_node_conn = utils.new_mongo_client(port=sync_node.port)

        # If it's been 'n' tests so far, wait for the initial sync node to finish syncing.
        if self.tests_run >= self.n:
            self.hook_test_case.logger.info(
                "%d tests have been run against the fixture, waiting for initial sync"
                " node to go into SECONDARY state", self.tests_run)
            self.tests_run = 0

            cmd = bson.SON([("replSetTest", 1),
                            ("waitForMemberState", 2),  # 2 = SECONDARY
                            ("timeoutMillis", 20 * 60 * 1000)])
            sync_node_conn.admin.command(cmd)

        # Check if the initial sync node is in SECONDARY state. If it's been 'n' tests, then it
        # should have waited to be in SECONDARY state and the test should be marked as a failure.
        # Otherwise, we just skip the hook and will check again after the next test.
        try:
            state = sync_node_conn.admin.command("replSetGetStatus").get(
                "myState")
            if state != 2:
                if self.tests_run == 0:
                    msg = "Initial sync node did not catch up after waiting 20 minutes"
                    self.hook_test_case.logger.exception(
                        "{0} failed: {1}".format(description, msg))
                    raise errors.TestFailure(msg)

                self.hook_test_case.logger.info(
                    "Initial sync node is in state %d, not state SECONDARY (2)."
                    " Skipping BackgroundInitialSync hook for %s", state,
                    test.short_name())

                # If we have not restarted initial sync since the last time we ran the data
                # validation, restart initial sync with a 20% probability.
                if self.random_restarts < 1 and random.random() < 0.2:
                    hook_type = "resync" if self.use_resync else "initial sync"
                    self.hook_test_case.logger.info(
                        "randomly restarting %s in the middle of %s",
                        hook_type, hook_type)
                    self.__restart_init_sync(test_report, sync_node,
                                             sync_node_conn)
                    self.random_restarts += 1
                return
        except pymongo.errors.OperationFailure:
            # replSetGetStatus can fail if the node is in STARTUP state. The node will soon go into
            # STARTUP2 state and replSetGetStatus will succeed after the next test.
            self.hook_test_case.logger.info(
                "replSetGetStatus call failed in BackgroundInitialSync hook, skipping hook for %s",
                test.short_name())
            return

        self.random_restarts = 0

        # Run data validation and dbhash checking.
        self.hook_test_case.run_test()

        self.__restart_init_sync(test_report, sync_node, sync_node_conn)
Example 16
    def after_test(self, test, test_report):
        self.tests_run += 1
        # If we have not run 'n' tests yet, skip this hook.
        if self.tests_run < self.n:
            return
        self.tests_run = 0

        sync_node = self.fixture.get_initial_sync_node()
        sync_node_conn = utils.new_mongo_client(port=sync_node.port)
        description = "{0} after running '{1}'".format(self.description,
                                                       test.short_name())

        teardown_success = True
        if self.use_resync:
            self.fixture.logger.info("Calling resync on initial sync node...")
            cmd = bson.SON([("resync", 1)])
            try:
                sync_node_conn.admin.command(cmd)
            except pymongo.errors.OperationFailure as err:
                self.fixture.logger.exception("{0} failed".format(description))
                test_report.addFailure(self.hook_test_case, sys.exc_info())
                raise errors.TestFailure(err.args[0])
        else:
            teardown_success = sync_node.teardown()

            self.fixture.logger.info(
                "Starting the initial sync node back up again...")
            sync_node.setup()
            sync_node.await_ready()

        # Do initial sync round.
        self.fixture.logger.info(
            "Waiting for initial sync node to go into SECONDARY state")
        cmd = bson.SON([("replSetTest", 1),
                        ("waitForMemberState", 2),  # 2 = SECONDARY
                        ("timeoutMillis", 20 * 60 * 1000)])
        try:
            sync_node_conn.admin.command(cmd)
        except pymongo.errors.OperationFailure as err:
            self.fixture.logger.exception("{0} failed".format(description))
            test_report.addFailure(self.hook_test_case, sys.exc_info())
            raise errors.TestFailure(err.args[0])

        # Run data validation and dbhash checking.
        JsCustomBehavior.after_test(self, test, test_report)

        if not teardown_success:
            raise errors.TestFailure("%s did not exit cleanly" % (sync_node))
Example 17
    def test_deeply_nested_array(self):
        # arrays of length 1 are maintained when they are within another array
        dtype = np.dtype([("a", "(3,2,1)int32"), ("b", "(3,2,1)int32")])

        doc = bson.SON([("a", [[[9], [9]], [[8], [8]], [[7], [7]]]),
                        ("b", [[[6], [6]], [[5], [5]], [[4], [4]]])])

        utf8 = bson._dict_to_bson(doc, False, bson.DEFAULT_CODEC_OPTIONS)
        result = bsonnumpy.sequence_to_ndarray([utf8], dtype, 1)
        self.assertEqual(dtype, result.dtype)
        self.assertTrue(
            np.array_equal(
                result,
                np.array([([[[9], [9]], [[8], [8]], [[7], [7]]
                            ], [[[6], [6]], [[5], [5]], [[4], [4]]])], dtype)))

        dtype = np.dtype([("a", "(3,1)int32"), ("b", "(3,1)int32"),
                          ("c", "(3,1)int32")])

        doc = bson.SON([("a", [[9], [8], [7]]), ("b", [[6], [5], [4]]),
                        ("c", [[3], [2], [1]])])

        utf8 = bson._dict_to_bson(doc, False, bson.DEFAULT_CODEC_OPTIONS)
        result = bsonnumpy.sequence_to_ndarray([utf8], dtype, 1)
        self.assertEqual(dtype, result.dtype)
        self.assertTrue(
            np.array_equal(
                result,
                np.array([([[9], [8], [7]], [[6], [5], [4]], [[3], [2], [1]])],
                         dtype)))

        dtype = np.dtype([("a", "2int32")])
        doc = bson.SON([("a", [7, 7])])
        utf8 = bson._dict_to_bson(doc, False, bson.DEFAULT_CODEC_OPTIONS)
        result = bsonnumpy.sequence_to_ndarray([utf8], dtype, 1)
        self.assertEqual(dtype, result.dtype)
        self.assertTrue(np.array_equal(result, np.array([([7, 7], )], dtype)))

        dtype = np.dtype([("a", "(2,1,1,1)int32")])
        doc = bson.SON([("a", [[[[99]]], [[[88]]]])])
        utf8 = bson._dict_to_bson(doc, False, bson.DEFAULT_CODEC_OPTIONS)

        result = bsonnumpy.sequence_to_ndarray([utf8], dtype, 1)
        self.assertEqual(dtype, result.dtype)
        self.assertTrue(
            np.array_equal(result, np.array([([[[[99]]], [[[88]]]], )],
                                            dtype)))
Example 18
 def from_json(self, json_data):
     """Convert from JSON."""
     mongo_data = self.__cut_excluded_field(
         json.loads(json_data,
                    object_hook=generate_object_hook(self._document)),
         ["exclude_from_json"])
     return [
         self._document._from_son(bson.SON(data)) for data in mongo_data
     ]
Example 19
def order_values(doc, field, sub_fields=["len", "val"]):
    """ Retrieving a document then saving it messes up the ordering in SON documents. This allows you to take a document,
        retrieve a specific field, order it according to the order of sub_fields, and return a document with a SON in place,
        which can then be saved.
    """
    import bson
    tmp = doc[field]
    doc[field] = bson.SON([(sub_field, tmp[sub_field]) for sub_field in sub_fields])
    return doc
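
A quick sketch of the intended round trip (field name and values made up):

doc = {"Conductor_plus": {"val": "342353223525", "len": 12}}  # order lost
doc = order_values(doc, "Conductor_plus")
# doc["Conductor_plus"] is now SON([("len", 12), ("val", "342353223525")]),
# restoring the {len, val} ordering that len_val_fn established.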
Example 20
 def query(self, coll, q, limit=0, fields=None, special=None):
     p = Promise()
     # Avoid a shared mutable default argument: the dict is mutated below.
     special = dict(special) if special else {}
     special['$query'] = q
     reqid = self._sonquery(coll,
                            bson.SON(special),
                            limit=limit,
                            fields=fields)
     self.pqs[reqid] = (p, coll, [])
     return p
Example 21
    def run_test(self):
        """Execute test hook."""
        sync_node = self.fixture.get_initial_sync_node()
        sync_node_conn = sync_node.mongo_client()

        # If it's been 'n' tests so far, wait for the initial sync node to finish syncing.
        if self._hook.tests_run >= self._hook.n:
            self.logger.info(
                "%d tests have been run against the fixture, waiting for initial sync"
                " node to go into SECONDARY state", self._hook.tests_run)
            self._hook.tests_run = 0

            cmd = bson.SON(
                [("replSetTest", 1), ("waitForMemberState", 2),
                 ("timeoutMillis",
                  fixture_interface.ReplFixture.AWAIT_REPL_TIMEOUT_FOREVER_MINS * 60 * 1000)])
            sync_node_conn.admin.command(cmd)

        # Check if the initial sync node is in SECONDARY state. If it's been 'n' tests, then it
        # should have waited to be in SECONDARY state and the test should be marked as a failure.
        # Otherwise, we just skip the hook and will check again after the next test.
        try:
            state = sync_node_conn.admin.command("replSetGetStatus").get("myState")

            if state != 2:
                if self._hook.tests_run == 0:
                    msg = "Initial sync node did not catch up after waiting 24 hours"
                    self.logger.exception("{0} failed: {1}".format(self._hook.description, msg))
                    raise errors.TestFailure(msg)

                self.logger.info(
                    "Initial sync node is in state %d, not state SECONDARY (2)."
                    " Skipping BackgroundInitialSync hook for %s", state, self._base_test_name)

                # If we have not restarted initial sync since the last time we ran the data
                # validation, restart initial sync with a 20% probability.
                if self._hook.random_restarts < 1 and random.random() < 0.2:
                    self.logger.info(
                        "randomly restarting initial sync in the middle of initial sync")
                    self.__restart_init_sync(sync_node)
                    self._hook.random_restarts += 1
                return
        except pymongo.errors.OperationFailure:
            # replSetGetStatus can fail if the node is in STARTUP state. The node will soon go into
            # STARTUP2 state and replSetGetStatus will succeed after the next test.
            self.logger.info(
                "replSetGetStatus call failed in BackgroundInitialSync hook, skipping hook for %s",
                self._base_test_name)
            return

        self._hook.random_restarts = 0

        # Run data validation and dbhash checking.
        self._js_test.run_test()

        self.__restart_init_sync(sync_node)
Example 22
    def test_values(self):
        schedule = bson.SON(SCHEDULE)
        call = ScheduledCall.from_db(schedule)

        result = call.as_dict()

        self.assertEqual(result['_id'], call.id)
        for k, v in SCHEDULE.items():
            self.assertEqual(v, result[k])
        self.assertIn('next_run', result)
Example 23
 def _any_near(self, node, near_name):
     shape = GeoShapeParser().handle(self.get_arg(node, 0))
     # use SON because mongo expects the command before the arguments
     result = bson.SON({near_name: shape})
     if len(node.args) > 1:
         distance = self.parse_arg(node, 1, IntField())  # meters
         if isinstance(shape, list):  # legacy coordinate pair
             result['$maxDistance'] = distance
         else:
             shape['$maxDistance'] = distance
     return result
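
The resulting filter differs by shape type; a sketch of the two outcomes, assuming GeoShapeParser returns either a legacy [lon, lat] pair or a GeoJSON-style sub-document:

 # Legacy pair:    SON({'$near': [lon, lat], '$maxDistance': meters})
 # GeoJSON shape:  SON({'$near': {...geometry..., '$maxDistance': meters}})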
Example 24
    def test_values(self):
        schedule = bson.SON(SCHEDULE)
        call = ScheduledCall.from_db(schedule)

        as_dict = call.as_dict()
        result = call.for_display()

        for k, v in result.items():
            if k not in ['schedule', 'iso_schedule']:
                self.assertEqual(v, as_dict[k])
        self.assertEqual(result['schedule'], as_dict['iso_schedule'])
Example 25
    def test_array_scalar_load3(self):
        # Test sub arrays with documents that have arrays
        son_docs = [
            bson.SON(
                [('x', [
                    bson.SON([('a', [i, i, i, i]),
                              ('b', [i, i, i, i])]),
                    bson.SON([('a', [-i, -i, -i, -i]),
                              ('b', [-i, -i, -i, -i])])
                ])]) for i in range(10)]

        raw_docs = [bson._dict_to_bson(
            doc, False, bson.DEFAULT_CODEC_OPTIONS) for doc in son_docs]
        sub_dtype = np.dtype(([('a', '4int32'), ('b', '4int32')], 2))
        dtype = np.dtype([('x', sub_dtype)])

        # The dtype matches the documents, but sub-documents inside arrays are
        # not yet supported, so conversion still fails.
        with self.assertRaisesPattern(bsonnumpy.error,
                                      r'unsupported BSON type: Sub-document'):
            bsonnumpy.sequence_to_ndarray(raw_docs, dtype, 4)
Example 26
 def test_incorrect_sub_dtype_array7(self):
     # Sub array too short
     bad_doc = bson.SON([("x", [['d' * 1, 'd' * 2], ['d' * 4, 'd' * 5]]),
                         ("y", [['d' * 7, 'd' * 8], ['d' * 10, 'd' * 11]])])
     bad_raw_docs = self.raw_docs[:3]
     bad_raw_docs.append(
         bson._dict_to_bson(bad_doc, False, bson.DEFAULT_CODEC_OPTIONS))
     with self.assertRaisesPattern(
             bsonnumpy.error,
             "invalid document: array is of incorrect length"):
         bsonnumpy.sequence_to_ndarray(bad_raw_docs, self.dtype, 4)
Example 27
 def test_incorrect_sub_dtype_array1(self):
     # Top document missing key
     bad_doc = bson.SON([("x", [['d' * 1, 'd' * 2, 'd' * 3],
                                ['d' * 4, 'd' * 5, 'd' * 6]]),
                         ("bad_key", [['d' * 7, 'd' * 7, 'd' * 9],
                                      ['d' * 10, 'd' * 11, 'd' * 12]])])
     bad_raw_docs = self.raw_docs[:3]
     bad_raw_docs.append(
         bson._dict_to_bson(bad_doc, False, bson.DEFAULT_CODEC_OPTIONS))
     with self.assertRaisesPattern(bsonnumpy.error,
                                   "document does not match dtype"):
         bsonnumpy.sequence_to_ndarray(bad_raw_docs, self.dtype, 4)
Example 28
try:
    string_types = basestring  # PY2
except NameError:
    string_types = (str, bytes)  # PY3


def _json_convert(obj):
    """Recursive helper method that converts BSON types so they can be
    converted into json.
    """
    if hasattr(obj, 'items'):  # dict-like mapping, on both PY2 and PY3
        return bson.SON((k, _json_convert(v)) for k, v in obj.items())
    elif hasattr(obj, '__iter__') and not isinstance(obj, string_types):
        return list(_json_convert(v) for v in obj)
    try:
        return json_default(obj)
    except TypeError:
        return obj
Example 29
    def __restart_init_sync(self, test_report, sync_node, sync_node_conn):
        if self.use_resync:
            self.hook_test_case.logger.info("Calling resync on initial sync node...")
            cmd = bson.SON([("resync", 1), ("wait", 0)])
            sync_node_conn.admin.command(cmd)
        else:
            # Tear down and restart the initial sync node to start initial sync again.
            sync_node.teardown()

            self.hook_test_case.logger.info("Starting the initial sync node back up again...")
            sync_node.setup()
            sync_node.await_ready()
Example 30
def parse_router_list_search_query(args):
    query_usr = bson.SON()
    if "q" in args:
        for word in args["q"].strip().split(" "):
            if not word:
                # Case of "q=" without arguments
                break
            if ':' not in word:
                key = "hostname"
                value = word
            else:
                key, value = word.split(':', 1)
            if key in allowed_filters:
                query_usr[key] = query_usr.get(key, "") + value
    s = ""
    j = ""
    t = []
    i = 0
    for key, value in query_usr.items():
        if i == 0:
            prefix = " WHERE "
        else:
            prefix = " AND "
        if value.startswith('!'):
            no = "NOT "
            value = value[1:]
        else:
            no = ""

        if value == "EXISTS":
            k = key + ' <> "" AND ' + key + " IS NOT NULL"
        elif value == "EXISTS_NOT":
            k = key + ' = "" OR ' + key + " IS NULL"
        elif key == 'mac':
            j += " INNER JOIN ( SELECT router, mac FROM router_netif GROUP BY router, mac) AS j ON router.id = j.router "
            k = "mac {} REGEXP %s".format(no)
            t.append(value.lower())
        elif (key == 'hardware') or (key == 'hood'):
            k = key + " {} REGEXP %s".format(no)
            t.append(value.replace("_", "."))
        elif (key == 'hostname') or (key == 'firmware'):
            k = key + " {} REGEXP %s".format(no)
            t.append(value)
        elif key == 'contact':
            k = "contact {} REGEXP %s".format(no)
            t.append(value)
        else:
            k = no + key + " = %s"
            t.append(value)
        i += 1
        s += prefix + k
    where = j + " " + s
    return (where, tuple(t), format_query(query_usr))
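
A hedged usage sketch of this SQL variant (allowed_filters and format_query are defined elsewhere; whitespace in the WHERE clause is approximate):

allowed_filters = {"hostname", "hood"}  # assumed

where, params, query_str = parse_router_list_search_query(
    {"q": "node1 hood:!west"})
# where  ~= " WHERE hostname REGEXP %s AND hood NOT REGEXP %s"
# params == ('node1', 'west')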