コード例 #1
0
    def _configureCallback(self, reply, proto):
        """Handle the reply from the "ismaster" query.

        The reply contains configuration information about the peer:
        validate the reply document, verify the command succeeded, check
        replica-set membership against the connection URI, record the max
        BSON size and any discovered replica-set hosts, and finally promote
        the protocol to the active instance if the peer is the master.
        """
        # Make sure we got exactly one result document.
        if len(reply.documents) != 1:
            proto.fail(errors.OperationFailure('Invalid document length.'))
            return

        # Get the configuration document from the reply.
        config = reply.documents[0].decode()

        # Make sure the command was successful.
        if not config.get('ok'):
            code = config.get('code')
            msg = config.get('err', 'Unknown error')
            proto.fail(errors.OperationFailure(msg, code))
            return

        # Check that the replicaSet matches the one requested in the URI.
        set_name = config.get('setName')
        expected_set_name = self.uri['options'].get('setname')
        if expected_set_name and (expected_set_name != set_name):
            # Log the invalid replica set failure.
            # BUG FIX: was `pymongo.errros` (typo), which raised
            # AttributeError here instead of failing the protocol cleanly.
            msg = 'Mongo instance does not match requested replicaSet.'
            reason = pymongo.errors.ConfigurationError(msg)
            proto.fail(reason)
            return

        # Track max bson object size limit advertised by the server.
        max_bson_size = config.get('maxBsonObjectSize')
        if max_bson_size:
            proto.max_bson_size = max_bson_size

        # Track the other hosts in the replica set for later discovery.
        hosts = config.get('hosts')
        if isinstance(hosts, list) and hosts:
            hostaddrs = []
            for host in hosts:
                if ':' not in host:
                    # No explicit port: assume the MongoDB default.
                    host = (host, 27017)
                else:
                    host = host.split(':', 1)
                    host[1] = int(host[1])
                hostaddrs.append(host)
            self.__discovered = hostaddrs

        # Check if this node is the master; only a master may serve writes.
        ismaster = config.get('ismaster')
        if not ismaster:
            reason = pymongo.errors.AutoReconnect('not master')
            proto.fail(reason)
            return

        # Notify deferreds waiting for completion.
        self.setInstance(instance=proto)
コード例 #2
0
    def test_no_use_case_if_no_log(
            self, mock_verify_oauth2_token: mock.MagicMock) -> None:
        """Do not create a use-case if there's no write access to the log table."""

        mock_verify_oauth2_token.return_value = {
            'iss': 'accounts.google.com',
            'email': '*****@*****.**',
            'sub': '12345',
        }
        self.create_user(email='*****@*****.**')

        # Restore the genuine insert_one once this test finishes.
        original_insert = self._eval_db.email_requests.insert_one
        self.addCleanup(
            setattr, self._eval_db.email_requests, 'insert_one', original_insert)

        # Simulate a database that rejects writes to the log collection.
        failing_insert = mock.MagicMock(side_effect=errors.OperationFailure(
            'No write access to the database.'))
        self._eval_db.email_requests.insert_one = failing_insert  # type: ignore

        response = self.app.post('/api/eval/use-case/create',
                                 headers={'Authorization': 'Bearer blabla'},
                                 data=json.dumps({'email': '*****@*****.**'}))
        self.assertEqual(401, response.status_code)
コード例 #3
0
ファイル: protocol.py プロジェクト: VoiSmart/txmongo
    def getlasterror(self, db):
        """Run getlasterror against *db*'s $cmd collection, forwarding any
        write-concern options present in the connection URI, and return the
        decoded result document (raising on a reported error)."""
        command = {'getlasterror': 1}
        db = '%s.$cmd' % db.split('.', 1)[0]
        options = self.factory.uri['options']
        # Copy write-concern settings from the URI into the command,
        # coercing each value to the type the server expects.
        for uri_key, cmd_key, coerce in (('w', 'w', int),
                                         ('wtimeoutms', 'wtimeout', int),
                                         ('fsync', 'fsync', bool),
                                         ('journal', 'journal', bool)):
            if uri_key in options:
                command[cmd_key] = coerce(options[uri_key])

        reply = yield self.send_QUERY(Query(collection=db, query=command))

        assert len(reply.documents) == 1

        document = reply.documents[0].decode()
        err = document.get('err', None)
        code = document.get('code', None)

        if err is not None:
            # Code 11000 is the server's duplicate-key violation.
            if code == 11000:
                raise errors.DuplicateKeyError(err, code=code)
            raise errors.OperationFailure(err, code=code)

        defer.returnValue(document)
コード例 #4
0
 def test_retry_until_ok_authorization_mongodb_24(self):
     """Test retry_until_ok does not mask authorization failures in
     MongoDB 2.4.
     """
     # MongoDB 2.4 signals authorization problems via the errmsg detail.
     auth_failure = errors.OperationFailure(
         '', details={'errmsg': 'unauthorized'})
     with self.assertRaises(errors.OperationFailure):
         retry_until_ok(err_func, auth_failure)
     # The failure must surface immediately, without any retries.
     self.assertEqual(err_func.counter, 1)
コード例 #5
0
ファイル: collection.py プロジェクト: astanz/txmongo
    def aggregate(self, pipeline, full_response=False):
        """Run an aggregation *pipeline* against this collection.

        Args:
            pipeline: list of aggregation pipeline stages.
            full_response: when True, return the entire command response
                document instead of just its 'result' list.

        Raises:
            TypeError: if *pipeline* is not a list.
            errors.OperationFailure: if the server reports a failure.
        """
        # BUG FIX: `types.ListType` exists only on Python 2; the builtin
        # `list` is equivalent there and also works on Python 3.
        if not isinstance(pipeline, list):
            raise TypeError("pipeline must be an instance of list")
        result = yield self._database['$cmd'].find_one(
            {'aggregate': self._collection_name, 'pipeline': pipeline})
        if not result.get('ok', False):
            # Fixed spelling of the fallback message ('occured' -> 'occurred').
            raise errors.OperationFailure(
                result.get('errmsg', 'An unknown error has occurred'),
                code=result.get('code', None))

        if full_response:
            defer.returnValue(result)
        if 'result' in result:
            defer.returnValue(result['result'])
        defer.returnValue([])
コード例 #6
0
ファイル: setup.py プロジェクト: zkbupt/monkey
def try_store_mitigations_on_mongo():
    """Ensure the ATT&CK mitigations collection exists and is populated.

    Validates the mitigations collection; if validation fails or the
    collection exists but is empty, (re)creates it and stores the
    mitigation documents.
    """
    mitigation_collection_name = AttackMitigations.COLLECTION_NAME
    try:
        mongo.db.validate_collection(mitigation_collection_name)
        # Treat an empty collection the same as a broken one so that the
        # except branch below repopulates it.
        if mongo.db.attack_mitigations.count() == 0:
            raise errors.OperationFailure(
                "Mitigation collection empty. Try dropping the collection and running again"
            )
    except errors.OperationFailure:
        try:
            mongo.db.create_collection(mitigation_collection_name)
        except errors.CollectionInvalid:
            # Collection already exists; nothing to create.
            pass
        finally:
            # Populate whether the collection was just created or existed.
            store_mitigations_on_mongo()
コード例 #7
0
ファイル: protocol.py プロジェクト: VoiSmart/txmongo
 def handle_REPLY(self, request):
     """Route a server reply to the deferred awaiting it, converting a
     query failure into an errback (and dropping the connection when the
     server asks us to reconnect)."""
     if request.response_to not in self.__deferreds:
         # No one is waiting on this reply; ignore it.
         return
     df = self.__deferreds.pop(request.response_to)
     if not (request.response_flags & REPLY_QUERY_FAILURE):
         df.callback(request)
         return
     # The server flagged a query failure: decode it into an exception.
     doc = request.documents[0].decode()
     code = doc.get('code')
     msg = doc.get('$err', 'Unknown error')
     if code == 13435:
         # "not master" class of error: signal reconnect and drop the link.
         err = errors.AutoReconnect(msg)
         fail_conn = True
     else:
         err = errors.OperationFailure(msg, code)
         fail_conn = False
     df.errback(err)
     if fail_conn:
         self.transport.loseConnection()
コード例 #8
0
 def test_retry_until_ok_authorization(self):
     """Test retry_until_ok does not mask authorization failures.
     """
     # Error code 13 is MongoDB's "unauthorized".
     auth_failure = errors.OperationFailure("", 13, None)
     with self.assertRaises(errors.OperationFailure):
         retry_until_ok(err_func, auth_failure)
     # The failure must propagate on the first call, with no retries.
     self.assertEqual(err_func.counter, 1)
コード例 #9
0
 def test_retry_until_ok_operation_failure(self):
     """Test retry_until_ok retries on PyMongo OperationFailure.
     """
     result = retry_until_ok(err_func, errors.OperationFailure(""))
     self.assertTrue(result)
     # err_func fails twice before succeeding on the third attempt.
     self.assertEqual(err_func.counter, 3)
コード例 #10
0
ファイル: db_interface.py プロジェクト: donnate/tfutils-1
    def load_from_db(self,
                     query,
                     cache_filters=False,
                     collfs=None,
                     collfs_recent=None):
        """Load checkpoint from the database.

        Checks the recent and regular checkpoint fs to find the latest one
        matching the query. Returns the GridOut obj corresponding to the
        record.

        Args:
            query: dict expressing MongoDB query
            cache_filters: if truthy, also download the checkpoint file
                into the local cache directory and return its path.
            collfs: GridFS to search; defaults to ``self.collfs``.
            collfs_recent: "recent" GridFS to search; defaults to
                ``self.collfs_recent``.

        Returns:
            ``(ckpt_record, cache_filename)`` tuple (``cache_filename`` is
            None when ``cache_filters`` is falsy), or ``None`` when no
            record matches the query.
        """
        if collfs is None:
            collfs = self.collfs
        # Reach into GridFS internals for the underlying ``<bucket>.files``
        # collection (name-mangled private attribute).
        coll = collfs._GridFS__files
        if collfs_recent is None:
            collfs_recent = self.collfs_recent
        coll_recent = collfs_recent._GridFS__files

        # Only consider records whose filters were saved.
        query['saved_filters'] = True
        count = collfs.find(query).count()
        if count > 0:  # get latest that matches query
            ckpt_record = coll.find(query, sort=[('uploadDate', -1)])[0]
            loading_from = coll
        else:
            ckpt_record = None

        try:
            count_recent = collfs_recent.find(query).count()
        except Exception as inst:
            raise er.OperationFailure(
                inst.args[0] +
                "\n Is your dbname too long? Mongo requires that dbnames be no longer than 64 characters."
            )
        if count_recent > 0:  # get latest that matches query
            ckpt_record_recent = coll_recent.find(query,
                                                  sort=[('uploadDate', -1)])[0]
            # use the record with latest timestamp
            if ckpt_record is None or ckpt_record_recent[
                    'uploadDate'] > ckpt_record['uploadDate']:
                loading_from = coll_recent
                ckpt_record = ckpt_record_recent

        if count + count_recent == 0:  # no matches for query
            log.warning('No matching checkpoint for query "{}"'.format(
                repr(query)))
            return

        # Name-mangled private attribute again: the Database owning the
        # winning collection, needed to open a GridFSBucket below.
        database = loading_from._Collection__database
        log.info('Loading checkpoint from %s' % loading_from.full_name)

        if cache_filters:
            filename = os.path.basename(ckpt_record['filename'])
            cache_filename = os.path.join(self.cache_dir, filename)

            # check if there is no local copy
            if not os.path.isfile(cache_filename):
                log.info('No cache file at %s, loading from DB' %
                         cache_filename)
                # create new file to write from gridfs
                load_dest = open(cache_filename, "w+")
                load_dest.close()
                # NOTE(review): 'rwb+' is not a valid mode on Python 3
                # (raises ValueError); this code appears to be Python 2
                # only — confirm before porting.
                load_dest = open(cache_filename, 'rwb+')
                # Bucket name is the GridFS prefix (e.g. 'fs' from 'fs.files').
                fsbucket = gridfs.GridFSBucket(
                    database, bucket_name=loading_from.name.split('.')[0])
                fsbucket.download_to_stream(ckpt_record['_id'], load_dest)
                load_dest.close()
                if ckpt_record[
                        '_saver_write_version'] == saver_pb2.SaverDef.V2:
                    # V2 checkpoints are stored as a tar of several files;
                    # unpack next to the cache file and verify the pieces.
                    assert cache_filename.endswith('.tar')
                    tar = tarfile.open(cache_filename)
                    tar.extractall(path=self.cache_dir)
                    tar.close()
                    cache_filename = os.path.splitext(cache_filename)[0]
                    verify_pb2_v2_files(cache_filename, ckpt_record)
            else:
                if ckpt_record[
                        '_saver_write_version'] == saver_pb2.SaverDef.V2:
                    # Local copy already unpacked; just verify it.
                    cache_filename = os.path.splitext(cache_filename)[0]
                    verify_pb2_v2_files(cache_filename, ckpt_record)
                log.info('Cache file found at %s, using that to load' %
                         cache_filename)
        else:
            cache_filename = None
        return ckpt_record, cache_filename