Example #1
    def __init__(self):
        self.name = None
        self._indices = []
        self._textIndex = None

        self.initialize()

        assert type(self.name) == str

        db_cfg = getDbConfig()
        db_connection = getDbConnection()
        if cherrypy.config['server']['mode'] == 'testing':
            dbName = '%s_test' % db_cfg['database']
        else:
            dbName = db_cfg['database']  # pragma: no cover
        self.database = db_connection[dbName]
        self.collection = self.database[self.name]

        assert isinstance(self.collection, pymongo.collection.Collection)
        assert type(self._indices) == list

        for index in self._indices:
            self.collection.ensure_index(index)

        if type(self._textIndex) is dict:
            textIdx = [(k, 'text') for k in self._textIndex.keys()]
            self.collection.ensure_index(
                textIdx, weights=self._textIndex,
                default_language=self._textLanguage)
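The constructor above relies on the subclass's initialize() to set self.name (and optionally self._indices, self._textIndex, and self._textLanguage) before the collection is opened. A minimal hypothetical subclass, assuming the base class shown here is named Model, could look like this:

class NoteModel(Model):
    def initialize(self):
        self.name = 'note'                                  # required: collection name
        self._indices = ['created']                         # simple single-field index
        self._textIndex = {'title': 10, 'description': 1}   # weighted full-text index
        self._textLanguage = 'english'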
Example #2
    def __init__(self):
        self.name = None
        self._indices = []
        self._textIndex = None
        self._textLanguage = None

        self._filterKeys = {
            AccessType.READ: set(),
            AccessType.WRITE: set(),
            AccessType.ADMIN: set(),
            AccessType.SITE_ADMIN: set()
        }

        self.initialize()

        db_connection = getDbConnection()
        self.database = db_connection.get_default_database()
        self.collection = MongoProxy(self.database[self.name])

        for index in self._indices:
            if isinstance(index, (list, tuple)):
                self.collection.ensure_index(index[0], **index[1])
            else:
                self.collection.ensure_index(index)

        if type(self._textIndex) is dict:
            textIdx = [(k, 'text') for k in self._textIndex.keys()]
            try:
                self.collection.ensure_index(
                    textIdx,
                    weights=self._textIndex,
                    default_language=self._textLanguage)
            except pymongo.errors.OperationFailure:
                print(
                    TerminalColor.warning('WARNING: Text search not enabled.'))
Example #3
 def __init__(self, assetstore):
     """
     :param assetstore: The assetstore to act on.
     """
     self.assetstore = assetstore
     try:
         self.chunkColl = getDbConnection(
             assetstore.get('mongohost', None),
             assetstore.get('replicaset', None))[assetstore['db']]['chunk']
     except pymongo.errors.ConnectionFailure:
         logger.error('Failed to connect to GridFS assetstore %s',
                      assetstore['db'])
         self.chunkColl = 'Failed to connect'
         self.unavailable = True
         return
     except pymongo.errors.ConfigurationError:
         logger.exception('Failed to configure GridFS assetstore %s',
                          assetstore['db'])
         self.chunkColl = 'Failed to configure'
         self.unavailable = True
         return
     self.chunkColl.ensure_index([
         ('uuid', pymongo.ASCENDING),
         ('n', pymongo.ASCENDING)
     ], unique=True)
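The constructor above marks the adapter as unavailable instead of raising when the GridFS database cannot be reached. A hypothetical caller-side check (the class name GridFsAssetstoreAdapter is taken from the later examples; the rest is illustrative):

adapter = GridFsAssetstoreAdapter(assetstore)
if getattr(adapter, 'unavailable', False):
    # chunkColl holds an error string ('Failed to connect' / 'Failed to configure')
    logger.error('Skipping assetstore %s: %s', assetstore['db'], adapter.chunkColl)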
Example #4
    def __init__(self):
        self.name = None
        self._indices = []
        self._textIndex = None
        self._textLanguage = None

        self.initialize()

        db_cfg = getDbConfig()
        db_connection = getDbConnection()
        dbName = db_cfg['database']
        self.database = db_connection[dbName]
        self.collection = self.database[self.name]

        for index in self._indices:
            if isinstance(index, (list, tuple)):
                self.collection.ensure_index(index[0], **index[1])
            else:
                self.collection.ensure_index(index)

        if type(self._textIndex) is dict:
            textIdx = [(k, 'text') for k in self._textIndex.keys()]
            try:
                self.collection.ensure_index(
                    textIdx,
                    weights=self._textIndex,
                    default_language=self._textLanguage)
            except pymongo.errors.OperationFailure:
                print(
                    TerminalColor.warning('WARNING: Text search not enabled.'))
Example #5
File: base.py Project: chapmanbe/girder
def dropGridFSDatabase(dbName):
    """
    Clear all contents from a gridFS database used as an assetstore.
    :param dbName: the name of the database to drop.
    """
    db_connection = getDbConnection()
    db_connection.drop_database(dbName)
Example #6
    def runImageSimilaritySearch(self, params):
        assert hasattr(self, 'search_url')
        classifications = json.loads(params['classifications']) if 'classifications' in params else []
        params['n'] = params['n'] if 'n' in params else str(DEFAULT_PAGE_SIZE)
        smqtk_r = requests.get(self.search_url + '/n=' + params['n'] + '/' + params['url']).json()
        neighbors_to_distances = dict(zip(smqtk_r['neighbors'], smqtk_r['distances']))

        db = getDbConnection().get_default_database()
        mapped_paths = db[os.environ['IMAGE_SPACE_SMQTK_MAP_COLLECTION']].find({
            'sha': {
                '$in': smqtk_r['neighbors']
            }
        })
        solr_id_to_shas = {os.environ['IMAGE_SPACE_SOLR_PREFIX'] + '/' + x['path']: x['sha'] for x in mapped_paths}
        documents = solr_documents_from_paths(solr_id_to_shas.keys(), classifications)

        for document in documents:
            document['im_distance'] = neighbors_to_distances[solr_id_to_shas[document['id']]]

        if 'near_duplicates' in params and int(params['near_duplicates']) == 1:
            documents = [x for x in documents if x['im_distance'] <= NEAR_DUPLICATES_THRESHOLD]

        return {
            'numFound': len(documents),
            'docs': documents
        }
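The distance lookup above assumes the SMQTK endpoint returns parallel 'neighbors' and 'distances' lists; a hypothetical response would be reshaped like this:

smqtk_r = {'neighbors': ['sha-a', 'sha-b'], 'distances': [0.12, 0.47]}
neighbors_to_distances = dict(zip(smqtk_r['neighbors'], smqtk_r['distances']))
# -> {'sha-a': 0.12, 'sha-b': 0.47}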
Example #7
    def validateInfo(doc):
        """
        Validate the assetstore -- make sure we can connect to it and that the
        necessary indexes are set up.
        """
        if not doc.get('db', ''):
            raise ValidationException('Database name must not be empty.', 'db')
        if '.' in doc['db'] or ' ' in doc['db']:
            raise ValidationException('Database name cannot contain spaces'
                                      ' or periods.', 'db')

        chunkColl = getDbConnection(
            doc.get('mongohost', None), doc.get('replicaset', None),
            autoRetry=False, serverSelectionTimeoutMS=10000)[doc['db']].chunk

        try:
            chunkColl.create_index([
                ('uuid', pymongo.ASCENDING),
                ('n', pymongo.ASCENDING)
            ], unique=True)
        except pymongo.errors.ServerSelectionTimeoutError as e:
            raise ValidationException(
                'Could not connect to the database: %s' % str(e))

        return doc
Example #8
 def __init__(self, assetstore):
     """
     :param assetstore: The assetstore to act on.
     """
     self.assetstore = assetstore
     try:
         self.chunkColl = getDbConnection(
             assetstore.get('mongohost', None),
             assetstore.get('replicaset', None))[assetstore['db']]['chunk']
     except pymongo.errors.ConnectionFailure:
         logger.error('Failed to connect to GridFS assetstore %s',
                      assetstore['db'])
         self.chunkColl = 'Failed to connect'
         self.unavailable = True
         return
     except pymongo.errors.ConfigurationError:
         logger.exception('Failed to configure GridFS assetstore %s',
                          assetstore['db'])
         self.chunkColl = 'Failed to configure'
         self.unavailable = True
         return
     self.chunkColl.ensure_index([
         ('uuid', pymongo.ASCENDING),
         ('n', pymongo.ASCENDING)
     ], unique=True)
Example #9
    def validateInfo(doc):
        """
        Validate the assetstore -- make sure we can connect to it and that the
        necessary indexes are set up.
        """
        if not doc.get('db', ''):
            raise ValidationException('Database name must not be empty.', 'db')
        if '.' in doc['db'] or ' ' in doc['db']:
            raise ValidationException(
                'Database name cannot contain spaces'
                ' or periods.', 'db')

        try:
            chunkColl = getDbConnection(
                doc.get('mongohost', None),
                doc.get('replicaset', None),
                autoRetry=False,
                serverSelectionTimeoutMS=10000)[doc['db']].chunk
            chunkColl.create_index([('uuid', pymongo.ASCENDING),
                                    ('n', pymongo.ASCENDING)],
                                   unique=True)
        except pymongo.errors.ServerSelectionTimeoutError as e:
            raise ValidationException('Could not connect to the database: %s' %
                                      str(e))

        return doc
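For reference, a hypothetical assetstore document that would pass the validation above; only 'db' is checked for content, while 'mongohost' and 'replicaset' are optional and default to None:

doc = {
    'db': 'girder_assetstore',                 # must not be empty or contain spaces/periods
    'mongohost': 'mongodb://localhost:27017',  # optional, illustrative value
    'replicaset': None,                        # optional replica set name
}
doc = validateInfo(doc)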
Example #10
    def reconnect(self):
        """
        Reconnect to the database and rebuild indices if necessary. Users should
        typically not have to call this method.
        """
        db_connection = getDbConnection()
        self.database = db_connection.get_default_database()
        self.collection = MongoProxy(self.database[self.name])

        for index in self._indices:
            if isinstance(index, (list, tuple)):
                self.collection.ensure_index(index[0], **index[1])
            else:
                self.collection.ensure_index(index)

        if type(self._textIndex) is dict:
            textIdx = [(k, 'text') for k in self._textIndex.keys()]
            try:
                self.collection.ensure_index(
                    textIdx,
                    weights=self._textIndex,
                    default_language=self._textLanguage)
            except pymongo.errors.OperationFailure:
                print(
                    TerminalColor.warning('WARNING: Text search not enabled.'))
Example #11
File: model_base.py Project: cjh1/girder
    def __init__(self):
        self.name = None
        self._indices = []
        self._textIndex = None
        self._textLanguage = None

        self.initialize()

        db_cfg = getDbConfig()
        db_connection = getDbConnection()
        dbName = db_cfg['database']
        self.database = db_connection[dbName]
        self.collection = self.database[self.name]

        for index in self._indices:
            if isinstance(index, (list, tuple)):
                self.collection.ensure_index(index[0], **index[1])
            else:
                self.collection.ensure_index(index)

        if type(self._textIndex) is dict:
            textIdx = [(k, 'text') for k in self._textIndex.keys()]
            try:
                self.collection.ensure_index(
                    textIdx, weights=self._textIndex,
                    default_language=self._textLanguage)
            except pymongo.errors.OperationFailure:
                print(
                    TerminalColor.warning('WARNING: Text search not enabled.'))
Example #12
    def __init__(self):
        self.name = None
        self._indices = []
        self._textIndex = None
        self._textLanguage = None

        self._filterKeys = {
            AccessType.READ: set(),
            AccessType.WRITE: set(),
            AccessType.ADMIN: set(),
            AccessType.SITE_ADMIN: set()
        }

        self.initialize()

        db_connection = getDbConnection()
        self.database = db_connection.get_default_database()
        self.collection = MongoProxy(self.database[self.name])

        for index in self._indices:
            if isinstance(index, (list, tuple)):
                self.collection.ensure_index(index[0], **index[1])
            else:
                self.collection.ensure_index(index)

        if type(self._textIndex) is dict:
            textIdx = [(k, 'text') for k in self._textIndex.keys()]
            try:
                self.collection.ensure_index(
                    textIdx, weights=self._textIndex,
                    default_language=self._textLanguage)
            except pymongo.errors.OperationFailure:
                print(
                    TerminalColor.warning('WARNING: Text search not enabled.'))
Example #13
    def testGridFsAssetstore(self):
        """
        Test usage of the GridFS assetstore type.
        """
        # Clear the assetstore database
        conn = getDbConnection()
        conn.drop_database('girder_assetstore_test')

        self.model('assetstore').remove(self.model('assetstore').getCurrent())
        assetstore = self.model('assetstore').createGridFsAssetstore(
            name='Test', db='girder_assetstore_test')
        self.assetstore = assetstore

        chunkColl = conn['girder_assetstore_test']['chunk']

        # Upload the two-chunk file
        file = self._testUploadFile('helloWorld1.txt')
        hash = sha512(chunk1 + chunk2).hexdigest()
        self.assertEqual(hash, file['sha512'])

        # We should have two chunks in the database
        self.assertEqual(chunkColl.find({'uuid': file['chunkUuid']}).count(), 2)

        self._testDownloadFile(file, chunk1 + chunk2)
        self._testDownloadFolder()

        # Delete the file, make sure chunks are gone from database
        self._testDeleteFile(file)
        self.assertEqual(chunkColl.find({'uuid': file['chunkUuid']}).count(), 0)

        empty = self._testEmptyUpload('empty.txt')
        self.assertEqual(sha512().hexdigest(), empty['sha512'])
        self._testDownloadFile(empty, '')
        self._testDeleteFile(empty)
Example #14
File: base.py Project: cvlucian/girder
def dropGridFSDatabase(dbName):
    """
    Clear all contents from a gridFS database used as an assetstore.
    :param dbName: the name of the database to drop.
    """
    db_connection = getDbConnection()
    db_connection.drop_database(dbName)
Example #15
def db(request):
    """
    Require a Mongo test database.

    Provides a Mongo test database named after the requesting test function. Mongo databases are
    created/destroyed based on the URI provided with the --mongo-uri option and tear-down
    behavior is modified by the --keep-db option.
    """
    from girder.models import _dbClients, getDbConnection, pymongo
    from girder.models import model_base
    from girder.models.user import User
    from girder.external import mongodb_proxy

    mockDb = request.config.getoption('--mock-db')
    dbUri = request.config.getoption('--mongo-uri')
    dbName = 'girder_test_%s' % hashlib.md5(_uid(
        request.node).encode('utf8')).hexdigest()
    keepDb = request.config.getoption('--keep-db')
    executable_methods = mongodb_proxy.EXECUTABLE_MONGO_METHODS
    realMongoClient = pymongo.MongoClient

    if mockDb:
        mongodb_proxy.EXECUTABLE_MONGO_METHODS = set()
        pymongo.MongoClient = mongomock.MongoClient

    connection = getDbConnection(uri='%s/%s' % (dbUri, dbName), quiet=False)

    # Force getDbConnection from models to return our connection
    _dbClients[(None, None)] = connection

    connection.drop_database(dbName)

    # Since models store a local reference to the current database, we need to force them all to
    # reconnect
    for model in model_base._modelSingletons:
        model.reconnect()

    # Use faster password hashing to avoid unnecessary testing bottlenecks. Any test case
    # that creates a user goes through the password hashing process, so we avoid actual bcrypt.
    originalCryptContext = User()._cryptContext
    User()._cryptContext = originalCryptContext.copy(schemes=['plaintext'])

    yield connection

    User()._cryptContext = originalCryptContext

    if not keepDb:
        connection.drop_database(dbName)

    connection.close()

    # Clear connection cache and model singletons
    _dbClients.clear()
    for model in model_base._modelSingletons:
        model.__class__._instance = None

    if mockDb:
        mongodb_proxy.EXECUTABLE_MONGO_METHODS = executable_methods
        pymongo.MongoClient = realMongoClient
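A minimal hypothetical test using the db fixture above (assuming it is registered as a pytest fixture); the fixture yields a pymongo connection whose default database is the per-test database:

def test_roundtrip(db):
    coll = db.get_default_database()['widgets']
    coll.insert_one({'name': 'example'})
    assert coll.count_documents({'name': 'example'}) == 1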
Example #16
File: base.py Project: luisibanez/girder
def dropTestDatabase():
    """
    Call this to clear all contents from the test database.
    """
    from girder.models import getDbConnection
    db_connection = getDbConnection()
    db_connection.drop_database('%s_test' %
                                cherrypy.config['database']['database'])
Example #17
    def testGridFsAssetstore(self):
        """
        Test usage of the GridFS assetstore type.
        """
        # Must also lower GridFS's internal chunk size to support our small chunks
        gridfs_assetstore_adapter.CHUNK_SIZE, old = 6, gridfs_assetstore_adapter.CHUNK_SIZE

        # Clear any old DB data
        base.dropGridFSDatabase('girder_test_file_assetstore')
        # Clear the assetstore database
        conn = getDbConnection()
        conn.drop_database('girder_test_file_assetstore')

        Assetstore().remove(Assetstore().getCurrent())
        assetstore = Assetstore().createGridFsAssetstore(
            name='Test', db='girder_test_file_assetstore')
        self.assetstore = assetstore

        chunkColl = conn['girder_test_file_assetstore']['chunk']

        # Upload the two-chunk file
        file = self._testUploadFile('helloWorld1.txt')
        hash = sha512(chunkData).hexdigest()
        file = File().load(file['_id'], force=True)
        self.assertEqual(hash, file['sha512'])

        # The file should have no local path
        self.assertRaises(FilePathException, File().getLocalFilePath, file)

        # We should have two chunks in the database
        self.assertEqual(
            chunkColl.find({
                'uuid': file['chunkUuid']
            }).count(), 2)

        self._testDownloadFile(file, chunk1 + chunk2)

        # Reset chunk size so the large file testing isn't horribly slow
        gridfs_assetstore_adapter.CHUNK_SIZE = old

        self._testDownloadFolder()
        self._testDownloadCollection()

        # Delete the file, make sure chunks are gone from database
        self._testDeleteFile(file)
        self.assertEqual(
            chunkColl.find({
                'uuid': file['chunkUuid']
            }).count(), 0)

        empty = self._testEmptyUpload('empty.txt')
        self.assertEqual(sha512().hexdigest(), empty['sha512'])
        self._testDownloadFile(empty, '')
        self._testDeleteFile(empty)

        # Test copying a file
        copyTestFile = self._testUploadFile('helloWorld1.txt')
        self._testCopyFile(copyTestFile)
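The test above references module-level chunk constants defined elsewhere in the test file. Based on how they are used (chunkData is hashed, chunk1 + chunk2 is what gets downloaded), a hypothetical definition would be:

chunk1, chunk2 = b'hello ', b'world'   # the two chunks uploaded by _testUploadFile
chunkData = chunk1 + chunk2            # full file contents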
Example #18
File: system.py Project: manthey/girder
def _computeSlowStatus(process, status, db):
    status["diskPartitions"] = [_objectToDict(part) for part in psutil.disk_partitions()]
    try:
        # This fails in travis's environment, so guard it
        status["diskIO"] = _objectToDict(psutil.disk_io_counters())
    except Exception:
        pass
    # Report on the disk usage where the script is located
    if hasattr(girder, "__file__"):
        status["girderPath"] = os.path.abspath(girder.__file__)
        status["girderDiskUsage"] = _objectToDict(psutil.disk_usage(status["girderPath"]))
    # Report where our logs are and how much space is available for them
    status["logs"] = []
    for handler in logger.handlers:
        try:
            logInfo = {"path": handler.baseFilename}
            logInfo["diskUsage"] = _objectToDict(psutil.disk_usage(logInfo["path"]))
            status["logs"].append(logInfo)
        except Exception:
            # If we can't read information about the log, don't throw an
            # exception
            pass
    status["mongoDbStats"] = db.command("dbStats")
    try:
        # I don't know if this will work with a sharded database, so guard
        # it and don't throw an exception
        status["mongoDbPath"] = getDbConnection().admin.command("getCmdLineOpts")["parsed"]["storage"]["dbPath"]
        status["mongoDbDiskUsage"] = _objectToDict(psutil.disk_usage(status["mongoDbPath"]))
    except Exception:
        pass

    status["processDirectChildrenCount"] = len(process.children())
    status["processAllChildrenCount"] = len(process.children(True))
    status["openFiles"] = [_objectToDict(file) for file in process.open_files()]
    # I'd rather see textual names for the family and type of connections,
    # so make a lookup table for them
    connFamily = {getattr(socket, key): key for key in dir(socket) if key.startswith("AF_")}
    connType = {getattr(socket, key): key for key in dir(socket) if key.startswith("SOCK_")}
    connections = []
    for conn in process.connections():
        connDict = _objectToDict(conn)
        connDict.pop("raddr", None)
        connDict.pop("laddr", None)
        connDict["family"] = connFamily.get(connDict["family"], connDict["family"])
        connDict["type"] = connType.get(connDict["type"], connDict["type"])
        connections.append(connDict)
    status["connections"] = connections
    if hasattr(process, "io_counters"):
        status["ioCounters"] = _objectToDict(process.io_counters())

    status["cherrypyThreads"] = {}
    for threadId in cherrypy.tools.status.seenThreads:
        info = cherrypy.tools.status.seenThreads[threadId].copy()
        if "end" in info:
            info["duration"] = info["end"] - info["start"]
            info["idle"] = time.time() - info["end"]
        status["cherrypyThreads"][threadId] = info
Example #19
def dropAllTestDatabases():
    """
    Unless otherwise requested, drop all test databases.
    """
    if 'keepdb' not in os.environ.get('EXTRADEBUG', '').split():
        db_connection = getDbConnection()
        for dbName in usedDBs:
            db_connection.drop_database(dbName)
        usedDBs.clear()
Example #20
 def mongoCollection(self, connectionUri, collectionName):
     # TODO not sure if this is a good idea to do this db stuff here
     # maybe this suggests a new model?
     from girder.models import getDbConnection
     dbConn = getDbConnection(connectionUri)
     db = dbConn.get_default_database()
     from girder.external.mongodb_proxy import MongoProxy
     collection = MongoProxy(db[collectionName])
     return collection
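A hypothetical call to the helper above, wrapping a collection from an external Mongo database (the URI and names are illustrative):

collection = self.mongoCollection('mongodb://localhost:27017/minerva_test', 'tweetsgeo')
doc = collection.find_one()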
Example #21
File: base.py Project: girder/girder
def dropAllTestDatabases():
    """
    Unless otherwise requested, drop all test databases.
    """
    if 'keepdb' not in os.environ.get('EXTRADEBUG', '').split():
        db_connection = getDbConnection()
        for dbName in usedDBs:
            db_connection.drop_database(dbName)
        usedDBs.clear()
Example #22
 def mongoCollection(self, connectionUri, collectionName):
     # TODO not sure if this is a good idea to do this db stuff here
     # maybe this suggests a new model?
     from girder.models import getDbConnection
     dbConn = getDbConnection(connectionUri)
     db = dbConn.get_default_database()
     from girder.external.mongodb_proxy import MongoProxy
     collection = MongoProxy(db[collectionName])
     return collection
Example #23
 def __init__(self, assetstore):
     """
     :param assetstore: The assetstore to act on.
     """
     self.assetstore = assetstore
     self.chunkColl = getDbConnection()[assetstore['db']]['chunk']
     self.chunkColl.ensure_index([
         ('uuid', pymongo.ASCENDING),
         ('n', pymongo.ASCENDING)
     ], unique=True)
Example #24
 def __init__(self, assetstore):
     """
     :param assetstore: The assetstore to act on.
     """
     self.assetstore = assetstore
     self.chunkColl = getDbConnection()[assetstore['db']]['chunk']
     self.chunkColl.ensure_index([
         ('uuid', pymongo.ASCENDING),
         ('n', pymongo.ASCENDING)
     ], unique=True)
Example #25
def dropTestDatabase():
    """
    Call this to clear all contents from the test database. Also forces models
    to reload.
    """
    from girder.models import getDbConnection
    db_connection = getDbConnection()
    model_importer._modelInstances = {'core': {}}
    db_connection.drop_database('%s_test' %
                                cherrypy.config['database']['database'])
Example #26
File: base.py Project: girder/girder
def dropGridFSDatabase(dbName):
    """
    Clear all contents from a gridFS database used as an assetstore.
    :param dbName: the name of the database to drop.
    """
    db_connection = getDbConnection()
    if dbName in db_connection.list_database_names():
        if dbName not in usedDBs and 'newdb' in os.environ.get('EXTRADEBUG', '').split():
            raise Exception('Warning: database %s already exists' % dbName)
        db_connection.drop_database(dbName)
    usedDBs[dbName] = True
Example #27
def dropGridFSDatabase(dbName):
    """
    Clear all contents from a gridFS database used as an assetstore.
    :param dbName: the name of the database to drop.
    """
    db_connection = getDbConnection()
    if dbName in db_connection.database_names():
        if dbName not in usedDBs and 'newdb' in os.environ.get('EXTRADEBUG', '').split():
            raise Exception('Warning: database %s already exists' % dbName)
        db_connection.drop_database(dbName)
    usedDBs[dbName] = True
Example #28
    def testGridFsAssetstore(self):
        """
        Test usage of the GridFS assetstore type.
        """
        # Must also lower GridFS's internal chunk size to support our small chunks
        gridfs_assetstore_adapter.CHUNK_SIZE, old = 6, gridfs_assetstore_adapter.CHUNK_SIZE

        # Clear any old DB data
        base.dropGridFSDatabase('girder_test_file_assetstore')
        # Clear the assetstore database
        conn = getDbConnection()
        conn.drop_database('girder_test_file_assetstore')

        Assetstore().remove(Assetstore().getCurrent())
        assetstore = Assetstore().createGridFsAssetstore(
            name='Test', db='girder_test_file_assetstore')
        self.assetstore = assetstore

        chunkColl = conn['girder_test_file_assetstore']['chunk']

        # Upload the two-chunk file
        file = self._testUploadFile('helloWorld1.txt')
        hash = sha512(chunkData).hexdigest()
        file = File().load(file['_id'], force=True)
        self.assertEqual(hash, file['sha512'])

        # The file should have no local path
        self.assertRaises(FilePathException, File().getLocalFilePath, file)

        # We should have two chunks in the database
        self.assertEqual(chunkColl.find({'uuid': file['chunkUuid']}).count(), 2)

        self._testDownloadFile(file, chunk1 + chunk2)

        # Reset chunk size so the large file testing isn't horribly slow
        gridfs_assetstore_adapter.CHUNK_SIZE = old

        self._testDownloadFolder()
        self._testDownloadCollection()

        # Delete the file, make sure chunks are gone from database
        self._testDeleteFile(file)
        self.assertEqual(chunkColl.find({'uuid': file['chunkUuid']}).count(), 0)

        empty = self._testEmptyUpload('empty.txt')
        self.assertEqual(sha512().hexdigest(), empty['sha512'])
        self._testDownloadFile(empty, '')
        self._testDeleteFile(empty)

        # Test copying a file
        copyTestFile = self._testUploadFile('helloWorld1.txt')
        self._testCopyFile(copyTestFile)
Example #29
def dropTestDatabase():
    """
    Call this to clear all contents from the test database. Also forces models
    to reload.
    """
    from girder.models import getDbConnection
    db_connection = getDbConnection()
    model_importer.clearModels()  # Must clear the models so indices are rebuilt
    dbName = cherrypy.config['database']['uri'].split('/')[-1]

    if 'girder_test_' not in dbName:
        raise Exception(
            'Expected a testing database name, but got {}'.format(dbName))
    db_connection.drop_database(dbName)
Example #30
def dropTestDatabase():
    """
    Call this to clear all contents from the test database. Also forces models
    to reload.
    """
    from girder.models import getDbConnection
    db_connection = getDbConnection()
    model_importer.clearModels()  # Must clear the models so indices are rebuilt
    dbName = cherrypy.config['database']['database']

    if 'girder_test_' not in dbName:
        raise Exception('Expected a testing database name, but got {}'
                        .format(dbName))
    db_connection.drop_database(dbName)
Example #31
File: base.py Project: chapmanbe/girder
def dropTestDatabase(dropModels=True):
    """
    Call this to clear all contents from the test database. Also forces models
    to reload.
    """
    db_connection = getDbConnection()

    dbName = cherrypy.config['database']['uri'].split('/')[-1]

    if 'girder_test_' not in dbName:
        raise Exception('Expected a testing database name, but got %s' % dbName)
    db_connection.drop_database(dbName)

    if dropModels:
        model_importer.reinitializeAll()
Example #32
File: base.py Project: cvlucian/girder
def dropTestDatabase(dropModels=True):
    """
    Call this to clear all contents from the test database. Also forces models
    to reload.
    """
    db_connection = getDbConnection()

    dbName = cherrypy.config['database']['uri'].split('/')[-1]

    if 'girder_test_' not in dbName:
        raise Exception('Expected a testing database name, but got %s' % dbName)
    db_connection.drop_database(dbName)

    if dropModels:
        model_importer.reinitializeAll()
Example #33
    def setUp(self):
        """
        Set up the mongo db for the external dataset, with 3 collections:
        a) tweetsgeo, which has tweet data that is geolocated (lat/long fields).
        b) polyGeoIndexed, w/2 polygons in a 2dsphere-indexed 'geometry' field
        c) polyGeoNonIndexed, same as above but without the 2dsphere index
        """
        super(MongoDatasetTestCase, self).setUp()

        self._user = self.model('user').createUser(
            'minervauser', 'password', 'minerva', 'user',
            '*****@*****.**')

        from girder.utility import config
        dbUri = config.getConfig()['database']['uri']
        self.dbName = 'minerva_test_external_mongo_dataset'
        dbUriParts = dbUri.split('/')[0:-1]
        self.dbUri = '/'.join(dbUriParts + [self.dbName])
        from girder.models import getDbConnection
        self.externalMongoDbConnection = getDbConnection(self.dbUri)
        self.externalMongoDb = self.externalMongoDbConnection.get_default_database()
        from girder.external.mongodb_proxy import MongoProxy
        self.geojsonIndexedName = 'polyGeoIndexed'
        self.geojsonNonIndexedName = 'polyGeoNonIndexed'
        self.polyIndexedCollection = MongoProxy(self.externalMongoDb[self.geojsonIndexedName])
        self.polyNonIndexedCollection = MongoProxy(self.externalMongoDb[self.geojsonNonIndexedName])
        self.pluginTestDir = os.path.dirname(os.path.realpath(__file__))
        geojsonPath = os.path.join(self.pluginTestDir, 'data', 'polygons.json')
        with open(geojsonPath) as geojsonFile:
            polys = json.load(geojsonFile)
            for poly in polys:
                self.polyIndexedCollection.save(poly)
                self.polyNonIndexedCollection.save(poly)
            self.polyIndexedCollection.create_index([('geometry', '2dsphere')])
        self.collectionName = 'tweetsgeo'
        self.tweetsgeoCollection = MongoProxy(self.externalMongoDb[self.collectionName])
        # add test data to external dataset
        self.pluginTestDir = os.path.dirname(os.path.realpath(__file__))
        tweets100Path = os.path.join(self.pluginTestDir, 'data', 'tweets100.json')
        z = zipfile.ZipFile('%s.zip' % tweets100Path)
        tweets = json.load(z.open('tweets100.json'))
        from datetime import datetime
        dateformat = '%Y-%m-%dT%H:%M:%S'
        for tweet in tweets:
            d = datetime.strptime((tweet['created_at']), dateformat)
            tweet['created_at'] = int((d - datetime(1970, 1, 1)).total_seconds())
            self.tweetsgeoCollection.save(tweet)
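The created_at conversion above turns each tweet's timestamp into integer epoch seconds; a quick worked example with the same format string:

from datetime import datetime
d = datetime.strptime('2015-02-01T12:00:00', '%Y-%m-%dT%H:%M:%S')
int((d - datetime(1970, 1, 1)).total_seconds())  # 1422792000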
Example #34
    def atestGridFsAssetstore(self):
        """
        Test usage of the GridFS assetstore type.
        """
        # Clear any old DB data
        base.dropGridFSDatabase('girder_test_file_assetstore')
        # Clear the assetstore database
        conn = getDbConnection()
        conn.drop_database('girder_test_file_assetstore')

        self.model('assetstore').remove(self.model('assetstore').getCurrent())
        assetstore = self.model('assetstore').createGridFsAssetstore(
            name='Test', db='girder_test_file_assetstore')
        self.assetstore = assetstore

        chunkColl = conn['girder_test_file_assetstore']['chunk']

        # Upload the two-chunk file
        file = self._testUploadFile('helloWorld1.txt')
        hash = sha512(chunkData).hexdigest()
        self.assertEqual(hash, file['sha512'])

        # We should have two chunks in the database
        self.assertEqual(
            chunkColl.find({
                'uuid': file['chunkUuid']
            }).count(), 2)

        self._testDownloadFile(file, chunk1 + chunk2)
        self._testDownloadFolder()
        self._testDownloadCollection()

        # Delete the file, make sure chunks are gone from database
        self._testDeleteFile(file)
        self.assertEqual(
            chunkColl.find({
                'uuid': file['chunkUuid']
            }).count(), 0)

        empty = self._testEmptyUpload('empty.txt')
        self.assertEqual(sha512().hexdigest(), empty['sha512'])
        self._testDownloadFile(empty, '')
        self._testDeleteFile(empty)

        # Test copying a file
        copyTestFile = self._testUploadFile('helloWorld1.txt')
        self._testCopyFile(copyTestFile)
Example #35
File: base.py Project: cryos/girder
def dropTestDatabase(dropModels=True):
    """
    Call this to clear all contents from the test database. Also forces models
    to reload.
    """
    db_connection = getDbConnection()

    if dropModels:
        # Must clear the models to rebuild indices
        model_importer.clearModels()

    dbName = cherrypy.config['database']['uri'].split('/')[-1]

    if 'girder_test_' not in dbName:
        raise Exception('Expected a testing database name, but got {}'
                        .format(dbName))
    db_connection.drop_database(dbName)
Example #36
def db(request):
    """
    Require a Mongo test database.

    Provides a Mongo test database named after the requesting test function. Mongo databases are
    created/destroyed based on the URI provided with the --mongo-uri option and tear-down
    behavior is modified by the --keep-db option.
    """
    from girder.models import _dbClients, getDbConnection, pymongo
    from girder.models import model_base
    from girder.external import mongodb_proxy

    mockDb = request.config.getoption('--mock-db')
    dbUri = request.config.getoption('--mongo-uri')
    dbName = 'girder_test_%s' % hashlib.md5(_uid(
        request.node).encode('utf8')).hexdigest()
    keepDb = request.config.getoption('--keep-db')
    executable_methods = mongodb_proxy.EXECUTABLE_MONGO_METHODS
    realMongoClient = pymongo.MongoClient

    if mockDb:
        mongodb_proxy.EXECUTABLE_MONGO_METHODS = set()
        pymongo.MongoClient = mongomock.MongoClient

    connection = getDbConnection(uri='%s/%s' % (dbUri, dbName), quiet=False)

    # Force getDbConnection from models to return our connection
    _dbClients[(None, None)] = connection

    connection.drop_database(dbName)

    # Since models store a local reference to the current database, we need to force them all to
    # reconnect
    for model in model_base._modelSingletons:
        model.reconnect()

    yield connection

    if not keepDb:
        connection.drop_database(dbName)

    connection.close()

    if mockDb:
        mongodb_proxy.EXECUTABLE_MONGO_METHODS = executable_methods
        pymongo.MongoClient = realMongoClient
Example #37
File: system.py Project: manthey/girder
def getStatus(mode="basic", user=None):
    """
    Get a dictionary of status information regarding the girder server.

    :param mode: 'basic' returns values available to any anonymous user.
        'quick' returns only values that are cheap to acquire.
        'slow' provides all of that information and adds additional
        information that is slower to gather.
    :param user: a user record.  Must have admin access to get anything other
                 than basic mode.
    :returns: a status dictionary.
    """
    isAdmin = user is not None and user.get("admin", False) is True

    status = {}
    status["bootTime"] = psutil.boot_time()
    status["currentTime"] = time.time()
    process = psutil.Process(os.getpid())
    status["processStartTime"] = process.create_time()

    if mode in ("quick", "slow") and isAdmin:
        status["virtualMemory"] = _objectToDict(psutil.virtual_memory())
        status["swap"] = _objectToDict(psutil.swap_memory())
        status["cpuCount"] = psutil.cpu_count()

        status["processMemory"] = _objectToDict(process.get_memory_info())
        status["processName"] = process.name()
        status["cmdline"] = process.cmdline()
        status["exe"] = process.exe()
        status["cwd"] = process.cwd()
        status["userName"] = process.username()
        status["processCpuTimes"] = _objectToDict(process.cpu_times())
        db = getDbConnection().get_default_database()
        status["mongoBuildInfo"] = db.command("buildInfo")
        status["cherrypyThreadsMaxUsed"] = len(cherrypy.tools.status.seenThreads)
        status["cherrypyThreadsInUse"] = len(
            [
                True
                for threadId in cherrypy.tools.status.seenThreads
                if "end" not in cherrypy.tools.status.seenThreads[threadId]
            ]
        )
        status["cherrypyThreadPoolSize"] = cherrypy.server.thread_pool

    if mode == "slow" and isAdmin:
        _computeSlowStatus(process, status, db)
    return status
Example #38
def db(request):
    """
    Require a Mongo test database.

    Provides a Mongo test database named after the requesting test function. Mongo databases are
    created/destroyed based on the URI provided with the --mongo-uri option and tear-down
    behavior is modified by the --keep-db option.
    """
    from girder.models import _dbClients, getDbConnection, pymongo
    from girder.models import model_base
    from girder.external import mongodb_proxy

    mockDb = request.config.getoption('--mock-db')
    dbUri = request.config.getoption('--mongo-uri')
    dbName = 'girder_test_%s' % hashlib.md5(_uid(request.node).encode('utf8')).hexdigest()
    keepDb = request.config.getoption('--keep-db')
    executable_methods = mongodb_proxy.EXECUTABLE_MONGO_METHODS
    realMongoClient = pymongo.MongoClient

    if mockDb:
        mongodb_proxy.EXECUTABLE_MONGO_METHODS = set()
        pymongo.MongoClient = mongomock.MongoClient

    connection = getDbConnection(uri='%s/%s' % (dbUri, dbName), quiet=False)

    # Force getDbConnection from models to return our connection
    _dbClients[(None, None)] = connection

    connection.drop_database(dbName)

    # Since models store a local reference to the current database, we need to force them all to
    # reconnect
    for model in model_base._modelSingletons:
        model.reconnect()

    yield connection

    if not keepDb:
        connection.drop_database(dbName)

    connection.close()

    if mockDb:
        mongodb_proxy.EXECUTABLE_MONGO_METHODS = executable_methods
        pymongo.MongoClient = realMongoClient
Example #39
def db(request):
    """
    Require a Mongo test database.

    Provides a Mongo test database named after the requesting test function. Mongo databases are
    created/destroyed based on the URI provided with the --mongo-uri option and tear-down
    semantics are handled by the --drop-db option.
    """
    from girder.models import _dbClients, getDbConnection, pymongo
    from girder.models.model_base import _modelSingletons
    from girder.external import mongodb_proxy

    mockDb = request.config.getoption('--mock-db')
    dbUri = request.config.getoption('--mongo-uri')
    dbName = 'girder_test_%s' % hashlib.md5(
        request.node.name.encode('utf8')).hexdigest()
    dropDb = request.config.getoption('--drop-db')
    executable_methods = mongodb_proxy.EXECUTABLE_MONGO_METHODS
    realMongoClient = pymongo.MongoClient

    if mockDb:
        mongodb_proxy.EXECUTABLE_MONGO_METHODS = set()
        pymongo.MongoClient = mongomock.MongoClient

    connection = getDbConnection(uri='%s/%s' % (dbUri, dbName), quiet=False)

    # Force getDbConnection from models to return our connection
    _dbClients[(None, None)] = connection

    if dropDb == 'pre':
        connection.drop_database(dbName)

    for model in _modelSingletons:
        model.reconnect()

    yield connection

    if dropDb == 'post':
        connection.drop_database(dbName)

    connection.close()

    if mockDb:
        mongodb_proxy.EXECUTABLE_MONGO_METHODS = executable_methods
        pymongo.MongoClient = realMongoClient
Example #40
File: base.py Project: adsorensen/girder
def dropTestDatabase(dropModels=True):
    """
    Call this to clear all contents from the test database. Also forces models
    to reload.
    """
    db_connection = getDbConnection()

    dbName = cherrypy.config['database']['uri'].split('/')[-1]

    if 'girder_test_' not in dbName:
        raise Exception('Expected a testing database name, but got %s' % dbName)
    if dbName in db_connection.database_names():
        if dbName not in usedDBs and 'newdb' in os.environ.get('EXTRADEBUG', '').split():
            raise Exception('Warning: database %s already exists' % dbName)
        db_connection.drop_database(dbName)
    usedDBs[dbName] = True
    if dropModels:
        model_importer.reinitializeAll()
Example #41
def dropTestDatabase(dropModels=True):
    """
    Call this to clear all contents from the test database. Also forces models
    to reload.
    """
    db_connection = getDbConnection()

    dbName = cherrypy.config['database']['uri'].split('/')[-1]

    if 'girder_test_' not in dbName:
        raise Exception('Expected a testing database name, but got %s' % dbName)
    if dbName in db_connection.database_names():
        if dbName not in usedDBs and 'newdb' in os.environ.get('EXTRADEBUG', '').split():
            raise Exception('Warning: database %s already exists' % dbName)
        db_connection.drop_database(dbName)
    usedDBs[dbName] = True
    if dropModels:
        model_importer.reinitializeAll()
Example #42
 def __init__(self, assetstore):
     """
     :param assetstore: The assetstore to act on.
     """
     super(GridFsAssetstoreAdapter, self).__init__(assetstore)
     recent = False
     try:
         # Guard in case the connectionArgs is unhashable
         key = (self.assetstore.get('mongohost'),
                self.assetstore.get('replicaset'),
                self.assetstore.get('shard'))
         if key in _recentConnections:
             recent = (time.time() - _recentConnections[key]['created'] <
                       RECENT_CONNECTION_CACHE_TIME)
     except TypeError:
         key = None
     try:
         # MongoClient automatically reuses connections from a pool, but we
         # want to avoid redoing ensureChunkIndices each time we get such a
         # connection.
         client = getDbConnection(self.assetstore.get('mongohost'),
                                  self.assetstore.get('replicaset'),
                                  quiet=recent)
         self.chunkColl = MongoProxy(client[self.assetstore['db']].chunk)
         if not recent:
             _ensureChunkIndices(self.chunkColl)
             if self.assetstore.get('shard') == 'auto':
                 _setupSharding(self.chunkColl)
             if key is not None:
                 if len(_recentConnections) >= RECENT_CONNECTION_CACHE_MAX_SIZE:
                     _recentConnections.clear()
                 _recentConnections[key] = {
                     'created': time.time()
                 }
     except pymongo.errors.ConnectionFailure:
         logger.error('Failed to connect to GridFS assetstore %s',
                      self.assetstore['db'])
         self.chunkColl = 'Failed to connect'
         self.unavailable = True
     except pymongo.errors.ConfigurationError:
         logger.exception('Failed to configure GridFS assetstore %s',
                          self.assetstore['db'])
         self.chunkColl = 'Failed to configure'
         self.unavailable = True
Example #43
def getStatus(mode='basic', user=None):
    """
    Get a dictionary of status information regarding the Girder server.

    :param mode: 'basic' returns values available to any anonymous user.
        'quick' returns only values that are cheap to acquire.
        'slow' provides all of that information and adds additional
        information that is slower to gather.
    :param user: a user record.  Must have admin access to get anything other
                 than basic mode.
    :returns: a status dictionary.
    """
    isAdmin = (user is not None and user['admin'])

    status = {}
    status['bootTime'] = psutil.boot_time()
    status['currentTime'] = time.time()
    process = psutil.Process(os.getpid())
    status['processStartTime'] = process.create_time()

    if mode in ('quick', 'slow') and isAdmin:
        status['virtualMemory'] = _objectToDict(psutil.virtual_memory())
        status['swap'] = _objectToDict(psutil.swap_memory())
        status['cpuCount'] = psutil.cpu_count()

        status['processMemory'] = _objectToDict(process.memory_info())
        status['processName'] = process.name()
        status['cmdline'] = process.cmdline()
        status['exe'] = process.exe()
        status['cwd'] = process.cwd()
        status['userName'] = process.username()
        status['processCpuTimes'] = _objectToDict(process.cpu_times())
        db = getDbConnection().get_default_database()
        status['mongoBuildInfo'] = db.command('buildInfo')
        status['cherrypyThreadsMaxUsed'] = len(
            cherrypy.tools.status.seenThreads)
        status['cherrypyThreadsInUse'] = len([
            True for threadId in cherrypy.tools.status.seenThreads
            if 'end' not in cherrypy.tools.status.seenThreads[threadId]
        ])
        status['cherrypyThreadPoolSize'] = cherrypy.server.thread_pool

    if mode == 'slow' and isAdmin:
        _computeSlowStatus(process, status, db)
    return status
Example #44
 def __init__(self, assetstore):
     """
     :param assetstore: The assetstore to act on.
     """
     super(GridFsAssetstoreAdapter, self).__init__(assetstore)
     recent = False
     try:
         # Guard in case the connectionArgs is unhashable
         key = (self.assetstore.get('mongohost'),
                self.assetstore.get('replicaset'),
                self.assetstore.get('shard'))
         if key in _recentConnections:
             recent = (time.time() - _recentConnections[key]['created'] <
                       RECENT_CONNECTION_CACHE_TIME)
     except TypeError:
         key = None
     try:
         # MongoClient automatically reuses connections from a pool, but we
         # want to avoid redoing ensureChunkIndices each time we get such a
         # connection.
         client = getDbConnection(self.assetstore.get('mongohost'),
                                  self.assetstore.get('replicaset'),
                                  quiet=recent)
         self.chunkColl = MongoProxy(client[self.assetstore['db']].chunk)
         if not recent:
             _ensureChunkIndices(self.chunkColl)
             if self.assetstore.get('shard') == 'auto':
                 _setupSharding(self.chunkColl)
             if key is not None:
                 if len(_recentConnections) >= RECENT_CONNECTION_CACHE_MAX_SIZE:
                     _recentConnections.clear()
                 _recentConnections[key] = {
                     'created': time.time()
                 }
     except pymongo.errors.ConnectionFailure:
         logger.error('Failed to connect to GridFS assetstore %s',
                      self.assetstore['db'])
         self.chunkColl = 'Failed to connect'
         self.unavailable = True
     except pymongo.errors.ConfigurationError:
         logger.exception('Failed to configure GridFS assetstore %s',
                          self.assetstore['db'])
         self.chunkColl = 'Failed to configure'
         self.unavailable = True
Example #45
def getStatus(mode='basic', user=None):
    """
    Get a dictionary of status information regarding the Girder server.

    :param mode: 'basic' returns values available to any anonymous user.
        'quick' returns only values that are cheap to acquire.
        'slow' provides all of that information and adds additional
        information that is slower to gather.
    :param user: a user record.  Must have admin access to get anything other
                 than basic mode.
    :returns: a status dictionary.
    """
    isAdmin = (user is not None and user['admin'])

    status = {}
    status['bootTime'] = psutil.boot_time()
    status['currentTime'] = time.time()
    process = psutil.Process(os.getpid())
    status['processStartTime'] = process.create_time()

    if mode in ('quick', 'slow') and isAdmin:
        status['virtualMemory'] = _objectToDict(psutil.virtual_memory())
        status['swap'] = _objectToDict(psutil.swap_memory())
        status['cpuCount'] = psutil.cpu_count()

        status['processMemory'] = _objectToDict(process.memory_info())
        status['processName'] = process.name()
        status['cmdline'] = process.cmdline()
        status['exe'] = process.exe()
        status['cwd'] = process.cwd()
        status['userName'] = process.username()
        status['processCpuTimes'] = _objectToDict(process.cpu_times())
        db = getDbConnection().get_database()
        status['mongoBuildInfo'] = db.command('buildInfo')
        status['cherrypyThreadsMaxUsed'] = len(
            cherrypy.tools.status.seenThreads)
        status['cherrypyThreadsInUse'] = len([
            True for threadId in cherrypy.tools.status.seenThreads
            if 'end' not in cherrypy.tools.status.seenThreads[threadId]])
        status['cherrypyThreadPoolSize'] = cherrypy.server.thread_pool

    if mode == 'slow' and isAdmin:
        _computeSlowStatus(process, status, db)
    return status
Example #46
    def __init__(self):
        self.name = None
        self._indices = []
        self.initialize()

        assert type(self.name) == str

        db_cfg = getDbConfig()
        db_connection = getDbConnection()
        if cherrypy.config['server']['mode'] == 'testing':
            dbName = '%s_test' % db_cfg['database']
        else:
            dbName = db_cfg['database']  # pragma: no cover
        self.collection = db_connection[dbName][self.name]

        assert isinstance(self.collection, pymongo.collection.Collection)
        assert type(self._indices) == list

        for index in self._indices:
            self.collection.ensure_index(index)
Example #47
 def __init__(self, assetstore):
     """
     :param assetstore: The assetstore to act on.
     """
     self.assetstore = assetstore
     try:
         self.chunkColl = getDbConnection(
             assetstore.get('mongohost', None),
             assetstore.get('replicaset', None))[assetstore['db']].chunk
     except pymongo.errors.ConnectionFailure:
         logger.error('Failed to connect to GridFS assetstore %s',
                      assetstore['db'])
         self.chunkColl = 'Failed to connect'
         self.unavailable = True
         return
     except pymongo.errors.ConfigurationError:
         logger.exception('Failed to configure GridFS assetstore %s',
                          assetstore['db'])
         self.chunkColl = 'Failed to configure'
         self.unavailable = True
         return
Example #48
 def __init__(self, assetstore):
     """
     :param assetstore: The assetstore to act on.
     """
     self.assetstore = assetstore
     try:
         self.chunkColl = getDbConnection(
             assetstore.get('mongohost', None),
             assetstore.get('replicaset', None))[assetstore['db']].chunk
     except pymongo.errors.ConnectionFailure:
         logger.error('Failed to connect to GridFS assetstore %s',
                      assetstore['db'])
         self.chunkColl = 'Failed to connect'
         self.unavailable = True
         return
     except pymongo.errors.ConfigurationError:
         logger.exception('Failed to configure GridFS assetstore %s',
                          assetstore['db'])
         self.chunkColl = 'Failed to configure'
         self.unavailable = True
         return
Example #49
    def setUp(self):
        """
        Set up the mongo db for the external dataset, with a collection
        named tweetsgeo, which has tweet data that is geolocated.
        """
        super(ExternalMongoDatasetTestCase, self).setUp()

        self._user = self.model('user').createUser(
            'minervauser', 'password', 'minerva', 'user',
            '*****@*****.**')

        from girder.utility import config
        dbUri = config.getConfig()['database']['uri']
        self.dbName = 'minerva_test_external_mongo_dataset'
        dbUriParts = dbUri.split('/')[0:-1]
        self.dbUri = '/'.join(dbUriParts + [self.dbName])
        from girder.models import getDbConnection
        self.externalMongoDbConnection = getDbConnection(self.dbUri)
        self.externalMongoDb = self.externalMongoDbConnection.get_default_database()
        from girder.external.mongodb_proxy import MongoProxy
        self.collectionName = 'tweetsgeo'
        self.tweetsgeoCollection = MongoProxy(self.externalMongoDb[self.collectionName])
        # add test data to external dataset
        self.pluginTestDir = os.path.dirname(os.path.realpath(__file__))
        tweets100Path = os.path.join(self.pluginTestDir, 'data', 'tweets100.json')
        z = zipfile.ZipFile('%s.zip' % tweets100Path)
        tweets = json.load(z.open('tweets100.json'))
        from datetime import datetime
        dateformat = '%Y-%m-%dT%H:%M:%S'
        for tweet in tweets:
            d = datetime.strptime((tweet['created_at']), dateformat)
            tweet['created_at'] = int((d - datetime(1970, 1, 1)).total_seconds())
            self.tweetsgeoCollection.save(tweet)

        path = '/minerva_dataset/folder'
        params = {
            'userId': self._user['_id'],
        }
        # create a dataset folder
        self.request(path=path, method='POST', params=params, user=self._user)
Example #50
    def atestGridFsAssetstore(self):
        """
        Test usage of the GridFS assetstore type.
        """
        # Clear any old DB data
        base.dropGridFSDatabase("girder_test_file_assetstore")
        # Clear the assetstore database
        conn = getDbConnection()
        conn.drop_database("girder_test_file_assetstore")

        self.model("assetstore").remove(self.model("assetstore").getCurrent())
        assetstore = self.model("assetstore").createGridFsAssetstore(name="Test", db="girder_test_file_assetstore")
        self.assetstore = assetstore

        chunkColl = conn["girder_test_file_assetstore"]["chunk"]

        # Upload the two-chunk file
        file = self._testUploadFile("helloWorld1.txt")
        hash = sha512(chunkData).hexdigest()
        self.assertEqual(hash, file["sha512"])

        # We should have two chunks in the database
        self.assertEqual(chunkColl.find({"uuid": file["chunkUuid"]}).count(), 2)

        self._testDownloadFile(file, chunk1 + chunk2)
        self._testDownloadFolder()
        self._testDownloadCollection()

        # Delete the file, make sure chunks are gone from database
        self._testDeleteFile(file)
        self.assertEqual(chunkColl.find({"uuid": file["chunkUuid"]}).count(), 0)

        empty = self._testEmptyUpload("empty.txt")
        self.assertEqual(sha512().hexdigest(), empty["sha512"])
        self._testDownloadFile(empty, "")
        self._testDeleteFile(empty)

        # Test copying a file
        copyTestFile = self._testUploadFile("helloWorld1.txt")
        self._testCopyFile(copyTestFile)
Example #51
    def validateInfo(doc):
        """
        Validate the assetstore -- make sure we can connect to it and that the
        necessary indexes are set up.
        """
        if not doc.get('db', ''):
            raise ValidationException('Database name must not be empty.', 'db')
        if '.' in doc['db'] or ' ' in doc['db']:
            raise ValidationException(
                'Database name cannot contain spaces'
                ' or periods.', 'db')

        try:
            chunkColl = getDbConnection(
                doc.get('mongohost'),
                doc.get('replicaset'),
                serverSelectionTimeoutMS=10000)[doc['db']].chunk
            _ensureChunkIndices(chunkColl)
        except pymongo.errors.ServerSelectionTimeoutError as e:
            raise ValidationException('Could not connect to the database: %s' %
                                      str(e))

        return doc
Example #52
 def __init__(self, assetstore):
     """
     :param assetstore: The assetstore to act on.
     """
     super(GridFsAssetstoreAdapter, self).__init__(assetstore)
     try:
         self.chunkColl = getDbConnection(
             self.assetstore.get('mongohost', None),
             self.assetstore.get('replicaset',
                                 None))[self.assetstore['db']].chunk
         _ensureChunkIndices(self.chunkColl)
     except pymongo.errors.ConnectionFailure:
         logger.error('Failed to connect to GridFS assetstore %s',
                      self.assetstore['db'])
         self.chunkColl = 'Failed to connect'
         self.unavailable = True
         return
     except pymongo.errors.ConfigurationError:
         logger.exception('Failed to configure GridFS assetstore %s',
                          self.assetstore['db'])
         self.chunkColl = 'Failed to configure'
         self.unavailable = True
         return
Example #53
 def __init__(self, assetstore):
     """
     :param assetstore: The assetstore to act on.
     """
     super(GridFsAssetstoreAdapter, self).__init__(assetstore)
     try:
         self.chunkColl = getDbConnection(
             self.assetstore.get('mongohost', None),
             self.assetstore.get('replicaset', None)
         )[self.assetstore['db']].chunk
         _ensureChunkIndices(self.chunkColl)
     except pymongo.errors.ConnectionFailure:
         logger.error('Failed to connect to GridFS assetstore %s',
                      self.assetstore['db'])
         self.chunkColl = 'Failed to connect'
         self.unavailable = True
         return
     except pymongo.errors.ConfigurationError:
         logger.exception('Failed to configure GridFS assetstore %s',
                          self.assetstore['db'])
         self.chunkColl = 'Failed to configure'
         self.unavailable = True
         return
Example #54
def _computeSlowStatus(process, status, db):
    status['diskPartitions'] = [_objectToDict(part) for part in
                                psutil.disk_partitions()]
    try:
        # This fails in certain environments, so guard it
        status['diskIO'] = _objectToDict(psutil.disk_io_counters())
    except Exception:
        pass
    # Report on the disk usage where the script is located
    if hasattr(girder, '__file__'):
        status['girderPath'] = os.path.abspath(girder.__file__)
        status['girderDiskUsage'] = _objectToDict(
            psutil.disk_usage(status['girderPath']))
    # Report where our logs are and how much space is available for them
    status['logs'] = []
    for handler in logger.handlers:
        try:
            logInfo = {'path': handler.baseFilename}
            logInfo['diskUsage'] = _objectToDict(
                psutil.disk_usage(logInfo['path']))
            status['logs'].append(logInfo)
        except Exception:
            # If we can't read information about the log, don't throw an
            # exception
            pass
    status['mongoDbStats'] = db.command('dbStats')
    status['mongoDbPath'] = getDbConnection().admin.command(
        'getCmdLineOpts')['parsed']['storage']['dbPath']
    status['mongoDbDiskUsage'] = _objectToDict(
        psutil.disk_usage(status['mongoDbPath']))

    status['processDirectChildrenCount'] = len(process.children())
    status['processAllChildrenCount'] = len(process.children(True))
    status['openFiles'] = [_objectToDict(file) for file in
                           process.open_files()]
    # I'd rather see textual names for the family and type of connections,
    # so make a lookup table for them
    connFamily = {getattr(socket, key): key for key in dir(socket)
                  if key.startswith('AF_')}
    connType = {getattr(socket, key): key for key in dir(socket)
                if key.startswith('SOCK_')}
    connections = []
    for conn in process.connections():
        connDict = _objectToDict(conn)
        connDict.pop('raddr', None)
        connDict.pop('laddr', None)
        connDict['family'] = connFamily.get(connDict['family'],
                                            connDict['family'])
        connDict['type'] = connType.get(connDict['type'], connDict['type'])
        connections.append(connDict)
    status['connections'] = connections
    if hasattr(process, 'io_counters'):
        status['ioCounters'] = _objectToDict(process.io_counters())

    status['cherrypyThreads'] = {}
    for threadId in cherrypy.tools.status.seenThreads:
        info = cherrypy.tools.status.seenThreads[threadId].copy()
        if 'end' in info:
            info['duration'] = info['end'] - info['start']
            info['idle'] = time.time() - info['end']
        status['cherrypyThreads'][threadId] = info
Example #55
        'resource path.  "resource:admin" uses the default admin, '
        '"resourceid:<path" is the string id for the resource path.')
    parser.add_argument(
        '--yaml',
        help='Specify parameters for this script in a yaml file.  If no value '
        'is specified, this defaults to the environment variable of '
        'DSA_PROVISION_YAML.  No error is thrown if the file does not exist. '
        'The yaml file is a dictionary of keys as would be passed to the '
        'command line.')
    parser.add_argument(
        '--no-mongo-compat', action='store_false', dest='mongo_compat',
        default=None, help='Do not automatically set the mongo feature '
        'compatibility version to the current server version.')
    parser.add_argument(
        '--verbose', '-v', action='count', default=0, help='Increase verbosity')
    opts = parser.parse_args(args=sys.argv[1:])
    logger.addHandler(logging.StreamHandler(sys.stderr))
    logger.setLevel(max(1, logging.WARNING - 10 * opts.verbose))
    logger.debug('Parsed arguments: %r', opts)
    opts = merge_yaml_opts(opts, parser)
    # This loads plugins, allowing setting validation
    configureServer()
    if getattr(opts, 'mongo_compat', None) is not False:
        try:
            db = getDbConnection()
            db.admin.command({'setFeatureCompatibilityVersion': '.'.join(
                db.server_info()['version'].split('.')[:2])})
        except Exception:
            logger.warning('Could not set mongo feature compatibility version.')
    provision(opts)
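The feature-compatibility call above derives the major.minor version from the server's reported version string; for illustration:

version = '4.4.6'                     # e.g. db.server_info()['version']
'.'.join(version.split('.')[:2])      # -> '4.4'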