def _removeUserAssetstore():
    """
    Delete the user assetstore when one is present; do nothing otherwise.
    """
    existing = Assetstore().load(DB_ASSETSTORE_ObjectId)
    if existing:
        Assetstore().remove(existing)
def validateFile(file):
    """
    Validate the DB_INFO_KEY entry on a file stored in a database
    assetstore.

    Files lacking DB_INFO_KEY are deliberately not checked, even when they
    live in a database assetstore, so a file can be created first and have
    its database information attached afterwards.

    :param file: the file document to validate.
    :raises ValidationException: if the table (or required uri) is blank.
    """
    if DB_INFO_KEY not in file or 'assetstoreId' not in file:
        return None
    store = Assetstore().load(file['assetstoreId'])
    if store.get('type') != AssetstoreType.DATABASE:
        # The file may have started in a database assetstore and later been
        # replaced (e.g. by uploading a new file); drop the stale db info.
        file.pop(DB_INFO_KEY, None)
        return None
    dbInfo = file[DB_INFO_KEY]
    if not dbInfo.get('table'):
        raise ValidationException(
            'File database information entry must have a non-blank table '
            'value.')
    if not (store['database'].get('uri') or dbInfo.get('uri')):
        raise ValidationException(
            'File database information must have a non-blank uri value on an '
            'assetstore that doesn\'t specify a single database.')
def getDbInfoForFile(file, assetstore=None):
    """
    Build the connection information needed to reach the database backing
    a file.

    :param file: the file document.
    :param assetstore: the assetstore document; loaded from the file's
        assetstoreId when omitted.
    :return: a dbinfo dictionary (uri, table, collection, and optionally
        database/schema), or None when the file is not in a database
        assetstore.
    """
    if DB_INFO_KEY not in file or 'assetstoreId' not in file:
        return None
    store = assetstore
    if store is None:
        store = Assetstore().load(file['assetstoreId'])
    if store.get('type') != AssetstoreType.DATABASE:
        return None
    info = file[DB_INFO_KEY]
    # User-type assetstores carry the uri per-file; otherwise the
    # assetstore itself fixes the database uri.
    if store['database'].get('dbtype') == DB_ASSETSTORE_USER_TYPE:
        uri = info['uri']
    else:
        uri = store['database']['uri']
    dbinfo = {
        'uri': uri,
        'table': info['table'],
        'collection': info['table'],
    }
    dbinfo.update({k: info[k] for k in ('database', 'schema') if k in info})
    return dbinfo
def testGridFSShardingAssetstoreUpload(self):
    """
    Exercise uploads against a GridFS assetstore backed by a sharded
    Mongo cluster, and verify the chunk collection is actually sharded —
    both for a fresh assetstore and for a second one pointing at the
    same database through a slightly different URI.
    """
    # Enable extra replica-set logging when EXTRADEBUG lists REPLICASET.
    verbose = 0
    if 'REPLICASET' in os.environ.get('EXTRADEBUG', '').split():
        verbose = 2
    # Starting the sharding service takes time
    rscfg = mongo_replicaset.makeConfig(port=27073, shard=True, sharddb=None)
    mongo_replicaset.startMongoReplicaSet(rscfg, verbose=verbose)
    # Clear the assetstore database and create a GridFS assetstore
    Assetstore().remove(Assetstore().getCurrent())
    self.assetstore = Assetstore().createGridFsAssetstore(
        name='Test', db='girder_assetstore_shard_upload_test',
        mongohost='mongodb://127.0.0.1:27073', shard='auto')
    self._testUpload()
    # Verify that we have successfully sharded the collection
    adapter = assetstore_utilities.getAssetstoreAdapter(self.assetstore)
    stat = adapter.chunkColl.database.command(
        'collstats', adapter.chunkColl.name)
    self.assertTrue(bool(stat['sharded']))
    # Although we have asked for multiple shards, the chunks may all be on
    # one shard.  Make sure at least one shard is reported.
    self.assertGreaterEqual(len(stat['shards']), 1)
    # Asking for the same database again should also report sharding.  Use
    # a slightly different URI to ensure that the sharding is checked anew.
    assetstore = Assetstore().createGridFsAssetstore(
        name='Test 2', db='girder_assetstore_shard_upload_test',
        mongohost='mongodb://127.0.0.1:27073/?', shard='auto')
    adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
    stat = adapter.chunkColl.database.command(
        'collstats', adapter.chunkColl.name)
    self.assertTrue(bool(stat['sharded']))
    mongo_replicaset.stopMongoReplicaSet(rscfg)
def testS3AssetstoreUpload(self):
    """
    Exercise uploads against an S3 assetstore (served by the mock S3
    server), then verify that an abandoned multipart upload can be
    discovered and deleted via the /system/uploads endpoint.
    """
    # Clear the assetstore database and create an S3 assetstore
    Assetstore().remove(self.assetstore)
    params = {
        'name': 'S3 Assetstore',
        'bucket': 'bucketname',
        'prefix': 'testprefix',
        'accessKeyId': 'abc',
        'secret': '123',
        'service': base.mockS3Server.service
    }
    assetstore = Assetstore().createS3Assetstore(**params)
    self.assetstore = assetstore
    self._testUpload()
    # make an untracked upload to test that we can find and clear it
    client = boto3.client(
        's3', endpoint_url=base.mockS3Server.service,
        aws_access_key_id='abc', aws_secret_access_key='123')
    client.create_multipart_upload(
        Bucket='bucketname', Key='testprefix/abandoned_upload')
    # The untracked multipart upload should be reported.
    resp = self.request(path='/system/uploads', user=self.admin)
    self.assertStatusOk(resp)
    self.assertEqual(len(resp.json), 1)
    # Ask to delete it
    resp = self.request(path='/system/uploads', method='DELETE',
                        user=self.admin)
    self.assertStatusOk(resp)
    # Check that it is gone
    resp = self.request(path='/system/uploads', user=self.admin)
    self.assertStatusOk(resp)
    self.assertEqual(resp.json, [])
def validateFile(file):
    """
    Check a file's DB_INFO_KEY data when the file belongs to a database
    assetstore.

    Files without DB_INFO_KEY are intentionally skipped (even inside a
    database assetstore) so that a file can be created first and gain its
    database information later.

    :param file: the file document.
    :raises ValidationException: on a blank table, or a blank uri when the
        assetstore does not pin a single database.
    """
    if DB_INFO_KEY not in file or 'assetstoreId' not in file:
        return None
    assetstore = Assetstore().load(file['assetstoreId'])
    if assetstore.get('type') != AssetstoreType.DATABASE:
        # This can happen if the file was a database_assetstore file and
        # then was replaced, for instance by uploading a new file.
        file.pop(DB_INFO_KEY, None)
        return None
    info = file[DB_INFO_KEY]
    if not info.get('table'):
        raise ValidationException(
            'File database information entry must have a non-blank table '
            'value.')
    hasUri = assetstore['database'].get('uri') or info.get('uri')
    if not hasUri:
        raise ValidationException(
            'File database information must have a non-blank uri value on an '
            'assetstore that doesn\'t specify a single database.')
def testGridFsAssetstore(self): """ Test usage of the GridFS assetstore type. """ # Must also lower GridFS's internal chunk size to support our small chunks gridfs_assetstore_adapter.CHUNK_SIZE, old = 6, gridfs_assetstore_adapter.CHUNK_SIZE # Clear any old DB data base.dropGridFSDatabase('girder_test_file_assetstore') # Clear the assetstore database conn = getDbConnection() conn.drop_database('girder_test_file_assetstore') Assetstore().remove(Assetstore().getCurrent()) assetstore = Assetstore().createGridFsAssetstore( name='Test', db='girder_test_file_assetstore') self.assetstore = assetstore chunkColl = conn['girder_test_file_assetstore']['chunk'] # Upload the two-chunk file file = self._testUploadFile('helloWorld1.txt') hash = sha512(chunkData).hexdigest() file = File().load(file['_id'], force=True) self.assertEqual(hash, file['sha512']) # The file should have no local path self.assertRaises(FilePathException, File().getLocalFilePath, file) # We should have two chunks in the database self.assertEqual( chunkColl.find({ 'uuid': file['chunkUuid'] }).count(), 2) self._testDownloadFile(file, chunk1 + chunk2) # Reset chunk size so the large file testing isn't horribly slow gridfs_assetstore_adapter.CHUNK_SIZE = old self._testDownloadFolder() self._testDownloadCollection() # Delete the file, make sure chunks are gone from database self._testDeleteFile(file) self.assertEqual( chunkColl.find({ 'uuid': file['chunkUuid'] }).count(), 0) empty = self._testEmptyUpload('empty.txt') self.assertEqual(sha512().hexdigest(), empty['sha512']) self._testDownloadFile(empty, '') self._testDeleteFile(empty) # Test copying a file copyTestFile = self._testUploadFile('helloWorld1.txt') self._testCopyFile(copyTestFile)
def testDeleteAssetstore(self):
    """
    Verify assetstore deletion rules: anonymous users may not delete;
    an assetstore holding files may not be deleted; once empty it can
    be, after which the current assetstore switches to another store.
    """
    resp = self.request(path='/assetstore', method='GET', user=self.admin)
    self.assertStatusOk(resp)
    self.assertEqual(1, len(resp.json))
    assetstore = Assetstore().load(resp.json[0]['_id'])
    # Create a second assetstore so that when we delete the first one, the
    # current assetstore will be switched to the second one.
    secondStore = Assetstore().createFilesystemAssetstore(
        'Another Store', os.path.join(ROOT_DIR, 'tests', 'assetstore',
                                      'server_assetstore_test2'))
    # make sure our original asset store is the current one
    current = Assetstore().getCurrent()
    self.assertEqual(current['_id'], assetstore['_id'])
    # Anonymous user should not be able to delete assetstores
    resp = self.request(path='/assetstore/%s' % assetstore['_id'],
                        method='DELETE')
    self.assertStatus(resp, 401)
    # Simulate the existence of a file within the assetstore
    folders = Folder().childFolders(self.admin, 'user', user=self.admin)
    item = Item().createItem(
        name='x.txt', creator=self.admin, folder=six.next(folders))
    file = File().createFile(
        creator=self.admin, item=item, name='x.txt', size=1,
        assetstore=assetstore, mimeType='text/plain')
    file['sha512'] = 'x'  # add this dummy value to simulate real file
    resp = self.request(path='/assetstore/%s' % assetstore['_id'],
                        method='DELETE', user=self.admin)
    self.assertStatus(resp, 400)
    self.assertEqual(resp.json['message'], 'You may not delete an '
                     'assetstore that contains files.')
    # Delete the offending file, we can now delete the assetstore
    File().remove(file)
    resp = self.request(path='/assetstore/%s' % assetstore['_id'],
                        method='DELETE', user=self.admin)
    self.assertStatusOk(resp)
    self.assertEqual(resp.json['message'],
                     'Deleted assetstore %s.' % assetstore['name'])
    resp = self.request(path='/assetstore', method='GET', user=self.admin)
    self.assertStatusOk(resp)
    self.assertEqual(1, len(resp.json))
    # Get the current assetstore.  It should now be the second store we
    # created.
    current = Assetstore().getCurrent()
    self.assertEqual(current['_id'], secondStore['_id'])
def _provisionAssetstore():
    """
    Ensure a filesystem assetstore named 'assetstore' exists, creating
    its backing directory on disk when needed.
    """
    if Assetstore().findOne({'name': 'assetstore'}):
        return
    if not settings.ISIC_ASSETSTORE_PATH.is_dir():
        # This is expected to fail if the path is owned by root
        settings.ISIC_ASSETSTORE_PATH.mkdir(parents=True)
    Assetstore().createFilesystemAssetstore(
        name='assetstore',
        root=str(settings.ISIC_ASSETSTORE_PATH.resolve()),
    )
def testGridFSAssetstoreUpload(self):
    """
    Run the shared upload test suite against a fresh GridFS assetstore.
    """
    # Clear any old DB data
    base.dropGridFSDatabase('girder_test_upload_assetstore')
    # Replace the current assetstore with a new GridFS one.
    Assetstore().remove(Assetstore().getCurrent())
    self.assetstore = Assetstore().createGridFsAssetstore(
        name='Test', db='girder_test_upload_assetstore')
    self._testUpload()
def testUnknownAssetstoreType(self):
    """
    An assetstore with an unrecognized type must fail adapter lookup, but
    addComputedInfo should still populate (unknown) capacity information.
    """
    # Persist a store with a bogus type; validation is skipped on purpose.
    store = Assetstore().save(
        {'name': 'Sample', 'type': 'unknown'}, validate=False)
    with self.assertRaises(GirderException):
        assetstore_utilities.getAssetstoreAdapter(store)
    Assetstore().addComputedInfo(store)
    self.assertEqual(store['capacity']['total'], None)
def setUp(self, assetstoreType=None, dropModels=True):
    """
    We want to start with a clean database each time, so we drop the test
    database before each test. We then add an assetstore so the file model
    can be used without 500 errors.

    :param assetstoreType: if 'gridfs' or 's3', use that assetstore.
        'gridfsrs' uses a GridFS assetstore with a replicaset, and
        'gridfsshard' one with a sharding server. For any other value,
        use a filesystem assetstore.
    :param dropModels: passed through to dropTestDatabase.
    """
    self.assetstoreType = assetstoreType
    dropTestDatabase(dropModels=dropModels)
    assetstoreName = os.environ.get('GIRDER_TEST_ASSETSTORE', 'test')
    assetstorePath = os.path.join(
        ROOT_DIR, 'tests', 'assetstore', assetstoreName)
    if assetstoreType == 'gridfs':
        # Name this as '_auto' to prevent conflict with assetstores created
        # within test methods
        gridfsDbName = 'girder_test_%s_assetstore_auto' % assetstoreName
        dropGridFSDatabase(gridfsDbName)
        self.assetstore = Assetstore().createGridFsAssetstore(
            name='Test', db=gridfsDbName)
    elif assetstoreType == 'gridfsrs':
        # GridFS backed by a three-member mongo replica set.
        gridfsDbName = 'girder_test_%s_rs_assetstore_auto' % assetstoreName
        self.replicaSetConfig = mongo_replicaset.makeConfig()
        mongo_replicaset.startMongoReplicaSet(self.replicaSetConfig)
        self.assetstore = Assetstore().createGridFsAssetstore(
            name='Test', db=gridfsDbName,
            mongohost='mongodb://127.0.0.1:27070,127.0.0.1:27071,'
            '127.0.0.1:27072', replicaset='replicaset')
    elif assetstoreType == 'gridfsshard':
        # GridFS backed by a sharded mongo cluster.
        gridfsDbName = 'girder_test_%s_shard_assetstore_auto' % assetstoreName
        self.replicaSetConfig = mongo_replicaset.makeConfig(
            port=27073, shard=True, sharddb=None)
        mongo_replicaset.startMongoReplicaSet(self.replicaSetConfig)
        self.assetstore = Assetstore().createGridFsAssetstore(
            name='Test', db=gridfsDbName,
            mongohost='mongodb://127.0.0.1:27073', shard='auto')
    elif assetstoreType == 's3':
        # S3 assetstore served by the mock S3 server.
        self.assetstore = Assetstore().createS3Assetstore(
            name='Test', bucket='bucketname', accessKeyId='test',
            secret='test', service=mockS3Server.service)
    else:
        # Default: a filesystem assetstore rooted in the test directory.
        dropFsAssetstore(assetstorePath)
        self.assetstore = Assetstore().createFilesystemAssetstore(
            name='Test', root=assetstorePath)
    # Point SMTP at the mock server and allow arbitrarily small chunks.
    addr = ':'.join(map(str, mockSmtp.address or ('localhost', 25)))
    settings = Setting()
    settings.set(SettingKey.SMTP_HOST, addr)
    settings.set(SettingKey.UPLOAD_MINIMUM_CHUNK_SIZE, 0)
    settings.set(SettingKey.PLUGINS_ENABLED, enabledPlugins)
    # Optionally pre-populate the database from a config file.
    if os.environ.get('GIRDER_TEST_DATABASE_CONFIG'):
        setup_database.main(os.environ['GIRDER_TEST_DATABASE_CONFIG'])
def _finalize_upload(self, upload, assetstore=None):
    """
    Move a completed upload's temporary file into its destination on the
    mounted filesystem and return a virtual-file document for it.

    :param upload: the upload document being finalized.
    :param assetstore: the assetstore document; loaded from the upload's
        assetstoreId when not supplied.
    :return: the virtual file wrapping the final path.
    """
    if assetstore is None:
        assetstore = Assetstore().load(upload["assetstoreId"])
    if str(upload["parentId"]).startswith("wtlocal:"):
        # 'wtlocal:' parent ids encode the filesystem path and root id.
        path, root_id = self.path_from_id(upload["parentId"])
        root = Folder().load(root_id, force=True)  # TODO make it obsolete
    else:
        root = Folder().load(upload["parentId"], force=True)
        path = pathlib.Path(root["fsPath"])
    abspath = path / upload["name"]
    shutil.move(upload["tempFile"], abspath.as_posix())
    # Apply the assetstore's configured file-creation permissions.
    abspath.chmod(assetstore.get("perms", DEFAULT_PERMS))
    return self.vFile(abspath, root)
def __init__(self):
    """Set up the 'label' REST resource: model handles and routes."""
    super().__init__()
    self.resourceName = 'label'
    self.ann_file_name = "annotation.json"
    # Handles to the Girder models used by the route handlers.
    self.asset_m = Assetstore()
    self.coll_m = Collection()
    self.file_m = File()
    self.folder_m = Folder()
    self.item_m = Item()
    self.upload_m = Upload()
    self.setupRoutes()
def __init__(self):
    """Set up the 'labelImage' REST resource: model handles and routes."""
    super().__init__()
    self.resourceName = 'labelImage'
    self.label_image_folder_name = "LabelImages"
    # Handles to the Girder models used by the route handlers.
    self.asset_m = Assetstore()
    self.coll_m = Collection()
    self.file_m = File()
    self.folder_m = Folder()
    self.item_m = Item()
    self.upload_m = Upload()
    self.setupRoutes()
def _createUserAssetstore():
    """
    Ensure the general-purpose user assetstore exists.

    A fixed ID is used so that, if the assetstore is deleted and the plugin
    starts again, previously created db assets will still work.
    """
    if Assetstore().load(DB_ASSETSTORE_ObjectId):
        return
    Assetstore().save({
        '_id': DB_ASSETSTORE_ObjectId,
        'type': AssetstoreType.DATABASE,
        'name': DB_ASSETSTORE_USER_NAME,
        'database': {'dbtype': DB_ASSETSTORE_USER_TYPE},
    })
def provision(opts): """ Provision the instance. :param opts: the argparse options. """ # If there is are no users, create an admin user if User().findOne() is None: User().createUser('admin', 'password', 'Admin', 'Admin', '*****@*****.**') adminUser = User().findOne({'admin': True}) # Make sure we have an assetstore if Assetstore().findOne() is None: Assetstore().createFilesystemAssetstore('Assetstore', '/assetstore') # Make sure we have a demo collection and download some demo files if getattr(opts, 'samples', None): sampleFolder = get_sample_data( adminUser, getattr(opts, 'sample-collection', 'TCGA collection'), getattr(opts, 'sample-folder', 'Sample Images')) taskFolder = get_collection_folder(adminUser, 'Tasks', 'Slicer CLI Web Tasks') # Show label and macro images, plus tile and internal metadata for all users settings = { 'worker.broker': 'amqp://*****:*****@rabbitmq', 'worker.backend': 'rpc://*****:*****@rabbitmq', 'worker.api_url': 'http://girder:8080/api/v1', 'worker.direct_path': True, 'core.brand_name': 'Digital Slide Archive', 'histomicsui.webroot_path': 'histomics', 'histomicsui.alternate_webroot_path': 'histomicstk', 'homepage.markdown': """# Digital Slide Archive --- ## Bioinformatics Platform Welcome to the **Digital Slide Archive**. Developers who want to use the Girder REST API should check out the [interactive web API docs](api/v1). The [HistomicsUI](histomics) application is enabled.""", 'slicer_cli_web.task_folder': str(taskFolder['_id']), } for key, value in settings.items(): print([key, value, Setting().get(key)]) if (getattr(opts, 'force', None) or Setting().get(key) is None or Setting().get(key) == Setting().getDefault(key)): Setting().set(key, value)
def gcs_save_record(self, data: dict):
    """
    Handle a GCS pub/sub push notification.

    https://cloud.google.com/pubsub/docs/push#receiving_messages

    Persists the notification record and, for object-creation events on a
    bucket mapped to an assetstore carrying a routing rule, triggers import
    processing for the new object.

    :param data: the raw push-notification payload.
    :return: the literal string "done" (acknowledges the message).
    """
    try:
        payload = GCSPushNotificationPayload(**data)
        GCSNotificationRecord().create(payload.message)
        if payload.message.attributes.eventType == 'OBJECT_FINALIZE':
            # This is a create notification
            store = Assetstore().findOne({
                'type': 2,  # S3 type
                AssetstoreRuleMarker: {'$exists': True},
                'bucket': payload.message.attributes.bucketId,
                # The only viable GSC Service string
                'service': 'https://storage.googleapis.com',
            })
            if store is not None:
                rule = NotificationRouterRule(**store[AssetstoreRuleMarker])
                mountRoot = Folder().findOne(
                    {'_id': ObjectId(rule.folderId)})
                BucketNotification.processNotification(
                    store, mountRoot, payload.message.attributes.objectId)
    except Exception as err:
        # exceptions must be swallowed to prevent pub/sub queue backups;
        # message loss is always easily recoverable by running a manual
        # import through the admin console.
        logger.exception(f'Failed to process GCS notification {err}')
    return "done"
def provision():
    """Provision a fresh instance: admin user, assetstore, sample data."""
    # Create an admin account when the instance has no users yet.
    if User().findOne() is None:
        User().createUser('admin', 'password', 'Admin', 'Admin',
                          '*****@*****.**')
    admin = User().findOne({'admin': True})
    # Guarantee that at least one assetstore exists.
    if Assetstore().findOne() is None:
        Assetstore().createFilesystemAssetstore('Assetstore', '/assetstore')
    # Fetch the demo collection and its sample files.
    demoFolder = get_sample_data(admin)
    # Attach the example experiment definitions as folder metadata.
    with open('examples/experiment.json') as handle:
        Folder().setMetadata(
            demoFolder, metadata={'experiments': json.load(handle)})
def _handleZip(self, prereviewFolder, user, zipFile):
    """
    Extract every file from an uploaded zip and create an Image for each,
    placing them in the pre-review folder while reporting progress.

    :param prereviewFolder: folder to receive the created images.
    :param user: the uploading user (owns the progress notification and
        the created images).
    :param zipFile: the file document of the uploaded zip.
    """
    # Avoid circular import
    from .image import Image
    # Get full path of zip file in assetstore
    assetstore = Assetstore().getCurrent()
    assetstore_adapter = assetstore_utilities.getAssetstoreAdapter(
        assetstore)
    fullPath = assetstore_adapter.fullPath(zipFile)
    with ZipFileOpener(fullPath) as (fileList, fileCount):
        with ProgressContext(
                on=True, user=user,
                title='Processing "%s"' % zipFile['name'],
                total=fileCount, state=ProgressState.ACTIVE,
                current=0) as progress:
            for originalFilePath, originalFileRelpath in fileList:
                originalFileName = os.path.basename(originalFileRelpath)
                progress.update(
                    increment=1,
                    message='Extracting "%s"' % originalFileName)
                with open(originalFilePath, 'rb') as originalFileStream:
                    Image().createImage(
                        imageDataStream=originalFileStream,
                        imageDataSize=os.path.getsize(originalFilePath),
                        originalName=originalFileName,
                        parentFolder=prereviewFolder,
                        creator=user)
def __create_thumbnail(self, item, w, h):
    """
    Create and persist a JPEG thumbnail of the item's image file.

    :param item: the item whose primary file is thumbnailed.
    :param w: target width in pixels.
    :param h: target height in pixels; when falsy it is derived from w
        preserving the original aspect ratio.
    :return: the file document of the stored thumbnail.
    """
    w = int(w)
    file = self.__get_file(item, item['name'])
    with File().open(file) as f:
        image = Image.open(BytesIO(f.read()))
        # in case we are currently processing png images, which have RGBA:
        # we convert to RGB, because we save the thumbnail into a .jpg
        # which cannot handle an alpha channel
        image = image.convert("RGB")
    if not h:
        # Derive the height from the requested width and the aspect ratio.
        width, height = image.size
        h = (height / width) * w
        h = int(h)
    image.thumbnail((w, h))
    buf = PILBytesIO()
    image.save(buf, "jpeg", quality=100)
    # NOTE(review): 'application/jpeg' is not a registered MIME type; the
    # standard value is 'image/jpeg'. Confirm before changing, since other
    # code may match on this exact string.
    thumbnailFile = File().createFile(
        size=0, item=item, name="thumbnail_{}x{}.jpg".format(w, h),
        creator=self.user, assetstore=Assetstore().getCurrent(),
        mimeType="application/jpeg")
    writeBytes(self.user, thumbnailFile, buf.getvalue())
    # Reload the document so it reflects the written content.
    thumbnailFile = self.__get_file(item, "thumbnail_{}x{}.jpg".format(w, h))
    return thumbnailFile
def validateSettings(event, plugin_name=None):
    """
    Validate plugin-specific settings and prevent disabling this plugin
    if there are any files in database assetstores.

    :param event: the validation event.
    :param plugin_name: the name of our plugin.
    """
    key, val = event.info['key'], event.info['value']
    if key != SettingKey.PLUGINS_ENABLED or not plugin_name:
        return
    # If the plugin is being disabled while a database assetstore still
    # holds files, veto the change by re-adding the plugin to the list.
    if plugin_name not in val:
        blocking = next((
            s for s in Assetstore().list()
            if s['type'] == AssetstoreType.DATABASE and s['hasFiles']), None)
        if blocking:
            val.append(plugin_name)
            log.info(
                'Won\'t disable %s because there are files in the %s assetstore'
                % (plugin_name, blocking['name']))
    # Keep the user assetstore in sync with the plugin's enabled state.
    if plugin_name not in val:
        _removeUserAssetstore()
    else:
        _createUserAssetstore()
def createThumbnail(currentUser, item):
    # Register an (initially zero-byte) thumbnail file on the item in the
    # current assetstore; content is expected to be written separately.
    # NOTE(review): 'application/jpeg' is not a registered MIME type; the
    # standard value is 'image/jpeg' -- confirm before changing.
    thumbnailFile = File().createFile(size=0, item=item,
                                      name=item['name'] + ".tmb.jpg",
                                      creator=currentUser,
                                      assetstore=Assetstore().getCurrent(),
                                      mimeType="application/jpeg")
def setUp(self): base.TestCase.setUp(self) # Create a set of users so we can have some folders. self.users = [ User().createUser('usr%s' % num, 'passwd', 'tst', 'usr', '*****@*****.**' % num) for num in [0, 1] ] folders = Folder().childFolders(self.users[0], 'user', user=self.users[0]) for folder in folders: if folder['name'] == 'Public': self.publicFolder = folder else: self.privateFolder = folder self.assetstore = Assetstore().getCurrent() root = self.assetstore['root'] # Clean out the test assetstore on disk shutil.rmtree(root) # First clean out the temp directory tmpdir = os.path.join(root, 'temp') if os.path.isdir(tmpdir): for tempname in os.listdir(tmpdir): os.remove(os.path.join(tmpdir, tempname))
def testGetAssetstoreFiles(self):
    """
    Verify that /assetstore/:id/files lists a file stored in the
    assetstore and stops listing it once the file is removed.
    """
    resp = self.request(path='/assetstore', method='GET', user=self.admin)
    self.assertStatusOk(resp)
    self.assertEqual(1, len(resp.json))
    assetstore = Assetstore().load(resp.json[0]['_id'])
    # Simulate the existence of a file within the assetstore
    folders = Folder().childFolders(self.admin, 'user', user=self.admin)
    item = Item().createItem(
        name='x.txt', creator=self.admin, folder=six.next(folders))
    file = File().createFile(
        creator=self.admin, item=item, name='x.txt', size=1,
        assetstore=assetstore, mimeType='text/plain')
    file['sha512'] = 'x'  # add this dummy value to simulate real file
    # Make sure we see the file
    resp = self.request(path='/assetstore/%s/files' % assetstore['_id'],
                        method='GET', user=self.admin)
    self.assertStatus(resp, 200)
    self.assertEqual(len(resp.json), 1)
    self.assertEqual(resp.json[0]['name'], 'x.txt')
    # Remove the file and make sure we no longer see it.
    File().remove(file)
    resp = self.request(path='/assetstore/%s/files' % assetstore['_id'],
                        method='GET', user=self.admin)
    self.assertStatus(resp, 200)
    self.assertEqual(len(resp.json), 0)
def load(self, info):
    """
    Plugin entry point: expose the routing-rule marker on assetstores,
    register the notification-record model, and mount the REST resource.
    """
    # Make the routing-rule marker visible through the REST API.
    Assetstore().exposeFields(AccessType.READ, AssetstoreRuleMarker)
    ModelImporter.registerModel(
        GCSNotificationRecord().name,
        GCSNotificationRecord,
        plugin='dive_server')
    # Attach the bucket-notification endpoint to the API root.
    info["apiRoot"].bucket_notifications = BucketNotification()
def getTablesUser(self, params):
    """
    List the tables visible at the given uri through the user assetstore,
    after checking that the current user may perform the import.
    """
    store = Assetstore().load(DB_ASSETSTORE_ID)
    failure = checkUserImport(self.getCurrentUser(), params['uri'])
    if failure:
        raise RestException(failure)
    return getTableList(
        store, params['uri'], internalTables=params.get('internal'))
def __load_slides(file):
    """
    Open the slide backing a file document and return its tile data.

    :param file: a file document stored in a filesystem assetstore.
    :return: the slides structure produced by load_slide.
    """
    printOk2(file)
    store = Assetstore().load(file['assetstoreId'])
    slidePath = os.path.join(store['root'], file['path'])
    # Only the tiles are returned; the other outputs are unused here.
    slides, associated_images, slide_properties, slide_mpp = load_slide(
        slidefile=slidePath, tile_size=126)
    return slides
def _getAndCacheImage(self, item, imageFunc, checkAndCreate, keydict, **kwargs):
    """
    Produce an image from the item's tile source, caching the result as a
    file attached to the item so later identical requests are served from
    the cache.

    :param item: the item whose tile source produces the image.
    :param imageFunc: name of the tile-source method to call.
    :param checkAndCreate: if True and a cached image exists, return True
        instead of downloading it.
    :param keydict: parameters that uniquely identify the image; these are
        serialized into the cache key.
    :param kwargs: passed to the tile source and the image function.
    :return: True, a download response for the cached file, or a
        (imageData, mimeType) tuple for freshly generated data.
    """
    if 'fill' in keydict and (keydict['fill']).lower() == 'none':
        # A fill of 'none' is the same as no fill; normalize the cache key.
        del keydict['fill']
    keydict = {k: v for k, v in six.viewitems(keydict) if v is not None}
    # Canonical JSON so identical parameters always map to the same key.
    key = json.dumps(keydict, sort_keys=True, separators=(',', ':'))
    existing = File().findOne({
        'attachedToType': 'item',
        'attachedToId': item['_id'],
        'isLargeImageThumbnail': True,
        'thumbnailKey': key
    })
    if existing:
        if checkAndCreate:
            return True
        if kwargs.get('contentDisposition') != 'attachment':
            contentDisposition = 'inline'
        else:
            contentDisposition = kwargs['contentDisposition']
        return File().download(
            existing, contentDisposition=contentDisposition)
    tileSource = self._loadTileSource(item, **kwargs)
    result = getattr(tileSource, imageFunc)(**kwargs)
    if result is None:
        thumbData, thumbMime = b'', 'application/octet-stream'
    else:
        thumbData, thumbMime = result
    # The logic on which files to save could be more sophisticated.
    maxThumbnailFiles = int(Setting().get(
        constants.PluginSettings.LARGE_IMAGE_MAX_THUMBNAIL_FILES))
    saveFile = maxThumbnailFiles > 0
    if saveFile:
        # Make sure we don't exceed the desired number of thumbnails
        self.removeThumbnailFiles(item, maxThumbnailFiles - 1)
        # Save the thumbnail as a file
        thumbfile = Upload().uploadFromFile(
            six.BytesIO(thumbData), size=len(thumbData),
            name='_largeImageThumbnail', parentType='item', parent=item,
            user=None, mimeType=thumbMime, attachParent=True)
        if not len(thumbData) and 'received' in thumbfile:
            # Zero-byte uploads are not auto-finalized; do it explicitly.
            thumbfile = Upload().finalizeUpload(
                thumbfile, Assetstore().load(thumbfile['assetstoreId']))
        thumbfile.update({
            'isLargeImageThumbnail': True,
            'thumbnailKey': key,
        })
        # Ideally, we would check that the file is still wanted before we
        # save it.  This is probably impossible without true transactions
        # in Mongo.
        File().save(thumbfile)
    # Return the data
    return thumbData, thumbMime
def set_notification_routing(self, assetstore, data: dict):
    """
    Configure routing rules for notifications received from GCP on
    buckets already mapped as assetstores.

    :param assetstore: the assetstore document to update.
    :param data: raw rule fields, validated via NotificationRouterRule.
    :return: the updated assetstore document.
    """
    rule = NotificationRouterRule(**data)
    assetstore[AssetstoreRuleMarker] = rule.dict()
    Assetstore().save(assetstore)
    return assetstore
def __enter__(self):
    """
    Create a temporary directory, preferring the current assetstore's
    temp area, and return its path.
    """
    current = Assetstore().getCurrent()
    adapter = assetstore_utilities.getAssetstoreAdapter(current)
    try:
        self.tempDir = tempfile.mkdtemp(dir=adapter.tempDir)
    except (AttributeError, OSError):
        # No usable adapter temp dir; fall back to the system default.
        self.tempDir = tempfile.mkdtemp()
    return self.tempDir
def __init__(self):
    """Register the 'assetstore' REST resource and its route table."""
    super(Assetstore, self).__init__()
    self.resourceName = 'assetstore'
    self._model = AssetstoreModel()
    # REST routes for listing, inspecting, creating, importing into,
    # updating, and deleting assetstores.
    self.route('GET', (), self.find)
    self.route('GET', (':id',), self.getAssetstore)
    self.route('POST', (), self.createAssetstore)
    self.route('POST', (':id', 'import'), self.importData)
    self.route('PUT', (':id',), self.updateAssetstore)
    self.route('DELETE', (':id',), self.deleteAssetstore)
    self.route('GET', (':id', 'files'), self.getAssetstoreFiles)
def __init__(self):
    """Set up the 'label' REST resource: model handles and routes."""
    super().__init__()
    self.resourceName = 'label'
    # Handles to the Girder models used by the route handlers.
    self.asset_m = Assetstore()
    self.coll_m = Collection()
    self.file_m = File()
    self.folder_m = Folder()
    self.item_m = Item()
    self.upload_m = Upload()
    self.setupRoutes()
class Assetstore(Resource): """ API Endpoint for managing assetstores. Requires admin privileges. """ def __init__(self): super(Assetstore, self).__init__() self.resourceName = 'assetstore' self._model = AssetstoreModel() self.route('GET', (), self.find) self.route('GET', (':id',), self.getAssetstore) self.route('POST', (), self.createAssetstore) self.route('POST', (':id', 'import'), self.importData) self.route('PUT', (':id',), self.updateAssetstore) self.route('DELETE', (':id',), self.deleteAssetstore) self.route('GET', (':id', 'files'), self.getAssetstoreFiles) @access.admin @autoDescribeRoute( Description('Get information about an assetstore.') .modelParam('id', model=AssetstoreModel) .errorResponse() .errorResponse('You are not an administrator.', 403) ) def getAssetstore(self, assetstore): self._model.addComputedInfo(assetstore) return assetstore @access.admin @autoDescribeRoute( Description('List assetstores.') .pagingParams(defaultSort='name') .errorResponse() .errorResponse('You are not an administrator.', 403) ) def find(self, limit, offset, sort): return list(self._model.list(offset=offset, limit=limit, sort=sort)) @access.admin @autoDescribeRoute( Description('Create a new assetstore.') .responseClass('Assetstore') .notes('You must be an administrator to call this.') .param('name', 'Unique name for the assetstore.') .param('type', 'Type of the assetstore.', dataType='integer') .param('root', 'Root path on disk (for filesystem type).', required=False) .param('perms', 'File creation permissions (for filesystem type).', required=False) .param('db', 'Database name (for GridFS type)', required=False) .param('mongohost', 'Mongo host URI (for GridFS type)', required=False) .param('replicaset', 'Replica set name (for GridFS type)', required=False) .param('shard', 'Shard the collection (for GridFS type). 
Set to ' '"auto" to set up sharding.', required=False) .param('bucket', 'The S3 bucket to store data in (for S3 type).', required=False) .param('prefix', 'Optional path prefix within the bucket under which ' 'files will be stored (for S3 type).', required=False, default='') .param('accessKeyId', 'The AWS access key ID to use for authentication ' '(for S3 type).', required=False) .param('secret', 'The AWS secret key to use for authentication (for ' 'S3 type).', required=False) .param('service', 'The S3 service host (for S3 type). Default is ' 's3.amazonaws.com. This can be used to specify a protocol and ' 'port as well using the form [http[s]://](host domain)[:(port)]. ' 'Do not include the bucket name here.', required=False, default='') .param('readOnly', 'If this assetstore is read-only, set this to true.', required=False, dataType='boolean', default=False) .param('region', 'The AWS region to which the S3 bucket belongs.', required=False, default=DEFAULT_REGION) .param('inferCredentials', 'The credentials for connecting to S3 will be inferred ' 'by Boto rather than explicitly passed. 
Inferring credentials will ' 'ignore accessKeyId and secret.', dataType='boolean', required=False) .errorResponse() .errorResponse('You are not an administrator.', 403) ) def createAssetstore(self, name, type, root, perms, db, mongohost, replicaset, shard, bucket, prefix, accessKeyId, secret, service, readOnly, region, inferCredentials): if type == AssetstoreType.FILESYSTEM: self.requireParams({'root': root}) return self._model.createFilesystemAssetstore( name=name, root=root, perms=perms) elif type == AssetstoreType.GRIDFS: self.requireParams({'db': db}) return self._model.createGridFsAssetstore( name=name, db=db, mongohost=mongohost, replicaset=replicaset, shard=shard) elif type == AssetstoreType.S3: self.requireParams({'bucket': bucket}) return self._model.createS3Assetstore( name=name, bucket=bucket, prefix=prefix, secret=secret, accessKeyId=accessKeyId, service=service, readOnly=readOnly, region=region, inferCredentials=inferCredentials) else: raise RestException('Invalid type parameter') @access.admin(scope=TokenScope.DATA_WRITE) @autoDescribeRoute( Description('Import existing data into an assetstore.') .notes('This does not move or copy the existing data, it just creates ' 'references to it in the Girder data hierarchy. Deleting ' 'those references will not delete the underlying data. 
This ' 'operation is currently only supported for S3 assetstores.')
        .modelParam('id', model=AssetstoreModel)
        .param('importPath', 'Root path within the underlying storage system '
               'to import.', required=False)
        .param('destinationId', 'ID of a folder, collection, or user in Girder '
               'under which the data will be imported.')
        .param('destinationType', 'Type of the destination resource.',
               enum=('folder', 'collection', 'user'))
        .param('progress', 'Whether to record progress on the import.',
               dataType='boolean', default=False, required=False)
        .param('leafFoldersAsItems', 'Whether folders containing only files should be '
               'imported as items.', dataType='boolean', required=False, default=False)
        .param('fileIncludeRegex', 'If set, only filenames matching this regular '
               'expression will be imported.', required=False)
        .param('fileExcludeRegex', 'If set, only filenames that do not match this regular '
               'expression will be imported. If a file matches both the include and exclude regex, '
               'it will be excluded.', required=False)
        .errorResponse()
        .errorResponse('You are not an administrator.', 403)
    )
    def importData(self, assetstore, importPath, destinationId, destinationType, progress,
                   leafFoldersAsItems, fileIncludeRegex, fileExcludeRegex):
        """
        Import data from the assetstore's underlying storage into a Girder
        folder, collection, or user.

        :param assetstore: the assetstore document to import from.
        :param importPath: root path within the underlying storage to import.
        :param destinationId: ID of the destination resource.
        :param destinationType: one of 'folder', 'collection', or 'user'.
        :param progress: whether to record progress on the import.
        :param leafFoldersAsItems: import leaf folders (files only) as items.
        :param fileIncludeRegex: only import filenames matching this regex.
        :param fileExcludeRegex: skip filenames matching this regex (exclusion
            wins when a name matches both regexes).
        :returns: whatever the assetstore model's importData returns.
        """
        user = self.getCurrentUser()
        # The caller must have ADMIN access on the import destination;
        # load() raises (exc=True) rather than returning None on failure.
        parent = self.model(destinationType).load(
            destinationId, user=user, level=AccessType.ADMIN, exc=True)
        # Progress is reported through a context so it is finalized even if
        # the import raises.
        with ProgressContext(progress, user=user, title='Importing data') as ctx:
            return self._model.importData(
                assetstore, parent=parent, parentType=destinationType, params={
                    'fileIncludeRegex': fileIncludeRegex,
                    'fileExcludeRegex': fileExcludeRegex,
                    'importPath': importPath,
                }, progress=ctx, user=user, leafFoldersAsItems=leafFoldersAsItems)

    @access.admin
    @autoDescribeRoute(
        Description('Update an existing assetstore.')
        .responseClass('Assetstore')
        .modelParam('id', model=AssetstoreModel)
        .param('name', 'Unique name for the assetstore.', strip=True)
        .param('root', 'Root path on disk (for Filesystem type)', required=False)
        .param('perms', 'File creation permissions (for Filesystem type)', required=False)
        .param('db', 'Database name (for GridFS type)', required=False)
        .param('mongohost', 'Mongo host URI (for GridFS type)', required=False)
        .param('replicaset', 'Replica set name (for GridFS type)', required=False)
        .param('shard', 'Shard the collection (for GridFS type). Set to '
               '"auto" to set up sharding.', required=False)
        .param('bucket', 'The S3 bucket to store data in (for S3 type).', required=False)
        .param('prefix', 'Optional path prefix within the bucket under which '
               'files will be stored (for S3 type).', required=False, default='')
        .param('accessKeyId', 'The AWS access key ID to use for authentication '
               '(for S3 type).', required=False)
        .param('secret', 'The AWS secret key to use for authentication (for '
               'S3 type).', required=False)
        .param('service', 'The S3 service host (for S3 type). Default is '
               's3.amazonaws.com. This can be used to specify a protocol and '
               'port as well using the form [http[s]://](host domain)[:(port)]. '
               'Do not include the bucket name here.', required=False, default='')
        .param('readOnly', 'If this assetstore is read-only, set this to true.',
               required=False, dataType='boolean')
        .param('region', 'The AWS region to which the S3 bucket belongs.',
               required=False, default=DEFAULT_REGION)
        .param('current', 'Whether this is the current assetstore', dataType='boolean')
        .param('inferCredentials', 'The credentials for connecting to S3 will be inferred '
               'by Boto rather than explicitly passed. Inferring credentials will '
               'ignore accessKeyId and secret.', dataType='boolean', required=False)
        .errorResponse()
        .errorResponse('You are not an administrator.', 403)
    )
    def updateAssetstore(self, assetstore, name, root, perms, db, mongohost, replicaset,
                         shard, bucket, prefix, accessKeyId, secret, service, readOnly,
                         region, current, inferCredentials, params):
        """
        Update an existing assetstore's settings.

        Only the parameters relevant to the assetstore's type are applied;
        unknown (plugin-defined) types are delegated to the
        'assetstore.update' event so plugins can handle their own fields.

        :param assetstore: the assetstore document being updated.
        :param params: any extra route parameters, forwarded to the event for
            non-core assetstore types.
        :returns: the saved assetstore document, or None if a plugin event
            handler prevented the default update.
        """
        assetstore['name'] = name
        assetstore['current'] = current
        if assetstore['type'] == AssetstoreType.FILESYSTEM:
            # Filesystem stores require a root path; perms is optional.
            self.requireParams({'root': root})
            assetstore['root'] = root
            if perms is not None:
                assetstore['perms'] = perms
        elif assetstore['type'] == AssetstoreType.GRIDFS:
            # GridFS stores require a database name; connection details are
            # only overwritten when explicitly provided.
            self.requireParams({'db': db})
            assetstore['db'] = db
            if mongohost is not None:
                assetstore['mongohost'] = mongohost
            if replicaset is not None:
                assetstore['replicaset'] = replicaset
            if shard is not None:
                assetstore['shard'] = shard
        elif assetstore['type'] == AssetstoreType.S3:
            self.requireParams({
                'bucket': bucket
            })
            assetstore['bucket'] = bucket
            assetstore['prefix'] = prefix
            assetstore['accessKeyId'] = accessKeyId
            assetstore['secret'] = secret
            assetstore['service'] = service
            assetstore['region'] = region
            assetstore['inferCredentials'] = inferCredentials
            if readOnly is not None:
                assetstore['readOnly'] = readOnly
        else:
            # Non-core assetstore type (e.g. from a plugin): hand all params
            # to event listeners, which may veto the save via preventDefault.
            event = events.trigger('assetstore.update', info={
                'assetstore': assetstore,
                'params': dict(
                    name=name, current=current, readOnly=readOnly, root=root,
                    perms=perms, db=db, mongohost=mongohost, replicaset=replicaset,
                    shard=shard, bucket=bucket, prefix=prefix, accessKeyId=accessKeyId,
                    secret=secret, service=service, region=region, **params
                )
            })
            if event.defaultPrevented:
                return
        return self._model.save(assetstore)

    @access.admin
    @autoDescribeRoute(
        Description('Delete an assetstore.')
        .notes('This will fail if there are any files in the assetstore.')
        .modelParam('id', model=AssetstoreModel)
        .errorResponse(('A parameter was invalid.',
                        'The assetstore is not empty.'))
        .errorResponse('You are not an administrator.', 403)
    )
    def deleteAssetstore(self, assetstore):
        """
        Delete an (empty) assetstore.

        :param assetstore: the assetstore document to remove.
        :returns: a confirmation message dict.
        """
        self._model.remove(assetstore)
        return {'message': 'Deleted assetstore %s.' % assetstore['name']}

    @access.admin
    @autoDescribeRoute(
        Description('Get a list of files controlled by an assetstore.')
        .modelParam('id', model=AssetstoreModel)
        .pagingParams(defaultSort='_id')
        .errorResponse()
        .errorResponse('You are not an administrator.', 403)
    )
    def getAssetstoreFiles(self, assetstore, limit, offset, sort):
        """
        List file documents stored in the given assetstore, paginated.

        :param assetstore: the assetstore document.
        :param limit: page size.
        :param offset: page offset.
        :param sort: sort specification from the paging params.
        :returns: a list of file documents.
        """
        return list(File().find(
            query={'assetstoreId': assetstore['_id']},
            offset=offset, limit=limit, sort=sort))
class LabelResource(Resource):
    """
    REST resource exposing label (annotation) JSON files stored as Girder
    files.

    Routes allow listing label files in the first accessible collection,
    fetching a label by id or by name, creating a label file seeded from a
    folder's ``config.json``, fetching a label's metadata, and posting
    updated label data.
    """

    def __init__(self):
        super().__init__()
        self.resourceName = 'label'
        # Model singletons shared by all route handlers.
        self.coll_m = Collection()
        self.file_m = File()
        self.folder_m = Folder()
        self.item_m = Item()
        self.upload_m = Upload()
        self.asset_m = Assetstore()
        self.setupRoutes()

    def setupRoutes(self):
        """Register the REST routes served by this resource."""
        self.route('GET', (), handler=self.getLabelList)
        self.route('GET', (':label_id',), self.getLabel)
        self.route('GET', ('meta',), self.getLabelMeta)
        self.route('GET', ('create',), self.createLabelFile)
        self.route('GET', ('by_name',), self.getLabelByName)
        self.route('POST', (), self.postLabel)

    def createNewFile(self, folder, file_name):
        """
        Create an empty JSON label file (and its containing item) in a folder.

        :param folder: the parent folder document.
        :param file_name: name for both the new item and the file.
        :returns: the newly created (empty) file document.
        """
        item = self.item_m.createItem(file_name,
                                      creator=self.getCurrentUser(),
                                      folder=folder,
                                      description='label file',
                                      reuseExisting=False)
        file = self.file_m.createFile(size=0, item=item, name=file_name,
                                      creator=self.getCurrentUser(),
                                      assetstore=self.asset_m.getCurrent(),
                                      mimeType="application/json")
        return file

    def copy(self, srcFile, destFile):
        """
        Copy the contents of srcFile into destFile via an upload.

        :param srcFile: the source file document to read from.
        :param destFile: the destination file document to overwrite.
        :returns: the completed upload document.
        """
        upload = self.upload_m.createUploadToFile(
            destFile, self.getCurrentUser(), srcFile['size'])
        # FIX: the chunk must be sized by the *source* file. destFile was just
        # created with size 0, so sizing the stream by destFile['size']
        # streamed zero bytes and left the copy empty.
        self.upload_m.handleChunk(
            upload=upload,
            chunk=RequestBodyStream(self.file_m.open(srcFile),
                                    size=srcFile['size']),
            user=self.getCurrentUser())
        return upload

    @access.public
    @autoDescribeRoute(
        Description('Get label list'))
    @rest.rawResponse
    def getLabelList(self):
        """Return (as raw JSON) all JSON files in the first listed collection."""
        printOk('getLabelsList() was called!')
        try:
            # NOTE(review): only the first collection visible to the user is
            # searched — presumably the deployment has a single collection.
            collection = list(self.coll_m.list(user=self.getCurrentUser(),
                                               offset=0, limit=1))[0]
            files = self.coll_m.fileList(collection,
                                         user=self.getCurrentUser(),
                                         data=False, includeMetadata=True,
                                         mimeFilter=['application/json'])
            files = list(files)
            cherrypy.response.headers["Content-Type"] = "application/json"
            return dumps(files)
        except Exception:
            # FIX: was a bare `except:` passing the function object
            # traceback.print_exc instead of the formatted traceback text.
            printFail(traceback.format_exc())

    @staticmethod
    def getOwnerId(folder):
        """
        Return the id (as str) of the first ADMIN-level user on a folder's
        ACL, or None if there is no admin user entry.
        """
        aclList = Folder().getFullAccessList(folder)
        for acl in aclList['users']:
            if acl['level'] == AccessType.ADMIN:
                return str(acl['id'])
        return None

    def getConfigFolder(self, label_folder_id):
        """
        Resolve the config folder associated with a label folder.

        The label folder stores the config folder's id in its metadata,
        keyed by the folder owner's user id.

        :param label_folder_id: id of the label folder.
        :returns: the config folder document.
        """
        label_folder = Folder().load(label_folder_id,
                                     user=self.getCurrentUser(),
                                     level=AccessType.READ)
        ownerId = self.getOwnerId(label_folder)
        config_folder = self.folder_m.load(label_folder['meta'][ownerId],
                                           level=AccessType.READ,
                                           user=self.getCurrentUser())
        return config_folder

    def findConfig(self, folder_id):
        """
        Find the ``config.json`` file in the config folder linked to
        *folder_id*, or None if there is no such file.
        """
        folder = self.getConfigFolder(folder_id)
        printOk2("Config folder {}".format(folder))
        files = self.folder_m.fileList(folder, self.getCurrentUser(), data=False)
        for file_path, file in files:
            printOk(file)
            if file['name'] == "config.json":
                return file

    def __findFile(self, folder, file_name):
        """
        Return the first file named *file_name* within *folder*, or None.

        Looks up the item by name first, then the first file inside it.
        """
        item = list(self.item_m.find({'folderId': folder['_id'],
                                      'name': file_name}).limit(1))
        if not item:
            return None
        item = item[0]
        file = list(self.file_m.find({'itemId': item['_id']}).limit(1))
        if not file:
            return None
        return file[0]

    @access.public
    @autoDescribeRoute(
        Description('Create a new label file if it doesnt exist')
        .param('file_name', 'label file name')
        .param('folder_id', 'the parent folder id'))
    @rest.rawResponse
    def createLabelFile(self, file_name, folder_id):
        """
        Create a label file seeded from the folder's config.json, unless a
        file with that name already exists; return its id as raw JSON.
        """
        try:
            folder = self.folder_m.load(folder_id,
                                        user=self.getCurrentUser(),
                                        level=AccessType.WRITE)
            file = self.__findFile(folder, file_name)
            if not file:
                file = self.createNewFile(folder, file_name)
                config_file = self.findConfig(folder_id)
                if not config_file:
                    printFail("No config file found")
                    return errorMessage("No config file found")
                else:
                    # Seed the new label file with the config contents.
                    res = self.copy(config_file, file)
                    return dumps({
                        "label_id": res['fileId']
                    })
            return dumps({
                "label_id": file['_id']
            })
        except Exception:
            # FIX: narrowed bare except; log the formatted traceback.
            printFail(traceback.format_exc())
            cherrypy.response.status = 500

    @access.public
    @autoDescribeRoute(
        Description('Get labels by file_name')
        .param('file_name', 'label file name')
        .param('folder_id', 'the parent folder id'))
    @rest.rawResponse
    def getLabelByName(self, file_name, folder_id):
        """Download a label file by name within a folder; {} if not found."""
        try:
            folder = self.folder_m.load(folder_id,
                                        user=self.getCurrentUser(),
                                        level=AccessType.READ)
            file = self.__findFile(folder, file_name)
            cherrypy.response.headers["Content-Type"] = "application/json"
            if file:
                return self.file_m.download(file)
            else:
                return dumps({})
        except Exception:
            printFail(traceback.format_exc())
            cherrypy.response.status = 500

    @access.public
    @autoDescribeRoute(
        Description('Get label by id')
        .param('label_id', 'label file id'))
    @rest.rawResponse
    def getLabel(self, label_id):
        """Download a label file's contents by file id."""
        try:
            file = self.file_m.load(label_id,
                                    level=AccessType.READ,
                                    user=self.getCurrentUser())
            printOk2(file)
            cherrypy.response.headers["Content-Type"] = "application/json"
            return self.file_m.download(file)
        except Exception:
            # Unknown slug
            printFail(traceback.format_exc())
            cherrypy.response.status = 404

    @access.public
    @autoDescribeRoute(
        Description('Get label by id')
        .param('label_id', 'label file id'))
    def getLabelMeta(self, label_id):
        """Return the label file's document (metadata) by file id."""
        try:
            file = self.file_m.load(label_id,
                                    level=AccessType.READ,
                                    user=self.getCurrentUser())
            cherrypy.response.headers["Content-Type"] = "application/json"
            return dumps(file)
        except Exception:
            # Unknown slug
            printFail(traceback.format_exc())
            cherrypy.response.status = 404

    @access.public
    @autoDescribeRoute(
        Description('Post label by id')
        .param('label_id', 'label file id'))
    @rest.rawResponse
    def postLabel(self, label_id, params):
        """
        Overwrite a label file with the posted params (labels parsed from the
        'labels' JSON string); return the upload document as raw JSON.
        """
        try:
            file = self.file_m.load(label_id,
                                    level=AccessType.WRITE,
                                    user=self.getCurrentUser())
            cherrypy.response.headers["Content-Type"] = "application/json"
            params['labels'] = json.loads(params['labels'])
            data = json.dumps(params, indent=2, sort_keys=True)
            upload = writeData(self.getCurrentUser(), file, data)
            printOk2(file)
            printOk(upload)
            return dumps(upload)
        except Exception:
            # Unknown slug
            printFail(traceback.format_exc())
            cherrypy.response.status = 404

    @access.public
    @autoDescribeRoute(
        Description('Post label by id')
        .param('label_id', 'label file id'))
    @rest.rawResponse
    def strokeToOutline(self, strokes):
        """Placeholder; not registered in setupRoutes and not yet implemented."""
        pass