def load(info):
    ModelImporter.model('job', 'jobs').exposeFields(
        level=AccessType.SITE_ADMIN, fields='processedFiles')

    Osumo._cp_config['tools.staticdir.dir'] = os.path.join(
        os.path.relpath(info['pluginRootDir'],
                        info['config']['/']['tools.staticdir.root']),
        'web-external')

    # Move girder app to /girder, serve sumo app from /
    info['apiRoot'].osumo = Osumo()
    (info['serverRoot'], info['serverRoot'].girder) = (
        info['apiRoot'].osumo, info['serverRoot'])
    info['serverRoot'].api = info['serverRoot'].girder.api

    events.bind('data.process', 'osumo', info['apiRoot'].osumo.dataProcess)
def cancel(event):
    """
    This is bound to the "jobs.cancel" event, and will be triggered any time
    a job is canceled. This handler processes any job whose handler field is
    set to "worker_handler" or "celery_handler".
    """
    job = event.info
    if job['handler'] in ['worker_handler', 'celery_handler']:
        # Stop event propagation and prevent the default, since we are using
        # a custom canceling state
        event.stopPropagation().preventDefault()

        celeryTaskId = job.get('celeryTaskId')

        if celeryTaskId is None:
            msg = ("Unable to cancel Celery task. Job '%s' doesn't have a "
                   "Celery task id." % job['_id'])
            logger.warning(msg)
            return

        if job['status'] not in [CustomJobStatus.CANCELING,
                                 JobStatus.CANCELED, JobStatus.SUCCESS,
                                 JobStatus.ERROR]:
            # Set the job status to canceling
            ModelImporter.model('job', 'jobs').updateJob(
                job, status=CustomJobStatus.CANCELING)

            # Send the revoke request
            asyncResult = AsyncResult(celeryTaskId, app=getCeleryApp())
            asyncResult.revoke()
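
# A minimal sketch of how the handler above is wired up and exercised,
# assuming girder's `events` module. The job dict here is illustrative; in
# practice the document comes from the jobs model, not a hand-built dict.
events.bind('jobs.cancel', 'worker', cancel)

job = {
    '_id': 'example-job-id',
    'handler': 'celery_handler',
    'celeryTaskId': 'example-task-id',
    'status': JobStatus.RUNNING,
}
# Triggering the event routes the job through cancel(), which marks it
# CANCELING and revokes the Celery task.
events.trigger('jobs.cancel', info=job)
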
def propagateSizeChange(self, item, sizeIncrement, updateItemSize=True):
    """
    Propagates a file size change (or file creation) to the necessary
    parents in the hierarchy. Internally, this records subtree size in the
    item, the parent folder, and the root node under which the item lives.
    Should be called anytime a new file is added, a file is deleted, or a
    file size changes.

    :param item: The parent item of the file.
    :type item: dict
    :param sizeIncrement: The change in size to propagate.
    :type sizeIncrement: int
    :param updateItemSize: Whether the item size should be updated. Set to
        False if you plan to delete the item immediately and don't care to
        update its size.
    """
    from .folder import Folder
    from .item import Item

    if updateItemSize:
        # Propagate size up to the item
        Item().increment(query={'_id': item['_id']}, field='size',
                         amount=sizeIncrement, multi=False)

    # Propagate size to the direct parent folder
    Folder().increment(query={'_id': item['folderId']}, field='size',
                       amount=sizeIncrement, multi=False)

    # Propagate size up to the root data node
    ModelImporter.model(item['baseParentType']).increment(query={
        '_id': item['baseParentId']
    }, field='size', amount=sizeIncrement, multi=False)
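
# A hedged usage sketch: after a file grows or shrinks by `delta` bytes,
# the file model would call this so the item, folder, and root sizes stay
# consistent. `fileDoc` and `delta` are illustrative names.
itemDoc = Item().load(fileDoc['itemId'], force=True)
File().propagateSizeChange(itemDoc, delta)
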
def _getLargeImagePath(self):
    try:
        largeImageFileId = self.item['largeImage']['fileId']
        # Access control checking should already have been done on the
        # item, so don't repeat it.
        # TODO: is it possible that the file is on a different item, and if
        # so, do we want to repeat the access check?
        largeImageFile = ModelImporter.model('file').load(
            largeImageFileId, force=True)

        # TODO: can we move some of this logic into Girder core?
        assetstore = ModelImporter.model('assetstore').load(
            largeImageFile['assetstoreId'])
        adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)

        if not isinstance(
                adapter, assetstore_utilities.FilesystemAssetstoreAdapter):
            raise TileSourceAssetstoreException(
                'Non-filesystem assetstores are not supported')

        largeImagePath = adapter.fullPath(largeImageFile)
        return largeImagePath
    except TileSourceAssetstoreException:
        raise
    except (KeyError, ValidationException, TileSourceException) as e:
        raise TileSourceException(
            'No large image file in this item: %s' % str(e))
def process_annotations(event):
    """Add annotations to an image on a ``data.process`` event"""
    info = event.info

    if 'anot' in info.get('file', {}).get('exts', []):
        reference = info.get('reference', None)
        try:
            reference = json.loads(reference)
        except (ValueError, TypeError):
            print(TerminalColor.error(
                'Warning: Could not get reference from the annotation '
                'param. Make sure you have ctk-cli>=1.3.1 installed.'
            ))
            return

        if 'userId' not in reference or 'itemId' not in reference:
            print(TerminalColor.error(
                'Annotation reference does not contain required information.'
            ))
            return

        userId = reference['userId']
        imageId = reference['itemId']

        # load model classes
        Item = ModelImporter.model('item')
        File = ModelImporter.model('file')
        User = ModelImporter.model('user')
        Annotation = ModelImporter.model('annotation', plugin='large_image')

        # load models from the database
        user = User.load(userId, force=True)
        image = File.load(imageId, level=AccessType.READ, user=user)
        item = Item.load(image['itemId'], level=AccessType.WRITE, user=user)

        file = File.load(
            info.get('file', {}).get('_id'),
            level=AccessType.READ, user=user
        )

        if not (item and user and file):
            print(TerminalColor.error(
                'Could not load models from the database'
            ))
            return

        try:
            data = json.loads(''.join(File.download(file)()))
        except Exception:
            print(TerminalColor.error(
                'Could not parse annotation file'
            ))
            return

        Annotation.createAnnotation(item, user, data)
def _onUpload(event):
    """
    Look at uploads containing references related to this plugin. If found,
    they are used to link item task outputs back to a job document.
    """
    try:
        ref = json.loads(event.info.get('reference'))
    except (ValueError, TypeError):
        return

    if isinstance(ref, dict) and ref.get('type') == 'item_tasks.output':
        jobModel = ModelImporter.model('job', 'jobs')
        tokenModel = ModelImporter.model('token')
        token = event.info['currentToken']

        if tokenModel.hasScope(token,
                               'item_tasks.job_write:%s' % ref['jobId']):
            job = jobModel.load(ref['jobId'], force=True, exc=True)
        else:
            job = jobModel.load(
                ref['jobId'], level=AccessType.WRITE,
                user=event.info['currentUser'], exc=True)

        file = event.info['file']
        item = ModelImporter.model('item').load(file['itemId'], force=True)

        # Add a link to the output item on the job document
        jobModel.updateJob(job, otherFields={
            'itemTaskBindings.outputs.%s.itemId' % ref['id']: item['_id']
        })

        # Also add a link on the item to the job that created it
        item['createdByJob'] = job['_id']
        ModelImporter.model('item').save(item)
def schedule(event):
    """
    This is bound to the "jobs.schedule" event, and will be triggered any
    time a job is scheduled. This handler will process any job that has the
    handler field set to "worker_handler".
    """
    job = event.info
    if job['handler'] == 'worker_handler':
        task = job.get('celeryTaskName', 'girder_worker.run')

        # Set the job status to queued
        ModelImporter.model('job', 'jobs').updateJob(
            job, status=JobStatus.QUEUED)

        # Send the task to celery
        asyncResult = getCeleryApp().send_task(
            task, job['args'], job['kwargs'], queue=job.get('celeryQueue'),
            headers={
                'jobInfoSpec': jobInfoSpec(job, job.get('token', None)),
                'apiUrl': getWorkerApiUrl()
            })

        # Record the task ID from celery.
        ModelImporter.model('job', 'jobs').updateJob(job, otherFields={
            'celeryTaskId': asyncResult.task_id
        })

        # Stop event propagation since we have taken care of scheduling.
        event.stopPropagation()
def updateItemLicense(event):
    """
    REST event handler to update an item with the license parameter, if
    provided.
    """
    params = event.info['params']
    if 'license' not in params:
        return

    itemModel = ModelImporter.model('item')
    item = itemModel.load(event.info['returnVal']['_id'], force=True,
                          exc=True)
    newLicense = validateString(params['license'])
    if item['license'] == newLicense:
        return

    # Ensure that the new license name is in the configured list of
    # licenses.
    #
    # Enforcing this here, instead of when validating the item, avoids an
    # extra database lookup (for the settings) on every future item save.
    if newLicense:
        licenseSetting = ModelImporter.model('setting').get(
            PluginSettings.LICENSES)
        validLicense = any(
            license['name'] == newLicense
            for group in licenseSetting
            for license in group['licenses'])
        if not validLicense:
            raise ValidationException(
                'License name must be in configured list of licenses.',
                'license')

    item['license'] = newLicense
    item = itemModel.save(item)
    event.preventDefault()
    event.addResponse(item)
def load(info): info["apiRoot"].user.route("GET", (":id", "gravatar"), getGravatar) ModelImporter.model("user").exposeFields(level=AccessType.READ, fields="gravatar_baseUrl") events.bind("model.setting.validate", "gravatar", _validateSettings) events.bind("rest.put.user/:id.before", "gravatar", _userUpdate)
def load(info):
    # Bind REST events
    events.bind('model.file.download.request', 'download_statistics',
                _onDownloadFileRequest)
    events.bind('model.file.download.complete', 'download_statistics',
                _onDownloadFileComplete)

    # Add download count fields to the file model
    ModelImporter.model('file').exposeFields(
        level=AccessType.READ, fields='downloadStatistics')
def load(info):
    events.bind('jobs.schedule', 'worker', schedule)
    events.bind('jobs.status.validate', 'worker', validateJobStatus)
    events.bind('model.setting.validate', 'worker', validateSettings)

    ModelImporter.model('job', 'jobs').exposeFields(
        AccessType.SITE_ADMIN, {'celeryTaskId'})
def load(info):
    info['apiRoot'].user.route('GET', (':id', 'gravatar'), getGravatar)
    ModelImporter.model('user').exposeFields(
        level=AccessType.READ, fields='gravatar_baseUrl')
    events.bind('model.user.save', 'gravatar', _userUpdate)
def parentsToRoot(self, folder, curPath=None, user=None, force=False,
                  level=AccessType.READ):
    """
    Get the path to traverse to a root of the hierarchy.

    :param folder: The folder whose root to find.
    :type folder: dict
    :param curPath: The partial path built up so far (used internally for
        recursion).
    :type curPath: list or None
    :param user: The user making the request (used for access filtering).
    :param force: Set to True to skip access checks.
    :type force: bool
    :param level: The required access level on each parent.
    :returns: an ordered list of dictionaries from root to the current
        folder
    """
    curPath = curPath or []
    curParentId = folder['parentId']
    curParentType = folder['parentCollection']

    if curParentType in ('user', 'collection'):
        curParentObject = ModelImporter.model(curParentType).load(
            curParentId, user=user, level=level, force=force)

        if force:
            parentFiltered = curParentObject
        else:
            parentFiltered = ModelImporter.model(curParentType).filter(
                curParentObject, user)

        return [{
            'type': curParentType,
            'object': parentFiltered
        }] + curPath
    else:
        curParentObject = self.load(
            curParentId, user=user, level=level, force=force)
        curPath = [{
            'type': curParentType,
            'object': curParentObject if force else self.filter(
                curParentObject, user)
        }] + curPath

        return self.parentsToRoot(curParentObject, curPath, user=user,
                                  force=force)
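
# A hedged usage sketch: flattening the returned root-first list into a
# readable path. `folderModel`, `doc`, and `user` are illustrative names;
# user documents carry a 'login' rather than a 'name'.
ancestors = folderModel.parentsToRoot(doc, user=user)
path = '/'.join(
    p['object'].get('name', p['object'].get('login', '?'))
    for p in ancestors)
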
def nearestNeighbors(self, item, limit, params):
    limit = int(limit)
    desc_index = self.descriptorIndexFromItem(item)
    nn_index = self.nearestNeighborIndex(item, getCurrentUser(), desc_index)

    if nn_index is None:
        raise RestException('Nearest neighbor index could not be found.')

    try:
        smqtk_uuid = item['meta']['smqtk_uuid']
        descriptor = desc_index.get_descriptor(smqtk_uuid)
    except KeyError:
        raise RestException(
            'Unable to retrieve image descriptor for querying object.')

    neighbors, dists = nn_index.nn(descriptor, limit)
    uuid_dist = dict(zip([x.uuid() for x in neighbors], dists))

    smqtkFolder = ModelImporter.model('folder').load(
        item['folderId'], user=getCurrentUser())
    items = list(ModelImporter.model('folder').childItems(
        smqtkFolder, filters={
            'meta.smqtk_uuid': {'$in': [x.uuid() for x in neighbors]}
        }))

    for item in items:
        item['smqtk_distance'] = uuid_dist[item['meta']['smqtk_uuid']]

    return items
def isOrphan(self, file):
    """
    Returns True if this file is orphaned (its item or attached entity is
    missing).

    :param file: The file to check.
    :type file: dict
    """
    if file.get('attachedToId'):
        attachedToType = file.get('attachedToType')
        if isinstance(attachedToType, six.string_types):
            modelType = ModelImporter.model(attachedToType)
        elif isinstance(attachedToType, list) and len(attachedToType) == 2:
            modelType = ModelImporter.model(*attachedToType)
        else:
            # Invalid 'attachedToType'
            return True
        if isinstance(modelType, (acl_mixin.AccessControlMixin,
                                  AccessControlledModel)):
            attachedDoc = modelType.load(file.get('attachedToId'),
                                         force=True)
        else:
            attachedDoc = modelType.load(file.get('attachedToId'))
    else:
        from .item import Item
        attachedDoc = Item().load(file.get('itemId'), force=True)
    return not attachedDoc
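
# A hedged usage sketch: sweeping for orphaned file documents, e.g. from a
# maintenance script. Iterating every file and removing orphans outright
# are illustrative policy assumptions, not part of the model API.
fileModel = File()
for doc in fileModel.find():
    if fileModel.isOrphan(doc):
        fileModel.remove(doc)
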
def validateSettings(event):
    if event.info['key'] == PluginSettings.SCORING_USER_ID:
        if not event.info['value']:
            raise ValidationException(
                'Scoring user ID must not be empty.', 'value')
        ModelImporter.model('user').load(
            event.info['value'], force=True, exc=True)
        event.preventDefault().stopPropagation()
def load(info):
    info['apiRoot'].user.route('GET', (':id', 'gravatar'), getGravatar)
    ModelImporter.model('user').exposeFields(
        level=AccessType.READ, fields='gravatar_baseUrl')
    events.bind('model.setting.validate', 'gravatar', validateSettings)
    events.bind('rest.put.user/:id.before', 'gravatar', userUpdate)
def validate_settings(event):
    """Validate minerva specific settings."""
    key = event.info['key']
    val = event.info['value']

    if key == 'minerva.geonames_folder':
        ModelImporter.model('folder').load(val, exc=True, force=True)
        event.preventDefault().stopPropagation()
def load(info):
    info['apiRoot'].thumbnail = rest.Thumbnail()

    for model in ('item', 'collection', 'folder', 'user'):
        ModelImporter.model(model).exposeFields(
            level=AccessType.READ, fields='_thumbnails')
        events.bind('model.%s.remove' % model, info['name'],
                    removeThumbnails)

    events.bind('model.file.remove', info['name'], removeThumbnailLink)
    events.bind('data.process', info['name'], _onUpload)
def load(info):
    events.bind('jobs.schedule', 'worker', schedule)
    events.bind('jobs.status.validate', 'worker', validateJobStatus)
    events.bind('jobs.status.validTransitions', 'worker', validTransitions)
    events.bind('jobs.cancel', 'worker', cancel)
    events.bind('model.job.save.after', 'worker', attachJobInfoSpec)
    events.bind('model.job.save', 'worker', attachParentJob)

    ModelImporter.model('job', 'jobs').exposeFields(
        AccessType.SITE_ADMIN, {'celeryTaskId', 'celeryQueue'})
def _onDownloadFileRequest(event):
    if event.info['startByte'] == 0:
        ModelImporter.model('file').increment(
            query={'_id': event.info['file']['_id']},
            field='downloadStatistics.started', amount=1)

    ModelImporter.model('file').increment(
        query={'_id': event.info['file']['_id']},
        field='downloadStatistics.requested', amount=1)
def _userUpdate(event):
    """
    Called when the user document is being changed. If the email field
    changes, we wipe the cached gravatar URL so it will be recomputed on
    next request.
    """
    if "email" in event.info["params"]:
        user = ModelImporter.model("user").load(event.info["id"],
                                                force=True)
        if (user["email"] != event.info["params"]["email"] and
                user.get("gravatar_baseUrl")):
            del user["gravatar_baseUrl"]
            ModelImporter.model("user").save(user)
def _onSettingRemove(self, event):
    settingDoc = event.info
    if settingDoc['key'] == SettingKey.CONTACT_EMAIL_ADDRESS:
        self.updateHtmlVars({
            'contactEmail': ModelImporter.model('setting').getDefault(
                SettingKey.CONTACT_EMAIL_ADDRESS)})
    elif settingDoc['key'] == SettingKey.BRAND_NAME:
        self.updateHtmlVars({
            'brandName': ModelImporter.model('setting').getDefault(
                SettingKey.BRAND_NAME)})
    elif settingDoc['key'] == SettingKey.BANNER_COLOR:
        self.updateHtmlVars({'bannerColor': settingDoc['value']})
def onUserCreated(event):
    user = event.info

    # make all users private
    # TODO: make users visible to the "study creators" group
    user['public'] = False
    ModelImporter.model('user').save(user)

    if ModelImporter.model('setting').get(
            constants.PluginSettings.DEMO_MODE):
        addUserToAllUDAGroups(user)
def userUpdate(event):
    """
    Called when the user document is being changed. If the email field
    changes, we wipe the cached gravatar URL so it will be recomputed on
    next request.
    """
    if 'email' in event.info['params']:
        user = ModelImporter.model('user').load(event.info['id'],
                                                force=True)
        if (user['email'] != event.info['params']['email'] and
                user.get('gravatar_baseUrl')):
            del user['gravatar_baseUrl']
            ModelImporter.model('user').save(user)
def createSession(self, params):
    sessionsFolder = getCreateSessionsFolder()
    sessionId = requests.post(self.search_url + '/session').json()['sid']

    item = ModelImporter.model('item').createItem(
        name=sessionId, creator=getCurrentUser(), folder=sessionsFolder)
    ModelImporter.model('item').setMetadata(item, {'sid': sessionId})

    return item
def handleZip(images_folder, user, zip_file):
    Image = ModelImporter.model('image', 'isic_archive')

    # Get the full path of the zip file in the assetstore
    assetstore = ModelImporter.model('assetstore').getCurrent()
    assetstore_adapter = assetstore_utilities.getAssetstoreAdapter(
        assetstore)
    full_path = assetstore_adapter.fullPath(zip_file)

    with ZipFileOpener(full_path) as (file_list, file_count):
        with ProgressContext(
                on=True, user=user,
                title='Processing "%s"' % zip_file['name'],
                total=file_count, state=ProgressState.ACTIVE,
                current=0) as progress:
            for original_file_path, original_file_relpath in file_list:
                original_file_name = os.path.basename(
                    original_file_relpath)
                progress.update(
                    increment=1,
                    message='Extracting "%s"' % original_file_name)

                image_item = Image.createImage(
                    creator=user, parentFolder=images_folder)
                Image.setMetadata(image_item, {
                    'originalFilename': os.path.splitext(
                        original_file_name)[0]
                })

                # upload the original image
                image_mimetype = mimetypes.guess_type(original_file_name)[0]
                with open(original_file_path, 'rb') as original_file_obj:
                    ModelImporter.model('upload').uploadFromFile(
                        obj=original_file_obj,
                        size=os.path.getsize(original_file_path),
                        name='%s%s' % (
                            image_item['name'],
                            os.path.splitext(original_file_name)[1].lower()
                        ),
                        parentType='item',
                        parent=image_item,
                        user=user,
                        mimeType=image_mimetype,
                    )

                # reload image_item, since its 'size' has changed in the
                # database
                image_item = Image.load(image_item['_id'], force=True)
                image_data = Image.imageData(image_item)
                image_item['meta']['acquisition']['pixelsY'] = \
                    image_data.shape[0]
                image_item['meta']['acquisition']['pixelsX'] = \
                    image_data.shape[1]
                Image.save(image_item)
def load(info):
    geospatialItem = GeospatialItem()

    info['apiRoot'].item.route('POST', ('geospatial',),
                               geospatialItem.create)
    info['apiRoot'].item.route('GET', ('geospatial',), geospatialItem.find)
    info['apiRoot'].item.route('GET', ('geospatial', 'intersects'),
                               geospatialItem.intersects)
    info['apiRoot'].item.route('GET', ('geospatial', 'near'),
                               geospatialItem.near)
    info['apiRoot'].item.route('GET', ('geospatial', 'within'),
                               geospatialItem.within)
    info['apiRoot'].item.route('GET', (':id', 'geospatial'),
                               geospatialItem.getGeospatial)
    info['apiRoot'].item.route('PUT', (':id', 'geospatial'),
                               geospatialItem.setGeospatial)

    ModelImporter.model('item').exposeFields(
        level=AccessType.READ, fields={GEOSPATIAL_FIELD})
def findAnalysisByName(currentUser, name):
    analysisFolder = findAnalysisFolder(currentUser)
    filters = {"$text": {"$search": name}}
    analyses = [
        ModelImporter.model("item").filter(item, currentUser)
        for item in ModelImporter.model("folder").childItems(
            folder=analysisFolder, filters=filters)
    ]
    if len(analyses) > 0:
        return analyses[0]
    else:
        return None
def jobMM(job, minerva_metadata=None, save=True):
    if minerva_metadata is None:
        if 'meta' not in job or 'minerva' not in job['meta']:
            return {}
        return job['meta']['minerva']
    else:
        if 'meta' not in job:
            job['meta'] = {}
        job['meta']['minerva'] = minerva_metadata
        if save:
            ModelImporter.model('job', 'jobs').save(job)
        return job['meta']['minerva']
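
# A hedged usage sketch: jobMM acts as a combined getter/setter for a
# job's minerva metadata. The `dataset_id` key and `dataset` document are
# illustrative.
meta = jobMM(job)                         # read; {} if unset
meta['dataset_id'] = str(dataset['_id'])
jobMM(job, minerva_metadata=meta)         # write back and save the job
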
def terminate(self, id, params):
    (user, token) = self.getCurrentUser(returnToken=True)
    job = self._model.load(id, user=user, level=AccessType.ADMIN)

    if not job:
        raise RestException('Job not found.', code=404)

    cluster_model = ModelImporter.model('cluster', 'cumulus')
    cluster = cluster_model.load(job['clusterId'], user=user,
                                 level=AccessType.ADMIN)

    base_url = cumulus.config.girder.baseUrl
    self._model.update_status(user, id, JobState.TERMINATING)
    log_url = '%s/jobs/%s/log' % (base_url, id)

    # Clean up the job
    job = self._clean(job)

    girder_token = self.get_task_token()['_id']
    tasks.job.terminate_job.delay(
        cluster, job, log_write_url=log_url, girder_token=girder_token)

    return job
def _setCommonCORSHeaders():
    """
    Set CORS headers that should be passed back with either a preflight
    OPTIONS or a simple CORS request. We set these headers anytime there is
    an Origin header present since browsers will simply ignore them if the
    request is not cross-origin.
    """
    origin = cherrypy.request.headers.get('origin')
    if not origin:
        # If there is no origin header, this is not a cross origin request
        return

    allowed = ModelImporter.model('setting').get(
        SettingKey.CORS_ALLOW_ORIGIN)

    if allowed:
        setResponseHeader('Access-Control-Allow-Credentials', 'true')

        allowed_list = [o.strip() for o in allowed.split(',')]
        key = 'Access-Control-Allow-Origin'

        if len(allowed_list) == 1:
            setResponseHeader(key, allowed_list[0])
        elif origin in allowed_list:
            setResponseHeader(key, origin)
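
# A hedged sketch of how the whitelist consumed above is configured,
# assuming the standard Setting model API; the origins are illustrative.
# A request bearing "Origin: https://app.example.com" then gets back
# Access-Control-Allow-Origin for that origin (plus Allow-Credentials),
# while an unlisted origin gets no Allow-Origin header at all.
ModelImporter.model('setting').set(
    SettingKey.CORS_ALLOW_ORIGIN,
    'https://app.example.com, https://admin.example.com')
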
def jobInfoSpec(job, token=None, logPrint=True):
    """
    Build the jobInfo specification for a task to write status and log
    output back to a Girder job.

    :param job: The job document representing the worker task.
    :type job: dict
    :param token: The token to use. Creates a job token if not passed.
    :type token: str or dict
    :param logPrint: Whether standard output from the job should be
        appended to the job's log.
    :type logPrint: bool
    """
    if token is None:
        token = ModelImporter.model('job', 'jobs').createJobToken(job)

    if isinstance(token, dict):
        token = token['_id']

    return {
        'method': 'PUT',
        'url': '/'.join((getWorkerApiUrl(), 'job', str(job['_id']))),
        'reference': str(job['_id']),
        'headers': {'Girder-Token': token},
        'logPrint': logPrint
    }
def _addOptionalOutputParamBindings(opt_output_params,
                                    bspec, hargs, user, token):
    for param in opt_output_params:
        if not _is_on_girder(param):
            continue

        # check if it was requested in the REST request
        if (param.identifier() + _girderOutputFolderSuffix
                not in hargs['params'] or
                param.identifier() + _girderOutputNameSuffix
                not in hargs['params']):
            continue

        curModel = ModelImporter.model('folder')
        curId = hargs['params'][
            param.identifier() + _girderOutputFolderSuffix]

        doc = curModel.load(id=curId, level=AccessType.WRITE, user=user)
        if doc:
            hargs[param.identifier()] = doc

        if param.identifier() in hargs:
            bspec[param.identifier()] = _createOutputParamBindingSpec(
                param, hargs, user, token)
def delete(self, taskflow, params):
    user = self.getCurrentUser()

    status = self._model.status(user, taskflow)
    if status == TaskFlowState.RUNNING:
        raise RestException('Taskflow is running', 400)

    constructor = load_class(taskflow['taskFlowClass'])
    token = ModelImporter.model('token').createToken(user=user, days=7)

    if taskflow['status'] != TaskFlowState.DELETING:
        taskflow['status'] = TaskFlowState.DELETING
        self._model.save(taskflow)

        workflow = constructor(
            id=str(taskflow['_id']),
            girder_token=token['_id'],
            girder_api_url=cumulus.config.girder.baseUrl)
        workflow.delete()

    # Check if we have any active tasks; if not, then we are done and can
    # delete the tasks and taskflows
    taskflow = self._model.load(taskflow['_id'], user=user,
                                level=AccessType.ADMIN)
    if taskflow['activeTaskCount'] == 0:
        self._model.delete(taskflow)
        cherrypy.response.status = 200
        taskflow['status'] = TaskFlowState.DELETED

        return taskflow

    cherrypy.response.status = 202

    return taskflow
def getCurrentUser(returnToken=False):
    """
    Returns the currently authenticated user based on the token header or
    parameter.

    :param returnToken: Whether we should return a tuple that also contains
        the token.
    :type returnToken: bool
    :returns: the user document from the database, or None if the user is
        not logged in or the token is invalid or expired. If
        returnToken=True, returns a tuple of (user, token).
    """
    event = events.trigger('auth.user.get')
    if event.defaultPrevented and len(event.responses) > 0:
        return event.responses[0]

    token = getCurrentToken()

    def retVal(user, token):
        if returnToken:
            return user, token
        else:
            return user

    if (token is None or token['expires'] < datetime.datetime.utcnow() or
            'userId' not in token):
        return retVal(None, token)
    else:
        try:
            ensureTokenScopes(token, getattr(
                cherrypy.request, 'requiredScopes', TokenScope.USER_AUTH))
        except AccessException:
            return retVal(None, token)

        user = ModelImporter.model('user').load(token['userId'], force=True)
        return retVal(user, token)
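
# A hedged usage sketch inside a REST handler; the endpoint itself and the
# in-scope AccessException import are assumptions about the surrounding
# module.
def listMyFolders(self, params):
    user = getCurrentUser()
    if user is None:
        raise AccessException('You must be logged in.')
    return list(ModelImporter.model('folder').childFolders(
        user, parentType='user', user=user))
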
def ensureTokenScopes(token, scope):
    """
    Call this to validate a token scope for endpoints that require tokens
    other than a user authentication token. Raises an AccessException if
    the required scopes are not allowed by the given token.

    :param token: The token object used in the request.
    :type token: dict
    :param scope: The required scope or set of scopes.
    :type scope: `str or list of str`
    """
    tokenModel = ModelImporter.model('token')
    if tokenModel.hasScope(token, TokenScope.USER_AUTH):
        return

    if not tokenModel.hasScope(token, scope):
        setCurrentUser(None)
        if isinstance(scope, six.string_types):
            scope = (scope,)
        raise AccessException(
            'Invalid token scope.\n'
            'Required: %s.\n'
            'Allowed: %s' % (
                ' '.join(scope),
                ' '.join(tokenModel.getAllowedScopes(token))))
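
# A hedged usage sketch: requiring a narrow scope before acting on a job,
# mirroring the 'item_tasks.job_write:<id>' scope used elsewhere in this
# section; `jobId` is an illustrative name.
token = getCurrentToken()
ensureTokenScopes(token, 'item_tasks.job_write:%s' % jobId)
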
def findNamedFolder(currentUser, user, parent, parentType, name,
                    create=False, joinShareGroup=None, public=False):
    folders = [
        ModelImporter.model('folder').filter(folder, currentUser)
        for folder in ModelImporter.model('folder').childFolders(
            parent=parent, parentType=parentType,
            user=currentUser, filters={'name': name})]

    # folders should have a length of 0 or 1, since we are looking in a
    # user folder for a folder with a certain name
    if len(folders) == 0:
        if create and currentUser:
            folder = ModelImporter.model('folder').createFolder(
                parent, name, parentType=parentType,
                public=public, creator=currentUser)

            if joinShareGroup:
                groupModel = ModelImporter.model('group')
                datasetSharingGroup = groupModel.findOne(query={
                    'name': PluginSettings.DATASET_SHARING_GROUP_NAME})
                ModelImporter.model('folder').setGroupAccess(
                    folder, datasetSharingGroup, 0,
                    currentUser=currentUser, save=True)

            return folder
        else:
            return None
    else:
        return folders[0]
def createSession(self, params):
    smqtkFolder = params['smqtkFolder']
    sessionsFolder = getCreateSessionsFolder()

    # Get the folder with images in it, since this is what's used for
    # computing which descriptor set table to use
    dataFolder = ModelImporter.model('folder').load(
        ObjectId(smqtkFolder), user=getCurrentUser())
    dataFolderId = str(dataFolder['parentId'])

    # Create a session named after its id
    session = ModelImporter.model('item').createItem(
        'placeholder_name', getCurrentUser(), sessionsFolder)
    session['name'] = str(session['_id'])
    ModelImporter.model('item').save(session)
    sessionId = str(session['_id'])

    ModelImporter.model('item').setMetadata(session, {
        'smqtk_folder_id': smqtkFolder,
        'data_folder_id': dataFolderId,
        'pos_uuids': [],
        'neg_uuids': []
    })

    # already registered in the controller, return
    if self.controller.has_session_uuid(sessionId):
        return session

    iqrs = IqrSession(self.positive_seed_neighbors, session_uid=sessionId)

    with self.controller:
        with iqrs:  # because classifier maps are locked by the session
            self.controller.add_session(iqrs)
            self.session_classifiers[sessionId] = None
            self.session_classifier_dirty[sessionId] = True

    return session
def propagateSizeChange(folder, inc):
    ModelImporter.model(folder['baseParentType']).increment(
        query={'_id': folder['baseParentId']}, field='size',
        amount=inc, multi=False)
def addDefaultFolders(event):
    user = event.info
    notebookFolder = ModelImporter.model('folder').createFolder(
        user, 'Notebooks', parentType='user', public=False, creator=user)
    ModelImporter.model('folder').setUserAccess(
        notebookFolder, user, AccessType.ADMIN, save=True)
def __init__(self, name):
    self.name = name
    self.folderModel = ModelImporter.model('folder')
    self.itemModel = ModelImporter.model('item')
    self.fileModel = ModelImporter.model('file')
def test_update(self):
    body = {'name': 'myProject', 'type': 'PyFR', 'steps': ['onestep']}
    json_body = json.dumps(body)

    r = self.request('/projects', method='POST', type='application/json',
                     body=json_body, user=self._user)
    self.assertStatus(r, 201)
    project = r.json

    # Fetch the project so we get the right updated time (it's a timestamp
    # truncation thing)
    r = self.request('/projects/%s' % str(project['_id']), method='GET',
                     type='application/json', user=self._user)
    self.assertStatusOk(r)
    project = r.json
    updated = project['updated']

    # Now try to update one of the immutable properties
    body = {'type': 'FooBar'}
    json_body = json.dumps(body)
    r = self.request('/projects/%s' % str(project['_id']), method='PATCH',
                     type='application/json', body=json_body,
                     user=self._user)
    self.assertStatus(r, 400)

    # Now try adding some bogus data to our project
    body = {'metadata': {'foo': 'bogus'}}
    json_body = json.dumps(body)
    r = self.request('/projects/%s' % str(project['_id']), method='PATCH',
                     type='application/json', body=json_body,
                     user=self._user)
    self.assertStatus(r, 200)
    self.assertNotEqual(updated, r.json['updated'])

    # Check the data was added
    project_model = ModelImporter.model('project', 'hpccloud').load(
        project['_id'], force=True)
    self.assertEqual(project_model['metadata'], body['metadata'])

    # Now try changing the name
    body = {'name': 'FooBar'}
    json_body = json.dumps(body)
    r = self.request('/projects/%s' % str(project['_id']), method='PATCH',
                     type='application/json', body=json_body,
                     user=self._user)
    self.assertStatus(r, 200)

    # Check the name was updated
    project_model = ModelImporter.model('project', 'hpccloud').load(
        project['_id'], force=True)
    self.assertEqual(project_model['name'], body['name'])

    # Now try changing the description
    body = {'description': 'FooBar'}
    json_body = json.dumps(body)
    r = self.request('/projects/%s' % str(project['_id']), method='PATCH',
                     type='application/json', body=json_body,
                     user=self._user)
    self.assertStatus(r, 200)

    # Check the description was updated
    project_model = ModelImporter.model('project', 'hpccloud').load(
        project['_id'], force=True)
    self.assertEqual(project_model['description'], body['description'])
def load(info):
    HashedFile(info['apiRoot'].file)
    ModelImporter.model('file').exposeFields(
        level=AccessType.READ, fields=HashedFile.supportedAlgorithms)
def run(job):
    job_model = ModelImporter.model('job', 'jobs')
    job_model.updateJob(job, status=JobStatus.RUNNING)

    try:
        configFile = os.path.join(os.path.dirname(__file__), 'bsve.json')
        if os.path.exists(configFile):
            bsveConfig = json.load(open(configFile))['bsve']
        else:
            bsveConfig = {}

        kwargs = job['kwargs']
        bsveSearchParams = kwargs['params']['bsveSearchParams']
        datasetId = str(kwargs['dataset']['_id'])
        # TODO better to create a job token rather than a user token?
        token = kwargs['token']

        bsveUtility = BsveUtility(
            user=bsveConfig.get('USER_NAME',
                                os.environ.get('BSVE_USERNAME')),
            apikey=bsveConfig.get('API_KEY',
                                  os.environ.get('BSVE_APIKEY')),
            secret=bsveConfig.get('SECRET_KEY',
                                  os.environ.get('BSVE_SECRETKEY')),
            base=bsveConfig.get('BASE_URL'))

        # TODO sleeping in an async thread probably starves other tasks;
        # it would be better to split this into two or more parts, creating
        # additional jobs as needed
        searchResult = bsveUtility.search(bsveSearchParams)

        # write the output to a json file
        tmpdir = tempfile.mkdtemp()
        outFilepath = tempfile.mkstemp(suffix='.json', dir=tmpdir)[1]
        with open(outFilepath, 'w') as writer:
            writer.write(json.dumps(searchResult))

        # rename the file so it will have the right name when uploaded;
        # this could probably be done post upload
        outFilename = 'search.json'
        humanFilepath = os.path.join(tmpdir, outFilename)
        shutil.move(outFilepath, humanFilepath)

        # connect to girder and upload the file
        # TODO this will probably have to change from local to girder
        # worker so that it can work on the worker machine; at least we
        # need host connection info
        girderPort = config.getConfig()['server.socket_port']
        client = girder_client.GirderClient(port=girderPort)
        client.token = token['_id']
        client.uploadFileToItem(datasetId, humanFilepath)

        # TODO some of the model usage here will only work on a local job;
        # it will have to be rewritten using girder client to work
        # non-locally in girder worker
        user_model = ModelImporter.model('user')
        user = user_model.load(job['userId'], force=True)
        item_model = ModelImporter.model('item')
        dataset = item_model.load(datasetId, level=AccessType.WRITE,
                                  user=user)
        minerva_metadata = mM(dataset)

        file_model = ModelImporter.model('file')
        existing = file_model.findOne({
            'itemId': dataset['_id'],
            'name': outFilename
        })
        if existing:
            minerva_metadata['original_files'] = [{
                '_id': existing['_id'],
                'name': outFilename
            }]
        else:
            raise Exception('Cannot find file %s in dataset %s' %
                            (outFilename, datasetId))

        jsonRow = jsonArrayHead(humanFilepath, limit=1)[0]
        minerva_metadata['json_row'] = jsonRow

        # Generate the geojson for this dataset and set
        # dataset_type = geojson
        geojsonFilename = 'search.geojson'
        geojsonFilepath = os.path.join(tmpdir, geojsonFilename)
        mapping = {
            'dateKeypath': '',
            'latitudeKeypath': 'data.Latitude',
            'longitudeKeypath': 'data.Longitude'
        }
        geojsonMapper = GeoJsonMapper(objConverter=None, mapping=mapping)
        objects = jsonObjectReader(humanFilepath)
        geojsonMapper.mapToJsonFile(tmpdir, objects, geojsonFilepath)
        client.uploadFileToItem(datasetId, geojsonFilepath)
        shutil.rmtree(tmpdir)

        minerva_metadata['mapper'] = mapping
        minerva_metadata['dataset_type'] = 'geojson'
        existing = file_model.findOne({
            'itemId': dataset['_id'],
            'name': geojsonFilename
        })
        if existing:
            minerva_metadata['geojson_file'] = {
                '_id': existing['_id'],
                'name': geojsonFilename
            }
        else:
            raise Exception('Cannot find file %s in dataset %s' %
                            (geojsonFilename, datasetId))

        mM(dataset, minerva_metadata)
        job_model.updateJob(job, status=JobStatus.SUCCESS)
    except Exception:
        t, val, tb = sys.exc_info()
        log = '%s: %s\n%s' % (t.__name__, repr(val),
                              traceback.extract_tb(tb))
        # TODO only works locally
        job_model.updateJob(job, status=JobStatus.ERROR, log=log)
        raise
def load(info):
    HashedFile(info['apiRoot'].file)
    ModelImporter.model('file').exposeFields(
        level=AccessType.READ, fields=SUPPORTED_ALGORITHMS)
    events.bind('data.process', info['name'], _computeHashHook)
def getGravatar(user, size):
    if not user.get('gravatar_baseUrl'):
        # the save hook will cause the gravatar base URL to be computed
        user = ModelImporter.model('user').save(user)

    raise cherrypy.HTTPRedirect(user['gravatar_baseUrl'] + '&s=%d' % size)
def test_create(self):
    project_name = 'myProject'
    description = 'asdf asdfasdf'
    body = {
        'name': project_name,
        'description': description,
        'type': 'PyFR',
        'steps': ['onestep']
    }
    json_body = json.dumps(body)

    r = self.request('/projects', method='POST', type='application/json',
                     body=json_body, user=self._user)
    self.assertStatus(r, 201)
    self.assertEqual(r.json['description'], description)
    self.assertIsNotNone(r.json['updated'])
    self.assertIsNotNone(r.json['created'])

    # Check that a project folder was created
    hpccloud_folder = get_hpccloud_folder(user=self._user)
    filters = {'name': project_name}
    project_folder = ModelImporter.model('folder').childFolders(
        parentType='folder', user=self._user, parent=hpccloud_folder,
        filters=filters, limit=1)
    self.assertEqual(len(list(project_folder)), 1)

    # Test missing name
    body = {'type': 'PyFR', 'steps': ['onestep']}
    json_body = json.dumps(body)
    r = self.request('/projects', method='POST', type='application/json',
                     body=json_body, user=self._user)
    self.assertStatus(r, 400)

    # Test unique name
    body = {'name': 'dup', 'type': 'PyFR', 'steps': ['onestep']}
    json_body = json.dumps(body)
    r = self.request('/projects', method='POST', type='application/json',
                     body=json_body, user=self._user)
    self.assertStatus(r, 201)
    r = self.request('/projects', method='POST', type='application/json',
                     body=json_body, user=self._user)
    self.assertStatus(r, 400)

    # Another user should be able to reuse the name
    r = self.request('/projects', method='POST', type='application/json',
                     body=json_body, user=self._another_user)
    self.assertStatus(r, 201)
def test_delete(self):
    body = {'name': 'deleteme', 'type': 'PyFR', 'steps': ['onestep']}
    json_body = json.dumps(body)

    r = self.request('/projects', method='POST', type='application/json',
                     body=json_body, user=self._another_user)
    self.assertStatus(r, 201)
    project = r.json

    project_folder = ModelImporter.model('folder').load(
        project['folderId'], user=self._another_user)

    # Create a test folder
    folder = ModelImporter.model('folder').createFolder(
        project_folder, 'Delete me please', creator=self._another_user)

    # Create a test item
    item = ModelImporter.model('item').createItem(
        'deleteme', self._another_user, project_folder)

    # Create a test file
    r = self.request(path='/assetstore', method='GET', user=self._user)
    self.assertStatusOk(r)
    self.assertEqual(1, len(r.json))
    assetstore = r.json[0]

    file_item = ModelImporter.model('item').createItem(
        'fileItem', self._another_user, project_folder)
    file = ModelImporter.model('file').createFile(
        self._another_user, file_item, 'test', 100, assetstore)
    file['sha512'] = 'dummy'
    ModelImporter.model('file').save(file)

    # Now delete the project
    r = self.request('/projects/%s' % str(project['_id']),
                     method='DELETE', type='application/json',
                     body=json_body, user=self._another_user)
    self.assertStatusOk(r)

    # Check that the project was deleted
    self.assertIsNone(ModelImporter.model('project', 'hpccloud').load(
        project['_id'], force=True))

    # Check that the folder was deleted
    self.assertIsNone(ModelImporter.model('folder').load(
        folder['_id'], force=True))

    # Check that the item was deleted
    self.assertIsNone(ModelImporter.model('item').load(
        item['_id'], force=True))

    # Check that the file was deleted
    self.assertIsNone(ModelImporter.model('file').load(
        file['_id'], force=True))

    # Check that the project folder was removed
    self.assertIsNone(ModelImporter.model('folder').load(
        project['folderId'], force=True))

    # Try deleting a project containing a simulation
    body = {'name': 'deleteme', 'type': 'PyFR', 'steps': ['onestep']}
    json_body = json.dumps(body)
    r = self.request('/projects', method='POST', type='application/json',
                     body=json_body, user=self._another_user)
    self.assertStatus(r, 201)
    project = r.json

    json_body = json.dumps(self.simulationBody)
    r = self.request('/projects/%s/simulations' % str(project['_id']),
                     method='POST', type='application/json',
                     body=json_body, user=self._another_user)
    self.assertStatus(r, 201)

    # The delete should fail
    r = self.request('/projects/%s' % str(project['_id']),
                     method='DELETE', type='application/json',
                     body=json_body, user=self._another_user)
    self.assertStatus(r, 400)
def setUp(self):
    super(UploadTestCase, self).setUp()

    self._user = ModelImporter.model('user').createUser(
        email='*****@*****.**', login='******', firstName='First',
        lastName='Last', password='******')

    self._folder = six.next(ModelImporter.model('folder').childFolders(
        self._user, parentType='user', force=True,
        filters={'name': 'Public'}))

    item = ModelImporter.model('item').createItem(
        name='bob.txt', creator=self._user, folder=self._folder)
    path = os.path.abspath('plugins/cumulus/plugin_tests/fixtures/bob.txt')
    file = ModelImporter.model('file').createFile(
        creator=self._user, item=item, name='bob.txt',
        size=os.path.getsize(path), mimeType='text/plain',
        assetstore=self.assetstore)
    file['imported'] = True
    file['path'] = path
    ModelImporter.model('file').save(file)

    path = os.path.abspath(
        'plugins/cumulus/plugin_tests/fixtures/bill.txt')
    file = ModelImporter.model('file').createFile(
        creator=self._user, item=item, name='bill.txt',
        size=os.path.getsize(path), mimeType='text/plain',
        assetstore=self.assetstore)
    file['imported'] = True
    file['path'] = path
    ModelImporter.model('file').save(file)

    self._sub_folder = ModelImporter.model('folder').createFolder(
        parentType='folder', parent=self._folder, creator=self._user,
        public=False, name='subfolder')

    item = ModelImporter.model('item').createItem(
        name='will.txt', creator=self._user, folder=self._sub_folder)
    path = os.path.abspath(
        'plugins/cumulus/plugin_tests/fixtures/will.txt')
    file = ModelImporter.model('file').createFile(
        creator=self._user, item=item, name='will.txt',
        size=os.path.getsize(path), mimeType='text/plain',
        assetstore=self.assetstore)
    file['imported'] = True
    file['path'] = path
    ModelImporter.model('file').save(file)
def initUpload(self, parentType, parentId, name, size, mimeType, linkUrl,
               reference, assetstoreId):
    """
    Before any bytes of the actual file are sent, a request should be made
    to initialize the upload. This creates the temporary record of the
    forthcoming upload that will be passed in chunks to the readChunk
    method. If you pass a "linkUrl" parameter, it will make a link file in
    the designated parent.
    """
    user = self.getCurrentUser()
    parent = ModelImporter.model(parentType).load(
        id=parentId, user=user, level=AccessType.WRITE, exc=True)

    if linkUrl is not None:
        return self._model.filter(
            self._model.createLinkFile(
                url=linkUrl, parent=parent, name=name,
                parentType=parentType, creator=user, size=size,
                mimeType=mimeType), user)
    else:
        self.requireParams({'size': size})
        assetstore = None
        if assetstoreId:
            self.requireAdmin(
                user,
                message='You must be an admin to select a destination '
                        'assetstore.')
            assetstore = Assetstore().load(assetstoreId)

        chunk = None
        if size > 0 and cherrypy.request.headers.get('Content-Length'):
            ct = cherrypy.request.body.content_type.value
            if (ct not in cherrypy.request.body.processors and
                    ct.split('/', 1)[0] not in
                    cherrypy.request.body.processors):
                chunk = RequestBodyStream(cherrypy.request.body)
        if chunk is not None and chunk.getSize() <= 0:
            chunk = None

        try:
            # TODO: This can be made more efficient by adding
            #     save=chunk is None
            # to the createUpload call parameters. However, since this is
            # a breaking change, that should be deferred until a major
            # version upgrade.
            upload = Upload().createUpload(
                user=user, name=name, parentType=parentType,
                parent=parent, size=size, mimeType=mimeType,
                reference=reference, assetstore=assetstore)
        except OSError as exc:
            if exc.errno == errno.EACCES:
                raise GirderException(
                    'Failed to create upload.',
                    'girder.api.v1.file.create-upload-failed')
            raise

        if upload['size'] > 0:
            if chunk:
                return Upload().handleChunk(
                    upload, chunk, filter=True, user=user)
            return upload
        else:
            return self._model.filter(
                Upload().finalizeUpload(upload), user)
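
# A hedged client-side sketch of the protocol this endpoint implements,
# using girder_client, which wraps the upload initialization and chunk
# requests. The API URL, credentials, folder id, and file path are all
# illustrative.
import girder_client

gc = girder_client.GirderClient(apiUrl='http://localhost:8080/api/v1')
gc.authenticate('admin', 'password')
gc.uploadFileToFolder('5f0000000000000000000000', '/path/to/data.bin')
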
def create(self, user, project, simulation, create_step_folders=True):
    """
    Create a simulation.

    :param user: The user creating the simulation.
    :param project: The project this simulation is associated with.
    :param simulation: The simulation object.
    :param create_step_folders: Whether to create a folder for each of the
        simulation's steps.
    """
    simulation['projectId'] = project['_id']
    simulation['userId'] = user['_id']
    now = datetime.datetime.utcnow()
    simulation['created'] = now
    simulation['updated'] = now

    # validate first, so we know we have the properties we need
    self.validate(simulation)

    simulations_folder = get_simulations_folder(user, project)
    simulation_folder = ModelImporter.model('folder').createFolder(
        simulations_folder, simulation['name'], parentType='folder',
        creator=user)
    simulation['folderId'] = simulation_folder['_id']

    # Set the status of all the steps to 'created' and create the folders
    # for each step
    for name, step in six.iteritems(simulation['steps']):
        step['status'] = 'created'
        if create_step_folders:
            step_folder = ModelImporter.model('folder').createFolder(
                simulation_folder, name, parentType='folder', creator=user)
            step['folderId'] = step_folder['_id']

    # We should share with the same users and groups associated with the
    # project
    users = [
        user_access for user_access in project['access']['users']
        if user_access['id'] != user['_id']
    ]
    groups = project['access']['groups']

    # Give admin access to the creator
    simulation = self.setUserAccess(
        simulation, user=user, level=AccessType.ADMIN, save=False)
    simulation['access']['users'] += users
    simulation['access']['groups'] += groups

    # Share the simulation folder
    simulation_folder = ModelImporter.model('folder').load(
        simulation['folderId'], user=user)
    simulation_folder['access']['users'] += users
    simulation_folder['access']['groups'] += groups
    ModelImporter.model('folder').setAccessList(
        simulation_folder, simulation_folder['access'], save=True,
        recurse=True, user=user)

    return self.save(simulation)
def copyFolder(self, srcFolder, parent=None, name=None, description=None,
               parentType=None, public=None, creator=None, progress=None,
               firstFolder=None):
    """
    Copy a folder, including all child items and child folders.

    :param srcFolder: the folder to copy.
    :type srcFolder: dict
    :param parent: The parent document. Must be a folder, user, or
        collection.
    :type parent: dict
    :param name: The name of the new folder. None to copy the original
        name.
    :type name: str
    :param description: Description for the new folder. None to copy the
        original description.
    :type description: str
    :param parentType: What type the parent is:
        ('folder' | 'user' | 'collection')
    :type parentType: str
    :param public: Public read access flag. None to inherit from parent,
        'original' to inherit from the original folder.
    :type public: bool, None, or 'original'.
    :param creator: user representing the creator of the new folder.
    :type creator: dict
    :param progress: a progress context to record progress on.
    :type progress: girder.utility.progress.ProgressContext or None.
    :param firstFolder: if not None, the first folder copied in a tree of
        folders.
    :returns: the new folder document.
    """
    setResponseTimeLimit()

    if parentType is None:
        parentType = srcFolder['parentCollection']
    parentType = parentType.lower()
    if parentType not in ('folder', 'user', 'collection'):
        raise ValidationException(
            'The parentType must be folder, collection, or user.')

    if parent is None:
        parent = ModelImporter.model(parentType).load(
            srcFolder['parentId'], force=True)
    if name is None:
        name = srcFolder['name']
    if description is None:
        description = srcFolder['description']
    if public is not None and isinstance(public, str):
        if public == 'original':
            public = srcFolder.get('public', None)
        else:
            public = public == 'true'

    newFolder = self.createFolder(
        parentType=parentType, parent=parent, name=name,
        description=description, public=public, creator=creator,
        allowRename=True)
    if firstFolder is None:
        firstFolder = newFolder

    return self.copyFolderComponents(
        srcFolder, newFolder, creator, progress, firstFolder)
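
# A hedged usage sketch: copying a folder under a new parent while
# inheriting the source folder's public flag. `src`, `dest`, and `admin`
# are illustrative documents.
newFolder = Folder().copyFolder(
    src, parent=dest, name='copy of %s' % src['name'],
    parentType='folder', public='original', creator=admin)
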
def load(info):
    events.bind('jobs.schedule', 'worker', schedule)
    events.bind('jobs.status.validate', 'worker', validateJobStatus)

    ModelImporter.model('job', 'jobs').exposeFields(
        AccessType.SITE_ADMIN, {'celeryTaskId'})
from girder.api.describe import Description, autoDescribeRoute
from girder.api.rest import Resource, filtermodel, getCurrentToken, \
    getCurrentUser, getApiUrl, RestException
from girder.utility.model_importer import ModelImporter
from girder.constants import AccessType

from smqtk.representation.descriptor_set.postgres import \
    PostgresDescriptorSet
from smqtk.algorithms.nn_index.lsh.functors.itq import ItqFunctor
from smqtk.representation.data_element.girder import GirderDataElement
from smqtk.representation.key_value.memory import MemoryKeyValueStore
from smqtk.algorithms.nn_index.lsh import LSHNearestNeighborIndex

from .utils import localSmqtkFileIdFromName

import functools

setting = ModelImporter.model('setting')


class NearestNeighbors(Resource):
    def __init__(self):
        self.resourceName = 'smqtk_nearest_neighbors'
        self.route('GET', ('nn',), self.nearestNeighbors)

    @staticmethod
    def descriptorSetFromItem(item):
        """
        Get the descriptor set related to the item (its folder id).

        Note that this only works for top level items in the directory,
        meaning images must have been processed for the directory this
        item is in. Ideally, when processing images works recursively,
        this restriction can be lifted.
        """
def allChildItems(parent, parentType, user, limit=0, offset=0,
                  sort=None, _internal=None, **kwargs):
    """
    This generator will yield all items that are children of the resource
    or recursively children of child folders of the resource, with access
    policy filtering. Passes any kwargs to the find function.

    :param parent: The parent object.
    :type parent: dict
    :param parentType: The parent type.
    :type parentType: 'user', 'folder', or 'collection'
    :param user: The user running the query. Only returns items that this
        user can see.
    :param limit: Result limit.
    :param offset: Result offset.
    :param sort: The sort structure to pass to pymongo. Child folders are
        served depth first, and this sort is applied within the resource
        and then within each child folder. Child items are processed
        before child folders.
    """
    if _internal is None:
        _internal = {
            'limit': limit,
            'offset': offset,
            'done': False
        }
    model = ModelImporter.model(parentType)
    if hasattr(model, 'childItems'):
        if parentType == 'folder':
            kwargs = kwargs.copy()
            kwargs['includeVirtual'] = True
        for item in model.childItems(
                parent, user=user,
                limit=_internal['limit'] + _internal['offset'],
                offset=0, sort=sort, **kwargs):
            if _internal['offset']:
                _internal['offset'] -= 1
            else:
                yield item
                if _internal['limit']:
                    _internal['limit'] -= 1
                    if not _internal['limit']:
                        _internal['done'] = True
                        return
    for folder in Folder().childFolders(
            parentType=parentType, parent=parent, user=user,
            limit=0, offset=0, sort=sort, **kwargs):
        if _internal['done']:
            return
        for item in allChildItems(folder, 'folder', user, sort=sort,
                                  _internal=_internal, **kwargs):
            yield item
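
# A hedged usage sketch: walking every visible item under a collection,
# depth first. `coll` and `user` are illustrative documents.
for item in allChildItems(coll, 'collection', user):
    print(item['name'])
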
def validateCeleryUserId(doc):
    if not doc['value']:
        raise ValidationException('Celery user ID must not be empty.',
                                  'value')
    ModelImporter.model('user').load(doc['value'], force=True, exc=True)
def getSessions(self, params):
    sessionsFolder = getCreateSessionsFolder()
    return list(
        ModelImporter.model('folder').childItems(folder=sessionsFolder))
def conversions(self, output_format, params):
    user = self.getCurrentUser()

    if output_format not in Molecule.output_formats:
        raise RestException('Output format not supported.', code=404)

    body = self.getBodyJson()

    if 'fileId' not in body:
        raise RestException('Invalid request body.', code=400)

    file_id = body['fileId']

    file = ModelImporter.model('file').load(file_id, user=user)
    if file is None:
        raise RestException('File not found.', code=404)

    input_format = file['name'].split('.')[-1]

    if input_format not in Molecule.input_formats:
        raise RestException('Input format not supported.', code=400)

    with File().open(file) as f:
        data_str = f.read().decode()

    if output_format.startswith('inchi'):
        atom_count = 0
        if input_format == 'pdb':
            props = openbabel.properties(data_str, input_format)
            atom_count = props['atomCount']
        else:
            atom_count = int(avogadro.atom_count(data_str, input_format))

        if atom_count > 1024:
            raise RestException(
                'Unable to generate InChI, molecule has more than 1024 '
                'atoms.', code=400)

        if input_format == 'pdb':
            (inchi, inchikey) = openbabel.to_inchi(data_str, input_format)
        else:
            sdf = avogadro.convert_str(data_str, input_format, 'sdf')
            (inchi, inchikey) = openbabel.to_inchi(sdf, 'sdf')

        if output_format == 'inchi':
            return inchi
        else:
            return inchikey
    else:
        output = ''
        mime = 'text/plain'
        if input_format == 'pdb':
            (output, mime) = openbabel.convert_str(
                data_str, input_format, output_format)
        else:
            output = avogadro.convert_str(
                data_str, input_format, output_format)

        def stream():
            cherrypy.response.headers['Content-Type'] = mime
            yield output

        return stream
def import_recursive(job):
    try:
        root = job['kwargs']['root']
        token = job['kwargs']['token']

        jobModel = ModelImporter.model('job', 'jobs')
        userModel = ModelImporter.model('user')

        user = userModel.load(job['userId'], force=True)

        childModel = ModelImporter.model('cohort', 'digital_slide_archive')
        children = list(ModelImporter.model('folder').childFolders(
            root, 'collection', user=user))
        count = len(children)
        progress = 0

        job = jobModel.updateJob(
            job, log='Started TCGA import\n', status=JobStatus.RUNNING,
            progressCurrent=progress, progressTotal=count)
        logger.info('Starting recursive TCGA import')

        for child in children:
            progress += 1
            try:
                msg = 'Importing "%s"' % child.get('name', '')
                job = jobModel.updateJob(
                    job, log=msg, progressMessage=msg + '\n',
                    progressCurrent=progress)
                logger.debug(msg)
                childModel.importDocument(
                    child, recurse=True, user=user, token=token, job=job)
                job = jobModel.load(id=job['_id'], force=True)

                # handle any request to stop execution
                if (not job or job['status'] in (
                        JobStatus.CANCELED, JobStatus.ERROR)):
                    logger.info('TCGA import job halted')
                    return
            except ValidationException:
                logger.warning('Failed to import %s' %
                               child.get('name', ''))

        logger.info('Finished recursive TCGA import')

        job = jobModel.updateJob(
            job, log='Finished TCGA import\n', status=JobStatus.SUCCESS,
            progressCurrent=count,
            progressMessage='Finished TCGA import')
    except Exception as e:
        logger.exception('Importing TCGA failed with %s' % str(e))
        job = jobModel.updateJob(
            job, log='Import failed with %s\n' % str(e),
            status=JobStatus.ERROR)