def parentsToRoot(self, folder, curPath=None, user=None, force=False, level=AccessType.READ):
    """
    Get the path to traverse to a root of the hierarchy.

    :param folder: The folder whose root to find
    :type folder: dict
    :param curPath: accumulator for the path built so far by the recursive
        calls; callers normally omit it.
    :type curPath: list or None
    :param user: the user used for access filtering of returned documents.
    :param force: if True, skip permission checks and return unfiltered
        documents.
    :type force: bool
    :param level: minimum access level required on each ancestor.
    :returns: an ordered list of dictionaries from root to the current folder
    """
    curPath = curPath or []
    curParentId = folder['parentId']
    curParentType = folder['parentCollection']
    if curParentType in ('user', 'collection'):
        # Users and collections are hierarchy roots: load the root document
        # and terminate the recursion.
        curParentObject = ModelImporter.model(curParentType).load(
            curParentId, user=user, level=level, force=force)
        if force:
            parentFiltered = curParentObject
        else:
            # Strip fields the requesting user may not see.
            parentFiltered = ModelImporter.model(curParentType).filter(curParentObject, user)
        return [{
            'type': curParentType,
            'object': parentFiltered
        }] + curPath
    else:
        # Parent is another folder: prepend it to the path and recurse upward.
        curParentObject = self.load(curParentId, user=user, level=level, force=force)
        curPath = [{
            'type': curParentType,
            'object': curParentObject if force else self.filter(curParentObject, user)
        }] + curPath
        return self.parentsToRoot(curParentObject, curPath, user=user, force=force)
def load(info):
    """
    Plugin entry point: expose job fields, configure static file serving for
    the plugin's web client, and mount the Osumo app at the server root
    (moving the girder app to /girder).

    :param info: the plugin load info dict provided by Girder.
    """
    ModelImporter.model('job', 'jobs').exposeFields(
        level=AccessType.ADMIN, fields='processedFiles')
    ModelImporter.model('job', 'jobs').exposeFields(
        level=AccessType.SITE_ADMIN, fields='processedFiles')

    # Serve this plugin's web-external directory via cherrypy's staticdir
    # tool, expressed relative to the configured static root.
    Osumo._cp_config['tools.staticdir.dir'] = os.path.join(
        os.path.relpath(info['pluginRootDir'],
                        info['config']['/']['tools.staticdir.root']),
        'web-external')

    # Move girder app to /girder, serve sumo app from /
    info['apiRoot'].osumo = Osumo()
    (
        info['serverRoot'],
        info['serverRoot'].girder
    ) = (
        info['apiRoot'].osumo,
        info['serverRoot']
    )
    info['serverRoot'].api = info['serverRoot'].girder.api
    # Removed: a stray bare expression (`info['serverRoot'].girder.api`) that
    # evaluated an attribute and discarded the result — it had no effect.

    events.bind('data.process', 'osumo', info['apiRoot'].osumo.dataProcess)
def mongoSearch(self, params):
    """
    Perform a raw MongoDB query against a whitelisted set of collections,
    returning only whitelisted fields and only documents the current user is
    permitted to read.

    :param params: REST parameters; must contain 'type' (collection name)
        and 'q' (a JSON-encoded Mongo query).
    :returns: a list of matching documents.
    :raises RestException: on an invalid type or a non-JSON query.
    """
    self.requireParams(('type', 'q'), params)
    # Collection name -> fields that may be returned to clients.
    allowed = {
        'collection': ['_id', 'name', 'description'],
        'folder': ['_id', 'name', 'description'],
        'item': ['_id', 'name', 'description', 'folderId'],
        'user': ['_id', 'firstName', 'lastName', 'login']
    }
    limit, offset, sort = self.getPagingParameters(params, 'name')
    coll = params['type']

    # Let plugins extend the set of searchable collections/fields.
    events.trigger('mongo_search.allowed_collections', info=allowed)

    if coll not in allowed:
        raise RestException('Invalid resource type: {}'.format(coll))

    try:
        query = bson.json_util.loads(params['q'])
    except ValueError:
        raise RestException('The query parameter must be a JSON object.')

    # Use the class-level accessor for consistency with the rest of the file
    # (no need to instantiate ModelImporter).
    model = ModelImporter.model(coll)
    if hasattr(model, 'filterResultsByPermission'):
        # ACL-aware model: also fetch the access fields, then filter the
        # cursor per-user and strip those fields from the results.
        cursor = model.find(
            query, fields=allowed[coll] + ['public', 'access'], limit=0)
        # list(...) instead of a copying list comprehension.
        return list(model.filterResultsByPermission(
            cursor, user=self.getCurrentUser(), level=AccessType.READ,
            limit=limit, offset=offset, removeKeys=('public', 'access')))
    else:
        return list(model.find(
            query, fields=allowed[coll], limit=limit, offset=offset))
def load(info):
    """Wire download-statistics event handlers and expose the counter field."""
    # REST events fired at the start and at the end of a file download.
    for eventName, handler in (
            ('model.file.download.request', _onDownloadFileRequest),
            ('model.file.download.complete', _onDownloadFileComplete)):
        events.bind(eventName, 'download_statistics', handler)

    # Make the accumulated counters readable on the file model.
    ModelImporter.model('file').exposeFields(
        level=AccessType.READ, fields='downloadStatistics')
def cancel(event):
    """
    This is bound to the "jobs.cancel" event, and will be triggered any time
    a job is canceled. This handler will process any job that has the
    handler field set to "worker_handler" or "celery_handler" by revoking
    the underlying Celery task.

    :param event: the girder event; event.info is the job document.
    """
    job = event.info
    if job['handler'] in ['worker_handler', 'celery_handler']:
        # Stop event propagation and prevent default, we are using a custom
        # state
        event.stopPropagation().preventDefault()

        celeryTaskId = job.get('celeryTaskId')

        if celeryTaskId is None:
            # logger.warn is a deprecated alias for warning; also use lazy
            # %-args so formatting only happens if the record is emitted.
            logger.warning(
                "Unable to cancel Celery task. Job '%s' doesn't have a Celery task id.",
                job['_id'])
            return

        if job['status'] not in [CustomJobStatus.CANCELING, JobStatus.CANCELED,
                                 JobStatus.SUCCESS, JobStatus.ERROR]:
            # Set the job status to canceling
            ModelImporter.model('job', 'jobs').updateJob(
                job, status=CustomJobStatus.CANCELING)

            # Send the revoke request.
            asyncResult = AsyncResult(celeryTaskId, app=getCeleryApp())
            asyncResult.revoke()
def checkAccess(self, level=AccessType.READ, priv=False, fail=True):
    """
    Check whether the current user has the given access level on the
    configured GRITS group(s).

    :param level: the access level to require.
    :param priv: if True, access on the privileged group is required;
        otherwise access on either group suffices.
    :param fail: if True, raise on denial; if False, return False instead.
    :returns: True if access is granted; False if denied and fail is False.
    :raises RestException: (code 403) if access is denied and fail is True.
    """
    # Fetch the grits info once (the original called gritsInfo() twice).
    info = self.gritsInfo()
    g = info['group']
    p = info['groupPriv']
    user = self.getCurrentUser()
    groupModel = ModelImporter().model('group')

    # Reduce each group document to a truthy "has access" flag.
    try:
        groupModel.requireAccess(p, user, level)
    except AccessException:
        p = False
    try:
        groupModel.requireAccess(g, user, level)
    except AccessException:
        g = False

    # Single denial path replaces the two duplicated blocks.
    denied = (priv and not p) or (not priv and not (p or g))
    if denied:
        if not fail:
            return False
        raise RestException("Access denied", code=403)
    return True
def propagateSizeChange(self, item, sizeIncrement, updateItemSize=True):
    """
    Propagates a file size change (or file creation) to the necessary
    parents in the hierarchy. Internally, this records subtree size in the
    item, the parent folder, and the root node under which the item lives.
    Should be called anytime a new file is added, a file is deleted, or a
    file size changes.

    :param item: The parent item of the file.
    :type item: dict
    :param sizeIncrement: The change in size to propagate.
    :type sizeIncrement: int
    :param updateItemSize: Whether the item size should be updated. Set to
        False if you plan to delete the item immediately and don't care to
        update its size.
    """
    from .folder import Folder
    from .item import Item

    if updateItemSize:
        # Propagate size up to item
        Item().increment(query={
            '_id': item['_id']
        }, field='size', amount=sizeIncrement, multi=False)

    # Propagate size to direct parent folder
    Folder().increment(query={
        '_id': item['folderId']
    }, field='size', amount=sizeIncrement, multi=False)

    # Propagate size up to root data node
    # NOTE(review): these are three separate DB updates; they are not atomic
    # as a group, so sizes can be transiently inconsistent.
    ModelImporter.model(item['baseParentType']).increment(query={
        '_id': item['baseParentId']
    }, field='size', amount=sizeIncrement, multi=False)
def _onUpload(event):
    """
    Look at uploads containing references related to this plugin. If found,
    they are used to link item task outputs back to a job document.
    """
    try:
        ref = json.loads(event.info.get('reference'))
    except (ValueError, TypeError):
        # Reference missing or not valid JSON: not an item_tasks upload.
        return

    if isinstance(ref, dict) and ref.get('type') == 'item_tasks.output':
        jobModel = ModelImporter.model('job', 'jobs')
        tokenModel = ModelImporter.model('token')
        token = event.info['currentToken']

        if tokenModel.hasScope(token, 'item_tasks.job_write:%s' % ref['jobId']):
            # A job-scoped token grants write access without a user check.
            job = jobModel.load(ref['jobId'], force=True, exc=True)
        else:
            # Otherwise require the uploading user to have write access.
            job = jobModel.load(
                ref['jobId'], level=AccessType.WRITE,
                user=event.info['currentUser'], exc=True)

        file = event.info['file']
        item = ModelImporter.model('item').load(file['itemId'], force=True)

        # Add link to job model to the output item
        jobModel.updateJob(job, otherFields={
            'itemTaskBindings.outputs.%s.itemId' % ref['id']: item['_id']
        })

        # Also a link in the item to the job that created it
        item['createdByJob'] = job['_id']
        ModelImporter.model('item').save(item)
def load(info):
    """Register worker event handlers and expose the Celery task id field."""
    handlerTable = (
        ('jobs.schedule', schedule),
        ('jobs.status.validate', validateJobStatus),
        ('model.setting.validate', validateSettings),
    )
    for eventName, handler in handlerTable:
        events.bind(eventName, 'worker', handler)

    ModelImporter.model('job', 'jobs').exposeFields(
        AccessType.SITE_ADMIN, {'celeryTaskId'})
def updateItemLicense(event):
    """
    REST event handler to update item with license parameter, if provided.

    :param event: the REST event; event.info['params'] may carry 'license'
        and event.info['returnVal'] is the saved item.
    :raises ValidationException: if the new license name is not in the
        configured list of licenses.
    """
    params = event.info['params']
    if 'license' not in params:
        return

    itemModel = ModelImporter.model('item')
    item = itemModel.load(event.info['returnVal']['_id'], force=True,
                          exc=True)

    newLicense = validateString(params['license'])
    # .get() tolerates items that predate the license field instead of
    # raising KeyError.
    if item.get('license') == newLicense:
        return

    # Ensure that new license name is in configured list of licenses.
    #
    # Enforcing this here, instead of when validating the item, avoids an
    # extra database lookup (for the settings) on every future item save.
    if newLicense:
        licenseSetting = ModelImporter.model('setting').get(
            PluginSettings.LICENSES)
        # Renamed loop variables to avoid shadowing the builtin 'license'.
        validLicense = any(
            lic['name'] == newLicense
            for licenseGroup in licenseSetting
            for lic in licenseGroup['licenses'])
        if not validLicense:
            raise ValidationException(
                'License name must be in configured list of licenses.',
                'license')

    item['license'] = newLicense
    item = itemModel.save(item)

    event.preventDefault()
    event.addResponse(item)
def load(info):
    """Add the gravatar route, expose the cached URL field, watch user saves."""
    info['apiRoot'].user.route('GET', (':id', 'gravatar'), getGravatar)

    userModel = ModelImporter.model('user')
    userModel.exposeFields(level=AccessType.READ, fields='gravatar_baseUrl')

    events.bind('model.user.save', 'gravatar', _userUpdate)
def schedule(event):
    """
    This is bound to the "jobs.schedule" event, and will be triggered any
    time a job is scheduled. This handler will process any job that has the
    handler field set to "worker_handler": it marks the job queued,
    dispatches it to Celery, and records the resulting task id.
    """
    job = event.info
    if job['handler'] != 'worker_handler':
        return

    taskName = job.get('celeryTaskName', 'girder_worker.run')
    jobModel = ModelImporter.model('job', 'jobs')

    # Set the job status to queued before dispatching.
    jobModel.updateJob(job, status=JobStatus.QUEUED)

    # Send the task to celery
    asyncResult = getCeleryApp().send_task(
        taskName, job['args'], job['kwargs'],
        queue=job.get('celeryQueue'),
        headers={
            'jobInfoSpec': jobInfoSpec(job, job.get('token', None)),
            'apiUrl': getWorkerApiUrl()
        })

    # Record the task ID from celery.
    jobModel.updateJob(job, otherFields={'celeryTaskId': asyncResult.task_id})

    # Stop event propagation since we have taken care of scheduling.
    event.stopPropagation()
def isOrphan(self, file):
    """
    Returns True if this file is orphaned (its item or attached entity is
    missing).

    :param file: The file to check.
    :type file: dict
    """
    if file.get('attachedToId'):
        attachedToType = file.get('attachedToType')
        # attachedToType is either a plain model name or a two-element
        # [name, plugin] list.
        if isinstance(attachedToType, six.string_types):
            modelType = ModelImporter.model(attachedToType)
        elif isinstance(attachedToType, list) and len(attachedToType) == 2:
            modelType = ModelImporter.model(*attachedToType)
        else:
            # Invalid 'attachedToType'
            return True
        if isinstance(modelType, (acl_mixin.AccessControlMixin,
                                  AccessControlledModel)):
            # Access-controlled models need force=True to bypass ACL checks.
            attachedDoc = modelType.load(
                file.get('attachedToId'), force=True)
        else:
            attachedDoc = modelType.load(
                file.get('attachedToId'))
    else:
        from .item import Item
        attachedDoc = Item().load(file.get('itemId'), force=True)
    # Orphaned iff the referenced document failed to load.
    return not attachedDoc
def load(info):
    """Register the gravatar route, exposed field, and event handlers."""
    info["apiRoot"].user.route("GET", (":id", "gravatar"), getGravatar)

    userModel = ModelImporter.model("user")
    userModel.exposeFields(level=AccessType.READ, fields="gravatar_baseUrl")

    for eventName, handler in (
            ("model.setting.validate", _validateSettings),
            ("rest.put.user/:id.before", _userUpdate)):
        events.bind(eventName, "gravatar", handler)
def nearestNeighbors(self, item, limit, params):
    """
    Return items from the same folder that are nearest neighbors of the
    given item's SMQTK descriptor, annotated with their distance.

    :param item: the query item; must carry meta.smqtk_uuid.
    :param limit: maximum number of neighbors to request.
    :param params: REST params dict (unused; kept for the route signature).
    :returns: list of item documents, each with 'smqtk_distance' added.
    :raises RestException: if the NN index or the descriptor is missing.
    """
    limit = int(limit)
    desc_index = self.descriptorIndexFromItem(item)
    nn_index = self.nearestNeighborIndex(item, getCurrentUser(), desc_index)

    if nn_index is None:
        raise RestException('Nearest neighbor index could not be found.')

    try:
        smqtk_uuid = item['meta']['smqtk_uuid']
        descriptor = desc_index.get_descriptor(smqtk_uuid)
    except KeyError:
        raise RestException('Unable to retrieve image descriptor for querying object.')

    neighbors, dists = nn_index.nn(descriptor, limit)
    uuid_dist = dict(zip([x.uuid() for x in neighbors], dists))

    # Hoisted: the folder model is used twice below.
    folderModel = ModelImporter.model('folder')
    smqtkFolder = folderModel.load(item['folderId'], user=getCurrentUser())

    items = list(folderModel.childItems(smqtkFolder, filters={
        'meta.smqtk_uuid': {'$in': [x.uuid() for x in neighbors]}}))

    # Fix: the loop variable previously shadowed the 'item' parameter.
    for neighborItem in items:
        neighborItem['smqtk_distance'] = uuid_dist[
            neighborItem['meta']['smqtk_uuid']]

    return items
def _getLargeImagePath(self):
    """
    Resolve the filesystem path of this item's large image file.

    :returns: the absolute path of the large image file in its assetstore.
    :raises TileSourceAssetstoreException: if the file is not in a
        filesystem assetstore.
    :raises TileSourceException: if the item has no usable large image file.
    """
    try:
        largeImageFileId = self.item['largeImage']['fileId']
        # Access control checking should already have been done on
        # item, so don't repeat.
        # TODO: is it possible that the file is on a different item, so
        # do we want to repeat the access check?
        largeImageFile = ModelImporter.model('file').load(
            largeImageFileId, force=True)

        # TODO: can we move some of this logic into Girder core?
        assetstore = ModelImporter.model('assetstore').load(
            largeImageFile['assetstoreId'])
        adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)

        if not isinstance(
                adapter,
                assetstore_utilities.FilesystemAssetstoreAdapter):
            raise TileSourceAssetstoreException(
                'Non-filesystem assetstores are not supported')

        largeImagePath = adapter.fullPath(largeImageFile)
        return largeImagePath
    except TileSourceAssetstoreException:
        raise
    except (KeyError, ValidationException, TileSourceException) as e:
        # str(e) instead of e.message: the .message attribute does not
        # exist on exceptions in Python 3.
        raise TileSourceException(
            'No large image file in this item: %s' % str(e))
def process_annotations(event):
    """Add annotations to an image on a ``data.process`` event"""
    info = event.info
    # Only handle uploaded files whose extension includes 'anot'.
    if 'anot' in info.get('file', {}).get('exts', []):
        reference = info.get('reference', None)
        try:
            reference = json.loads(reference)
        except (ValueError, TypeError):
            print(TerminalColor.error(
                'Warning: Could not get reference from the annotation param. '
                'Make sure you have at ctk-cli>=1.3.1 installed.'
            ))
            return

        if 'userId' not in reference or 'itemId' not in reference:
            print(TerminalColor.error(
                'Annotation reference does not contain required information.'
            ))
            return

        userId = reference['userId']
        imageId = reference['itemId']

        # load model classes
        Item = ModelImporter.model('item')
        File = ModelImporter.model('file')
        User = ModelImporter.model('user')
        Annotation = ModelImporter.model('annotation', plugin='large_image')

        # load models from the database
        user = User.load(userId, force=True)
        # NOTE(review): reference['itemId'] is loaded via the *File* model
        # here, then its 'itemId' is used to load the item — presumably the
        # reference actually carries a file id; confirm against the producer
        # of the reference.
        image = File.load(imageId, level=AccessType.READ, user=user)
        item = Item.load(image['itemId'], level=AccessType.WRITE, user=user)
        file = File.load(
            info.get('file', {}).get('_id'),
            level=AccessType.READ, user=user
        )

        if not (item and user and file):
            print(TerminalColor.error(
                'Could not load models from the database'
            ))
            return

        try:
            # File.download returns a generator of byte chunks.
            data = json.loads(
                ''.join(File.download(file)())
            )
        except Exception:
            print(TerminalColor.error(
                'Could not parse annotation file'
            ))
            return

        Annotation.createAnnotation(
            item, user, data
        )
def validate_settings(event):
    """Validate minerva specific settings."""
    if event.info['key'] == 'minerva.geonames_folder':
        # Raises if the value does not resolve to an existing folder.
        ModelImporter.model('folder').load(
            event.info['value'], exc=True, force=True)
        event.preventDefault().stopPropagation()
def load(info):
    """Set up gravatar support: route, exposed field, and event bindings."""
    root = info['apiRoot']
    root.user.route('GET', (':id', 'gravatar'), getGravatar)

    ModelImporter.model('user').exposeFields(
        level=AccessType.READ, fields='gravatar_baseUrl')

    events.bind('model.setting.validate', 'gravatar', validateSettings)
    events.bind('rest.put.user/:id.before', 'gravatar', userUpdate)
def validateSettings(event):
    """Validate the scoring-user setting: non-empty and an existing user."""
    key = event.info['key']
    value = event.info['value']
    if key != PluginSettings.SCORING_USER_ID:
        return
    if not value:
        raise ValidationException(
            'Scoring user ID must not be empty.', 'value')
    # exc=True raises if no such user exists.
    ModelImporter.model('user').load(value, force=True, exc=True)
    event.preventDefault().stopPropagation()
def load(info):
    """Mount the thumbnail resource, expose fields, and bind cleanup hooks."""
    info['apiRoot'].thumbnail = rest.Thumbnail()

    for modelName in ('item', 'collection', 'folder', 'user'):
        ModelImporter.model(modelName).exposeFields(
            level=AccessType.READ, fields='_thumbnails')
        # Clean up thumbnails when their owning resource is removed.
        events.bind('model.%s.remove' % modelName, info['name'],
                    removeThumbnails)

    events.bind('model.file.remove', info['name'], removeThumbnailLink)
    events.bind('data.process', info['name'], _onUpload)
def load(info):
    """Register worker job-lifecycle handlers and expose Celery fields."""
    bindings = (
        ('jobs.schedule', schedule),
        ('jobs.status.validate', validateJobStatus),
        ('jobs.status.validTransitions', validTransitions),
        ('jobs.cancel', cancel),
        ('model.job.save.after', attachJobInfoSpec),
        ('model.job.save', attachParentJob),
    )
    for eventName, handler in bindings:
        events.bind(eventName, 'worker', handler)

    ModelImporter.model('job', 'jobs').exposeFields(
        AccessType.SITE_ADMIN, {'celeryTaskId', 'celeryQueue'})
def submit(self, params):
    """
    Submit a diagnosis task for the given url/content and poll it to
    completion, returning the task result (optionally augmented with
    scraped data for privileged users).

    :param params: REST params; may carry 'url' and 'content'.
    :returns: the task's result dict.
    :raises AccessException: if the configured group is missing or the user
        lacks read access to it.
    :raises RestException: (408) on task timeout, (400) on task failure.
    """
    url = params.get('url')
    content = params.get('content')
    user = self.getCurrentUser()

    # check permissions
    group = ModelImporter().model('group').find({'name': config['group']})
    if group.count():
        # the group must exist
        group = group[0]
        # the user must have read access to the group
        ModelImporter().model('group').requireAccess(group, user, AccessType.READ)
    else:
        raise AccessException('Invalid group name configured')

    # Create the diagnosis task
    statusMethod = server_support.handleDiagnosis(content=content, url=url)

    # Get the initial status
    status = statusMethod()

    # Get the maximum number of times to poll the task
    # (true division: maxLoops may be a float; compared with < below)
    maxLoops = config['maxTaskWait']/config['pollingInterval']

    # Loop until the task is finished
    iloop = 0
    while status['status'] == 'pending' and iloop < maxLoops:
        iloop += 1
        time.sleep(config['pollingInterval'])
        status = statusMethod()

    # Get status and report errors
    if status['status'] == 'pending':
        raise RestException("Task timed out.", code=408)
    if status['status'] == 'failure':
        raise RestException(status['message'], code=400)

    # check access to private data
    group = ModelImporter().model('group').find({'name': config['privateGroup']})
    hasAccess = False
    if group.count():
        group = group[0]
        try:
            ModelImporter().model('group').requireAccess(group, user, AccessType.READ)
            hasAccess = True
        except AccessException:
            # Lack of access is not an error here; it just limits output.
            pass

    # Append content data if the user has access
    if hasAccess:
        status["result"]["scrapedData"] = status["content"]

    return status["result"]
def _userUpdate(event):
    """
    Called when the user document is being changed. If the email field
    changes, we wipe the cached gravatar URL so it will be recomputed on
    next request.
    """
    params = event.info["params"]
    if "email" not in params:
        return
    userModel = ModelImporter.model("user")
    user = userModel.load(event.info["id"], force=True)
    if user["email"] != params["email"] and user.get("gravatar_baseUrl"):
        del user["gravatar_baseUrl"]
        userModel.save(user)
def _onDownloadFileRequest(event):
    """Count a download request; count a 'start' only for offset-0 requests."""
    fileModel = ModelImporter.model('file')
    query = {'_id': event.info['file']['_id']}

    if event.info['startByte'] == 0:
        # Only the first byte-range of a download counts as a start.
        fileModel.increment(query=query,
                            field='downloadStatistics.started', amount=1)

    fileModel.increment(query=query,
                        field='downloadStatistics.requested', amount=1)
def userSaved(event):
    """On first save of a brand-new user, enroll them in the default group."""
    if '_id' in event.info:
        # Only add to group at new user creation time.
        return
    cursor = ModelImporter().model('group').find(
        {'name': _groupName}, limit=1, fields=('_id',))
    if cursor.count(True) > 0:
        event.info['groups'] = [cursor.next()['_id']]
def _onSettingRemove(self, event):
    """Refresh cached HTML template vars when a core setting is removed."""
    key = event.info['key']
    settingModel = ModelImporter.model('setting')

    if key == SettingKey.CONTACT_EMAIL_ADDRESS:
        # Fall back to the setting's default value.
        self.updateHtmlVars({'contactEmail': settingModel.getDefault(
            SettingKey.CONTACT_EMAIL_ADDRESS)})
    elif key == SettingKey.BRAND_NAME:
        self.updateHtmlVars({'brandName': settingModel.getDefault(
            SettingKey.BRAND_NAME)})
    elif key == SettingKey.BANNER_COLOR:
        self.updateHtmlVars({'bannerColor': event.info['value']})
def onUserCreated(event):
    """Make newly-created users private; in demo mode, add them to UDA groups."""
    user = event.info

    # make all users private
    # TODO: make users visible to the "study creators" group
    user['public'] = False
    ModelImporter.model('user').save(user)

    demoMode = ModelImporter.model('setting').get(
        constants.PluginSettings.DEMO_MODE)
    if demoMode:
        addUserToAllUDAGroups(user)
def points(self, params):
    """
    Build a GeoJSON FeatureCollection of points from items matching a raw
    Mongo query, using whitelisted lat/lon keys and permission filtering.

    :param params: REST params; requires 'q' (JSON query), optionally
        'latitude'/'longitude' dotted key paths into the item document.
    :returns: the assembled geoJSON object.
    :raises RestException: on bad query JSON (400-class), non-whitelisted
        lat/lon keys (402), or geoJSON assembly failure (401).
    """
    self.requireParams(('q',), params)
    limit, offset, sort = self.getPagingParameters(params, 'name')

    # Dotted key paths into the item documents; default to item metadata.
    latitude = params.get('latitude', 'meta.latitude')
    longitude = params.get('longitude', 'meta.longitude')

    spec = {
        'type': 'point',
        'latitude': latitude,
        'longitude': longitude,
        'keys': ['meta', 'name', 'description', '_id'],
        'flatten': ['meta']
    }

    try:
        query = bson.json_util.loads(params['q'])
    except ValueError:  # pragma: no cover
        raise RestException('The query parameter must be a JSON object.')

    # Allow plugins to adjust the spec or query before it is enforced.
    events.trigger('geojson.points', info={
        'spec': spec,
        'query': query
    })

    # make sure the lat/lon are whitelisted keys to prevent private
    # data leaking
    if spec['latitude'].split('.')[0] not in spec['keys'] or \
            spec['longitude'].split('.')[0] not in spec['keys']:
        raise RestException('Invalid latitude/longitude key.', code=402)

    coll = features.FeatureCollection(points=spec)

    item = ModelImporter().model('item')

    # limit=0 here: paging is applied by the permission filter below.
    cursor = item.find(
        query, limit=0
    )

    cursor = item.filterResultsByPermission(
        cursor, user=self.getCurrentUser(),
        level=AccessType.READ, limit=limit, offset=offset
    )

    try:
        obj = coll(points=cursor)
    except features.GeoJSONException:
        raise RestException(
            'Could not assemble a geoJSON object from spec.',
            code=401
        )

    return obj
def userUpdate(event):
    """
    Called when the user document is being changed. If the email field
    changes, we wipe the cached gravatar URL so it will be recomputed on
    next request.
    """
    params = event.info['params']
    if 'email' not in params:
        return

    userModel = ModelImporter.model('user')
    user = userModel.load(event.info['id'], force=True)

    emailChanged = user['email'] != params['email']
    if emailChanged and user.get('gravatar_baseUrl'):
        user.pop('gravatar_baseUrl')
        userModel.save(user)
def computeBaseUrl(user):
    """
    Compute the base gravatar URL for a user and return it. For the moment,
    the current default image is cached in this URL. It is the caller's
    responsibility to save this value on the user document.
    """
    global _cachedDefaultImage
    if _cachedDefaultImage is None:
        # Look up (and memoize) the configured default image once.
        _cachedDefaultImage = ModelImporter.model('setting').get(
            PluginSettings.DEFAULT_IMAGE, default='identicon')

    emailHash = hashlib.md5(user['email'].encode('utf8')).hexdigest()
    return 'https://www.gravatar.com/avatar/%s?d=%s' % (
        emailHash, _cachedDefaultImage)
def _validateLogo(doc):
    """
    Setting validator for the logo: the value must be the id of an existing,
    publicly readable file. On success the value is replaced with the file's
    ObjectId.

    :param doc: the setting document; doc['value'] is the candidate file id.
    :raises ValidationException: if the id is invalid, the file does not
        exist, or the file is not publicly readable.
    """
    try:
        logoFile = ModelImporter.model('file').load(
            doc['value'], level=AccessType.READ, user=None, exc=True)
    except ValidationException as e:
        # Invalid ObjectId, or non-existent document
        # str(e) instead of e.message: .message does not exist in Python 3.
        raise ValidationException(str(e), 'value')
    except AccessException:
        # The binding was unused; the fixed message explains the cause.
        raise ValidationException('Logo must be publicly readable', 'value')

    # Store this field natively as an ObjectId
    doc['value'] = logoFile['_id']
def stream():
    """
    Generator yielding a zip archive of the requested resources: every file
    of every resource (optionally with metadata), streamed in chunks,
    followed by the zip footer.
    """
    # Renamed locals: the originals shadowed the builtins zip/id/file.
    zipGenerator = ziputil.ZipGenerator()
    for kind in resources:
        model = ModelImporter.model(kind)
        for resourceId in resources[kind]:
            doc = model.load(id=resourceId, user=user, level=AccessType.READ)
            for (path, fileEntry) in model.fileList(
                    doc=doc, user=user, includeMetadata=includeMetadata,
                    subpath=True):
                for data in zipGenerator.addFile(fileEntry, path):
                    yield data
    yield zipGenerator.footer()
def test_delete(self):
    """Deleting a simulation removes both the simulation and its folder."""
    sim = self._create_simulation(self._project1, self._another_user, 'sim')
    folderModel = ModelImporter.model('folder')

    # Assert that a folder has been created for this simulation
    self.assertIsNotNone(folderModel.load(sim['folderId'], force=True))

    # Now delete the simulation
    r = self.request('/simulations/%s' % str(sim['_id']), method='DELETE',
                     type='application/json', user=self._another_user)
    self.assertStatusOk(r)

    # Confirm the deletion
    self.assertIsNone(
        ModelImporter.model('simulation', 'hpccloud').load(sim['_id'],
                                                           force=True))

    # Confirm that the folder was also removed
    self.assertIsNone(folderModel.load(sim['folderId'], force=True))
def get_group_id(self):
    """Return the configured cumulus group's id, caching it after first use."""
    if self._group_id:
        return self._group_id

    cursor = ModelImporter.model('group').find({
        'name': cumulus.config.girder.group
    })
    # Exactly one matching group is required.
    if cursor.count() != 1:
        raise Exception('Unable to load group "%s"'
                        % cumulus.config.girder.group)

    self._group_id = cursor.next()['_id']
    return self._group_id
def validate(self, doc):
    """Validate a calculation document against the schema and resolve its
    molecule reference, if any."""
    try:
        validate(doc, Calculation.schema)
    except ValidationError as ex:
        raise ValidationException(ex.message)

    # If we have a moleculeId check it valid
    if 'moleculeId' in doc:
        molecule = ModelImporter.model('molecule', 'molecules').load(
            doc['moleculeId'], force=True)
        # Normalize to the stored ObjectId.
        doc['moleculeId'] = molecule['_id']

    return doc
def getCeleryApp():
    """
    Lazy loader for the celery app. Reloads anytime the settings are updated.
    """
    global _celeryapp
    if _celeryapp is not None:
        return _celeryapp

    settings = ModelImporter.model('setting')
    # Fall back to local mongodb when the settings are unset.
    backend = (settings.get(PluginSettings.BACKEND) or
               'mongodb://localhost/romanesco')
    broker = (settings.get(PluginSettings.BROKER) or
              'mongodb://localhost/romanesco')
    _celeryapp = celery.Celery('romanesco', backend=backend, broker=broker)
    return _celeryapp
def getCreateSessionsFolder():
    """Return (creating if necessary) the user's iqr_sessions folder."""
    user = getCurrentUser()
    folderModel = ModelImporter.model('folder')
    # @todo Assumes a Private folder will always exist/be accessible
    privateFolders = list(folderModel.childFolders(
        parentType='user', parent=user, user=user,
        filters={'name': 'Private'}))
    return folderModel.createFolder(privateFolders[0], 'iqr_sessions',
                                    reuseExisting=True)
def detach_complete(self, volume, params):
    """Finalize a volume detach: unlink from its cluster and mark available."""
    # First remove from cluster
    user = getCurrentUser()
    clusterModel = ModelImporter.model('cluster', 'cumulus')
    cluster = clusterModel.load(volume['clusterId'], user=user,
                                level=AccessType.ADMIN)
    cluster.setdefault('volumes', []).remove(volume['_id'])

    del volume['clusterId']
    # Drop transient attach-time attributes if present.
    for attr in ('path', 'msg'):
        volume.pop(attr, None)

    volume['status'] = VolumeState.AVAILABLE

    clusterModel.save(cluster)
    self._model.save(volume)
    send_status_notification('volume', volume)
def removeThumbnailLink(event):
    """
    When a thumbnail file is deleted, we remove the reference to it from the
    resource to which it is attached.
    """
    doc = event.info
    if not doc.get('isThumbnail'):
        return

    model = ModelImporter.model(doc['attachedToType'])
    resource = model.load(doc['attachedToId'], force=True)

    thumbnails = resource.get('_thumbnails', ())
    if doc['_id'] in thumbnails:
        thumbnails.remove(doc['_id'])
        # Skip validation; only the internal thumbnail list changed.
        model.save(resource, validate=False)
def attachThumbnail(file, thumbnail, attachToType, attachToId, width, height):
    """
    Add the required information to the thumbnail file and the resource it
    is being attached to, and save the documents.

    :param file: The file from which the thumbnail was derived.
    :type file: dict
    :param thumbnail: The newly generated thumbnail file document.
    :type thumbnail: dict
    :param attachToType: The type to which the thumbnail is being attached.
    :type attachToType: str
    :param attachToId: The ID of the document to attach the thumbnail to.
    :type attachToId: str or ObjectId
    :param width: Thumbnail width.
    :type width: int
    :param height: Thumbnail height.
    :type height: int
    :returns: The updated thumbnail file document.
    """
    targetModel = ModelImporter.model(attachToType)
    target = targetModel.load(attachToId, force=True)

    # Record the thumbnail on the owning resource.
    target.setdefault('_thumbnails', []).append(thumbnail['_id'])
    targetModel.save(target)

    # Record the attachment and provenance on the thumbnail itself.
    thumbnail.update({
        'attachedToType': attachToType,
        'attachedToId': target['_id'],
        'isThumbnail': True,
        'derivedFrom': {
            'type': 'file',
            'id': file['_id'],
            'process': 'thumbnail',
            'width': width,
            'height': height
        }
    })
    return ModelImporter.model('file').save(thumbnail)
def _onUpload(event):
    """
    Look at uploads containing references related to this plugin. If found,
    they are used to link item task outputs back to a job document.
    """
    try:
        ref = json.loads(event.info.get('reference'))
    except (ValueError, TypeError):
        # Reference absent or not valid JSON: not an item_tasks upload.
        return

    if isinstance(ref, dict) and ref.get('type') == 'item_tasks.output':
        jobModel = ModelImporter.model('job', 'jobs')
        tokenModel = ModelImporter.model('token')
        token = event.info['currentToken']

        if tokenModel.hasScope(token, 'item_tasks.job_write:%s' % ref['jobId']):
            # A job-scoped write token allows loading without a user check.
            job = jobModel.load(ref['jobId'], force=True, exc=True)
        else:
            # Otherwise the uploading user must have write access to the job.
            job = jobModel.load(ref['jobId'], level=AccessType.WRITE,
                                user=event.info['currentUser'], exc=True)

        file = event.info['file']
        item = ModelImporter.model('item').load(file['itemId'], force=True)

        # Add link to job model to the output item
        jobModel.updateJob(
            job, otherFields={
                'itemTaskBindings.outputs.%s.itemId' % ref['id']: item['_id']
            })

        # Also a link in the item to the job that created it
        item['createdByJob'] = job['_id']
        ModelImporter.model('item').save(item)
def test_access_group(self):
    """
    Sharing a project with a group should grant its members write access to
    the project folder and the ability to create simulations.
    """
    project1 = self._create_project('project1', self._user)

    my_group = ModelImporter.model('group').createGroup(
        'myGroup', self._user)
    ModelImporter.model('group').addUser(my_group, self._another_user)

    # Now share the project with write access to the group
    # (level 1 is write access)
    body = {'groups': [str(my_group['_id'])], 'level': 1}
    json_body = json.dumps(body)

    r = self.request('/projects/%s/access' % str(project1['_id']),
                     method='PATCH', type='application/json', body=json_body,
                     user=self._user)
    self.assertStatus(r, 200)

    # # Check that _another_user has write access to the project folder
    # (verified indirectly: the group appears in the folder's ACL)
    r = self.request('/folder/%s/access' % str(project1['folderId']),
                     method='GET', type='application/json',
                     user=self._user)
    self.assertStatus(r, 200)
    self.assertTrue(
        str(my_group['_id']) in
        [str(item['id']) for item in r.json['groups']])

    # # check that _another_user can create simulations
    json_body = json.dumps(self.simulationBody)

    r = self.request('/projects/%s/simulations' % str(project1['_id']),
                     method='POST', type='application/json', body=json_body,
                     user=self._another_user)
    self.assertStatus(r, 201)
    self.assertEqual(r.json['name'], self.simulationBody['name'])
def loadModel(resource, model, plugin='_core', id=None, allowCookie=False,
              level=None):
    """
    Load a model based on id using the current cherrypy token parameter for
    authentication, caching the results. This must be called in a cherrypy
    context.

    :param resource: the resource class instance calling the function. Used
        for access to the current user and model importer.
    :param model: the model name, e.g., 'item'.
    :param plugin: the plugin name when loading a plugin model.
    :param id: a string id of the model to load.
    :param allowCookie: true if the cookie authentication method is allowed.
    :param level: access level desired.
    :returns: the loaded model.
    """
    key = tokenStr = None
    # Resolve the token string from param, header, or (optionally) cookie,
    # in that priority order.
    if 'token' in cherrypy.request.params:  # Token as a parameter
        tokenStr = cherrypy.request.params.get('token')
    elif 'Girder-Token' in cherrypy.request.headers:
        tokenStr = cherrypy.request.headers['Girder-Token']
    elif 'girderToken' in cherrypy.request.cookie and allowCookie:
        tokenStr = cherrypy.request.cookie['girderToken'].value
    # Cache key includes the token so different identities never share
    # a cached document.
    key = (model, tokenStr, id)
    cacheEntry = LoadModelCache.get(key)
    if cacheEntry and cacheEntry['expiry'] > time.time():
        entry = cacheEntry['result']
        cacheEntry['hits'] += 1
    else:
        # we have to get the token separately from the user if we are using
        # cookies.
        if allowCookie:
            getCurrentToken(allowCookie)
            cherrypy.request.girderAllowCookie = True
        entry = ModelImporter.model(model, plugin).load(
            id=id, level=level, user=resource.getCurrentUser())
        # If the cache becomes too large, just dump it -- this is simpler
        # than dropping the oldest values and avoids having to add locking.
        if len(LoadModelCache) > LoadModelCacheMaxEntries:
            LoadModelCache.clear()
        LoadModelCache[key] = {
            'id': id,
            'model': model,
            'tokenId': tokenStr,
            'expiry': time.time() + LoadModelCacheExpiryDuration,
            'result': entry,
            'hits': 0
        }
    return entry
def test_celery_task_chained_bad_token_fails(self, params):
    """
    Chain two celery tasks where the second carries an empty girder token;
    return both job documents so the caller can assert the failure mode.
    """
    jobModel = ModelImporter.model('job', 'jobs')
    # The empty girder_client_token should make the second task fail.
    result = (fibonacci.s(5) | request_private_path.si(
        'admin', girder_client_token='')).delay()

    # Bypass the raised exception from the second job
    try:
        result.wait(timeout=10)
    except Exception:
        pass

    user = self.getCurrentUser()
    job_1 = result.job
    # The first job in the chain is recorded as the parent of the second.
    job_2 = jobModel.load(job_1['parentId'], user=user)
    return [job_1, job_2]
def _getResourceModel(self, kind, funcName=None):
    """
    Load and return a model with a specific function or throw an exception.

    :param kind: the name of the model to load
    :param funcName: a function name to ensure that each model contains.
    :returns: the loaded model.
    :raises RestException: if the model cannot be loaded or lacks funcName.
    """
    try:
        model = ModelImporter.model(kind)
    except Exception:
        # Any load failure is reported uniformly below.
        model = None

    missingFunc = funcName and not hasattr(model, funcName)
    if not model or missingFunc:
        raise RestException('Invalid resources format.')
    return model
def attach_complete(self, volume, cluster, params):
    """
    Finalize attaching a volume to a cluster: on success record the mount
    path and link the volume to the cluster; otherwise mark the volume as
    errored.

    :param volume: the volume document being attached.
    :param cluster: the cluster document the volume attaches to.
    :param params: REST params; may carry 'path' (also accepted in the
        request body JSON).
    """
    user = getCurrentUser()
    path = params.get('path', None)

    # Is path being passed in as apart of the body json?
    if path is None:
        path = getBodyJson().get('path', None)

    if path is not None:
        # Link the volume to the cluster, deduplicating the volume list.
        cluster.setdefault('volumes', [])
        cluster['volumes'].append(volume['_id'])
        cluster['volumes'] = list(set(cluster['volumes']))

        volume['status'] = VolumeState.INUSE
        volume['path'] = path

        # TODO: removing msg should be refactored into
        # a general purpose 'update_status' function
        # on the volume model. This way msg only referes
        # to the current status.
        try:
            del volume['msg']
        except KeyError:
            pass

        # Add cluster id to volume
        volume['clusterId'] = cluster['_id']

        ModelImporter.model('cluster', 'cumulus').save(cluster)
        self._model.update_volume(user, volume)
    else:
        # No path was reported: the attach did not complete successfully.
        volume['status'] = VolumeState.ERROR
        volume['msg'] = 'Volume path was not communicated on complete'
        self._model.update_volume(user, volume)
def getChallengeUserEmails(challenge, accessLevel):
    """
    Get a list of the email addresses for users with at least the given
    access level on a challenge.

    :param challenge: the challenge document
    :type challenge: dict
    :param accessLevel: the minimum access level
    :type accessLevel: girder.AccessType
    """
    acl = ModelImporter.model(
        'challenge', 'covalic').getFullAccessList(challenge)
    return [user['email'] for user in _getUsers(acl, accessLevel)]
def load(self, info):
    """
    Load the large_image annotation plugin: register the model and REST
    resource, run the one-time database migration, advertise the
    copyAnnotations parameter on the copy endpoints, and bind the
    annotation-processing event handler.
    """
    getPlugin('large_image').load(info)
    ModelImporter.registerModel('annotation', Annotation, 'large_image')
    apiRoot = info['apiRoot']
    apiRoot.annotation = AnnotationResource()
    # Instantiating the model initializes its singleton; migrate the
    # database as a one-time action.
    Annotation()._migrateDatabase()
    # Add the copyAnnotations option to POST resource/copy,
    # POST item/{id}/copy and POST folder/{id}/copy.
    copyEndpoints = (
        (apiRoot.resource.copyResources,
         'Copy annotations when copying resources (default true)'),
        (apiRoot.item.copyItem,
         'Copy annotations when copying item (default true)'),
        (apiRoot.folder.copyFolder,
         'Copy annotations when copying folder (default true)'),
    )
    for endpoint, description in copyEndpoints:
        endpoint.description.param(
            'copyAnnotations', description,
            required=False, dataType='boolean')
    events.bind(
        'data.process', 'large_image_annotation.annotations',
        handlers.process_annotations)
def _renderHTML(self):
    """
    Populate the template variables for enabled plugins and their built
    static assets, then delegate to the base class renderer.
    """
    builtDir = os.path.join(
        STATIC_ROOT_DIR, 'clients', 'web', 'static', 'built', 'plugins')
    enabledPlugins = ModelImporter.model('setting').get(
        SettingKey.PLUGINS_ENABLED, ())
    self.vars['plugins'] = enabledPlugins
    # Only advertise assets that were actually built for each plugin.
    self.vars['pluginCss'] = [
        name for name in enabledPlugins
        if os.path.exists(os.path.join(builtDir, name, 'plugin.min.css'))]
    self.vars['pluginJs'] = [
        name for name in enabledPlugins
        if os.path.exists(os.path.join(builtDir, name, 'plugin.min.js'))]
    return super(Webroot, self)._renderHTML()
def _descriptorSetFromSessionId(sid):
    """
    Return the PostgresDescriptorSet object for a given session id.

    The table name is formed by postfixing the session's data folder id
    onto the 'descriptor_set_' prefix.

    :param sid: ID of the session
    :returns: Descriptor set representing the data folder related to the
        sid, or None if no session exists
    :rtype: PostgresDescriptorSet|None
    """
    sessionItem = ModelImporter.model('item').findOne(
        {'_id': ObjectId(sid)})
    if not sessionItem:
        return None
    settings = ModelImporter.model('setting')
    tableName = 'descriptor_set_%s' % sessionItem['meta']['data_folder_id']
    return PostgresDescriptorSet(
        tableName,
        db_name=settings.get('smqtk_girder.db_name'),
        db_host=settings.get('smqtk_girder.db_host'),
        db_user=settings.get('smqtk_girder.db_user'),
        db_pass=settings.get('smqtk_girder.db_pass'))
def get_hpccloud_folder(user):
    """
    Get the user's HPCCloud folder, creating it if it doesn't exist.

    The folder lives under the user's 'Private' folder, which must
    already exist.

    :param user: The user to get the folder for.
    """
    folder_model = ModelImporter.model('folder')

    private_folder = next(folder_model.childFolders(
        parentType='user', user=user, parent=user,
        filters={'name': 'Private'}, limit=1), None)
    if private_folder is None:
        raise Exception('Unable to find users private folder')

    # Reuse an existing HPCCloud folder if one is already present.
    hpccloud_folder = next(folder_model.childFolders(
        parentType='folder', user=user, parent=private_folder,
        filters={'name': 'HPCCloud'}, limit=1), None)
    if hpccloud_folder:
        return hpccloud_folder

    return folder_model.createFolder(
        private_folder, 'HPCCloud', description='Folder for HPCCloud data',
        parentType='folder', creator=user)
def getResourceItems(self, id, params):
    """
    List all items that are descendants of a given resource, which is
    loaded with READ access for the current user.

    :param id: the id of the parent resource
    :param params: request parameters; must contain 'type' and may
        contain paging parameters.
    :returns: list of child item documents
    :raises RestException: if the resource does not exist or is not
        readable.
    """
    user = self.getCurrentUser()
    modelType = params['type']
    parentDoc = ModelImporter.model(modelType).load(
        id=id, user=user, level=AccessType.READ)
    if not parentDoc:
        raise RestException('Resource not found.')
    limit, offset, sort = self.getPagingParameters(params, '_id')
    children = allChildItems(
        parentType=modelType, parent=parentDoc, user=user,
        limit=limit, offset=offset, sort=sort)
    return list(children)
def process_file(f):
    """
    Extract DICOM metadata from a file document and store it on the
    document under the 'dicom' key.

    Files larger than MAX_FILE_SIZE are skipped, and any download or
    parse failure is swallowed (best effort), leaving the metadata empty.

    :param f: the file document
    :returns: the saved file document
    """
    fileModel = ModelImporter.model('file')
    data = {}
    try:
        if f['size'] <= MAX_FILE_SIZE:
            # Download the file and try to parse it as DICOM.
            stream = fileModel.download(f, headers=False)
            buf = six.BytesIO(b''.join(stream()))
            ds = dicom.read_file(buf, stop_before_pixels=True)
            # Human-readable keys.
            for name in ds.dir():
                parsed = coerce(ds.data_element(name).value)
                if parsed is not None:
                    data[name] = parsed
            # Hex keys.
            for tag, element in ds.items():
                hexKey = 'x%04x%04x' % (tag.group, tag.element)
                parsed = coerce(element.value)
                if parsed is not None:
                    data[hexKey] = parsed
    except Exception:
        pass
    # Store the (possibly empty) dicom data on the file document.
    f['dicom'] = data
    return fileModel.save(f)
def upload_molecule(mol):
    """
    Build and upload the Gainesville and ChemInf RDF graphs for a
    molecule to the Jena triple store.

    :param mol: the molecule document
    """
    uri_base = ModelImporter.model('setting').get(
        PluginSettings.SEMANTIC_URI_BASE)
    # Fall back to a local default only when the setting is unset.
    if uri_base is None:
        uri_base = 'http://localhost:8888'
    uri_base = uri_base.rstrip('/')

    graph_builders = (('gainesville', gainesville), ('cheminf', cheminf))
    for suffix, builder in graph_builders:
        graph = builder.create_molecule_graph(uri_base, mol)
        jena.upload_rdf('%s_%s' % (mol['_id'], suffix), graph)
def __init__(self):
    """Register the REST routes and model for the cumulus job resource."""
    super(Job, self).__init__()
    self.resourceName = 'jobs'
    # Route table: (HTTP method, path segments, handler).
    routes = (
        ('POST', (), self.create),
        ('PATCH', (':id',), self.update),
        ('GET', (':id', 'status'), self.status),
        ('PUT', (':id', 'terminate'), self.terminate),
        ('POST', (':id', 'log'), self.append_to_log),
        ('GET', (':id', 'log'), self.log),
        ('GET', (':id', 'output'), self.output),
        ('DELETE', (':id',), self.delete),
        ('GET', (':id',), self.get),
        ('GET', (), self.find),
    )
    for method, path, handler in routes:
        self.route(method, path, handler)
    self._model = ModelImporter.model('job', 'cumulus')
def _registerLdapUser(attrs, email, server):
    """
    Create a girder user from an LDAP entry.

    The first/last name come from givenName/sn, falling back to splitting
    cn.  The login is derived from the configured search field (with any
    mail domain stripped); if that login is taken, logins are generated
    from the user's name with a numeric suffix.

    :param attrs: LDAP attribute dict for the entry
    :param email: the user's email address
    :param server: LDAP server config dict (provides 'searchField')
    :raises Exception: if no name is found or no free login can be made.
    """
    def create(login):
        # All creation attempts share everything but the login.
        return ModelImporter.model('user').createUser(
            login, password=None, firstName=first, lastName=last,
            email=email)

    first = last = None
    cn = attrs.get('cn')
    if attrs.get('givenName'):
        first = attrs['givenName'][0].decode('utf8')
    elif cn:
        first = cn[0].decode('utf8').split()[0]
    if attrs.get('sn'):
        last = attrs['sn'][0].decode('utf8')
    elif cn:
        last = cn[0].decode('utf8').split()[-1]
    if not (first and last):
        raise Exception('No LDAP name entry found for %s.' % email)

    # Try using the search field value as the login. If it's an email
    # address, use the part before the @.
    candidate = attrs[server['searchField']][0].decode('utf8').split('@')[0]
    try:
        return create(candidate)
    except ValidationException as e:
        if e.field != 'login':
            raise

    # Fall back to deriving the login from the user's name.
    for attempt in six.moves.range(_MAX_NAME_ATTEMPTS):
        candidate = first + last + (str(attempt) if attempt else '')
        try:
            return create(candidate)
        except ValidationException as e:
            if e.field != 'login':
                raise
    raise Exception('Failed to generate login name for LDAP user %s.' % email)
def _onJobSave(event):
    """
    If a job is finalized (i.e. success or failure status) and contains a
    temp token, remove the token and strip the field from the job
    document.

    :param event: the save event; ``event.info`` contains ``job`` (the
        job document) and ``params`` (the update parameters).
    """
    params = event.info['params']
    job = event.info['job']
    # Use .get(): not every job save includes a status update, and
    # subscripting would raise KeyError for those events.
    if 'itemTaskTempToken' in job and params.get('status') in (
            JobStatus.ERROR, JobStatus.SUCCESS):
        tokenModel = ModelImporter.model('token')
        token = tokenModel.load(
            job['itemTaskTempToken'], objectId=False, force=True)
        if token:
            tokenModel.remove(token)
        # Remove the itemTaskTempToken field from the stored job document
        # and from the in-memory copy.
        ModelImporter.model('job', 'jobs').update(
            {'_id': job['_id']},
            update={'$unset': {'itemTaskTempToken': True}},
            multi=False)
        del job['itemTaskTempToken']
def getAssetsFolder(challenge, user, testAccess=True):
    """
    Get the Assets folder for a given challenge, creating one if it does
    not already exist.  Ensures the specified user has read access on the
    folder if it already exists.

    :param challenge: The challenge.
    :type challenge: dict
    :param user: The user requesting the assets folder info.
    :type user: dict
    :param testAccess: Whether to verify that the user has read access to
        the folder.
    :type testAccess: bool
    :returns: The assets folder.
    """
    # Fall back to the challenge creator when no requesting user given.
    if user is None and challenge['creatorId']:
        user = ModelImporter.model('user').load(
            challenge['creatorId'], force=True)
    collection = ModelImporter.model('collection').load(
        challenge['collectionId'], force=True)
    folderModel = ModelImporter.model('folder')
    assetsFolder = folderModel.createFolder(
        parentType='collection', parent=collection, name='Assets',
        creator=user, reuseExisting=True,
        description='Assets related to this challenge.')
    if testAccess:
        folderModel.requireAccess(
            assetsFolder, user=user, level=AccessType.READ)
    return assetsFolder
def _setCommonCORSHeaders():
    """
    Set CORS headers that should be passed back with either a preflight
    OPTIONS or a simple CORS request.  These headers are set anytime an
    Origin header is present, since browsers simply ignore them when the
    request is not cross-origin.
    """
    if not cherrypy.request.headers.get('origin'):
        # No origin header means this is not a cross-origin request.
        return
    allowedOrigins = ModelImporter.model('setting').get(
        SettingKey.CORS_ALLOW_ORIGIN)
    if not allowedOrigins:
        return
    responseHeaders = cherrypy.response.headers
    responseHeaders['Access-Control-Allow-Origin'] = allowedOrigins
    responseHeaders['Access-Control-Allow-Credentials'] = 'true'