def find(self, project, composite, sample, runId, fitted=False, offset=0,
         limit=None, sort=None):
    """Return the time series documents for *sample*, filtered by READ access.

    Matches time series whose ``runId`` is either None or the given run.
    The ``fitted`` parameter is retained for backward compatibility: the
    model-fitting branch in the original was guarded by ``fitted and
    False`` and therefore unreachable; that dead code has been removed.
    """
    query = {
        'sampleId': sample['_id'],
        'runId': {
            '$in': [None, ObjectId(runId)]
        }
    }
    cursor = TimeSeriesModel().find(query=query, offset=offset, sort=sort,
                                    user=getCurrentUser())
    # NOTE(review): offset is applied both in find() and in
    # filterResultsByPermission(), which may skip documents twice —
    # confirm against the model implementation.
    timeseries = list(TimeSeriesModel().filterResultsByPermission(
        cursor=cursor, user=getCurrentUser(), level=AccessType.READ,
        limit=limit, offset=offset))

    return timeseries
def validate(self):
    """Validate the volume document held in ``self.volume``.

    Checks that the referenced AWS profile exists, the file system type is
    supported, the size is an integer, and the (name, userId) pair is
    unique. Raises ValidationException on any failure; returns the volume.
    """
    profile_id = parse('profileId').find(self.volume)[0].value
    profile = self.model('aws', 'cumulus').load(profile_id,
                                                user=getCurrentUser())
    if not profile:
        raise ValidationException('Invalid profile id')

    valid_fs = ['ext2', 'ext3', 'ext4']
    if 'fs' in self.volume and self.volume['fs'] not in valid_fs:
        raise ValidationException('Unsupported file system type', 'fs')

    try:
        int(self.volume['size'])
    except (TypeError, ValueError):
        # Fixed the garbled message ('size number in an integer') and also
        # catch TypeError so None/non-numeric values yield the same
        # validation error rather than an unhandled exception.
        raise ValidationException('size must be an integer', 'size')

    # Name should be unique per user.
    user = getCurrentUser()
    query = {
        'name': self.volume['name'],
        'userId': user['_id']
    }
    if '_id' in self.volume:
        # Exclude ourselves when validating an update.
        query['_id'] = {'$ne': self.volume['_id']}

    volume = self.model('volume', 'cumulus').findOne(query)
    if volume:
        raise ValidationException('A volume with that name already exists',
                                  'name')

    return self.volume
def nearestNeighbors(self, item, limit, params):
    """Return the items nearest to *item* in descriptor space, each
    annotated with its SMQTK distance under 'smqtk_distance'."""
    limit = int(limit)
    desc_set = self.descriptorSetFromItem(item)
    nn_index = self.nearestNeighborIndex(item, getCurrentUser(), desc_set)

    if nn_index is None:
        raise RestException('Nearest neighbor index could not be found.')

    try:
        descriptor = desc_set.get_descriptor(item['meta']['smqtk_uuid'])
    except KeyError:
        raise RestException(
            'Unable to retrieve image descriptor for querying object.')

    neighbors, dists = nn_index.nn(descriptor, limit)
    neighbor_uuids = [n.uuid() for n in neighbors]
    distance_by_uuid = {u: d for u, d in zip(neighbor_uuids, dists)}

    folderModel = ModelImporter.model('folder')
    smqtkFolder = folderModel.load(item['folderId'], user=getCurrentUser())
    matched = list(folderModel.childItems(
        smqtkFolder,
        filters={'meta.smqtk_uuid': {'$in': neighbor_uuids}}))

    for neighbor_item in matched:
        neighbor_item['smqtk_distance'] = \
            distance_by_uuid[neighbor_item['meta']['smqtk_uuid']]
    return matched
def nearestNeighbors(self, item, limit, params):
    """Look up the nearest neighbors of *item* and return the matching
    items, each carrying its distance in 'smqtk_distance'."""
    limit = int(limit)
    desc_index = self.descriptorIndexFromItem(item)
    nn_index = self.nearestNeighborIndex(item, getCurrentUser(), desc_index)

    if nn_index is None:
        raise RestException('Nearest neighbor index could not be found.')

    try:
        descriptor = desc_index.get_descriptor(item['meta']['smqtk_uuid'])
    except KeyError:
        raise RestException('Unable to retrieve image descriptor for querying object.')

    neighbors, dists = nn_index.nn(descriptor, limit)
    neighbor_uuids = [n.uuid() for n in neighbors]
    distance_by_uuid = {u: d for u, d in zip(neighbor_uuids, dists)}

    folderModel = ModelImporter.model('folder')
    smqtkFolder = folderModel.load(item['folderId'], user=getCurrentUser())
    matched = list(folderModel.childItems(
        smqtkFolder,
        filters={'meta.smqtk_uuid': {'$in': neighbor_uuids}}))

    for neighbor_item in matched:
        neighbor_item['smqtk_distance'] = \
            distance_by_uuid[neighbor_item['meta']['smqtk_uuid']]
    return matched
def find_samples(project, composite, runId=None, platemapId=None,
                 elements=None, offset=0, limit=None, sort=None):
    """Aggregate the samples on a platemap's plate, joined with their FOM
    records ('edp.fom'), dropping samples that have no FOMs; results are
    filtered by READ permission."""
    platemap = PlateMapModel().load(ObjectId(platemapId),
                                    level=AccessType.READ,
                                    user=getCurrentUser())

    # Match samples on the plate, optionally restricted by element subset.
    sample_conditions = [{'$eq': ['$plateId', platemap['plateId']]}]
    if elements is not None:
        sample_conditions.append(
            {'$setIsSubset': ['$composition.elements', elements]})
    match_samples = {'$match': {'$expr': {'$and': sample_conditions}}}

    # Join each sample with its FOM documents, optionally scoped to a run.
    fom_conditions = [{'$eq': ['$sampleId', '$$id']}]
    if runId is not None:
        fom_conditions.append({'$eq': ['$runId', ObjectId(runId)]})
    lookup_fom = {
        '$lookup': {
            'from': 'edp.fom',
            'let': {'id': '$_id'},
            'pipeline': [
                {'$match': {'$expr': {'$and': fom_conditions}}},
                {'$project': {'sampleId': 0}}
            ],
            'as': 'fom'
        }
    }
    exclude_empty = {'$match': {'fom': {'$ne': []}}}

    cursor = SampleModel().collection.aggregate(
        [match_samples, lookup_fom, exclude_empty])
    return list(SampleModel().filterResultsByPermission(
        cursor=cursor, user=getCurrentUser(), level=AccessType.READ,
        limit=limit, offset=offset))
def search(self, params):
    """Search molecules by query string, formula, or cactus identifier.

    Exactly one of the 'q', 'formula' or 'cactus' parameters is expected;
    404 from cactus yields an empty list. NOTE(review): if 'formula' or
    'cactus' is present but an empty string, all branches fall through and
    None is returned implicitly — confirm intended.
    """
    limit, offset, sort = parse_pagination_params(params)

    query_string = params.get('q')
    formula = params.get('formula')
    cactus = params.get('cactus')
    if query_string is None and formula is None and cactus is None:
        raise RestException(
            'Either \'q\', \'formula\' or \'cactus\' is required.')

    if query_string is not None:
        try:
            mongo_query = query.to_mongo_query(query_string)
        except query.InvalidQuery:
            raise RestException('Invalid query', 400)

        fields = ['inchikey', 'smiles', 'properties', 'name']
        cursor = MoleculeModel().find(query=mongo_query, fields=fields,
                                      limit=limit, offset=offset, sort=sort)
        mols = [x for x in cursor]
        num_matches = cursor.collection.count_documents(mongo_query)

        return search_results_dict(mols, num_matches, limit, offset, sort)
    elif formula:
        # Search using formula
        return MoleculeModel().findmol(params)
    elif cactus:
        if getCurrentUser() is None:
            raise RestException('Must be logged in to search with cactus.')

        # Disable cert verification for now
        # TODO Ensure we have the right root certs so this just works.
        # SECURITY NOTE(review): verify=False skips TLS verification on an
        # external request — fix the root cert chain and remove this.
        r = requests.get(
            'https://cactus.nci.nih.gov/chemical/structure/%s/file?format=sdf'
            % cactus, verify=False)
        if r.status_code == 404:
            return []
        else:
            r.raise_for_status()

        sdf_data = r.content.decode('utf8')
        provenance = 'cactus: ' + cactus
        mol = create_molecule(sdf_data, 'sdf', getCurrentUser(), True,
                              provenance=provenance)

        return search_results_dict([mol], 1, limit, offset, sort)
def create_calc(self, params):
    """Create a calculation from the JSON request body.

    The body must contain either 'cjson', or 'fileId' plus 'format' (the
    file is converted to cjson). If no moleculeId is given, a molecule is
    derived from the cjson. Responds 201 with a Location header.
    """
    body = getBodyJson()
    if 'cjson' not in body and ('fileId' not in body or 'format' not in body):
        raise RestException('Either cjson or fileId is required.')

    user = getCurrentUser()

    cjson = body.get('cjson')
    props = body.get('properties', {})
    molecule_id = body.get('moleculeId', None)
    geometry_id = body.get('geometryId', None)
    public = body.get('public', True)
    notebooks = body.get('notebooks', [])
    image = body.get('image')
    # Prefer nested 'input.parameters'; fall back to the legacy
    # top-level 'inputParameters' key.
    input_parameters = body.get('input', {}).get('parameters')
    if input_parameters is None:
        input_parameters = body.get('inputParameters', {})

    file_id = None
    file_format = body.get('format', 'cjson')
    if 'fileId' in body:
        file = File().load(body['fileId'], user=getCurrentUser())
        file_id = file['_id']
        cjson = self._file_to_cjson(file, file_format)

    if molecule_id is None:
        # No molecule supplied: create one from the cjson itself.
        mol = create_molecule(json.dumps(cjson), 'cjson', user, public,
                              parameters=input_parameters)
        molecule_id = mol['_id']

    calc = CalculationModel().create_cjson(
        user, cjson, props, molecule_id, geometry_id=geometry_id,
        image=image, input_parameters=input_parameters, file_id=file_id,
        notebooks=notebooks, public=public)

    cherrypy.response.status = 201
    cherrypy.response.headers['Location'] \
        = '/calculations/%s' % (str(calc['_id']))

    return CalculationModel().filter(calc, user)
def ingest_calc(self, calculation, body, detectBonds=None):
    """Attach converted results from an uploaded file to *calculation*.

    Converts the referenced file to cjson, clears the pending flag,
    optionally auto-detects bonds, records image/code/scratch folder
    metadata, and — for 'optimize' tasks — creates an optimized geometry.
    Returns the saved calculation.
    """
    self.requireParams(['fileId', 'format'], body)

    file = File().load(body['fileId'], user=getCurrentUser())
    cjson = self._file_to_cjson(file, body['format'])

    calc_props = calculation['properties']
    # The calculation is no longer pending
    if 'pending' in calc_props:
        del calc_props['pending']

    # Add bonds if they were not there already
    if detectBonds is None:
        detectBonds = False

    bonds = cjson.get('bonds')
    if bonds is None and detectBonds:
        new_cjson = openbabel.autodetect_bonds(cjson)
        if new_cjson.get('bonds') is not None:
            cjson['bonds'] = new_cjson['bonds']

    calculation['properties'] = calc_props
    calculation['cjson'] = cjson
    calculation['fileId'] = file['_id']

    image = body.get('image')
    if image is not None:
        calculation['image'] = image

    code = body.get('code')
    if code is not None:
        calculation['code'] = code

    scratch_folder_id = body.get('scratchFolderId')
    if scratch_folder_id is not None:
        calculation['scratchFolderId'] = scratch_folder_id

    # If this was a geometry optimization, create a geometry from it
    task = parse('input.parameters.task').find(calculation)
    if task and task[0].value == 'optimize':
        moleculeId = calculation.get('moleculeId')
        provenanceType = 'calculation'
        provenanceId = calculation.get('_id')
        # The cjson will be whitelisted
        geometry = GeometryModel().create(getCurrentUser(), moleculeId,
                                          cjson, provenanceType,
                                          provenanceId)
        calculation['optimizedGeometryId'] = geometry.get('_id')

    return CalculationModel().save(calculation)
def detach(self, volume, params):
    """Detach *volume* from its cluster via an async Ansible task.

    Validates the volume is INUSE with clusterId/path set and that the
    cluster's master instance is RUNNING, then schedules detach_volume and
    marks the volume DETACHING. Returns the filtered volume.
    """
    profile_id = parse('profileId').find(volume)[0].value
    profile, secret_key = _get_profile(profile_id)

    girder_callback_info = {
        'girder_api_url': cumulus.config.girder.baseUrl,
        'girder_token': get_task_token()['_id']
    }
    log_write_url = '%s/volumes/%s/log' % (cumulus.config.girder.baseUrl,
                                           volume['_id'])

    p = CloudProvider(dict(secretAccessKey=secret_key, **profile))
    aws_volume = p.get_volume(volume)

    if aws_volume is None or aws_volume['state'] != VolumeState.INUSE:
        raise RestException('This volume is not attached '
                            'to a cluster', 400)

    if 'clusterId' not in volume:
        raise RestException('clusterId is not set on this volume!', 400)

    try:
        volume['path']
    except KeyError:
        raise RestException('path is not set on this volume!', 400)

    cluster = ModelImporter.model('cluster', 'cumulus').load(
        volume['clusterId'], user=getCurrentUser(), level=AccessType.ADMIN)
    master = p.get_master_instance(cluster['_id'])

    if master['state'] != InstanceState.RUNNING:
        raise RestException('Master instance is not running!', 400)

    user = getCurrentUser()
    # Strip credentials before handing the cluster to the task.
    cluster = ModelImporter.model('cluster', 'cumulus').filter(
        cluster, user, passphrase=False)
    cumulus.ansible.tasks.volume.detach_volume\
        .delay(profile, cluster, master, self._model.filter(volume, user),
               secret_key, log_write_url, girder_callback_info)

    volume['status'] = VolumeState.DETACHING
    volume = self._model.update_volume(user, volume)

    return self._model.filter(volume, user)
def _loadModel(self, name, info, id, model):
    """Load a document by id according to the loading policy in *info*.

    'force' bypasses access checks; otherwise an access 'level' (with the
    current user) is applied when given. Raises RestException when the
    document is missing and 'exc' is set; enforces 'requiredFlags' if any.
    """
    if info['force']:
        document = model.load(id, force=True, **info['kwargs'])
    elif info['level'] is not None:
        document = model.load(id=id, level=info['level'],
                              user=getCurrentUser(), **info['kwargs'])
    else:
        document = model.load(id, **info['kwargs'])

    if document is None and info['exc']:
        raise RestException('Invalid %s id (%s).' % (model.name, str(id)))

    if info['requiredFlags']:
        model.requireAccessFlags(document, user=getCurrentUser(),
                                 flags=info['requiredFlags'])

    return document
def create(self, **kwargs):
    """Create and save a document built from the declared ``create_props``.

    Converts file/ObjectId/timestamp typed properties, applies public and
    user/group access, records the owner, then spawns thumbnail jobs for
    any image file properties. Raises ValidationException when a referenced
    file does not exist.
    """
    model = {}
    for prop in self.create_props:
        prop_value = kwargs.get(prop['name'], prop.get('default'))
        if prop_value is not None:
            if prop.get('type') == 'file':
                file = File().load(prop_value, user=getCurrentUser(),
                                   level=AccessType.READ)
                if file is None:
                    # Fixed grammar in the user-facing message
                    # ("doesn't exists" -> "doesn't exist").
                    raise ValidationException(
                        'File doesn\'t exist: %s' % prop_value)
                if not isinstance(prop_value, ObjectId):
                    prop_value = ObjectId(prop_value)
            elif prop.get('type') == ObjectId:
                if isinstance(prop_value, list):
                    prop_value = [ObjectId(x) for x in prop_value]
                else:
                    prop_value = ObjectId(prop_value)
            elif prop.get('type') == 'timestamp':
                prop_value = parseTimestamp(prop_value)
        model[prop['name']] = prop_value

    self.setPublic(model, public=kwargs.get('public', False))
    user = kwargs.get('user')
    self.setUserAccess(model, user=user, level=AccessType.ADMIN)
    model['owner'] = user['_id']
    if edp_group() is not None:
        self.setGroupAccess(model, edp_group(), AccessType.ADMIN)

    saved_model = self.save(model)

    # Now spawn thumbnail jobs if the model contains any image
    for prop in self.create_props:
        prop_value = kwargs.get(prop['name'], prop.get('default'))
        if prop_value is not None and prop.get('type') == 'file':
            file = File().load(prop_value, user=getCurrentUser(),
                               level=AccessType.READ)
            mime_type = file.get('mimeType', '')
            if mime_type is not None and mime_type.startswith('image/'):
                self._create_thumbnail(file, saved_model, prop['name'], user)

    return saved_model
def find_id(self, id, params):
    """Fetch a calculation by id, requiring READ access; 404 if absent."""
    calculation = self._model.load(id, level=AccessType.READ,
                                   user=getCurrentUser())
    if not calculation:
        raise RestException('Calculation not found.', code=404)
    return calculation
def provision(self):
    """Kick off an asynchronous Ansible provision of the cluster and
    return the (unfiltered) cluster document."""
    self.status = ClusterStatus.PROVISIONING

    api_url = getApiUrl()
    log_write_url = '%s/clusters/%s/log' % (api_url, self.cluster['_id'])
    girder_token = get_task_token()['_id']
    profile, secret_key = _get_profile(self.cluster['profileId'])

    playbook = get_property('config.provision.spec', self.cluster,
                            default=self.DEFAULT_PLAYBOOK)
    playbook_params = get_property('config.provision.params', self.cluster,
                                   default={})
    ssh_user = get_property('config.provision.ssh.user', self.cluster,
                            default='ubuntu')
    playbook_params['cluster_state'] = ClusterStatus.RUNNING
    playbook_params['ansible_ssh_user'] = ssh_user

    # Credentials are stripped before handing the cluster to the task.
    filtered_cluster = self._model.filter(self.cluster, getCurrentUser(),
                                          passphrase=False)
    cumulus.ansible.tasks.cluster.provision_cluster.delay(
        playbook, filtered_cluster, profile, secret_key, playbook_params,
        girder_token, log_write_url, ClusterStatus.RUNNING)

    return self.cluster
def append_to_log(self, id, params):
    """Append the JSON request body to a volume's log (ADMIN required)."""
    user = getCurrentUser()
    volume = self._model.load(id, user=user, level=AccessType.ADMIN)
    if not volume:
        raise RestException('Volume not found.', code=404)
    return self._model.append_to_log(user, id, getBodyJson())
def create(self, params):
    """Create an EBS volume from the JSON request body.

    Requires name/type/size/profileId; the availability zone defaults to
    the profile's. Responds 201 with a Location header.
    """
    body = getBodyJson()
    self.requireParams(['name', 'type', 'size', 'profileId'], body)

    if not VolumeType.is_valid_type(body['type']):
        raise RestException('Invalid volume type.', code=400)

    matches = parse('profileId').find(body)
    if not matches:
        raise RestException('A profile id must be provided', 400)

    profile, secret_key = _get_profile(matches[0].value)
    if not profile:
        raise RestException('Invalid profile', 400)

    zone = body['zone'] if 'zone' in body else profile['availabilityZone']

    volume = self._create_ebs(body, zone)

    cherrypy.response.status = 201
    cherrypy.response.headers['Location'] = '/volumes/%s' % volume['_id']

    return self._model.filter(volume, getCurrentUser())
def find_calc(self, moleculeId=None, geometryId=None, imageName=None,
              inputParameters=None, inputGeometryHash=None, name=None,
              inchi=None, inchikey=None, smiles=None, formula=None,
              creatorId=None, pending=None, limit=None, offset=None,
              sort=None):
    """Delegate a calculation search to CalculationModel.findcal, mapping
    the REST camelCase parameters onto the model's snake_case ones."""
    model = CalculationModel()
    current_user = getCurrentUser()
    return model.findcal(
        molecule_id=moleculeId,
        geometry_id=geometryId,
        image_name=imageName,
        input_parameters=inputParameters,
        input_geometry_hash=inputGeometryHash,
        name=name,
        inchi=inchi,
        inchikey=inchikey,
        smiles=smiles,
        formula=formula,
        creator_id=creatorId,
        pending=pending,
        limit=limit,
        offset=offset,
        sort=sort,
        user=current_user)
def get_all(self, params):
    """List documents visible (READ) to the current user, paginated."""
    user = getCurrentUser()
    limit, offset, _ = self.getPagingParameters(params)

    cursor = self._model.find(limit=limit, offset=offset)
    visible = self._model.filterResultsByPermission(
        cursor=cursor, user=user, level=AccessType.READ)
    return list(visible)
def ingest_calc(self, calculation, body, detectBonds=None):
    """Attach converted results from an uploaded file to *calculation*.

    Converts the referenced file to cjson, clears the pending flag,
    optionally auto-detects bonds, and records image/scratch folder
    metadata. Returns the saved calculation. (A sibling variant of this
    method additionally handles 'code' and optimized geometries.)
    """
    self.requireParams(['fileId', 'format'], body)

    file = File().load(body['fileId'], user=getCurrentUser())
    cjson = self._file_to_cjson(file, body['format'])

    calc_props = calculation['properties']
    # The calculation is no longer pending
    if 'pending' in calc_props:
        del calc_props['pending']

    # Add bonds if they were not there already
    if detectBonds is None:
        detectBonds = False

    bonds = cjson.get('bonds')
    if bonds is None and detectBonds:
        new_cjson = openbabel.autodetect_bonds(cjson)
        if new_cjson.get('bonds') is not None:
            cjson['bonds'] = new_cjson['bonds']

    calculation['properties'] = calc_props
    calculation['cjson'] = cjson
    calculation['fileId'] = file['_id']

    image = body.get('image')
    if image is not None:
        calculation['image'] = image

    scratch_folder_id = body.get('scratchFolderId')
    if scratch_folder_id is not None:
        calculation['scratchFolderId'] = scratch_folder_id

    return CalculationModel().save(calculation)
def delete(self, id, params):
    """Delete a calculation by id; 404 when not found.

    NOTE(review): the document is loaded with AccessType.READ before
    removal — confirm whether WRITE/ADMIN should be required to delete.
    """
    user = getCurrentUser()
    cal = self._model.load(id, level=AccessType.READ, user=user)
    if not cal:
        raise RestException('Calculation not found.', code=404)
    return self._model.remove(cal, user)
def validate(self, cluster):
    """Validate a cluster document.

    Requires non-empty name and type, normalizes the scheduler type
    (defaulting to SGE), rejects unsupported schedulers, enforces a unique
    (name, userId, type) triple on insert, and defers the rest to the
    cluster adapter's validate().
    """
    if not cluster['name']:
        raise ValidationException('Name must not be empty.', 'name')

    if not cluster['type']:
        raise ValidationException('Type must not be empty.', 'type')

    matches = parse('config.scheduler.type').find(cluster)
    scheduler_type = matches[0].value if matches else QueueType.SGE

    config = cluster.setdefault('config', {})
    config.setdefault('scheduler', {})['type'] = scheduler_type

    if not queue.is_valid_type(scheduler_type):
        raise ValidationException('Unsupported scheduler.', 'type')

    # When inserting, ensure no other cluster owned by this user shares
    # the same name and type.
    if '_id' not in cluster:
        duplicate_query = {
            'name': cluster['name'],
            'userId': getCurrentUser()['_id'],
            'type': cluster['type']
        }
        if self.findOne(duplicate_query):
            raise ValidationException('A cluster with that name already '
                                      'exists', 'name')

    adapter = get_cluster_adapter(cluster)
    return adapter.validate()
def start(self, *args, **kwargs):
    """Start a container-pull taskflow on a cluster.

    Requires 'cluster', 'image' and 'container' in kwargs; when the
    cluster carries an _id it is reloaded with ADMIN access before the
    flow is started.
    """
    user = getCurrentUser()
    cluster = kwargs.get('cluster')
    image = kwargs.get('image')
    container = kwargs.get('container')

    if cluster is None:
        raise Exception('Unable to extract cluster.')

    if '_id' not in cluster and 'name' not in cluster:
        raise Exception('Unable to extract cluster.')

    if image is None:
        raise Exception('Unable to extract the image name.')

    if container is None:
        raise Exception('Unable to extract container type.')

    cluster_id = parse('cluster._id').find(kwargs)
    if cluster_id:
        cluster_id = cluster_id[0].value
        model = ModelImporter.model('cluster', 'cumulus')
        # Reload with full access so the task can use credentials.
        cluster = model.load(cluster_id, user=user, level=AccessType.ADMIN)

    super(ContainerPullTaskFlow, self).start(
        start.s(user, cluster, image, container), *args, **kwargs)
def get_task_folder():
    """Return the configured slicer CLI task folder (WRITE access), or
    None when the setting is unset."""
    folder_id = Setting().get(PluginSettings.SLICER_CLI_WEB_TASK_FOLDER)
    if not folder_id:
        return None
    return Folder().load(folder_id, level=AccessType.WRITE,
                         user=getCurrentUser())
def createSession(self, params):
    """Create an IQR session item named after its own id and register it
    with the IQR controller."""
    smqtkFolder = params['smqtkFolder']
    sessionsFolder = getCreateSessionsFolder()

    # Resolve the folder holding the images; its parent id selects the
    # descriptor index table to use.
    smqtkFolderDoc = ModelImporter.model('folder').load(
        ObjectId(smqtkFolder), user=getCurrentUser())
    dataFolderId = str(smqtkFolderDoc['parentId'])

    # Create the session item, then rename it after its own id.
    itemModel = ModelImporter.model('item')
    session = itemModel.createItem('placeholder_name', getCurrentUser(),
                                   sessionsFolder)
    session['name'] = str(session['_id'])
    itemModel.save(session)
    sessionId = str(session['_id'])

    itemModel.setMetadata(session, {
        'smqtk_folder_id': smqtkFolder,
        'data_folder_id': dataFolderId,
        'pos_uuids': [],
        'neg_uuids': []
    })

    # Already registered with the controller; nothing more to do.
    if self.controller.has_session_uuid(sessionId):
        return session

    iqrs = IqrSession(self.positive_seed_neighbors, session_uid=sessionId)
    with self.controller:
        with iqrs:  # because classifier maps locked by session
            self.controller.add_session(iqrs)
            self.session_classifiers[sessionId] = None
            self.session_classifier_dirty[sessionId] = True

    return session
def create_profile(self, userId, name, profile_type, access_key_id,
                   secret_access_key, region_name, availability_zone,
                   public_ips):
    """Build and save an AWS profile document in 'creating' status, with
    ADMIN access for the current user and the configured group."""
    current_user = getCurrentUser()

    profile = {
        'name': name,
        'cloudProvider': profile_type,
        'accessKeyId': access_key_id,
        'secretAccessKey': secret_access_key,
        'regionName': region_name,
        'availabilityZone': availability_zone,
        'userId': userId,
        'status': 'creating',
        'publicIPs': public_ips
    }

    profile = self.setUserAccess(profile, current_user,
                                 level=AccessType.ADMIN, save=False)
    group = {'_id': ObjectId(self.get_group_id())}
    profile = self.setGroupAccess(profile, group, level=AccessType.ADMIN)

    return self.save(profile)
def attach_complete(self, volume, cluster, params):
    """Callback fired when a volume attach operation finishes.

    On success (the JSON body contains 'path') the volume is recorded on
    the cluster (deduplicated) and marked INUSE; otherwise the volume is
    marked ERROR with an explanatory message.
    """
    user = getCurrentUser()
    path = getBodyJson().get('path', None)

    if path is not None:
        cluster.setdefault('volumes', [])
        cluster['volumes'].append(volume['_id'])
        # Deduplicate in case of repeated callbacks.
        cluster['volumes'] = list(set(cluster['volumes']))

        volume['status'] = VolumeState.INUSE
        volume['path'] = path

        # TODO: removing msg should be refactored into
        # a general purpose 'update_status' function
        # on the volume model. This way msg only refers
        # to the current status.
        try:
            del volume['msg']
        except KeyError:
            pass

        # Add cluster id to volume
        volume['clusterId'] = cluster['_id']

        self.model('cluster', 'cumulus').save(cluster)
        self._model.update_volume(user, volume)
    else:
        volume['status'] = VolumeState.ERROR
        volume['msg'] = 'Volume path was not communicated on complete'
        self._model.update_volume(user, volume)
def find_id(self, id, params):
    """Fetch a molecule by id (READ access) and return its cleaned form;
    404 when not found."""
    molecule = self._model.load(id, level=AccessType.READ,
                                user=getCurrentUser())
    if not molecule:
        raise RestException('Molecule not found.', code=404)
    return self._clean(molecule)
def provision(self):
    """Schedule asynchronous Ansible provisioning of the cluster and
    return the (unfiltered) cluster document."""
    self.status = ClusterStatus.PROVISIONING

    base_url = cumulus.config.girder.baseUrl
    log_write_url = '%s/clusters/%s/log' % (base_url, self.cluster['_id'])
    girder_token = get_task_token()['_id']
    profile, secret_key = _get_profile(self.cluster['profileId'])

    playbook = get_property('config.provision.spec', self.cluster,
                            default=self.DEFAULT_PLAYBOOK)
    playbook_params = get_property('config.provision.params', self.cluster,
                                   default={})
    ssh_user = get_property('config.provision.ssh.user', self.cluster,
                            default='ubuntu')
    playbook_params['cluster_state'] = ClusterStatus.RUNNING
    playbook_params['ansible_ssh_user'] = ssh_user

    # Credentials are stripped before handing the cluster to the task.
    filtered_cluster = self._model.filter(self.cluster, getCurrentUser(),
                                          passphrase=False)
    cumulus.ansible.tasks.cluster.provision_cluster.delay(
        playbook, filtered_cluster, profile, secret_key, playbook_params,
        girder_token, log_write_url, ClusterStatus.RUNNING)

    return self.cluster
def update_step(self, simulation, stepName, updates, params):
    """Apply a schema-validated partial update to one simulation step.

    Rejects unknown steps and attempts to change immutable properties
    ('type', 'folderId').
    """
    user = getCurrentUser()
    immutable = ['type', 'folderId']

    if stepName not in simulation.get('steps', {}):
        raise RestException(
            'Simulation %s doesn\'t contain step %s'
            % (simulation['_id'], stepName), 400)

    for key in updates:
        if key in immutable:
            raise RestException('\'%s\' is an immutable property' % key,
                                400)

    try:
        resolver = jsonschema.RefResolver.from_schema(schema.definitions)
        jsonschema.validate(updates, schema.definitions['stepUpdate'],
                            resolver=resolver)
    except jsonschema.ValidationError as ve:
        raise RestException(ve.message, 400)

    return self._model.update_step(
        user, simulation, stepName, updates.get('status'),
        updates.get('metadata'), updates.get('export'),
        updates.get('view'))
def update_step(self, simulation, stepName, params):
    """Apply a schema-validated partial update (from the JSON body) to one
    simulation step; immutable properties are rejected."""
    user = getCurrentUser()
    immutable = ['type', 'folderId']
    updates = getBodyJson()

    if stepName not in simulation.get('steps', {}):
        raise RestException('Simulation %s doesn\'t contain step %s'
                            % (simulation['_id'], stepName), 400)

    for key in updates:
        if key in immutable:
            raise RestException('\'%s\' is an immutable property' % key,
                                400)

    try:
        resolver = jsonschema.RefResolver.from_schema(schema.definitions)
        jsonschema.validate(updates, schema.definitions['stepUpdate'],
                            resolver=resolver)
    except jsonschema.ValidationError as ve:
        raise RestException(ve.message, 400)

    return self._model.update_step(
        user, simulation, stepName, updates.get('status'),
        updates.get('metadata'), updates.get('export'),
        updates.get('view'))
def validate(self, project):
    """
    Validate the project against the JSON schema and enforce a unique
    per-user project name.
    """
    try:
        resolver = jsonschema.RefResolver.from_schema(schema.definitions)
        jsonschema.validate(project, schema.project, resolver=resolver)
    except jsonschema.ValidationError as ve:
        raise ValidationException(ve.message)

    # Ensure the name is unique for this user.
    user = getCurrentUser()
    name_query = {
        'name': project['name'],
        'userId': user['_id']
    }
    if '_id' in project:
        # Exclude the project itself when validating an update.
        name_query['_id'] = {'$ne': project['_id']}

    duplicate = self.findOne(name_query, fields=['_id'])
    if duplicate is not None:
        raise ValidationException('A project with that name already '
                                  'exists.', 'name')

    return project
def create_profile(user, params):
    """Create an AWS profile for *user* and schedule key-pair generation.

    On failure to schedule the async task the profile is removed so no
    orphaned document remains. Responds 201 with a Location header.
    """
    body = getBodyJson()
    requireParams(['name', 'accessKeyId', 'secretAccessKey', 'regionName',
                   'availabilityZone'], body)

    profile_type = body.get('cloudProvider', 'ec2')

    model = ModelImporter.model('aws', 'cumulus')
    profile = model.create_profile(
        user['_id'], body['name'], profile_type, body['accessKeyId'],
        body['secretAccessKey'], body['regionName'],
        body['availabilityZone'], body.get('publicIPs', False))

    # Now fire off a task to create a key pair for this profile.
    try:
        cumulus.aws.ec2.tasks.key.generate_key_pair.delay(
            _filter(profile), get_task_token()['_id'])

        cherrypy.response.status = 201
        cherrypy.response.headers['Location'] \
            = '/user/%s/aws/profile/%s' % (str(user['_id']),
                                           str(profile['_id']))

        return model.filter(profile, getCurrentUser())
    except Exception:
        # Remove profile if an error occurs firing off the task.
        model.remove(profile)
        raise
def start(self, *args, **kwargs):
    """Start the OpenChemistry taskflow.

    Requires 'input', 'cluster' and 'image' in kwargs; 'runParameters'
    defaults to {}. When the cluster carries an _id it is reloaded with
    ADMIN access and filtered before use.

    Bug fix: *cluster* was dereferenced with ``'_id' not in cluster``
    before being checked for None, raising TypeError when the caller
    omitted it. It is now validated first, consistent with the other
    taskflow ``start`` implementations in this file.
    """
    user = getCurrentUser()
    input_ = kwargs.get('input')
    cluster = kwargs.get('cluster')
    image = kwargs.get('image')
    run_parameters = kwargs.get('runParameters')

    if input_ is None:
        raise Exception('Unable to extract input.')

    if cluster is None:
        raise Exception('Unable to extract cluster.')

    if '_id' not in cluster and 'name' not in cluster:
        raise Exception('Unable to extract cluster.')

    if image is None:
        raise Exception('Unable to extract the docker image name.')

    if run_parameters is None:
        run_parameters = {}

    cluster_id = parse('cluster._id').find(kwargs)
    if cluster_id:
        cluster_id = cluster_id[0].value
        model = ModelImporter.model('cluster', 'cumulus')
        cluster = model.load(cluster_id, user=user, level=AccessType.ADMIN)
        cluster = model.filter(cluster, user, passphrase=False)

    super(OpenChemistryTaskFlow, self).start(
        start.s(input_, cluster, image, run_parameters),
        *args, **kwargs)
def get_twitter(user):
    """Return the user's twitter value, or None when it is private.

    Visible when the user opted in via 'twitterPublic', or when the
    current user has ADMIN access on the user document.
    """
    if user.get('twitterPublic') is True:
        return user.get('twitter')
    if User().getAccessLevel(user, getCurrentUser()) == AccessType.ADMIN:
        return user.get('twitter')
    return None
def deleteProcessedVideo(self, id, params):
    """Remove all files produced by video processing for an item and clear
    its processing metadata.

    Bug fix: the original re-read ``item['video']`` and looked up 'jobId'
    *after* popping it from the same dict, so 'originalJobId' in the
    response was always None. The job id is now captured before removal,
    and the redundant re-get is gone.
    """
    user = getCurrentUser()
    itemModel = self.model('item')
    fileModel = self.model('file')

    item = itemModel.load(id, user=user, level=AccessType.READ)
    itemVideoData = item.get('video', {})

    # Capture before it is popped below.
    jobId = itemVideoData.get('jobId')

    fileIdList = itemVideoData.get('createdFiles', [])
    for f in fileIdList:
        theFile = fileModel.load(f, level=AccessType.WRITE, user=user)
        if theFile:
            fileModel.remove(theFile)

    itemVideoData['createdFiles'] = []
    itemVideoData.pop('jobId', None)

    item['video'] = itemVideoData
    itemModel.save(item)

    return {
        'message': 'Processed video data deleted.',
        'itemId': id,
        'originalJobId': jobId,
        'removedFiles': fileIdList
    }
def downloadFile(self, file, offset=0, headers=True, endByte=None,
                 **kwargs):
    """Download a NEWT-hosted file: redirect when serving headers, or
    stream the content using the user's NEWT session cookie.

    Bug fix: when the current user has no ``newt.sessionId`` the JSONPath
    ``find`` returns an empty list, and the original ``is None`` check
    never fired, so an empty list leaked into the request cookie. An
    empty match now raises GirderException like a missing session id.
    """
    if 'path' not in file:
        raise Exception('Missing path property')

    full_path = file['path']
    url = '%s/file/%s/%s?view=read' % (self.newt_base_url, self.machine,
                                       full_path)
    if headers:
        raise cherrypy.HTTPRedirect(url)
    else:
        matches = parse('newt.sessionId').find(getCurrentUser())
        session_id = matches[0].value if matches else None

        if session_id is None:
            raise GirderException('Missing NEWT session id')

        def stream():
            cookies = dict(newt_sessionid=session_id)
            r = requests.get(url, cookies=cookies, stream=True)
            for chunk in r.iter_content(chunk_size=BUF_LEN):
                if chunk:
                    yield chunk

        return stream
def get_geometry_format(self, moleculeId, id, output_format):
    """Return a generator that streams a geometry converted to
    *output_format*, with the matching Content-Type header."""
    if output_format not in Molecule.output_formats:
        raise RestException('Format not supported.')

    if output_format in Molecule.output_formats_2d:
        # 2D formats (smiles/inchi) do not need the geometry; delegate to
        # the general format endpoint.
        return self.get_format(moleculeId, output_format, None)

    geometry = GeometryModel().load(id, level=AccessType.READ,
                                    user=getCurrentUser())
    if not geometry:
        raise RestException('Geometry not found.', code=404)

    data = json.dumps(geometry['cjson'])
    if output_format != 'cjson':
        data = avogadro.convert_str(data, 'cjson', output_format)

    def stream():
        cherrypy.response.headers['Content-Type'] = (
            Molecule.mime_types[output_format])
        yield data

    return stream
def onGetItem(event):
    """Strip the 'originalFilename' metadata from item responses unless
    the current user is a site admin."""
    itemResponse = event.info['returnVal']

    if 'originalFilename' not in itemResponse.get('meta', {}):
        return

    currentUser = getCurrentUser()
    isSiteAdmin = bool(currentUser and currentUser['admin'])
    if not isSiteAdmin:
        del itemResponse['meta']['originalFilename']
def _create_ebs(self, body, zone):
    """Create an EBS volume document from a validated request body; 'fs'
    is optional."""
    return self._model.create_ebs(
        getCurrentUser(), body['profileId'], body['name'], zone,
        body['size'], body.get('fs', None))
def append_to_log(self, id, params):
    """Append the JSON request body to a volume's log; requires ADMIN
    access and 404s when the volume is missing."""
    current_user = getCurrentUser()
    volume = self._model.load(id, user=current_user,
                              level=AccessType.ADMIN)
    if not volume:
        raise RestException('Volume not found.', code=404)

    entry = getBodyJson()
    return self._model.append_to_log(current_user, id, entry)
def submit_job(self, job):
    """Submit *job* to the cluster through the cumulus task layer."""
    log_url = '%s/jobs/%s/log' % (getApiUrl(), job['_id'])
    girder_token = get_task_token(self.cluster)['_id']
    # Credentials are stripped from the cluster before submission.
    filtered_cluster = self._model.filter(self.cluster, getCurrentUser(),
                                          passphrase=False)
    cumulus.tasks.job.submit(girder_token, filtered_cluster, job, log_url)
def session_id(self, params):
    """Return the current user's NEWT session id (force-loaded so the
    'newt' field is available regardless of access filtering)."""
    current = getCurrentUser()
    user = self.model('user').load(current['_id'], fields=['newt'],
                                   force=True)
    return {'sessionId': user.get('newt', {}).get('sessionId')}
def start(self, request_body):
    """Asynchronously test the connection to the cluster, logging to the
    cluster's log endpoint."""
    log_write_url = '%s/clusters/%s/log' % (getApiUrl(),
                                            self.cluster['_id'])
    girder_token = get_task_token(self.cluster)['_id']
    filtered_cluster = self._model.filter(self.cluster, getCurrentUser(),
                                          passphrase=False)
    cumulus.tasks.cluster.test_connection.delay(
        filtered_cluster, log_write_url=log_write_url,
        girder_token=girder_token)
def create(self, params):
    """Create a project from the JSON request body and respond 201 with a
    Location header."""
    body = getBodyJson()
    project = self.model('project', 'hpccloud').create(getCurrentUser(),
                                                       body)
    cherrypy.response.status = 201
    cherrypy.response.headers['Location'] = '/projects/%s' % project['_id']
    return project
def challengeSaved(event):
    """
    After a challenge is saved, mirror the challenge's access policies
    onto its Assets folder so their permissions stay in sync.
    """
    challenge = event.info
    assetsFolder = getAssetsFolder(challenge, getCurrentUser(), False)
    Folder().copyAccessPolicies(challenge, assetsFolder, save=True)
def find_id(self, id, params):
    """Fetch a molecule by id (READ access); the 'cjson' query parameter
    (default true) controls whether cjson is included in the cleaned
    result."""
    molecule = MoleculeModel().load(id, level=AccessType.READ,
                                    user=getCurrentUser())
    if not molecule:
        raise RestException('Molecule not found.', code=404)

    cjsonParam = params.get('cjson')
    include_cjson = True if cjsonParam is None \
        else cjsonParam.lower() == 'true'

    return self._clean(molecule, include_cjson)
def handle(self, record):
    """Persist an audit log record as a document (no events triggered)."""
    user = getCurrentUser()
    document = {
        'type': record.msg,
        'details': record.details,
        'ip': cherrypy.request.remote.ip,
        # None for anonymous requests.
        'userId': user and user['_id'],
        'when': datetime.datetime.utcnow()
    }
    Record().save(document, triggerEvents=False)
def detach(self, volume, params):
    """Detach *volume* from its cluster via an async Ansible task.

    Validates the volume is INUSE with clusterId/path set and that the
    cluster's master instance is RUNNING, then schedules detach_volume and
    marks the volume DETACHING. Returns the filtered volume. (A sibling
    variant of this method also passes a log_write_url to the task.)
    """
    profile_id = parse('profileId').find(volume)[0].value
    profile, secret_key = _get_profile(profile_id)

    girder_callback_info = {
        'girder_api_url': getApiUrl(),
        'girder_token': get_task_token()['_id']}

    p = CloudProvider(dict(secretAccessKey=secret_key, **profile))
    aws_volume = p.get_volume(volume)

    if aws_volume is None or aws_volume['state'] != VolumeState.INUSE:
        raise RestException('This volume is not attached '
                            'to a cluster', 400)

    if 'clusterId' not in volume:
        raise RestException('clusterId is not set on this volume!', 400)

    try:
        volume['path']
    except KeyError:
        raise RestException('path is not set on this volume!', 400)

    cluster = self.model('cluster', 'cumulus').load(volume['clusterId'],
                                                    user=getCurrentUser(),
                                                    level=AccessType.ADMIN)
    master = p.get_master_instance(cluster['_id'])

    if master['state'] != InstanceState.RUNNING:
        raise RestException('Master instance is not running!', 400)

    user = getCurrentUser()
    # Strip credentials before handing the cluster to the task.
    cluster = self.model('cluster', 'cumulus').filter(
        cluster, user, passphrase=False)
    cumulus.ansible.tasks.volume.detach_volume\
        .delay(profile, cluster, master, self._model.filter(volume, user),
               secret_key, girder_callback_info)

    volume['status'] = VolumeState.DETACHING
    volume = self._model.update_volume(user, volume)

    return self._model.filter(volume, user)
def attach(self, volume, cluster, params):
    """Attach *volume* to *cluster* via an async Ansible task.

    The JSON body must contain 'path' (the mount point). The volume must
    either not yet exist on AWS or be AVAILABLE; the master instance must
    be RUNNING. The volume is marked ATTACHING and returned filtered.
    """
    body = getBodyJson()
    self.requireParams(['path'], body)
    path = body['path']

    profile_id = parse('profileId').find(volume)[0].value
    profile, secret_key = _get_profile(profile_id)

    girder_callback_info = {
        'girder_api_url': getApiUrl(),
        'girder_token': get_task_token()['_id']}

    p = CloudProvider(dict(secretAccessKey=secret_key, **profile))
    aws_volume = p.get_volume(volume)

    # If volume exists it needs to be available to be attached. If
    # it doesn't exist it will be created as part of the attach
    # playbook.
    if aws_volume is not None and \
            aws_volume['state'] != VolumeState.AVAILABLE:
        raise RestException('This volume is not available to attach '
                            'to a cluster', 400)

    master = p.get_master_instance(cluster['_id'])

    if master['state'] != InstanceState.RUNNING:
        raise RestException('Master instance is not running!', 400)

    # Strip credentials before handing the cluster to the task.
    cluster = self.model('cluster', 'cumulus').filter(
        cluster, getCurrentUser(), passphrase=False)
    cumulus.ansible.tasks.volume.attach_volume\
        .delay(profile, cluster, master,
               self._model.filter(volume, getCurrentUser()), path,
               secret_key, girder_callback_info)

    volume['status'] = VolumeState.ATTACHING
    volume = self._model.update_volume(getCurrentUser(), volume)

    return self._model.filter(volume, getCurrentUser())
def create(self, body):
    """Ingest an experiment JSON file referenced by 'fileId' and process
    its experimental data; rejects files without an 'experiment' key."""
    self.requireParams(['fileId'], body)

    file = File().load(body['fileId'], user=getCurrentUser())
    with File().open(file) as f:
        experiment_data = json.load(f)

    if 'experiment' not in experiment_data:
        raise RestException('Invalid experiment file.')

    return self._process_experimental(experiment_data)
def start(self, request_body):
    """Verify the cluster is past creation, then asynchronously test its
    connection."""
    if self.cluster['status'] == ClusterStatus.CREATING:
        raise RestException('Cluster is not ready to start.', code=400)

    log_write_url = '%s/clusters/%s/log' % (getApiUrl(),
                                            self.cluster['_id'])
    girder_token = get_task_token()['_id']
    filtered_cluster = self._model.filter(self.cluster, getCurrentUser(),
                                          passphrase=False)
    cumulus.tasks.cluster.test_connection.delay(
        filtered_cluster, log_write_url=log_write_url,
        girder_token=girder_token)
def createSession(self, params):
    """Create a search session item backed by a remote SMQTK session; the
    item is named after the remote session id and stores it as 'sid'
    metadata."""
    sessionsFolder = getCreateSessionsFolder()
    sid = requests.post(self.search_url + '/session').json()['sid']

    itemModel = ModelImporter.model('item')
    item = itemModel.createItem(name=sid, creator=getCurrentUser(),
                                folder=sessionsFolder)
    itemModel.setMetadata(item, {'sid': sid})
    return item
def log(self, id, params):
    """Return a volume's log records starting at the optional 'offset'
    parameter; 404 when the volume is missing or unreadable."""
    user = getCurrentUser()
    offset = int(params['offset']) if 'offset' in params else 0

    if not self._model.load(id, user=user, level=AccessType.READ):
        raise RestException('Volume not found.', code=404)

    return {'log': self._model.log_records(user, id, offset)}