def post_nodeset(cls, body):
    '''Store a new node set for the current consumer and return its NodeSetInfo.'''
    # Metadata describing the node set (the raw nodes are stored but not returned).
    info = {
        'description': body.description,
        'size': len(body.nodes),
        'time_of_upload': cls.timestamp('-'),
    }
    consumer = X.consumer_id()
    # Full database document: metadata plus node payload, scoped to the consumer.
    _id = nodesets.insert_one({
        **info,
        'nodes': body.nodes,
        'X-Consumer-ID': consumer,
    }).inserted_id
    # Derive the public ID from the Mongo _id and write it back onto the document.
    public_id = cls.generate_uuid(_id)
    nodesets.update_one(
        filter={'_id': _id, 'X-Consumer-ID': consumer},
        update={'$set': {'id': public_id}},
    )
    info['id'] = public_id
    return util.deserialize_model(info, NodeSetInfo)
def post_parameter_set(cls, body):
    '''Validate and store a new parameter set; return its ParameterSetInfo.

    Returns ('Invalid input data', 409) when the supplied parameters do not
    form a valid AverageDeregnetArguments configuration.
    '''
    # NOTE(review): `if val` also drops falsy values (0, False, '') so that
    # defaults apply for them — presumably intentional; confirm against model.
    parameter_set_data = {
        key: val for key, val in body.to_dict().items()
        if val and key != 'description'
    }
    try:
        # Validation only: constructing the arguments object raises on bad data.
        AverageDeregnetArguments(**parameter_set_data)
    except Exception:  # bug fix: bare `except:` also swallowed SystemExit etc.
        return 'Invalid input data', 409
    parameter_set_info = {
        # 'description': body.description,
        'set_parameters': list(parameter_set_data.keys()),
    }
    x_consumer_id = X.consumer_id()
    _id = parameter_sets.insert_one({
        **parameter_set_data,
        **parameter_set_info,
        'X-Consumer-ID': x_consumer_id,
    }).inserted_id
    # Assign the public ID derived from the Mongo _id.
    parameter_set_id = cls.generate_uuid(_id)
    parameter_sets.update_one(
        filter={'_id': _id, 'X-Consumer-ID': x_consumer_id},
        update={'$set': {'id': parameter_set_id}},
    )
    parameter_set_info['id'] = parameter_set_id
    return util.deserialize_model(parameter_set_info, ParameterSetInfo)
def post_graphml(cls, graph_id, file_to_upload):
    '''Attach an uploaded GraphML(Z) file to an existing graph record.'''
    consumer = X.consumer_id()
    record = graphs.find_one(filter={
        'id': graph_id,
        'X-Consumer-ID': consumer,
    })
    if not record:
        return 'Invalid graph ID', 400
    if record['file_id'] is not None:
        return 'A graph for that ID is already uploaded', 409
    content = file_to_upload.read()
    if not valid_graphmlz(content):
        return 'Invalid GraphML file (igraph)', 400
    # Persist the raw file, then load it back as an igraph object
    # to record the node and edge counts on the graph document.
    file_id = files.put(content)
    igraph_obj = graphs.ig_from_file_id(file_id)
    graphs.update_one(
        filter={'id': graph_id, 'X-Consumer-ID': consumer},
        update={'$set': {
            'file_id': file_id,
            'num_nodes': len(igraph_obj.vs),
            'num_edges': len(igraph_obj.es),
        }},
    )
    return 'GraphML successfully uploaded', 201
def get_runs(cls, skip, limit):
    '''Return a page of RunInfo models for the current consumer.'''
    cursor = runs.find(
        filter={'X-Consumer-ID': X.consumer_id()},
    ).skip(skip).limit(limit)
    return [util.deserialize_model(doc, RunInfo) for doc in cursor]
def post_graph(cls, initial_graph_info):
    '''Create an empty graph record (file uploaded later); return its GraphInfo.'''
    graph_info = {
        'time_of_upload': cls.timestamp('-'),
        'name': initial_graph_info.name,
        'description': initial_graph_info.description,
        # Counts stay at 0 until a GraphML file is attached via post_graphml.
        'num_nodes': 0,
        'num_edges': 0,
    }
    consumer = X.consumer_id()
    _id = graphs.insert_one({
        **graph_info,
        'file_id': None,
        'node_id_attr': initial_graph_info.node_id_attr,
        'X-Consumer-ID': consumer,
    }).inserted_id
    # Derive the public ID from the Mongo _id and write it back.
    public_id = cls.generate_uuid(_id)
    graphs.update_one(
        filter={'_id': _id, 'X-Consumer-ID': consumer},
        update={'$set': {'id': public_id}},
    )
    graph_info['id'] = public_id
    return util.deserialize_model(graph_info, GraphInfo)
def get_parameter_sets(cls, skip, limit):
    '''Return a page of ParameterSetInfo models for the current consumer.'''
    cursor = parameter_sets.find(
        filter={'X-Consumer-ID': X.consumer_id()},
        projection=parameter_sets.PARAMSET_INFO_PROJ,
    ).skip(skip).limit(limit)
    return [util.deserialize_model(doc, ParameterSetInfo) for doc in cursor]
def get_subgraphs(cls, skip, limit):
    '''Return a page of SubgraphInfo models for the current consumer.'''
    cursor = subgraphs.find(
        filter={'X-Consumer-ID': X.consumer_id()},
        projection=subgraphs.SUBGR_INFO_PROJ,
    ).skip(skip).limit(limit)
    return [util.deserialize_model(doc, SubgraphInfo) for doc in cursor]
def get_run(cls, run_id):
    '''Look up a single run by its public ID; 400 when unknown.'''
    doc = runs.find_one(filter={
        'id': run_id,
        'X-Consumer-ID': X.consumer_id(),
    })
    if doc:
        return util.deserialize_model(doc, RunInfo)
    return 'Invalid run ID', 400
def delete_subgraph(cls, subgraph_id):
    '''Delete a subgraph owned by the current consumer; 400 if not found.'''
    deletion = subgraphs.delete_one(filter={
        'id': subgraph_id,
        'X-Consumer-ID': X.consumer_id(),
    })
    # Bug fix: pymongo's delete_one returns an always-truthy DeleteResult,
    # so `if not deletion` could never report a missing subgraph.
    if deletion.deleted_count == 0:
        return 'Invalid subgraph ID', 400
    return 'Subgraph successfully deleted', 201
def get_subgraph(cls, subgraph_id):
    '''Look up a single subgraph's info by its public ID; 400 when unknown.'''
    doc = subgraphs.find_one(
        filter={
            'id': subgraph_id,
            'X-Consumer-ID': X.consumer_id(),
        },
        projection=subgraphs.SUBGR_INFO_PROJ,
    )
    if doc:
        return util.deserialize_model(doc, SubgraphInfo)
    return 'Invalid subgraph ID', 400
def delete_parameter_set(cls, parameter_set_id):
    '''Delete a parameter set, refusing while runs still depend on it.'''
    if not parameter_sets.dependent_runs.is_empty(parameter_set_id):
        return 'Invalid parameter set ID: runs depend on this parameter set', 400
    deletion = parameter_sets.delete_one({
        'id': parameter_set_id,
        'X-Consumer-ID': X.consumer_id(),
    })
    # Bug fix: pymongo's delete_one returns an always-truthy DeleteResult;
    # inspect deleted_count so a missing ID actually yields the 400.
    if deletion.deleted_count == 0:
        return 'Invalid parameter set ID: No parameter set with that ID', 400
    return 'Parameter set successfully deleted', 201
def get_parameter_set(cls, parameter_set_id):
    '''Look up a parameter set's info by its public ID; 400 when unknown.'''
    doc = parameter_sets.find_one(
        filter={
            'id': parameter_set_id,
            'X-Consumer-ID': X.consumer_id(),
        },
        projection=parameter_sets.PARAMSET_INFO_PROJ,
    )
    if doc:
        return util.deserialize_model(doc, ParameterSetInfo)
    return 'Invalid parameter set ID', 400
def get_parameter_set_data(cls, parameter_set_id):
    '''Look up the full parameter data of a set by public ID; 400 when unknown.'''
    doc = parameter_sets.find_one(
        filter={
            'id': parameter_set_id,
            'X-Consumer-ID': X.consumer_id(),
        },
        projection=parameter_sets.PARAMSET_DATA_PROJ,
    )
    if doc:
        return util.deserialize_model(doc, ParameterSet)
    return 'Invalid parameter set ID', 400
def delete_run(cls, run_id):
    '''Delete a not-yet-started run and revoke its queued celery task.'''
    # TODO. Delete completed runs if they do not reference subgraphs
    deletion = runs.delete_one(filter={
        'id': run_id,
        'started': False,  # only runs that have not started may be deleted
        'X-Consumer-ID': X.consumer_id(),
    })
    # Bug fix: pymongo's delete_one returns an always-truthy DeleteResult;
    # check deleted_count so unknown/started runs actually return 400.
    if deletion.deleted_count == 0:
        return 'Invalid run ID', 400
    # The run was queued under its own ID as the celery task_id.
    celery.control.revoke(run_id)
    return 'Run successfully deleted', 201
def get_nodeset(cls, nodeset_id):
    '''Look up a node set's info by its public ID; 400 when unknown.'''
    doc = nodesets.find_one(
        filter={
            'id': nodeset_id,
            'X-Consumer-ID': X.consumer_id(),
        },
        projection=nodesets.NODESET_INFO_PROJ,
    )
    if doc:
        return util.deserialize_model(doc, NodeSetInfo)
    return 'Invalid node set Id', 400
def get_score(cls, score_id):
    '''Look up a score's info by its public ID; 400 when unknown.'''
    doc = scores.find_one(
        filter={
            'id': score_id,
            'X-Consumer-ID': X.consumer_id(),
        },
        projection=scores.SCORE_INFO_PROJ,
    )
    if doc:
        return util.deserialize_model(doc, ScoreInfo)
    return 'Invalid ID', 400
def get_graph(cls, graph_id):
    '''Look up a graph's info by its public ID; 400 when unknown.'''
    doc = graphs.find_one(
        filter={
            'id': graph_id,
            'X-Consumer-ID': X.consumer_id(),
        },
        projection=graphs.GRAPH_INFO_PROJ,
    )
    if doc:
        return util.deserialize_model(doc, GraphInfo)
    return 'Invalid graph ID: no graph with that ID', 400
def get_graphs(cls, skip, limit):
    '''Return a page of GraphInfo models for the current consumer.'''
    cursor = graphs.find(
        filter={'X-Consumer-ID': X.consumer_id()},
        projection=graphs.GRAPH_INFO_PROJ,
    ).skip(skip).limit(limit)
    return [util.deserialize_model(doc, GraphInfo) for doc in cursor]
def get_scores(cls, skip, limit):
    '''Return a page of ScoreInfo models for the current consumer.'''
    cursor = scores.find(
        filter={'X-Consumer-ID': X.consumer_id()},
        projection=scores.SCORE_INFO_PROJ,
    ).skip(skip).limit(limit)
    return [util.deserialize_model(doc, ScoreInfo) for doc in cursor]
def get_nodesets(cls, skip, limit):
    '''Return a page of NodeSetInfo models for the current consumer.'''
    cursor = nodesets.find(
        filter={'X-Consumer-ID': X.consumer_id()},
        projection=nodesets.NODESET_INFO_PROJ,
    ).skip(skip).limit(limit)
    return [util.deserialize_model(doc, NodeSetInfo) for doc in cursor]
def delete_nodeset(cls, nodeset_id):
    '''Delete a node set, refusing while runs still depend on it.'''
    if not nodesets.dependent_runs.is_empty(nodeset_id):
        return 'Invalid nodeset ID: some runs depend on this nodeset', 400
    deletion = nodesets.delete_one(filter={
        'id': nodeset_id,
        'X-Consumer-ID': X.consumer_id(),
    })
    # Bug fix: pymongo's delete_one returns an always-truthy DeleteResult;
    # inspect deleted_count so a missing ID actually yields the 400.
    if deletion.deleted_count == 0:
        return 'Invalid nodeset ID: No nodeset with that ID', 400
    return 'Node set successfully deleted', 201
def delete_score(cls, score_id):
    '''Delete a score, refusing while runs still depend on it.'''
    if not scores.dependent_runs.is_empty(score_id):
        return 'Invalid score ID: runs depend on this score', 400
    deletion = scores.delete_one(filter={
        'id': score_id,
        'X-Consumer-ID': X.consumer_id(),
    })
    # Bug fix: pymongo's delete_one returns an always-truthy DeleteResult;
    # inspect deleted_count so a missing ID actually yields the 400.
    if deletion.deleted_count == 0:
        return 'Invalid score ID', 400
    return 'Score successfully deleted', 201
def delete_graph(cls, graph_id):
    '''Delete a graph and its stored GraphML file, refusing while runs depend on it.'''
    if not graphs.dependent_runs.is_empty(graph_id):
        return 'Invalid graph ID: some runs depend on this graph', 400
    # Atomically fetch-and-remove so we still know the file_id afterwards.
    graph_data = graphs.find_one_and_delete(
        filter={
            'id': graph_id,
            'X-Consumer-ID': X.consumer_id(),
        },
        projection=graphs.GRAPH_DATA_PROJ,
    )
    if not graph_data:
        return 'Invalid graph ID: no graph with that ID', 400
    # Remove the uploaded file as well, if one was ever attached.
    file_id = graph_data.get('file_id')
    if file_id:
        files.delete(file_id)
    return 'Graph successfully deleted', 201
def download_subgraph_as(cls, subgraph_id, filetype):
    '''Export a stored subgraph in the requested file format.

    Currently only 'graphml' is implemented; other filetypes return 400.
    '''
    subgraph_data = subgraphs.find_one(filter={
        'id': subgraph_id,
        'X-Consumer-ID': X.consumer_id(),
    })
    if not subgraph_data:
        return 'Invalid subgraph ID', 400
    nodes = subgraph_data['nodes']
    graph_id = subgraph_data['graph_id']
    node_id_attr = subgraph_data['node_id_attr']
    # Rebuild the induced subgraph from the parent graph, carrying over the
    # per-node deregnet scores stored with the subgraph document.
    graph = graphs.get_ig(graph_id)
    graph.vs['id'] = [v[node_id_attr] for v in graph.vs]
    subgraph = graph.induced_subgraph(
        list(nodes.keys()), implementation='create_from_scratch')
    subgraph.vs['deregnet_score'] = [nodes[v['id']] for v in subgraph.vs]
    if filetype == 'graphml':
        content_disposition = 'attachment; filename=' + subgraph_id + '.graphml'
        with tempfile.TemporaryFile() as tmpfile:
            subgraph.write_graphml(tmpfile)
            tmpfile.seek(0)
            return tmpfile.read(), 200, {'Content-Disposition': content_disposition}
    # Bug fix: previously fell through and implicitly returned None for any
    # other filetype, surfacing as an opaque server error to the client.
    return 'Unsupported file type', 400
def post_score(cls, body):
    '''Store a node score vector for the current consumer and return its ScoreInfo.'''
    if len(body.node_ids) != len(body.score_values):
        return 'node_ids and score_values do not have matching size', 409
    # Metadata returned to the caller; 'id' is filled in after insertion.
    score_info = {
        'description': body.description,
        'id': None,
        'size': len(body.node_ids),
        'time_of_upload': cls.timestamp('-'),
    }
    consumer = X.consumer_id()
    # Full document: metadata plus the score payload, scoped to the consumer.
    _id = scores.insert_one({
        **score_info,
        'node_ids': body.node_ids,
        'score_values': body.score_values,
        'X-Consumer-ID': consumer,
    }).inserted_id
    # Derive the public ID from the Mongo _id and write it back.
    public_id = cls.generate_uuid(_id)
    scores.update_one(
        filter={'_id': _id, 'X-Consumer-ID': consumer},
        update={'$set': {'id': public_id}},
    )
    score_info['id'] = public_id
    return util.deserialize_model(score_info, ScoreInfo)
def post_run(cls, body):
    '''Validate, persist, and enqueue a new subgraph-finding run.

    Resolves the effective parameter set (defaults, overridden by a stored
    set, overridden by inline parameters), records run dependencies, and
    pushes the run onto the celery 'runs' queue under its own ID.
    '''
    run_input = body.to_dict()
    run_input = {k: v for k, v in run_input.items() if v is not None}
    graph_id = run_input['graph_id']
    score_id = run_input.get('score_id')
    receptors_id = run_input.get('receptors_id')
    # Bug fix: key was misspelled 'treminals_id', so terminals_id was always
    # None and a supplied terminals node set was silently ignored.
    terminals_id = run_input.get('terminals_id')
    exclude_id = run_input.get('exclude_id')
    include_id = run_input.get('include_id')
    parameter_set_id = run_input.get('parameter_set_id')
    x_consumer_id = X.consumer_id()
    valid, message = cls.validate_run(
        x_consumer_id, graph_id, score_id, receptors_id,
        terminals_id, exclude_id, include_id, parameter_set_id)
    if not valid:
        return 'Invalid run, invalid %s encountered, check your run input' % message, 400
    # Effective parameters: defaults <- stored parameter set <- inline values.
    default_parameters = parameter_sets.get_parameter_set_default_data()
    if not parameter_set_id:
        parameter_set = default_parameters
    else:
        parameter_set = parameter_sets.get_parameter_set_as_dict(parameter_set_id)
        parameter_set = {**default_parameters, **parameter_set}
    parameter_set = {
        **parameter_set,
        **{
            k: v for k, v in run_input.get('parameter_set', {}).items()
            if v is not None
        },
    }
    run_input['parameter_set'] = parameter_set
    # Generate run info and insert into the database.
    run_info = {
        'post_time': cls.timestamp('-'),
        'started': False,
        'done': False,
        'subgraph_ids': [],
        'run_input': run_input,
    }
    _id = runs.insert_one({
        **run_info,
        'X-Consumer-ID': x_consumer_id,
    }).inserted_id
    run_id = cls.generate_uuid(_id)
    runs.update_one(
        filter={'_id': _id, 'X-Consumer-ID': x_consumer_id},
        update={'$set': {'id': run_id}},
    )
    # Register new run dependencies so referenced resources cannot be deleted.
    graphs.dependent_runs[graph_id] = run_id
    cls.update_dependent_runs(run_id, score_id, scores)
    cls.update_dependent_runs(run_id, terminals_id, nodesets)
    cls.update_dependent_runs(run_id, receptors_id, nodesets)
    cls.update_dependent_runs(run_id, exclude_id, nodesets)
    cls.update_dependent_runs(run_id, include_id, nodesets)
    cls.update_dependent_runs(run_id, parameter_set_id, parameter_sets)
    run_info['id'] = run_id
    # Push the run onto the job queue; task_id == run_id so delete_run can revoke it.
    celery.send_task('find-subgraphs', args=(run_id,), task_id=run_id, queue='runs')
    return util.deserialize_model(run_info, RunInfo)