class ShellHandler(CLIHandler):
    def __init__(self):
        CLIHandler.__init__(self)

    def ProcessArgs(self, args):
        cmdOptionsList = SessionOptions.GetOptParseOptions()
        cmdOptionsList.append(
            make_option("--context", dest="context", default='',
                        help=SUPPRESS_HELP))
        _STR_USAGE = "%prog [options]"
        cmdParser = OptionParser(option_list=cmdOptionsList,
                                 usage=_STR_USAGE,
                                 add_help_option=False)

        # Get command line options
        (options, remainingArgs) = cmdParser.parse_args(args)
        sessionOptions = SessionOptions(options)
        self.session = Session(sessionOptions)
        self._ParseContext(options)

    def ClearCmdState(self):
        self.cmdNamespace = None
        self.app = None
        self.method = None
        self.options = Values()
        self.options._update_loose({'formatter': None, 'debug': False})
        self.usage = ''

    def ExecuteCommand(self, args):
        try:
            self.ClearCmdState()
            result, message = self._HandleOneCmd(args)
            if message:
                self.Print(message)
                message = ''
        except CLIParseException as err:
            # Parse error
            if err.message:
                logging.error(err.message)
            message = self._FormatHelpNoRaise(self.cmdNamespace, self.app,
                                              self.method, err)
        except CLIExecuteException as err:
            # Execution exception
            message = err.message
        except SessionException as err:
            message = err.message
        except vmodl.MethodFault as err:
            message = "Runtime error: " + err.msg
        except Exception as err:
            LogException(err)
            message = "Runtime error"

        # Print message
        self.Print(message)
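The pattern shared by these examples is using optparse.Values together with its _update_loose() method to fabricate an options object without running an OptionParser. A minimal, self-contained sketch of that idea, with illustrative attribute names rather than names taken from any of the projects above:

from optparse import Values

# Build a fake "parsed options" object; _update_loose() copies the dict
# straight into the instance's __dict__ without validating option names.
# (Attribute names below are illustrative only.)
options = Values()
options._update_loose({'formatter': None, 'debug': False})

assert options.debug is False               # attributes behave like parser results
print(options.ensure_value('verbose', True))  # ensure_value() fills in a default if unset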
def test_main_dicts_merge(request):
    null, expected = read_csv_to_dict(
        f'{request.config.rootdir}/{TEST_RESOURCES}/{MT_MERGE}')
    sort_list_of_dict(expected)

    options = Values()
    options._update_loose({
        'data_file':
            f'{request.config.rootdir}/{TEST_RESOURCES}/{MT_PROCESSED_DATA}',
        'metrics_file':
            f'{request.config.rootdir}/{TEST_RESOURCES}/{MT_METRICS_DATA}',
        'save_directory': f'{request.config.rootdir}/{TEST_RESOURCES}/'
    })

    main_dicts_merge(options, None)

    null, result = read_csv_to_dict(
        f'{request.config.rootdir}/{TEST_RESOURCES}/{MT_RESULT_CSV_FILE}')
    assert expected == result
def test_main_merge_pd_wo_type(request, cleanup):
    expected = pd.read_csv(
        f'{request.config.rootdir}/{TEST_RESOURCES}/{MT_MERGE_2}')
    expected = expected.round(decimals=3)
    expected.sort_values(by=expected.columns.to_list()[:-4], inplace=True)
    expected.reset_index(drop=True, inplace=True)

    options = Values()
    options._update_loose({
        'data_file':
            f'{request.config.rootdir}/{TEST_RESOURCES}/{MT_PROCESSED_DATA}',
        'metrics_file':
            f'{request.config.rootdir}/{TEST_RESOURCES}/{MT_METRICS_DATA}',
        'save_directory': f'{request.config.rootdir}/{TEST_RESOURCES}/'
    })

    main_merge_pd(options)

    result = pd.read_csv(f'{options.save_directory}{MT_RESULT_CSV_FILE}')
    result = result.round(decimals=3)
    assert expected.equals(result)
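For a freshly created instance, the same effect can be had by passing a defaults dict to the Values constructor, which simply setattr()s each key; a small sketch with placeholder paths (not the fixture constants used in the tests above):

from optparse import Values

# Equivalent to Values() followed by _update_loose() on a new instance:
# every key in the defaults dict becomes an attribute.
# (Paths below are illustrative placeholders.)
options = Values({
    'data_file': 'tests/resources/processed.csv',
    'metrics_file': 'tests/resources/metrics.csv',
    'save_directory': 'tests/resources/',
})
print(options.data_file)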
def generateGraph(self, filename, suggested_pathways=[], compound_data=None,
                  gene_data=None, protein_data=None, format='svg'):
    # Build options-like structure for generation of graph
    # (compatibility with command line version, we need to fake it)
    options = Values()

    options._update_loose({
        'file': None,
        #'pathways': self.config.Read('/Pathways/Show'),
        #'not_pathways': '',
        'show_all': False,  # self.config.ReadBool('/Pathways/ShowAll'),
        'search': '',
        'cluster_by': self.config.get('/App/ClusterBy'),
        'show_enzymes': self.config.get('/App/ShowEnzymes'),  # self.config.ReadBool('/App/ShowEnzymes'),
        'show_secondary': self.config.get('/App/Show2nd'),
        'show_molecular': self.config.get('/App/ShowMolecular'),
        'show_network_analysis': self.config.get('/App/ShowAnalysis'),
        'show_gibbs': self.config.get('/App/ShowGibbs'),
        'highlightpathways': self.config.get('/App/HighlightPathways'),
        'highlightregions': self.config.get('/App/HighlightRegions'),
        'splines': 'true',
        'focus': False,
        'show_pathway_links': self.config.get('/Pathways/ShowLinks'),
        # Always svg except when saving the file
        'output': format,
    })

    #pathway_ids = self.config.value('/Pathways/Show').split(',')

    if suggested_pathways:
        pathway_ids = [p.id for p in suggested_pathways.entities[1]]
    else:
        pathway_ids = []

    # Add the manually shown pathways
    pathway_ids_show = self.config.get('/Pathways/Show')
    pathway_ids.extend(pathway_ids_show)

    # Now remove the hidden pathways
    pathway_ids_hide = self.config.get('/Pathways/Hide')
    pathway_ids = [p for p in pathway_ids if p not in pathway_ids_hide]

    # Convert pathway_ids to pathways
    pathways = [self.m.db.pathway(pid) for pid in pathway_ids
                if self.m.db.pathway(pid) is not None]

    if pathway_ids == []:
        return None

    if compound_data or gene_data or protein_data:
        # Generate independent scales
        node_colors = {}

        for dsi in compound_data, gene_data, protein_data:
            if dsi is None:
                continue

            #if self.m.data.analysis_timecourse:
            #    # Generate the multiple views
            #    tps = sorted(self.m.data.analysis_timecourse.keys(), key=int)
            #    # Insert counter variable into the filename
            #    filename = self.get_filename_with_counter(filename)
            #    print "Generate timecourse..."
            #    for tp in tps:
            #        print "%s" % tp
            #        graph = generator(pathways, options, self.m.db,
            #                          analysis=self.m.data.analysis_timecourse[tp])  #, layout=self.layout)
            #        graph.write(filename % tp, format=options.output, prog='neato')
            #    return tps
            #else:
            print("Generate map for single control:test...")
            # Build analysis lookup dict; we want a single color for each metabolite
            mini, maxi = min(abs(np.median(dsi.data)), 0), max(abs(np.median(dsi.data)), 0)
            mini, maxi = -1.0, +1.0  # Fudge; need an intelligent way to determine (2*median? 2*mean?)

            scale = utils.calculate_scale([mini, 0, maxi], [9, 1], out=np.around)  # rdbu9 scale

            for n, m in enumerate(dsi.entities[1]):
                if m is not None:
                    ecol = utils.calculate_rdbu9_color(scale, dsi.data[0, n])
                    #print xref, ecol
                    if ecol is not None:
                        node_colors[m.id] = ecol

        graph = generator(pathways, options, self.m.db, analysis=node_colors)  #, layout=self.layout)
        self.status.emit('waiting')
        self.progress.emit(0.5)
        graph.write(filename, format=options.output, prog='neato')
        return None
    else:
        graph = generator(pathways, options, self.m.db)  #, layout=self.layout)
        self.status.emit('waiting')
        self.progress.emit(0.5)
        graph.write(filename, format=options.output, prog='neato')
        return None
def generateGraph(self, filename, suggested_pathways=[], compound_data=None,
                  gene_data=None, protein_data=None, format='svg'):
    # Build options-like structure for generation of graph
    # (compatibility with command line version, we need to fake it)
    options = Values()

    options._update_loose({
        'file': None,
        #'pathways': self.config.Read('/Pathways/Show'),
        #'not_pathways': '',
        'show_all': False,  # self.config.ReadBool('/Pathways/ShowAll'),
        'search': '',
        'cluster_by': self.config.get('/App/ClusterBy'),
        'show_enzymes': self.config.get('/App/ShowEnzymes'),  # self.config.ReadBool('/App/ShowEnzymes'),
        'show_secondary': self.config.get('/App/Show2nd'),
        'show_molecular': self.config.get('/App/ShowMolecular'),
        'show_network_analysis': self.config.get('/App/ShowAnalysis'),
        'highlightpathways': self.config.get('/App/HighlightPathways'),
        'highlightregions': self.config.get('/App/HighlightRegions'),
        'splines': 'true',
        'focus': False,
        'show_pathway_links': self.config.get('/Pathways/ShowLinks'),
        # Always svg except when saving the file
        'output': format,
    })

    #pathway_ids = self.config.value('/Pathways/Show').split(',')

    if suggested_pathways:
        pathway_ids = [p.id for p in suggested_pathways.entities[1]]
    else:
        pathway_ids = []

    print(self.config.get('/Pathways/Show'))

    # Add the manually shown pathways
    pathway_ids_show = self.config.get('/Pathways/Show')
    pathway_ids.extend(pathway_ids_show)

    # Now remove the hidden pathways
    pathway_ids_hide = self.config.get('/Pathways/Hide')
    pathway_ids = [p for p in pathway_ids if p not in pathway_ids_hide]

    # Convert pathway_ids to pathways
    pathways = [self.m.db.pathways[pid] for pid in pathway_ids
                if pid in self.m.db.pathways]

    if pathway_ids == []:
        return None

    if compound_data or gene_data or protein_data:
        # Generate independent scales
        node_colors = {}

        for dsi in compound_data, gene_data, protein_data:
            if dsi is None:
                continue

            #if self.m.data.analysis_timecourse:
            #    # Generate the multiple views
            #    tps = sorted(self.m.data.analysis_timecourse.keys(), key=int)
            #    # Insert counter variable into the filename
            #    filename = self.get_filename_with_counter(filename)
            #    print "Generate timecourse..."
            #    for tp in tps:
            #        print "%s" % tp
            #        graph = generator(pathways, options, self.m.db,
            #                          analysis=self.m.data.analysis_timecourse[tp])  #, layout=self.layout)
            #        graph.write(filename % tp, format=options.output, prog='neato')
            #    return tps
            #else:
            print("Generate map for single control:test...")
            # Build analysis lookup dict; we want a single color for each metabolite
            mini, maxi = min(abs(np.median(dsi.data)), 0), max(abs(np.median(dsi.data)), 0)
            mini, maxi = -1.0, +1.0  # Fudge; need an intelligent way to determine (2*median? 2*mean?)

            scale = utils.calculate_scale([mini, 0, maxi], [9, 1], out=np.around)  # rdbu9 scale

            for n, m in enumerate(dsi.entities[1]):
                if m is not None:
                    ecol = utils.calculate_rdbu9_color(scale, dsi.data[0, n])
                    #print xref, ecol
                    if ecol is not None:
                        node_colors[m.id] = ecol

        graph = generator(pathways, options, self.m.db, analysis=node_colors)  #, layout=self.layout)
        self.status.emit('waiting')
        self.progress.emit(0.5)
        graph.write(filename, format=options.output, prog='neato')
        return None
    else:
        graph = generator(pathways, options, self.m.db)  #, layout=self.layout)
        self.status.emit('waiting')
        self.progress.emit(0.5)
        graph.write(filename, format=options.output, prog='neato')
        return None