def export_listDict_csv(self, filename_O):
    """Write self.listDict out as a .csv file.

    INPUT:
    filename_O = string, name of the destination file
    """
    writer = base_exportData(self.listDict)
    writer.write_dict2csv(filename_O)
def export_visualizationProject_csv(self, project_id_I, filename_O):
    """Export the visualization-project rows for project_id_I to a .csv file."""
    rows = self.get_rows_projectID_visualizationProject(project_id_I)
    exporter = base_exportData(rows)
    exporter.write_dict2csv(filename_O)
def export_checkLLOQAndULOQ_csv(self, experiment_id_I, filename='checkLLOQAndULOQ.csv'):
    """Export the LLOQ/ULOQ check for an experiment to .csv.

    INPUT:
    experiment_id_I = string, experiment id
    filename = string, destination .csv file

    Writes a file only when at least one component falls outside the
    LLOQ/ULOQ bounds; otherwise just reports that everything is in range.
    """
    # TODO change to export method for .csv and .js
    print('export_checkLLOQAndULOQ...')
    # query data for the view
    flagged = self.get_checkLLOQAndULOQ(experiment_id_I)
    if not flagged:
        print("all components are within the lloq and uloq")
        return
    exporter = base_exportData(flagged)
    exporter.write_dict2csv(filename)
def export_dataStage01ReplicatesMI_csv(self, experiment_id_I, filename):
    """Export dataStage01ReplicatesMI to a csv file as a
    component-group x sample matrix of calculated concentrations.

    INPUT:
    experiment_id_I = string, experiment id used to query the data
    filename = string, destination .csv file

    Rows are the sorted unique component_group_names; columns are the
    sorted unique sample_name_shorts; cells without a truthy
    concentration are written as 'NA'.
    """
    # query the data
    data = self.get_data_experimentID_dataStage01ReplicatesMI(experiment_id_I)
    # unique, sorted axis labels
    sns_sorted = sorted({d['sample_name_short'] for d in data})
    cgn_sorted = sorted({d['component_group_name'] for d in data})
    # Index the first truthy concentration per (sample, component group)
    # in a single pass; the original scanned all rows for every cell
    # (O(S*C*N)).  Falsy concentrations (None, 0) are still treated as
    # missing, matching the original behavior.
    conc_lookup = {}
    for d in data:
        key = (d['sample_name_short'], d['component_group_name'])
        if key not in conc_lookup and d['calculated_concentration']:
            conc_lookup[key] = d['calculated_concentration']
    concentrations = [[conc_lookup.get((s, c), 'NA') for s in sns_sorted]
                      for c in cgn_sorted]
    # write concentrations to file
    export = base_exportData(concentrations)
    export.write_headerAndColumnsAndElements2csv(sns_sorted, cgn_sorted, filename)
def export_rows_experimentID_sample_csv(self, experiment_id_I, filename_O):
    """Export rows of table sample by experiment_id to a .csv file.

    INPUT:
    experiment_id_I = string, experiment id
    filename_O = string, destination .csv file
    """
    # BUG FIX: the query was hard-coded to 'ALEsKOs01', ignoring experiment_id_I
    data_O = self.get_rows_experimentID_sample(experiment_id_I)
    if data_O:
        baseo = base_exportData(data_O)
        # BUG FIX: the exporter was constructed but the file was never written
        baseo.write_dict2csv(filename_O)
    else:
        print('no rows found.')
def export_dataStage01Normalized_csv(self, filename, experiment_id_I=[], sample_name_I=[],
                                     component_name_I=[], calculated_concentration_units_I=[],
                                     used__I=True):
    """Export data_stage01_quantification_normalized to csv.

    INPUT:
    filename = string, destination .csv file
    experiment_id_I = [] of string; when empty, all rows are exported
    sample_name_I = [] of string, optional sample-name filter
    component_name_I = [] of string, optional component-name filter
    calculated_concentration_units_I = [] of string, optional units filter
    used__I = bool, passed through to the query

    '%' is the SQL LIKE wildcard used when a filter list is not provided.
    BUG FIXES vs. the original:
    - used__I was hard-coded to True in the no-experiment branch,
      silently ignoring the parameter;
    - filters below a missing level (e.g. units given without
      component names) were silently ignored; all supplied filters
      are now honored.
    """
    print('export data_stage01_quantification_normalized to csv...')
    data = []
    if experiment_id_I:
        sample_names = sample_name_I if sample_name_I else ['%']
        component_names = component_name_I if component_name_I else ['%']
        units = calculated_concentration_units_I if calculated_concentration_units_I else ['%']
        for experiment_id in experiment_id_I:
            for sample_name in sample_names:
                for component_name in component_names:
                    for ccu in units:
                        rows = self.get_rows_uniqueAndUsed_dataStage01QuantificationNormalized(
                            experiment_id_I=experiment_id,
                            sample_name_I=sample_name,
                            component_name_I=component_name,
                            calculated_concentration_units_I=ccu,
                            used__I=used__I)
                        data.extend(rows)
    else:
        data = self.get_rows_uniqueAndUsed_dataStage01QuantificationNormalized(
            experiment_id_I='%',
            sample_name_I='%',
            component_name_I='%',
            calculated_concentration_units_I='%',
            used__I=used__I)
    if data:
        export = base_exportData(data)
        export.write_dict2csv(filename)
    else:
        print("no rows found")
def export_data_stage02_isotopomer_fittedExchangeFluxes_csv(self, simulation_ids_I, filename_O):
    """Export data_stage02_isotopomer_fittedExchangeFluxes to .csv.

    INPUT:
    simulation_ids_I = [] of string, simulation ids
    filename_O = string, filename for export
    """
    rows = []
    for sim_id in simulation_ids_I:
        rows += self.get_rows_simulationID_dataStage02IsotopomerfittedExchangeFluxes(sim_id)
    if rows:
        base_exportData(rows).write_dict2csv(filename_O)
def export_dataStage01IsotopomerNormalized_csv(self, experiment_id_I, filename_O,
                                               sample_name_abbreviation_I='%', time_point_I='%',
                                               scan_type_I='%', met_id_I='%'):
    """Export data_stage01_isotopomer_normalized to .csv.

    Filter parameters default to the SQL LIKE wildcard '%'.
    A file is written only when matching rows are found.
    """
    rows = self.get_rows_experimentIDAndSampleAbbreviationAndTimePointAndScanTypeAndMetID_dataStage01Normalized(
        experiment_id_I, sample_name_abbreviation_I, time_point_I, scan_type_I, met_id_I)
    if rows:
        # write data to file
        base_exportData(rows).write_dict2csv(filename_O)
def export_sampleStorage_csv(self, sample_ids_I, filename_O):
    """Export sample storage to .csv.

    INPUT:
    sample_ids_I = [] of string, sample ids
    filename_O = string, filename for export
    """
    rows = []
    for sample_id in sample_ids_I:
        rows += self.get_rows_sampleID_limsSampleStorage(sample_id)
    if rows:
        base_exportData(rows).write_dict2csv(filename_O)
def export_mapGeneName2ModelReaction_csv(self, rows_I, filename_O):
    """Write model-reaction rows (whose enzymes are produced by a given gene) to .csv.

    INPUT:
    rows_I = rows from data_stage02_physiology_modelReactions
    filename_O = name of the output .csv file
    """
    if not rows_I:
        print('no rows found')
        return
    base_exportData(rows_I).write_dict2csv(filename_O)
def export_calibratorConcentrations_csv(self, filename, met_ids_I=[]):
    """Export calibrator concentrations to .csv.

    INPUT:
    filename = string, destination .csv file
    met_ids_I = optional [] of met_id; when empty, all met_ids are queried
    """
    met_ids = met_ids_I if met_ids_I else self.get_metIDs_calibratorConcentrations()
    data_O = []
    for met_id in met_ids:
        data_O.extend(self.get_rows_metID_calibratorConcentrations(met_id))
    base_exportData(data_O).write_dict2csv(filename)
def export_checkISMatch_csv(self, experiment_id_I, filename='checkISMatch.csv'):
    """Check that the internal standard used in the data file matches that of
    the calibration method, exporting any mismatches to .csv.

    Reference SQL for the underlying check:
    SELECT experiment.id, data_stage01_quantification_mqresultstable.sample_name,
           data_stage01_quantification_mqresultstable.component_name,
           data_stage01_quantification_mqresultstable.is_name,
           quantitation_method.is_name
    FROM public.data_stage01_quantification_mqresultstable, public.experiment,
         public.quantitation_method
    WHERE experiment.id LIKE 'ibop_rbc02'
      AND experiment.sample_name LIKE data_stage01_quantification_mqresultstable.sample_name
      AND (data_stage01_quantification_mqresultstable.sample_type LIKE 'Unknown'
           OR data_stage01_quantification_mqresultstable.sample_type LIKE 'Quality Control')
      AND experiment.quantitation_method_id LIKE quantitation_method.id
      AND quantitation_method.component_name LIKE data_stage01_quantification_mqresultstable.component_name
      AND data_stage01_quantification_mqresultstable.used_
      AND NOT data_stage01_quantification_mqresultstable.is_
      AND data_stage01_quantification_mqresultstable.is_name NOT LIKE quantitation_method.is_name;
    """
    # TODO change to export method for .csv and .js
    print('execute_checkISMatch...')
    # query data for the view
    mismatches = self.get_checkISMatch(experiment_id_I)
    if mismatches:
        print("IS mismatches found")
        base_exportData(mismatches).write_dict2csv(filename)
    else:
        print("No IS mismatches found")
def export_data_stage02_isotopomer_measuredFluxes_csv(self, experiment_ids_I, model_ids_I,
                                                      sample_name_abbreviations_I, filename_O):
    """Export data_stage02_isotopomer_measuredFluxes to .csv.

    INPUT:
    experiment_ids_I = [] of string, experiment_id
    model_ids_I = [] of string, model_id
    sample_name_abbreviations_I = [] of string, sample_name_abbreviation
    filename_O = string, filename for export
    """
    rows = []
    for experiment_id in experiment_ids_I:
        for model_id in model_ids_I:
            for sna in sample_name_abbreviations_I:
                rows += self.get_rows_experimentIDAndModelIDAndSampleNameAbbreviation_dataStage02IsotopomerMeasuredFluxes(
                    experiment_id, model_id, sna)
    if rows:
        base_exportData(rows).write_dict2csv(filename_O)
def export_data_stage02_isotopomer_fittedExchangeFluxStatistics_csv(self, simulation_ids_I, filename_O, flux_units_I=[]):
    """Export data_stage02_isotopomer_fittedExchangeFluxStatistics to .csv.

    INPUT:
    simulation_ids_I = [] of string, simulation_id
    filename_O = string, filename for export
    flux_units_I = optional [] of string; when given, rows are restricted to these flux units
    """
    rows = []
    for simulation_id in simulation_ids_I:
        if flux_units_I:
            for flux_units in flux_units_I:
                rows += self.get_rows_simulationIDAndFluxUnits_dataStage02IsotopomerFittedExchangeFluxStatistics(
                    simulation_id, flux_units)
        else:
            rows += self.get_rows_simulationID_dataStage02IsotopomerFittedExchangeFluxStatistics(simulation_id)
    if rows:
        base_exportData(rows).write_dict2csv(filename_O)
def export_data_stage02_isotopomer_measuredFragments_csv(self, experiment_ids_I, sample_name_abbreviations_I,
                                                         filename_O, time_points_I=[]):
    """Export data_stage02_isotopomer_measuredFragments to .csv.

    INPUT:
    experiment_ids_I = [] of string, experiment_id
    sample_name_abbreviations_I = [] of string, sample_name_abbreviation
    filename_O = string, filename for export
    time_points_I = optional [] of string; when given, rows are restricted to these time points
    """
    rows = []
    for experiment_id in experiment_ids_I:
        for sna in sample_name_abbreviations_I:
            if time_points_I:
                for tp in time_points_I:
                    rows += self.get_row_experimentIDAndSampleNameAbbreviationAndTimePoint_dataStage02IsotopomerMeasuredFragments(
                        experiment_id, sna, tp)
            else:
                rows += self.get_row_experimentIDAndSampleNameAbbreviation_dataStage02IsotopomerMeasuredFragments(
                    experiment_id, sna)
    if rows:
        base_exportData(rows).write_dict2csv(filename_O)
def export_checkCVAndExtracelluar_averages_csv(self, experiment_id_I, filename,
                                               cv_threshold_I=20, extracellular_threshold_I=50):
    """Check the %CV and %extracellular of the averages table and export any
    violations to .csv.

    INPUT:
    experiment_id_I = experiment_id
    filename = string, destination .csv file
    cv_threshold_I = float, %CV tolerance
    extracellular_threshold_I = float, %extracellular threshold
    """
    print('execute_checkCVAndExtracelluar_averages...')
    # query data for the view
    flagged = self.get_checkCVAndExtracellular_averages(
        experiment_id_I,
        cv_threshold_I=cv_threshold_I,
        extracellular_threshold_I=extracellular_threshold_I)
    if not flagged:
        print("all components are within the %CV and %Extracellular tolerance")
        return
    base_exportData(flagged).write_dict2csv(filename)
def export_allMetaboliteFragmentMappings_csv(filename_I='ms_method_fragments.csv', mode_I='-', msmethodtype_I='tuning'):
    """Export all metabolite fragment mappings to .csv.

    INPUT:
    filename_I = string, destination filename
    mode_I = string, MS mode (default '-')
    msmethodtype_I = string, ms_methodtype (default 'tuning')

    NOTE(review): no `self` parameter -- this relies on a module-level
    `msmethodquery`; confirm whether it was meant to be a method.
    """
    mfautil = MFA_utilities()
    # BUG FIX: mode_I / msmethodtype_I were ignored; '-' and 'tuning' were
    # hard-coded in all three query calls below.
    # Query met_ids
    met_ids = msmethodquery.get_metIDs_msModeAndMsMethodType(mode_I, msmethodtype_I)
    data_O = []
    for met in met_ids:
        # fragments: de-duplicated precursor formulas followed by product formulas
        parent, product = msmethodquery.get_precursorAndProductFormulas_metID(met, mode_I, msmethodtype_I)
        fragment_formulas = list(set(parent))
        fragment_formulas.extend(product)
        # fragment carbon mappings
        frag_cmap = msmethodquery.get_precursorFormulaAndProductFormulaAndCMapsAndPositions_metID(met, mode_I, msmethodtype_I)
        for frag in fragment_formulas:
            if frag_cmap[frag]['fragment'] is None:
                continue
            # combine the carbon map into positions/elements lists
            positions, elements = mfautil.convert_fragmentAndElements2PositionAndElements(
                frag_cmap[frag]['fragment'], frag_cmap[frag]['fragment_elements'])
            if positions:
                data_O.append({'met_id': met, 'positions': positions,
                               'elements': elements, 'formula': frag})
    # export the data
    if data_O:
        export = base_exportData(data_O)
        export.write_dict2csv(filename_I)
def export_checkCV_dilutions_csv(self, experiment_id_I, filename='checkCV_dilutions.csv'):
    """Export the dilutions %CV check table to .csv.

    INPUT:
    experiment_id_I = string, experiment id
    filename = string, destination .csv file
    """
    # TODO change to export method for .csv and .js
    print('execute_checkCV_dilutions...')
    # query data for the view
    check = self.get_checkCV_dilutions(experiment_id_I)
    # FIX: guard against an empty result before exporting, consistent with
    # the other check-export methods in this file
    if check:
        export = base_exportData(check)
        export.write_dict2csv(filename)
    else:
        print('no rows found')
def export_evidence(self, filename_O):
    """Write self.evidence to filename_O as a .csv file."""
    exporter = base_exportData(self.evidence)
    exporter.write_dict2csv(filename_O)
def export_validation(self, filename_O):
    """Write self.validation to filename_O as a .csv file."""
    exporter = base_exportData(self.validation)
    exporter.write_dict2csv(filename_O)
def export_mutations(self, filename_O):
    """Write self.mutations to filename_O as a .csv file."""
    exporter = base_exportData(self.mutations)
    exporter.write_dict2csv(filename_O)
def export_metadata(self, filename_O):
    """Write self.metadata to filename_O as a .csv file."""
    exporter = base_exportData(self.metadata)
    exporter.write_dict2csv(filename_O)
def export_genesFpkmTracking(self, filename_O):
    """Write self.genesFpkmTracking to filename_O as a .csv file."""
    exporter = base_exportData(self.genesFpkmTracking)
    exporter.write_dict2csv(filename_O)
def export_calibrationConcentrations(self, data, filename):
    """Write calibration curve concentrations to a .csv file.

    INPUT:
    data = listDict of calibration curve concentrations
    filename = string, destination .csv file
    """
    base_exportData(data).write_dict2csv(filename)
def export_amplificationAnnotations(self, filename_O):
    """Write self.amplificationAnnotations to filename_O as a .csv file."""
    exporter = base_exportData(self.amplificationAnnotations)
    exporter.write_dict2csv(filename_O)
def export_mutationsFiltered(self, filename_O):
    """Write self.mutationsFiltered to filename_O as a .csv file."""
    exporter = base_exportData(self.mutationsFiltered)
    exporter.write_dict2csv(filename_O)
def export_batchFile(self, data_I, header_I, filename):
    """Write a batch file: header_I followed by the rows of data_I, as text.

    INPUT:
    data_I = listDict of batch rows
    header_I = header row for the output
    filename = string, destination file
    """
    exporter = base_exportData(data_I)
    exporter.write_headersAndElements2txt(header_I, filename)
    return
def export_geneExpDiff(self, filename_O):
    """Write self.geneExpDiff to filename_O as a .csv file."""
    exporter = base_exportData(self.geneExpDiff)
    exporter.write_dict2csv(filename_O)
def export_coverageStats(self, filename_O):
    """Write self.coverageStats to filename_O as a .csv file."""
    exporter = base_exportData(self.coverageStats)
    exporter.write_dict2csv(filename_O)
def export_thermodynamicAnalysisComparison_csv(self,experiment_id_I,sample_name_abbreviation_base,
        model_ids_I=[], models_I={}, time_points_I=[], sample_name_abbreviations_I=[],
        measured_concentration_coverage_criteria_I=0.5, measured_dG_f_coverage_criteria_I=0.99,
        filename='tacomparison.csv'):
    '''export concentration and dG_r data for visualization

    For every (model, time point, sample) combination, the thermodynamic
    consistency check (TCC) rows of the base sample are compared against the
    matching rows of every other sample; statistical and biological
    significance flags are computed and the results are written to one csv
    per experiment.

    INPUT:
    experiment_id_I = string, experiment id
    sample_name_abbreviation_base = string, the reference sample that every
        other sample is compared against
    model_ids_I = [] of string, model ids (default: queried from the simulation table)
    models_I = {} of model_id -> pre-built cobra model (skips the SBML reload when given)
    time_points_I = [] of string, time points (default: queried)
    sample_name_abbreviations_I = [] of string (default: queried)
    measured_concentration_coverage_criteria_I = float, coverage criterion for the TCC query
    measured_dG_f_coverage_criteria_I = float, coverage criterion for the TCC query
    filename = string, base output filename; experiment_id_I is spliced in
        before the extension

    NOTE(review): this block was reconstructed from collapsed source; the
    placement of the final csv-writing section (after the model loop) is
    inferred from the single-experiment filename -- confirm against the
    original file.
    '''
    # get the model ids:
    data_O = [];
    if model_ids_I:
        model_ids = model_ids_I;
    else:
        model_ids = [];
        model_ids = self.get_modelID_experimentID_dataStage03QuantificationSimulation(experiment_id_I);
    for model_id in model_ids:
        print('exporting thermodynamic analysis for model_id ' + model_id);
        # get the cobra model
        if models_I:
            cobra_model = models_I[model_id];
        else:
            cobra_model_sbml = None;
            cobra_model_sbml = self.get_row_modelID_dataStage02PhysiologyModels(model_id);
            # write the model to a temporary file
            with open('data/cobra_model_tmp.xml','w') as file:
                file.write(cobra_model_sbml['model_file']);
            # Read in the sbml file and define the model conditions
            cobra_model = create_cobra_model_from_sbml_file('data/cobra_model_tmp.xml', print_time=True);
        # get the time-points
        if time_points_I:
            time_points = time_points_I;
        else:
            time_points = [];
            time_points = self.get_timePoints_experimentIDAndModelID_dataStage03QuantificationSimulation(experiment_id_I,model_id);
        for tp in time_points:
            print('exporting thermodynamic analysis for time_point ' + tp);
            # get sample_name_abbreviations
            if sample_name_abbreviations_I:
                sample_name_abbreviations = sample_name_abbreviations_I;
            else:
                sample_name_abbreviations = [];
                sample_name_abbreviations = self.get_sampleNameAbbreviations_experimentIDAndModelIDAndTimePoint_dataStage03QuantificationSimulation(experiment_id_I,model_id,tp);
            # exclude the base sample: it is the reference, not a comparison target
            sample_name_abbreviations = [sna for sna in sample_name_abbreviations if sna !=sample_name_abbreviation_base]
            # get information about the sample to be compared
            # NOTE(review): concentrations_base is queried but never used below --
            # confirm whether this query can be removed
            concentrations_base = {};
            concentrations_base = self.get_rowsEscher_experimentIDAndTimePointAndSampleNameAbbreviations_dataStage03QuantificationMetabolomicsData(experiment_id_I,tp,sample_name_abbreviation_base);
            # get tcc (thermodynamic consistency check rows) for the base sample
            tcc_base = [];
            tcc_base = self.get_rows_experimentIDAndModelIDAndTimePointAndSampleNameAbbreviations_dataStage03QuantificationTCC(experiment_id_I,model_id,tp,sample_name_abbreviation_base,measured_concentration_coverage_criteria_I,measured_dG_f_coverage_criteria_I);
            for sna in sample_name_abbreviations:
                print('exporting thermodynamic analysis for sample_name_abbreviation ' + sample_name_abbreviation_base+'_vs_'+sna);
                for tcc_b in tcc_base:
                    # get the matching tcc row for the comparison sample, keyed by
                    # the base row's rxn_id and dG_r bounds
                    tcc = {};
                    tcc = self.get_row_experimentIDAndModelIDAndTimePointAndSampleNameAbbreviations_dataStage03QuantificationTCC(experiment_id_I,model_id,tp,sna,tcc_b['rxn_id'],tcc_b['dG_r_lb'],tcc_b['dG_r_ub'],measured_concentration_coverage_criteria_I,measured_dG_f_coverage_criteria_I);
                    # record data
                    if tcc:
                        # test for statistical and biological significance
                        significant_stat=self.check_significanceStatistical(tcc_b['dG_r_lb'],tcc_b['dG_r_ub'],tcc['dG_r_lb'],tcc['dG_r_ub']);
                        significant_bio=self.check_significanceBiological(tcc_b['dG_r_lb'],tcc_b['dG_r_ub'],tcc['dG_r_lb'],tcc['dG_r_ub']);
                        data_O.append({'experiment_id':experiment_id_I,
                                       'model_id':model_id,
                                       'sample_name_abbreviation':sna,
                                       'time_point':tp,
                                       'rxn_id':tcc['rxn_id'],
                                       'dG_r_units':tcc['dG_r_units'],
                                       'dG_r_lb_base':tcc_b['dG_r_lb'],
                                       'dG_r_lb':tcc['dG_r_lb'],
                                       'dG_r_ub_base':tcc_b['dG_r_ub'],
                                       'dG_r_ub':tcc['dG_r_ub'],
                                       'displacement_lb_base':tcc_b['displacement_lb'],
                                       'displacement_lb':tcc['displacement_lb'],
                                       'displacement_ub_base':tcc_b['displacement_ub'],
                                       'displacement_ub':tcc['displacement_ub'],
                                       'feasible_base':tcc_b['feasible'],
                                       'feasible':tcc['feasible'],
                                       'significant_stat':significant_stat,
                                       'significant_bio':significant_bio});
    # write data to csv
    headers = ['experiment_id',
               'model_id',
               'sample_name_abbreviation',
               'time_point',
               'rxn_id',
               'dG_r_units',
               'dG_r_lb_base',
               'dG_r_ub_base',
               'dG_r_lb',
               'dG_r_ub',
               'displacement_lb_base',
               'displacement_lb',
               'displacement_ub_base',
               'displacement_ub',
               'feasible_base',
               'feasible',
               'significant_stat',
               'significant_bio']
    io = base_exportData(data_O);
    # splice the experiment id into the filename before the extension
    filename_str = filename.split('.')[0] + experiment_id_I + '.' + filename.split('.')[1];
    io.write_dict2csv(filename_str,headers);