def search_page():
    logger.info("in search_page")
    logger.info(request.url)
    result = None
    if USE_REDIS:
        with Bench("Trying Redis cache"):
            key = "search_results:v1:" + json.dumps(request.args, sort_keys=True)
            logger.debug("key is:", pf(key))
            result = Redis.get(key)
            if result:
                logger.info("Redis cache hit on search results!")
                result = pickle.loads(result)
    else:
        logger.info("Skipping Redis cache (USE_REDIS=False)")

    logger.info("request.args is", request.args)
    if result is None:  # Cache miss (or Redis disabled): run the search
        the_search = search_results.SearchResultPage(request.args)
        result = the_search.__dict__
    valid_search = result['search_term_exists']
    logger.debugf("result", result)

    if USE_REDIS and valid_search:
        Redis.set(key, pickle.dumps(result, pickle.HIGHEST_PROTOCOL))
        Redis.expire(key, 60 * 60)

    if valid_search:
        return render_template("search_result_page.html", **result)
    else:
        return render_template("search_error.html")
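# A minimal, self-contained sketch of the cache-aside pattern search_page()
# uses: derive a stable key from the arguments, try Redis, fall back to
# computing, then store with a TTL. Illustrative only, not project code; it
# assumes a plain redis-py connection rather than GN2's Redis wrapper.
import json
import pickle

import redis

redis_conn = redis.StrictRedis()  # assumed local Redis instance


def cache_aside(args, compute, ttl=60 * 60):
    """Return compute(args), caching the pickled result in Redis for ttl seconds."""
    key = "search_results:v1:" + json.dumps(args, sort_keys=True)
    raw = redis_conn.get(key)
    if raw is not None:
        return pickle.loads(raw)
    result = compute(args)
    redis_conn.set(key, pickle.dumps(result, pickle.HIGHEST_PROTOCOL))
    redis_conn.expire(key, ttl)
    return result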
def create_temp_trait():
    logger.info(request.url)
    print("REQUEST.FORM:", request.form)
    #template_vars = submit_trait.SubmitTrait(request.form)
    doc = docs.Docs("links")
    return render_template("links.html", **doc.__dict__)
def ctl_results():
    logger.info("In ctl, request.form is:", request.form)
    logger.info(request.url)
    ctl = ctl_analysis.CTL()               # Start R, load the package and pointers and create the analysis
    ctlA = ctl.run_analysis(request.form)  # Start the analysis; ctlA should be a separate long-running thread
    result = ctl.process_results(ctlA)     # After the analysis is finished, store the result
    return render_template("ctl_results.html", **result)  # Display it using the template
def loadImage(self, path, name):
    logger.info("pre-loading image results:", self.results[path])
    imgfile = open(self.results[path], 'rb')
    imgdata = imgfile.read()
    imgB64 = imgdata.encode("base64")  # Python 2 idiom: base64-encode the raw bytes
    bytesarray = array.array('B', imgB64)
    self.results[name] = bytesarray
def sharing_info_page():
    """Info page displayed when the user clicks the "Info" button next to the dataset selection"""
    logger.info("In sharing_info_page")
    logger.info(request.url)
    fd = webqtlFormData.webqtlFormData(request.args)
    template_vars = SharingInfoPage.SharingInfoPage(fd)
    return template_vars
def corr_scatter_plot_page():
    logger.info(request.url)
    template_vars = corr_scatter_plot.CorrScatterPlot(request.args)
    template_vars.js_data = json.dumps(template_vars.js_data,
                                       default=json_default_handler,
                                       indent=" ")
    return render_template("corr_scatterplot.html", **template_vars.__dict__)
def export():
    logger.info("request.form:", request.form)
    svg_xml = request.form.get("data", "Invalid data")
    filename = request.form.get("filename", "manhattan_plot_snp")
    response = Response(svg_xml, mimetype="image/svg+xml")
    response.headers["Content-Disposition"] = "attachment; filename=%s" % filename
    return response
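# Hypothetical usage sketch for the export() view above, using Flask's test
# client. The "/export" URL is an assumption; the route decorator is not shown
# in this snippet, so adjust the path to match the actual registration.
with app.test_client() as client:
    resp = client.post("/export", data={
        "data": "<svg xmlns='http://www.w3.org/2000/svg'></svg>",
        "filename": "my_plot",
    })
    assert resp.mimetype == "image/svg+xml"
    assert "attachment" in resp.headers["Content-Disposition"]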
def wcgna_results():  # Note: "wcgna" is a historical misspelling of "wgcna", kept so existing routes keep working
    logger.info("In wgcna, request.form is:", request.form)
    logger.info(request.url)
    wgcna = wgcna_analysis.WGCNA()             # Start R, load the package and pointers and create the analysis
    wgcnaA = wgcna.run_analysis(request.form)  # Start the analysis; wgcnaA should be a separate long-running thread
    result = wgcna.process_results(wgcnaA)     # After the analysis is finished, store the result
    return render_template("wgcna_results.html", **result)  # Display it using the template
def process_results(self, results):
    logger.info("Processing CTL output")
    template_vars = {}
    template_vars["results"] = self.results
    template_vars["elements"] = self.elements
    self.render_image(self.results)
    sys.stdout.flush()
    return dict(template_vars)
def gsearchact():
    logger.info(request.url)
    result = gsearch.GSearch(request.args).__dict__
    search_type = request.args['type']  # avoid shadowing the builtin "type"
    if search_type == "gene":
        return render_template("gsearch_gene.html", **result)
    elif search_type == "phenotype":
        return render_template("gsearch_pheno.html", **result)
def process_results(self, results):
    logger.info("Processing PheWAS output")
    # TODO: get the PDF in the temp folder, and display it to the user
    template_vars = {}
    template_vars["results"] = self.results
    self.render_image(self.results)
    template_vars["R_debuglog"] = self.results['R_debuglog']
    return dict(template_vars)
def export_traits_csv():
    """CSV file consisting of the traits from the search result page"""
    logger.info("In export_traits_csv")
    logger.info("request.form:", request.form)
    csv_data = export_traits.export_search_results_csv(request.form)

    return Response(csv_data,
                    mimetype='text/csv',
                    headers={"Content-Disposition": "attachment;filename=trait_list.csv"})
def init_db():
    # Import all modules here that might define models so that
    # they are registered properly on the metadata. Otherwise
    # you would have to import them first before calling init_db().
    #import yourapplication.models
    import wqflask.model
    logger.debug("Creating all model metadata")
    Base.metadata.create_all(bind=engine)
    logger.info("Done creating all model metadata")
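# A minimal sketch of the SQLAlchemy pieces init_db() relies on: a declarative
# Base bound to an engine, with models imported so their tables are on the
# metadata before create_all() runs. The DSN and the ExampleUser model are
# made-up placeholders; the real models live in wqflask.model.
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

engine = create_engine("mysql://user:pass@localhost/db_webqtl")  # assumed DSN
Base = declarative_base()


class ExampleUser(Base):
    __tablename__ = "example_user"
    id = Column(Integer, primary_key=True)
    name = Column(String(80))


Base.metadata.create_all(bind=engine)  # the call init_db() makes after the imports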
def export_mapping_results():
    logger.info("request.form:", request.form)
    logger.info(request.url)
    file_path = request.form.get("results_path")
    with open(file_path, "r") as results_file:  # close the handle instead of leaking it
        results_csv = results_file.read()
    response = Response(results_csv,
                        mimetype='text/csv',
                        headers={"Content-Disposition": "attachment;filename=mapping_results.csv"})

    return response
def __init__(self): ("Initialization of PheWAS") # TODO: Loading the package should only be done once, since it is quite expensive logger.info(r_library("auwerx")) # Load the auwerx package self.r_create_Pheno_aligner = ro.r["create.Pheno_aligner"] # Map the create.Pheno_aligner function self.r_calculate_all_pvalue_parallel = ro.r["calculate.all.pvalue.parallel"] # Map the calculate.all.pvalue.parallel function self.r_PheWASManhattan = ro.r["PheWASManhattan"] # Map the PheWASManhattan function self.r_Stop = ro.r["throwStopError"] # Map the PheWASManhattan function self.r_PyLoadData = ro.r["PyLoadData"] # Map the load function logger.info("Initialization of PheWAS done !")
def loading_page():
    logger.info(request.url)
    initial_start_vars = request.form
    logger.debug("Marker regression called with initial_start_vars:", initial_start_vars.items())
    #temp_uuid = initial_start_vars['temp_uuid']
    wanted = (
        'temp_uuid', 'trait_id', 'dataset', 'method', 'trimmed_markers',
        'selected_chr', 'chromosomes', 'mapping_scale', 'score_type',
        'suggestive', 'significant', 'num_perm', 'permCheck', 'perm_output',
        'num_bootstrap', 'bootCheck', 'bootstrap_results', 'LRSCheck',
        'covariates', 'maf', 'use_loco', 'manhattan_plot', 'control_marker',
        'control_marker_db', 'do_control', 'genofile', 'pair_scan', 'startMb',
        'endMb', 'graphWidth', 'lrsMax', 'additiveCheck', 'showSNP',
        'showGenes', 'viewLegend', 'haplotypeAnalystCheck',
        'mapmethod_rqtl_geno', 'mapmodel_rqtl_geno'
    )

    start_vars_container = {}
    start_vars = {}
    for key, value in initial_start_vars.iteritems():
        if key in wanted or key.startswith('value:'):
            start_vars[key] = value
    start_vars_container['start_vars'] = start_vars

    rendered_template = render_template("loading.html", **start_vars_container)

    return rendered_template
def export_pdf():
    import cairosvg
    logger.info("request.form:", request.form)
    svg_xml = request.form.get("data", "Invalid data")
    logger.info("svg_xml:", svg_xml)
    filename = request.form.get("filename", "interval_map_pdf")
    filepath = GENERATED_IMAGE_DIR + filename
    pdf_file = cairosvg.svg2pdf(bytestring=svg_xml)
    response = Response(pdf_file, mimetype="application/pdf")
    response.headers["Content-Disposition"] = "attachment; filename=%s" % filename
    return response
def index_page():
    logger.info("Sending index_page")
    params = request.args
    if 'import_collections' in params:
        import_collections = params['import_collections']
        if import_collections == "true":
            g.cookie_session.import_traits_to_user()
    if USE_GN_SERVER:
        # The menu is generated using GN_SERVER
        return render_template("index_page.html", gn_server_url=GN_SERVER_URL, version=GN_VERSION)
    else:
        # Old-style static menu (OBSOLETE)
        return render_template("index_page_orig.html", version=GN_VERSION)
def show_trait_page():
    logger.info(request.url)
    template_vars = show_trait.ShowTrait(request.args)
    #logger.info("js_data before dump:", template_vars.js_data)
    template_vars.js_data = json.dumps(template_vars.js_data,
                                       default=json_default_handler,
                                       indent=" ")
    # Sorting the keys messes up the ordered dictionary, so don't pass sort_keys=True
    #logger.info("js_data after dump:", template_vars.js_data)
    #logger.info("show_trait template_vars:", pf(template_vars.__dict__))
    return render_template("show_trait.html", **template_vars.__dict__)
def corr_matrix_page():
    logger.info("In corr_matrix, request.form is:", pf(request.form))
    start_vars = request.form
    traits = [trait.strip() for trait in start_vars['trait_list'].split(',')]
    if traits[0] != "":
        template_vars = show_corr_matrix.CorrelationMatrix(start_vars)
        template_vars.js_data = json.dumps(template_vars.js_data,
                                           default=json_default_handler,
                                           indent=" ")
        return render_template("correlation_matrix.html", **template_vars.__dict__)
    else:
        return render_template("empty_collection.html", **{'tool': 'Correlation Matrix'})
def network_graph_page():
    logger.info("In network_graph, request.form is:", pf(request.form))
    start_vars = request.form
    traits = [trait.strip() for trait in start_vars['trait_list'].split(',')]
    if traits[0] != "":
        template_vars = network_graph.NetworkGraph(start_vars)
        template_vars.js_data = json.dumps(template_vars.js_data,
                                           default=json_default_handler,
                                           indent=" ")
        return render_template("network_graph.html", **template_vars.__dict__)
    else:
        return render_template("empty_collection.html", **{'tool': 'Network Graph'})
def comp_bar_chart_page():
    logger.info("In comp bar chart, request.form is:", pf(request.form))
    logger.info(request.url)
    start_vars = request.form
    traits = [trait.strip() for trait in start_vars['trait_list'].split(',')]
    if traits[0] != "":
        template_vars = comparison_bar_chart.ComparisonBarChart(request.form)
        template_vars.js_data = json.dumps(template_vars.js_data,
                                           default=json_default_handler,
                                           indent=" ")
        result = template_vars.__dict__
        rendered_template = render_template("comparison_bar_chart.html", **result)
    else:
        rendered_template = render_template("empty_collection.html", **{'tool': 'Comparison Bar Chart'})

    return rendered_template
def tmp_page(img_path):
    logger.info("In tmp_page")
    logger.info("img_path:", img_path)
    logger.info(request.url)
    initial_start_vars = request.form
    logger.info("initial_start_vars:", initial_start_vars)
    imgfile = open(GENERATED_IMAGE_DIR + img_path, 'rb')
    imgdata = imgfile.read()
    imgB64 = imgdata.encode("base64")  # Python 2 idiom
    bytesarray = array.array('B', imgB64)
    return render_template("show_image.html", img_base64=bytesarray)
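# tmp_page() relies on the Python-2-only str.encode("base64") idiom. Below is a
# small version-neutral sketch of the same step using the base64 module; the
# data-URI comment reflects how templates typically consume such a value and is
# an assumption about show_image.html, which is not shown here.
import base64


def read_image_b64(image_path):
    with open(image_path, 'rb') as f:
        return base64.b64encode(f.read())

# In a template: <img src="data:image/png;base64,{{ img_base64 }}">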
def export_perm_data():
    """CSV file consisting of the permutation data for the mapping results"""
    logger.info(request.url)
    num_perm = float(request.form['num_perm'])
    perm_data = json.loads(request.form['perm_results'])
    # Assuming perm_data arrives sorted ascending, the value at index
    # int(N*0.37 - 1) is exceeded by ~63% of permutations (genome-wide p=0.63),
    # and likewise for the 0.95 and 0.99 indices below.
    buff = StringIO.StringIO()
    writer = csv.writer(buff)
    writer.writerow(["Suggestive LRS (p=0.63) = " + str(perm_data[int(num_perm * 0.37 - 1)])])
    writer.writerow(["Significant LRS (p=0.05) = " + str(perm_data[int(num_perm * 0.95 - 1)])])
    writer.writerow(["Highly Significant LRS (p=0.01) = " + str(perm_data[int(num_perm * 0.99 - 1)])])
    writer.writerow("")
    writer.writerow([str(num_perm) + " Permutations"])
    writer.writerow("")
    for item in perm_data:
        writer.writerow([item])
    csv_data = buff.getvalue()
    buff.close()

    return Response(csv_data,
                    mimetype='text/csv',
                    headers={"Content-Disposition": "attachment;filename=perm_data.csv"})
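# Worked example of the threshold indexing in export_perm_data(), under the
# same ascending-sort assumption. With num_perm = 1000:
#   suggestive (p=0.63):         perm_data[int(1000 * 0.37 - 1)] == perm_data[369]
#   significant (p=0.05):        perm_data[int(1000 * 0.95 - 1)] == perm_data[949]
#   highly significant (p=0.01): perm_data[int(1000 * 0.99 - 1)] == perm_data[989]
# i.e. roughly 63%, 5% and 1% of the permutation scores exceed each threshold.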
def export_trait_csv():
    """CSV file consisting of the sample data from the trait data and analysis page"""
    logger.info("In export_trait_csv")
    logger.info("request.form:", request.form)
    logger.info(request.url)
    sample_data = export_trait_data.export_sample_table(request.form)
    logger.info("sample_data - type: %s -- size: %s" % (type(sample_data), len(sample_data)))
    buff = StringIO.StringIO()
    writer = csv.writer(buff)
    for row in sample_data:
        writer.writerow(row)
    csv_data = buff.getvalue()
    buff.close()

    return Response(csv_data,
                    mimetype='text/csv',
                    headers={"Content-Disposition": "attachment;filename=sample_data.csv"})
def __init__(self):
    logger.info("Initialization of CTL")
    #log = r_file("/tmp/genenetwork_ctl.log", open="wt")
    #r_sink(log)                     # Uncomment the r_sink() calls to log R stdout/stderr to a file
    #r_sink(log, type="message")
    r_library("ctl")                 # Load CTL - should only be done once, since it is quite expensive
    r_options(stringsAsFactors=False)
    logger.info("Initialization of CTL done, package loaded in R session")
    self.r_CTLscan = ro.r["CTLscan"]                # Map the CTLscan function
    self.r_CTLsignificant = ro.r["CTLsignificant"]  # Map the CTLsignificant function
    self.r_lineplot = ro.r["ctl.lineplot"]          # Map the ctl.lineplot function
    self.r_plotCTLobject = ro.r["plot.CTLobject"]   # Map the plot.CTLobject function
    self.nodes_list = []
    self.edges_list = []
    logger.info("Obtained pointers to CTL functions")
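# A minimal sketch of the rpy2 pattern these __init__ methods use: load an R
# package once, then keep Python-side handles to its functions. "stats" and
# rnorm are stand-ins here; any installed package works the same way.
import rpy2.robjects as ro

r_library = ro.r["library"]  # map R's library() to a Python callable
r_library("stats")           # load a package once, up front
r_rnorm = ro.r["rnorm"]      # map an R function by name
values = r_rnorm(5)          # call it from Python; returns an R FloatVector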
def export_trait_excel():
    """Excel file consisting of the sample data from the trait data and analysis page"""
    logger.info("In export_trait_excel")
    logger.info("request.form:", request.form)
    logger.info(request.url)
    sample_data = export_trait_data.export_sample_table(request.form)
    logger.info("sample_data - type: %s -- size: %s" % (type(sample_data), len(sample_data)))

    buff = StringIO.StringIO()
    workbook = xlsxwriter.Workbook(buff, {'in_memory': True})
    worksheet = workbook.add_worksheet()
    for i, row in enumerate(sample_data):
        worksheet.write(i, 0, row[0])
        worksheet.write(i, 1, row[1])
        if len(row) > 2:
            worksheet.write(i, 2, row[2])
    workbook.close()

    excel_data = buff.getvalue()
    buff.close()

    return Response(excel_data,
                    mimetype='application/vnd.ms-excel',
                    headers={"Content-Disposition": "attachment;filename=sample_data.xlsx"})
def get_transform_vals(dataset, trait):
    es = get_elasticsearch_connection(for_user=False)

    logger.info("DATASET NAME:", dataset.name)

    es_body = {
        "query": {
            "bool": {
                "must": [
                    {"match": {"name": "%s" % (trait.name)}},
                    {"match": {"dataset": "%s" % (dataset.name)}}
                ]
            }
        }
    }

    response = es.search(index="traits", doc_type="trait", body=es_body)
    logger.info("THE RESPONSE:", response)
    results = response['hits']['hits']

    if len(results) > 0:
        samples = results[0]['_source']['samples']
        sample_dict = {}
        for sample in samples:
            sample_dict[sample['name']] = sample['qnorm']
        logger.info("SAMPLE DICT:", sample_dict)
        return sample_dict
    else:
        return None
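# A hypothetical sketch of the same bool/must query issued directly with
# elasticsearch-py, for testing outside the app. Host settings and the trait
# and dataset names are made up; the "traits" index and "trait" doc_type are
# taken from get_transform_vals() above.
from elasticsearch import Elasticsearch

es = Elasticsearch([{"host": "localhost", "port": 9200}])
body = {"query": {"bool": {"must": [
    {"match": {"name": "1427571_at"}},       # hypothetical trait name
    {"match": {"dataset": "HC_M2_0606_P"}},  # hypothetical dataset name
]}}}
response = es.search(index="traits", doc_type="trait", body=body)
hits = response["hits"]["hits"]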
def corr_compute_page():
    logger.info("In corr_compute, request.form is:", pf(request.form))
    #fd = webqtlFormData.webqtlFormData(request.form)
    template_vars = show_corr_results.CorrelationResults(request.form)
    return render_template("correlation_page.html", **template_vars.__dict__)
def phewas():
    logger.info("In phewas, request.form is:", request.form)
    # We are going to get additional user input for the analysis
    phewasO = phewas_analysis.PheWAS()         # Start R, load the package and pointers and create the analysis
    phewasA = phewasO.run_analysis(request.form)
    result = phewasO.process_results(phewasA)  # After the analysis is finished, store the result
    return render_template("phewas_analysis.html", **result)  # Display it using the template
        and snap_id <= %s
        and parsing_schema_name not in ('SYSTEM','SYS','DBSNMP','MZHANG','YANGLI')
        and parsing_schema_name is not null
        group by sql_id, parsing_schema_name) sqt,
        dba_hist_sqltext st
        where st.sql_id(+) = sqt.sql_id
        and st.dbid(+) = %s
        order by nvl(sqt.elap, -1) desc, sqt.sql_id)
        order by Elapsed_Time_Per_Exec desc
        ''' % (dbid, inst_num, beg_snap, end_snap, dbid)
    engine = create_engine(DB_CONN_STRING_1, poolclass=NullPool)
    return pd.read_sql_query(sql, engine)


if __name__ == '__main__':
    logger.info('Start')
    # Load card-present data
    delDirectory()
    dbid = getDBInfo().values
    if not dbid[0][0] or dbid[0][0] == 'null':
        logger.info('Failed to get dbid: %s' % dbid[0][0])
    else:
        logger.info('Successfully got dbid: %s' % dbid[0][0])
        '''
        Fetch all SQL info from host 50.51
        '''
        now = datetime.datetime.now()
        days = 1
        oneday = datetime.timedelta(days=days)
def help():
    logger.info(request.url)
    doc = Docs("help", request.args)
    return render_template("docs.html", **doc.__dict__)
def mediation():
    logger.info("In mediation, request.form is:", request.form)
    # We are going to get additional user input for the analysis
    return render_template("mediation_analysis.html", **request.form)  # Display the form using the template
def submit_bnw():
    logger.info(request.url)
    return render_template("empty_collection.html", **{'tool': 'Correlation Matrix'})
def gsearch_updating():
    logger.info("REQUEST ARGS:", request.values)
    logger.info(request.url)
    result = UpdateGSearch(request.args).__dict__
    return result['results']
def run_analysis(self, requestform):
    logger.info("Starting PheWAS analysis on dataset")
    genofilelocation = locate("BXD.geno", "genotype")                     # Get the location of the BXD genotypes
    precompfile = locate_phewas("PheWAS_pval_EMMA_norm.RData", "auwerx")  # Get the location of the pre-computed EMMA results

    # Get user parameters, trait_id and dataset, and store/update them in self
    self.trait_id = requestform["trait_id"]
    self.datasetname = requestform["dataset"]
    self.dataset = data_set.create_dataset(self.datasetname)
    self.region = int(requestform["num_region"])
    self.mtadjust = str(requestform["sel_mtadjust"])

    # Log some debug info
    logger.info("self.trait_id:" + self.trait_id + "\n")
    logger.info("self.datasetname:" + self.datasetname + "\n")
    logger.info("self.dataset.type:" + self.dataset.type + "\n")

    # GN magic?
    self.this_trait = GeneralTrait(dataset=self.dataset,
                                   name=self.trait_id,
                                   get_qtl_info=False,
                                   get_sample_info=False)
    logger.info(vars(self.this_trait))

    # Set the values we need
    self.chr = str(self.this_trait.chr)
    self.mb = int(self.this_trait.mb)

    # Log some debug info
    logger.info("location:" + self.chr + ":" + str(self.mb) + "+/-" + str(self.region) + "\n")

    # Load in the genotypes file *sigh* to make the markermap
    parser = genofile_parser.ConvertGenoFile(genofilelocation)
    parser.process_csv()
    snpinfo = []
    for marker in parser.markers:
        snpinfo.append(marker["name"])
        snpinfo.append(marker["chr"])
        snpinfo.append(marker["Mb"])

    rnames = r_seq(1, len(parser.markers))
    # Create the snp aligner object out of the BXD genotypes
    snpaligner = ro.r.matrix(snpinfo,
                             nrow=len(parser.markers),
                             dimnames=r_list(rnames, r_c("SNP", "Chr", "Pos")),
                             ncol=3,
                             byrow=True)

    # Create the phenotype aligner object using R
    phenoaligner = self.r_create_Pheno_aligner()

    self.results = {}
    self.results['imgurl1'] = webqtlUtil.genRandStr("phewas_") + ".png"
    self.results['imgloc1'] = GENERATED_IMAGE_DIR + self.results['imgurl1']
    self.results['mtadjust'] = self.mtadjust
    logger.info("IMAGE AT:", self.results['imgurl1'])
    logger.info("IMAGE AT:", self.results['imgloc1'])

    # Create the PheWAS plot (the gene/probe name, chromosome and positions should come from the user input)
    # TODO: generate the PDF in the temp folder, with a unique name
    assert(precompfile)
    assert(phenoaligner)
    assert(snpaligner)
    phewasres = self.r_PheWASManhattan("Test", precompfile, phenoaligner, snpaligner,
                                       "None", self.chr, self.mb, self.region,
                                       self.results['imgloc1'], self.mtadjust)
    self.results['phewas1'] = phewasres[0]
    self.results['phewas2'] = phewasres[1]
    self.results['tabulardata'] = phewasres[2]
    self.results['R_debuglog'] = phewasres[3]

    #self.r_PheWASManhattan(allpvalues)
    #self.r_Stop()

    logger.info("PheWAS analysis done!")
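# Hedged sketch of how run_analysis() builds its SNP matrix with rpy2: a
# character matrix filled row by row, with dimnames for the columns. The two
# markers here are made-up data for illustration.
import rpy2.robjects as ro

snpinfo = ["rs1", "1", "3.5",
           "rs2", "1", "4.2"]  # name, chr, Mb for two hypothetical markers
rnames = ro.r["seq"](1, 2)
dimnames = ro.r["list"](rnames, ro.r["c"]("SNP", "Chr", "Pos"))
snpaligner = ro.r.matrix(ro.StrVector(snpinfo), nrow=2, ncol=3,
                         dimnames=dimnames, byrow=True)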
    rate2_1['cost'] = rate2_1.apply(lambda row: otpGetCardType(row['from_card_type']), axis=1)
    logger.info('Rows with a missing bank-card type: %s rows.' % rate2_1.shape[0])
    df = [rate_1, rate_2, rate1_1, rate1_2, rate2_1]
    logger.info('Merging data: start')
    rate2 = pd.concat(df)
    logger.info('Finished all card-present cost processing: %s rows.' % (rate2.shape[0]))
    gc.collect()

    # Calculate profit
    logger.info('Profit calculation: start')
    rate2['grain'] = rate2.apply(lambda row: OptGrain(row['trans_charge'], row['cost']), axis=1)
    logger.info('Profit calculation processed: %s rows.' % (rate2.shape[0]))
    return rate2


if __name__ == '__main__':
    logger.info('Start')
    # Load card-present data
    rate2 = getHaveCardData()
    logger.info('Generating Excel file: start')
    saveToExcel(rate2)
    logger.info('Generating Excel file: done')
    logger.info('Packaging and sending data: start')
    sendMailInfo()
    logger.info('Packaging and sending data: done')
        datagenerator_obj = DataGenerator(count, path)
        processed_data = datagenerator_obj.read_xls_data()
        if field_and_data_type_list:
            # This code processes field names and datatypes fetched from the console
            field_data_type_tuple = ()
            for element in field_and_data_type_list:
                field_data_type_tuple += (element,)
                if str(element).lower().strip() == 'value':
                    continue
                if len(field_data_type_tuple) >= 2:
                    processed_data.append(field_data_type_tuple)
                    field_data_type_tuple = ()
        if processed_data:
            datagenerator_obj.create_files(processed_data)
        minute, second = divmod(current_time() - begin_time, 60)
        logger.info("Execution of {} records ended in : {:0.0f} minute {:0.02f} second"
                    .format(count, minute, second))
        logger.info("End of script....")
    except Exception as e:
        logger.error(e)
def __init__(self, start_vars, temp_uuid):
    helper_functions.get_species_dataset_trait(self, start_vars)

    self.temp_uuid = temp_uuid  # Needed to pass temp_uuid to gn1 mapping code (marker_regression_gn1.py)

    self.json_data = {}
    self.json_data['lodnames'] = ['lod.hk']

    self.samples = []  # Want only ones with values
    self.vals = []

    all_samples_ordered = self.dataset.group.all_samples_ordered()
    primary_sample_names = list(all_samples_ordered)

    for sample in self.dataset.group.samplelist:
        # sample is actually the name of an individual
        in_trait_data = False
        for item in self.this_trait.data:
            if self.this_trait.data[item].name == sample:
                value = start_vars['value:' + self.this_trait.data[item].name]
                self.samples.append(self.this_trait.data[item].name)
                self.vals.append(value)
                in_trait_data = True
                break
        if not in_trait_data:
            value = start_vars.get('value:' + sample)
            if value:
                self.samples.append(sample)
                self.vals.append(value)

    self.mapping_method = start_vars['method']
    if start_vars['manhattan_plot'] == "True":
        self.manhattan_plot = True
    else:
        self.manhattan_plot = False

    self.maf = start_vars['maf']  # Minor allele frequency
    self.suggestive = ""
    self.significant = ""
    self.pair_scan = False  # Initializing this since it is checked in views to determine which template to use
    self.score_type = "LRS"  # ZS: LRS or LOD
    self.mapping_scale = "physic"
    self.num_perm = 0
    self.perm_output = []
    self.bootstrap_results = []

    # ZS: This is passed to GN1 code for single chr mapping
    self.selected_chr = -1
    if "selected_chr" in start_vars:
        if int(start_vars['selected_chr']) != -1:
            # ZS: Needs to be -1 if showing full map; there's probably a better way to fix this
            self.selected_chr = int(start_vars['selected_chr']) + 1
        else:
            self.selected_chr = int(start_vars['selected_chr'])
    if "startMb" in start_vars:
        self.startMb = start_vars['startMb']
    if "endMb" in start_vars:
        self.endMb = start_vars['endMb']
    if "graphWidth" in start_vars:
        self.graphWidth = start_vars['graphWidth']
    if "lrsMax" in start_vars:
        self.lrsMax = start_vars['lrsMax']
    if "haplotypeAnalystCheck" in start_vars:
        self.haplotypeAnalystCheck = start_vars['haplotypeAnalystCheck']
    if "startMb" in start_vars:
        # ZS: This is to ensure showGenes, Legend, etc are checked the first time you open the
        # mapping page, since startMb will only not be set during the first load
        if "permCheck" in start_vars:
            self.permCheck = "ON"
        else:
            self.permCheck = False
        self.num_perm = int(start_vars['num_perm'])
        self.LRSCheck = start_vars['LRSCheck']
        if "showSNP" in start_vars:
            self.showSNP = start_vars['showSNP']
        else:
            self.showSNP = False
        if "showGenes" in start_vars:
            self.showGenes = start_vars['showGenes']
        else:
            self.showGenes = False
        if "viewLegend" in start_vars:
            self.viewLegend = start_vars['viewLegend']
        else:
            self.viewLegend = False
    else:
        try:
            if int(start_vars['num_perm']) > 0:
                self.num_perm = int(start_vars['num_perm'])
        except:
            self.num_perm = 0
        self.LRSCheck = self.score_type
        if self.num_perm > 0:
            self.permCheck = "ON"
        else:
            self.permCheck = False
        self.showSNP = "ON"
        self.showGenes = "ON"
        self.viewLegend = "ON"

    self.dataset.group.get_markers()
    if self.mapping_method == "gemma":
        self.score_type = "LOD"
        self.manhattan_plot = True
        with Bench("Running GEMMA"):
            included_markers, p_values = gemma_mapping.run_gemma(self.dataset, self.samples, self.vals)
        with Bench("Getting markers from csv"):
            marker_obs = get_markers_from_csv(included_markers, p_values, self.dataset.group.name)
        results = marker_obs
    elif self.mapping_method == "rqtl_plink":
        results = self.run_rqtl_plink()
    elif self.mapping_method == "rqtl_geno":
        self.score_type = "LOD"
        self.mapping_scale = "morgan"
        self.control_marker = start_vars['control_marker']
        self.do_control = start_vars['do_control']
        self.method = start_vars['mapmethod_rqtl_geno']
        self.model = start_vars['mapmodel_rqtl_geno']
        if start_vars['pair_scan'] == "true":
            self.pair_scan = True
        if self.permCheck and self.num_perm > 0:
            perm_output, self.suggestive, self.significant, results = rqtl_mapping.run_rqtl_geno(
                self.vals, self.dataset, self.method, self.model, self.permCheck, self.num_perm,
                self.do_control, self.control_marker, self.manhattan_plot, self.pair_scan)
        else:
            results = rqtl_mapping.run_rqtl_geno(
                self.vals, self.dataset, self.method, self.model, self.permCheck, self.num_perm,
                self.do_control, self.control_marker, self.manhattan_plot, self.pair_scan)
    elif self.mapping_method == "reaper":
        if "startMb" in start_vars:
            # ZS: Check if first time page loaded, so it can default to ON
            if "additiveCheck" in start_vars:
                self.additiveCheck = start_vars['additiveCheck']
            else:
                self.additiveCheck = False
            if "bootCheck" in start_vars:
                self.bootCheck = "ON"
            else:
                self.bootCheck = False
            self.num_bootstrap = int(start_vars['num_bootstrap'])
        else:
            self.additiveCheck = "ON"
            try:
                if int(start_vars['num_bootstrap']) > 0:
                    self.bootCheck = "ON"
                    self.num_bootstrap = int(start_vars['num_bootstrap'])
                else:
                    self.bootCheck = False
                    self.num_bootstrap = 0
            except:
                self.bootCheck = False
                self.num_bootstrap = 0

        self.control_marker = start_vars['control_marker']
        self.do_control = start_vars['do_control']
        logger.info("Running qtlreaper")
        results = self.gen_reaper_results()
    elif self.mapping_method == "plink":
        results = self.run_plink()
    elif self.mapping_method == "pylmm":
        logger.debug("RUNNING PYLMM")
        if self.num_perm > 0:
            self.run_permutations(str(temp_uuid))
        results = self.gen_data(str(temp_uuid))
    else:
        logger.debug("RUNNING NOTHING")

    if self.pair_scan == True:
        self.qtl_results = []
        highest_chr = 1  # This is needed in order to convert the highest chr to X/Y
        for marker in results:
            if marker['chr1'] > 0 or marker['chr1'] == "X" or marker['chr1'] == "X/Y":
                if marker['chr1'] > highest_chr or marker['chr1'] == "X" or marker['chr1'] == "X/Y":
                    highest_chr = marker['chr1']
                if 'lod_score' in marker.keys():
                    self.qtl_results.append(marker)

        self.trimmed_markers = results

        for qtl in self.qtl_results:
            self.json_data['chr1'].append(str(qtl['chr1']))
            self.json_data['chr2'].append(str(qtl['chr2']))
            self.json_data['Mb'].append(qtl['Mb'])
            self.json_data['markernames'].append(qtl['name'])

        self.js_data = dict(
            json_data=self.json_data,
            this_trait=self.this_trait.name,
            data_set=self.dataset.name,
            maf=self.maf,
            manhattan_plot=self.manhattan_plot,
            mapping_scale=self.mapping_scale,
            qtl_results=self.qtl_results,
        )
    else:
        self.cutoff = 2
        self.qtl_results = []
        highest_chr = 1  # This is needed in order to convert the highest chr to X/Y
        for marker in results:
            if marker['chr'] > 0 or marker['chr'] == "X" or marker['chr'] == "X/Y":
                if marker['chr'] > highest_chr or marker['chr'] == "X" or marker['chr'] == "X/Y":
                    highest_chr = marker['chr']
                if ('lod_score' in marker.keys()) or ('lrs_value' in marker.keys()):
                    self.qtl_results.append(marker)

        self.trimmed_markers = trim_markers_for_table(results)

        self.json_data['chr'] = []
        self.json_data['pos'] = []
        self.json_data['lod.hk'] = []
        self.json_data['markernames'] = []

        self.json_data['suggestive'] = self.suggestive
        self.json_data['significant'] = self.significant

        # Need to convert the QTL objects that qtl reaper returns into a json serializable dictionary
        for index, qtl in enumerate(self.qtl_results):
            #if index < 40:
            #    logger.debug("lod score is:", qtl['lod_score'])
            if qtl['chr'] == highest_chr and highest_chr != "X" and highest_chr != "X/Y":
                #logger.debug("changing to X")
                self.json_data['chr'].append("X")
            else:
                self.json_data['chr'].append(str(qtl['chr']))
            self.json_data['pos'].append(qtl['Mb'])
            if 'lrs_value' in qtl.keys():
                self.json_data['lod.hk'].append(str(qtl['lrs_value']))
            else:
                self.json_data['lod.hk'].append(str(qtl['lod_score']))
            self.json_data['markernames'].append(qtl['name'])

        # Get chromosome lengths for drawing the interval map plot
        chromosome_mb_lengths = {}
        self.json_data['chrnames'] = []
        for key in self.species.chromosomes.chromosomes.keys():
            self.json_data['chrnames'].append([self.species.chromosomes.chromosomes[key].name,
                                               self.species.chromosomes.chromosomes[key].mb_length])
            chromosome_mb_lengths[key] = self.species.chromosomes.chromosomes[key].mb_length

        # logger.debug("json_data:", self.json_data)

        self.js_data = dict(
            result_score_type=self.score_type,
            json_data=self.json_data,
            this_trait=self.this_trait.name,
            data_set=self.dataset.name,
            maf=self.maf,
            manhattan_plot=self.manhattan_plot,
            mapping_scale=self.mapping_scale,
            chromosomes=chromosome_mb_lengths,
            qtl_results=self.qtl_results,
            num_perm=self.num_perm,
            perm_results=self.perm_output,
        )
signal.signal(signal.SIGPIPE, signal.SIG_DFL)

BLUE = '\033[94m'
GREEN = '\033[92m'
BOLD = '\033[1m'
ENDC = '\033[0m'

import os
app.config['SECRET_KEY'] = os.urandom(24)

from utility.tools import WEBSERVER_MODE, get_setting_int

port = get_setting_int("SERVER_PORT")
logger.info("GN2 is running. Visit %shttp://localhost:%s/%s" % (BLUE, port, ENDC))

werkzeug_logger = logging.getLogger('werkzeug')

if WEBSERVER_MODE == 'DEBUG':
    app.run(host='0.0.0.0',
            port=port,
            debug=True,
            use_debugger=False,
            threaded=False,
            processes=0,
            use_reloader=True)
elif WEBSERVER_MODE == 'DEV':
    werkzeug_logger.setLevel(logging.WARNING)
    app.run(host='0.0.0.0',
            port=port,
def sendMailInfo():
    import re
    now = datetime.datetime.now()
    m = re.match('.*(\d+)', CONF_ORDER_TIME)
    days = 1
    if m:
        days = int(m.groups()[0])
    oneday = datetime.timedelta(days=days)
    target_date = now - oneday + oneday  # net effect: today's date
    target_date = target_date.strftime("%Y%m%d")
    zipDirectory = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'zipDir')
    if not os.path.exists(zipDirectory):
        os.mkdir(zipDirectory)
        logger.info('Temp directory does not exist; creating it: ' + zipDirectory)
    zipfileName = os.path.join(zipDirectory, 'report_' + target_date + '.zip')
    if os.path.exists(zipfileName):
        os.remove(zipfileName)
        logger.info('A zip file with the same name exists; deleting the old one: ' + zipfileName)
    logger.info('Creating zip file: ' + zipfileName)
    if not os.path.exists('%s/result' % CUR_PATH):
        os.mkdir('%s/result' % CUR_PATH)
    fileDirectory = '%s/result' % CUR_PATH
    os.chdir(fileDirectory)
    os.system(("zip %s *") % (zipfileName))
    createZip(zipfileName, fileDirectory)
    files = getAttachFileList(fileDirectory)
    print files
    logger.info('Zipping finished')
    mail_to = ['*****@*****.**']
    port = "25"
    m = Mail(port=port, mailTo=mail_to)
    try:
        logger.info('send mail starting....')
        subject = 'GEEDUN database TOP 10 SQL monitoring'
        content = 'When the REPORT has problems, please contact DBA!'
        m.sendMsg(subject, content, [zipfileName])
        logger.info('send mail end.')
    except:
        return
def docedit():
    logger.info(request.url)
    doc = docs.Docs(request.args['entry'])
    return render_template("docedit.html", **doc.__dict__)
def sharing_info_page():
    """Info page displayed when the user clicks the "Info" button next to the dataset selection"""
    logger.info("In sharing_info_page")
    fd = webqtlFormData.webqtlFormData(request.args)
    template_vars = SharingInfoPage.SharingInfoPage(fd)
    return template_vars
def getHaveCardData():
    logger.info('Cost processing: start')
    rate1 = getRateCostInfo()
    # Card-type values ('借记卡' etc.) are data identifiers and must stay as-is
    rate_1 = rate1[((rate1.from_card_type == '借记卡') | (rate1.from_card_type == '借贷合一') | (rate1.from_card_type == '预付卡')) & (rate1.backend_id == 'posPay')]
    rate_2 = rate1[((rate1.from_card_type == '准贷记卡') | (rate1.from_card_type == '贷记卡')) & (rate1.backend_id == 'posPay')]
    if rate_1.empty:
        logger.info('827-prefix debit-card data processed: 0 rows.')
    else:
        rate_1['cost'] = rate_1.apply(lambda row: OptBorrowCost(row['r_borrow'], row['pay_amount']), axis=1)
    if rate_2.empty:
        logger.info('827-prefix credit-card data processed: 0 rows.')
    else:
        rate_2['cost'] = rate_2.apply(lambda row: OptLoanCost(row['r_loan'], row['pay_amount']), axis=1)
    logger.info('827-prefix card-present data processed: %s rows.' % (rate_1.shape[0] + rate_2.shape[0]))

    rate1_1 = rate1[((rate1.from_card_type == '借记卡') | (rate1.from_card_type == '借贷合一') | (rate1.from_card_type == '预付卡')) & (rate1.backend_id == 'unionIcCardPay')]
    rate1_2 = rate1[((rate1.from_card_type == '准贷记卡') | (rate1.from_card_type == '贷记卡')) & (rate1.backend_id == 'unionIcCardPay')]
    if rate1_1.empty:
        logger.info('UnionPay debit-card data processed: 0 rows.')
    else:
        rate1_1['cost'] = rate1_1.apply(lambda row: OptYinLianCost(row['r_loan'], row['r_borrow'], row['pay_amount'], 'borrow'), axis=1)
    if rate1_2.empty:
        logger.info('UnionPay credit-card data processed: 0 rows.')
    else:
        rate1_2['cost'] = rate1_2.apply(lambda row: OptYinLianCost(row['r_loan'], row['r_borrow'], row['pay_amount'], 'loan'), axis=1)
    logger.info('853-prefix card-present data processed: %s rows.' % (rate1_1.shape[0] + rate1_2.shape[0]))

    if rate1[rate1.from_card_type == '-'].empty:
        logger.info('Rows with a missing bank-card type: 0 rows')
        df = [rate_1, rate_2, rate1_1, rate1_2]
    else:
        rate2_1 = rate1[rate1.from_card_type == '-']
        rate2_1['cost'] = rate2_1.apply(lambda row: otpGetCardType(row['from_card_type']), axis=1)
        logger.info('Rows with a missing bank-card type: %s rows.' % rate2_1.shape[0])
        df = [rate_1, rate_2, rate1_1, rate1_2, rate2_1]

    logger.info('Merging data: start')
    rate2 = pd.concat(df)
    logger.info('Finished all card-present cost processing: %s rows.' % (rate2.shape[0]))
    gc.collect()

    # Calculate profit
    logger.info('Profit calculation: start')
    rate2['grain'] = rate2.apply(lambda row: OptGrain(row['trans_charge'], row['cost']), axis=1)
    logger.info('Profit calculation processed: %s rows.' % (rate2.shape[0]))
    return rate2
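# A tiny, self-contained sketch (made-up data and rates) of the
# split -> apply -> concat pattern getHaveCardData() follows: mask rows into
# per-card-type frames, derive a cost column with apply, then pd.concat the
# pieces back together. The .copy() calls avoid pandas' SettingWithCopyWarning
# when assigning to a sliced frame.
import pandas as pd

df = pd.DataFrame({"card_type": ["debit", "credit", "debit"],
                   "amount": [100.0, 250.0, 40.0]})
debit = df[df.card_type == "debit"].copy()
credit = df[df.card_type == "credit"].copy()
debit["cost"] = debit.apply(lambda row: row["amount"] * 0.002, axis=1)
credit["cost"] = credit.apply(lambda row: row["amount"] * 0.006, axis=1)
merged = pd.concat([debit, credit])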
def security_tutorial_page():
    # ZS: Currently just links to GN1
    logger.info(request.url)
    return render_template("admin/security_help.html")
def __init__(self, start_vars, temp_uuid):
    helper_functions.get_species_dataset_trait(self, start_vars)

    self.temp_uuid = temp_uuid  # Needed to pass temp_uuid to gn1 mapping code (marker_regression_gn1.py)

    # ZS: Needed to zoom in or remap temp traits like PCA traits
    if "temp_trait" in start_vars and start_vars['temp_trait'] != "False":
        self.temp_trait = "True"
        self.group = self.dataset.group.name

    self.json_data = {}
    self.json_data['lodnames'] = ['lod.hk']

    # ZS: Sometimes a group may have a genofile that only includes a subset of samples
    genofile_samplelist = []
    if 'genofile' in start_vars:
        if start_vars['genofile'] != "":
            self.genofile_string = start_vars['genofile']
            self.dataset.group.genofile = self.genofile_string.split(":")[0]
            genofile_samplelist = get_genofile_samplelist(self.dataset)

    all_samples_ordered = self.dataset.group.all_samples_ordered()

    self.vals = []
    self.samples = []
    self.sample_vals = start_vars['sample_vals']
    sample_val_dict = json.loads(self.sample_vals)
    samples = sample_val_dict.keys()
    if len(genofile_samplelist) != 0:
        for sample in genofile_samplelist:
            self.samples.append(sample)
            if sample in samples:
                self.vals.append(sample_val_dict[sample])
            else:
                self.vals.append("x")
    else:
        for sample in self.dataset.group.samplelist:
            if sample in samples:
                self.vals.append(sample_val_dict[sample])
                self.samples.append(sample)

    if 'n_samples' in start_vars:
        self.n_samples = start_vars['n_samples']
    else:
        self.n_samples = len([val for val in self.vals if val != "x"])

    # ZS: Check if genotypes exist in the DB in order to create links for markers
    self.geno_db_exists = geno_db_exists(self.dataset)

    self.mapping_method = start_vars['method']
    if "results_path" in start_vars:
        self.mapping_results_path = start_vars['results_path']
    else:
        mapping_results_filename = self.dataset.group.name + "_" + ''.join(
            random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
        self.mapping_results_path = "{}{}.csv".format(
            webqtlConfig.GENERATED_IMAGE_DIR, mapping_results_filename)

    self.manhattan_plot = False
    if 'manhattan_plot' in start_vars:
        if start_vars['manhattan_plot'].lower() != "false":
            self.color_scheme = "alternating"
            if "color_scheme" in start_vars:
                self.color_scheme = start_vars['color_scheme']
                if self.color_scheme == "single":
                    self.manhattan_single_color = start_vars['manhattan_single_color']
            self.manhattan_plot = True

    self.maf = start_vars['maf']  # Minor allele frequency
    if "use_loco" in start_vars:
        self.use_loco = start_vars['use_loco']
    else:
        self.use_loco = None
    self.suggestive = ""
    self.significant = ""
    self.pair_scan = False  # Initializing this since it is checked in views to determine which template to use
    if 'transform' in start_vars:
        self.transform = start_vars['transform']
    else:
        self.transform = ""
    self.score_type = "LRS"  # ZS: LRS or LOD
    self.mapping_scale = "physic"
    if "mapping_scale" in start_vars:
        self.mapping_scale = start_vars['mapping_scale']
    self.num_perm = 0
    self.perm_output = []
    self.bootstrap_results = []
    self.covariates = start_vars['covariates'] if "covariates" in start_vars else ""
    self.categorical_vars = []

    # ZS: This is passed to GN1 code for single chr mapping
    self.selected_chr = -1
    if "selected_chr" in start_vars:
        if int(start_vars['selected_chr']) != -1:
            # ZS: Needs to be -1 if showing full map; there's probably a better way to fix this
            self.selected_chr = int(start_vars['selected_chr']) + 1
        else:
            self.selected_chr = int(start_vars['selected_chr'])
    if "startMb" in start_vars:
        self.startMb = start_vars['startMb']
    if "endMb" in start_vars:
        self.endMb = start_vars['endMb']
    if "graphWidth" in start_vars:
        self.graphWidth = start_vars['graphWidth']
    if "lrsMax" in start_vars:
        self.lrsMax = start_vars['lrsMax']
    if "haplotypeAnalystCheck" in start_vars:
        self.haplotypeAnalystCheck = start_vars['haplotypeAnalystCheck']
    if "startMb" in start_vars:
        # ZS: This is to ensure showGenes, Legend, etc are checked the first time you open the
        # mapping page, since startMb will only not be set during the first load
        if "permCheck" in start_vars:
            self.permCheck = "ON"
        else:
            self.permCheck = False
        self.num_perm = int(start_vars['num_perm'])
        self.LRSCheck = start_vars['LRSCheck']
        if "showSNP" in start_vars:
            self.showSNP = start_vars['showSNP']
        else:
            self.showSNP = False
        if "showGenes" in start_vars:
            self.showGenes = start_vars['showGenes']
        else:
            self.showGenes = False
        if "viewLegend" in start_vars:
            self.viewLegend = start_vars['viewLegend']
        else:
            self.viewLegend = False
    else:
        try:
            if int(start_vars['num_perm']) > 0:
                self.num_perm = int(start_vars['num_perm'])
        except:
            self.num_perm = 0
        if self.num_perm > 0:
            self.permCheck = "ON"
        else:
            self.permCheck = False
        self.showSNP = "ON"
        self.showGenes = "ON"
        self.viewLegend = "ON"

    #self.dataset.group.get_markers()
    if self.mapping_method == "gemma":
        self.first_run = True
        self.output_files = None
        if 'output_files' in start_vars:
            self.output_files = start_vars['output_files']
        if 'first_run' in start_vars:
            # ZS: check if first run so existing result files can be used if it isn't
            # (for example zooming on a chromosome, etc)
            self.first_run = False
        self.score_type = "-logP"
        self.manhattan_plot = True
        with Bench("Running GEMMA"):
            if self.use_loco == "True":
                marker_obs, self.output_files = gemma_mapping.run_gemma(
                    self.this_trait, self.dataset, self.samples, self.vals,
                    self.covariates, self.use_loco, self.maf, self.first_run,
                    self.output_files)
            else:
                marker_obs, self.output_files = gemma_mapping.run_gemma(
                    self.this_trait, self.dataset, self.samples, self.vals,
                    self.covariates, self.use_loco, self.maf, self.first_run,
                    self.output_files)
        results = marker_obs
    elif self.mapping_method == "rqtl_plink":
        results = self.run_rqtl_plink()
    elif self.mapping_method == "rqtl_geno":
        perm_strata = []
        if "perm_strata" in start_vars and "categorical_vars" in start_vars:
            self.categorical_vars = start_vars["categorical_vars"].split(",")
            if len(self.categorical_vars) and start_vars["perm_strata"] == "True":
                primary_samples = SampleList(dataset=self.dataset,
                                             sample_names=self.samples,
                                             this_trait=self.this_trait)
                perm_strata = get_perm_strata(self.this_trait, primary_samples,
                                              self.categorical_vars, self.samples)
        self.score_type = "LOD"
        self.control_marker = start_vars['control_marker']
        self.do_control = start_vars['do_control']
        if 'mapmethod_rqtl_geno' in start_vars:
            self.method = start_vars['mapmethod_rqtl_geno']
        else:
            self.method = "em"
        self.model = start_vars['mapmodel_rqtl_geno']
        #if start_vars['pair_scan'] == "true":
        #    self.pair_scan = True
        if self.permCheck and self.num_perm > 0:
            self.perm_output, self.suggestive, self.significant, results = rqtl_mapping.run_rqtl_geno(
                self.vals, self.samples, self.dataset, self.mapping_scale, self.method,
                self.model, self.permCheck, self.num_perm, perm_strata, self.do_control,
                self.control_marker, self.manhattan_plot, self.pair_scan, self.covariates)
        else:
            results = rqtl_mapping.run_rqtl_geno(
                self.vals, self.samples, self.dataset, self.mapping_scale, self.method,
                self.model, self.permCheck, self.num_perm, perm_strata, self.do_control,
                self.control_marker, self.manhattan_plot, self.pair_scan, self.covariates)
    elif self.mapping_method == "reaper":
        if "startMb" in start_vars:
            # ZS: Check if first time page loaded, so it can default to ON
            if "additiveCheck" in start_vars:
                self.additiveCheck = start_vars['additiveCheck']
            else:
                self.additiveCheck = False
            if "bootCheck" in start_vars:
                self.bootCheck = "ON"
            else:
                self.bootCheck = False
            self.num_bootstrap = int(start_vars['num_bootstrap'])
        else:
            self.additiveCheck = "ON"
            try:
                if int(start_vars['num_bootstrap']) > 0:
                    self.bootCheck = "ON"
                    self.num_bootstrap = int(start_vars['num_bootstrap'])
                else:
                    self.bootCheck = False
                    self.num_bootstrap = 0
            except:
                self.bootCheck = False
                self.num_bootstrap = 0

        self.reaper_version = start_vars['reaper_version']

        self.control_marker = start_vars['control_marker']
        self.do_control = start_vars['do_control']
        logger.info("Running qtlreaper")

        if self.reaper_version == "new":
            self.first_run = True
            self.output_files = None
            if 'first_run' in start_vars:
                # ZS: check if first run so existing result files can be used if it isn't
                # (for example zooming on a chromosome, etc)
                self.first_run = False
                if 'output_files' in start_vars:
                    self.output_files = start_vars['output_files'].split(",")
            results, self.perm_output, self.suggestive, self.significant, self.bootstrap_results, self.output_files = qtlreaper_mapping.run_reaper(
                self.this_trait, self.dataset, self.samples, self.vals,
                self.json_data, self.num_perm, self.bootCheck, self.num_bootstrap,
                self.do_control, self.control_marker, self.manhattan_plot,
                self.first_run, self.output_files)
        else:
            results, self.json_data, self.perm_output, self.suggestive, self.significant, self.bootstrap_results = qtlreaper_mapping.run_original_reaper(
                self.this_trait, self.dataset, self.samples, self.vals,
                self.json_data, self.num_perm, self.bootCheck, self.num_bootstrap,
                self.do_control, self.control_marker, self.manhattan_plot)
    elif self.mapping_method == "plink":
        self.score_type = "-logP"
        self.manhattan_plot = True
        results = plink_mapping.run_plink(self.this_trait, self.dataset,
                                          self.species, self.vals, self.maf)
        #results = self.run_plink()
    else:
        logger.debug("RUNNING NOTHING")

    self.no_results = False
    if len(results) == 0:
        self.no_results = True
    else:
        if self.pair_scan == True:
            self.qtl_results = []
            highest_chr = 1  # This is needed in order to convert the highest chr to X/Y
            for marker in results:
                if marker['chr1'] > 0 or marker['chr1'] == "X" or marker['chr1'] == "X/Y":
                    if marker['chr1'] > highest_chr or marker['chr1'] == "X" or marker['chr1'] == "X/Y":
                        highest_chr = marker['chr1']
                    if 'lod_score' in list(marker.keys()):
                        self.qtl_results.append(marker)

            self.trimmed_markers = results

            for qtl in self.qtl_results:
                self.json_data['chr1'].append(str(qtl['chr1']))
                self.json_data['chr2'].append(str(qtl['chr2']))
                self.json_data['Mb'].append(qtl['Mb'])
                self.json_data['markernames'].append(qtl['name'])

            self.js_data = dict(json_data=self.json_data,
                                this_trait=self.this_trait.name,
                                data_set=self.dataset.name,
                                maf=self.maf,
                                manhattan_plot=self.manhattan_plot,
                                mapping_scale=self.mapping_scale,
                                qtl_results=self.qtl_results)
        else:
            self.qtl_results = []
            self.results_for_browser = []
            self.annotations_for_browser = []
            highest_chr = 1  # This is needed in order to convert the highest chr to X/Y
            for marker in results:
                if 'Mb' in marker:
                    this_ps = marker['Mb'] * 1000000
                else:
                    this_ps = marker['cM'] * 1000000
                browser_marker = dict(chr=str(marker['chr']),
                                      rs=marker['name'],
                                      ps=this_ps,
                                      url="/show_trait?trait_id=" + marker['name'] + "&dataset=" + self.dataset.group.name + "Geno")
                if self.geno_db_exists == "True":
                    annot_marker = dict(name=str(marker['name']),
                                        chr=str(marker['chr']),
                                        rs=marker['name'],
                                        pos=this_ps,
                                        url="/show_trait?trait_id=" + marker['name'] + "&dataset=" + self.dataset.group.name + "Geno")
                else:
                    annot_marker = dict(name=str(marker['name']),
                                        chr=str(marker['chr']),
                                        rs=marker['name'],
                                        pos=this_ps)
                if 'lrs_value' in marker and marker['lrs_value'] > 0:
                    browser_marker['p_wald'] = 10**-(marker['lrs_value'] / 4.61)
                elif 'lod_score' in marker and marker['lod_score'] > 0:
                    browser_marker['p_wald'] = 10**-(marker['lod_score'])
                else:
                    browser_marker['p_wald'] = 0

                self.results_for_browser.append(browser_marker)
                self.annotations_for_browser.append(annot_marker)
                if str(marker['chr']) > '0' or str(marker['chr']) == "X" or str(marker['chr']) == "X/Y":
                    if str(marker['chr']) > str(highest_chr) or str(marker['chr']) == "X" or str(marker['chr']) == "X/Y":
                        highest_chr = marker['chr']
                if ('lod_score' in marker.keys()) or ('lrs_value' in marker.keys()):
                    if 'Mb' in marker.keys():
                        marker['display_pos'] = "Chr" + str(marker['chr']) + ": " + "{:.6f}".format(marker['Mb'])
                    elif 'cM' in marker.keys():
                        marker['display_pos'] = "Chr" + str(marker['chr']) + ": " + "{:.3f}".format(marker['cM'])
                    else:
                        marker['display_pos'] = "N/A"
                    self.qtl_results.append(marker)

            total_markers = len(self.qtl_results)

            with Bench("Exporting Results"):
                export_mapping_results(self.dataset, self.this_trait, self.qtl_results,
                                       self.mapping_results_path, self.mapping_scale,
                                       self.score_type)

            with Bench("Trimming Markers for Figure"):
                if len(self.qtl_results) > 30000:
                    self.qtl_results = trim_markers_for_figure(self.qtl_results)
                    self.results_for_browser = trim_markers_for_figure(self.results_for_browser)
                    filtered_annotations = []
                    for marker in self.results_for_browser:
                        for annot_marker in self.annotations_for_browser:
                            if annot_marker['rs'] == marker['rs']:
                                filtered_annotations.append(annot_marker)
                                break
                    self.annotations_for_browser = filtered_annotations
                    browser_files = write_input_for_browser(self.dataset,
                                                            self.results_for_browser,
                                                            self.annotations_for_browser)
                else:
                    browser_files = write_input_for_browser(self.dataset,
                                                            self.results_for_browser,
                                                            self.annotations_for_browser)

            with Bench("Trimming Markers for Table"):
                self.trimmed_markers = trim_markers_for_table(results)

            chr_lengths = get_chr_lengths(self.mapping_scale, self.mapping_method,
                                          self.dataset, self.qtl_results)

            # ZS: For zooming into genome browser, need to pass chromosome name instead of number
            if self.dataset.group.species == "mouse":
                if self.selected_chr == 20:
                    this_chr = "X"
                else:
                    this_chr = str(self.selected_chr)
            elif self.dataset.group.species == "rat":
                if self.selected_chr == 21:
                    this_chr = "X"
                else:
                    this_chr = str(self.selected_chr)
            else:
                if self.selected_chr == 22:
                    this_chr = "X"
                elif self.selected_chr == 23:
                    this_chr = "Y"
                else:
                    this_chr = str(self.selected_chr)

            if self.mapping_method != "gemma":
                if self.score_type == "LRS":
                    significant_for_browser = self.significant / 4.61
                else:
                    significant_for_browser = self.significant

                self.js_data = dict(
                    #result_score_type = self.score_type,
                    #this_trait = self.this_trait.name,
                    #data_set = self.dataset.name,
                    #maf = self.maf,
                    #manhattan_plot = self.manhattan_plot,
                    #mapping_scale = self.mapping_scale,
                    #chromosomes = chromosome_mb_lengths,
                    #qtl_results = self.qtl_results,
                    categorical_vars=self.categorical_vars,
                    chr_lengths=chr_lengths,
                    num_perm=self.num_perm,
                    perm_results=self.perm_output,
                    significant=significant_for_browser,
                    browser_files=browser_files,
                    selected_chr=this_chr,
                    total_markers=total_markers)
            else:
                self.js_data = dict(chr_lengths=chr_lengths,
                                    browser_files=browser_files,
                                    selected_chr=this_chr,
                                    total_markers=total_markers)
def get_temp_data():
    logger.info(request.url)
    temp_uuid = request.args['key']
    return flask.jsonify(temp_data.TempData(temp_uuid).get_all())
def search_page():
    logger.info("in search_page")
    if 'info_database' in request.args:
        logger.info("Going to sharing_info_page")
        template_vars = sharing_info_page()
        if template_vars.redirect_url:
            logger.info("Going to redirect")
            return flask.redirect(template_vars.redirect_url)
        else:
            return render_template("data_sharing.html", **template_vars.__dict__)
    else:
        result = None
        if USE_REDIS:
            with Bench("Trying Redis cache"):
                key = "search_results:v1:" + json.dumps(request.args, sort_keys=True)
                logger.debug("key is:", pf(key))
                result = Redis.get(key)
                if result:
                    logger.info("Redis cache hit on search results!")
                    result = pickle.loads(result)
        else:
            logger.info("Skipping Redis cache (USE_REDIS=False)")

        logger.info("request.args is", request.args)
        if result is None:  # Cache miss (or Redis disabled): run the search
            the_search = search_results.SearchResultPage(request.args)
            result = the_search.__dict__
        logger.debugf("result", result)

        if USE_REDIS:
            Redis.set(key, pickle.dumps(result, pickle.HIGHEST_PROTOCOL))
            Redis.expire(key, 60 * 60)

        if result['search_term_exists']:
            return render_template("search_result_page.html", **result)
        else:
            return render_template("search_error.html")
def generated_file(filename):
    logger.info(request.url)
    return send_from_directory(GENERATED_IMAGE_DIR, filename)
def wcgna_setup():  # Note: "wcgna" is a historical misspelling of "wgcna", kept so existing routes keep working
    logger.info("In wgcna, request.form is:", request.form)
    # We are going to get additional user input for the analysis
    return render_template("wgcna_setup.html", **request.form)  # Display the form using the template
def ctl_setup():
    # We are going to get additional user input for the analysis
    logger.info("In ctl, request.form is:", request.form)
    logger.info(request.url)
    # Display the form using the template
    return render_template("ctl_setup.html", **request.form)
def heatmap_page():
    logger.info("In heatmap, request.form is:", pf(request.form))
    start_vars = request.form
    temp_uuid = uuid.uuid4()

    traits = [trait.strip() for trait in start_vars['trait_list'].split(',')]
    if traits[0] != "":
        version = "v5"
        key = "heatmap:{}:".format(version) + json.dumps(start_vars, sort_keys=True)
        logger.info("key is:", pf(key))
        with Bench("Loading cache"):
            result = Redis.get(key)

        if result:
            logger.info("Cache hit!!!")
            with Bench("Loading results"):
                result = pickle.loads(result)
        else:
            logger.info("Cache miss!!!")

            template_vars = heatmap.Heatmap(request.form, temp_uuid)
            template_vars.js_data = json.dumps(template_vars.js_data,
                                               default=json_default_handler,
                                               indent=" ")

            result = template_vars.__dict__

            for item in template_vars.__dict__.keys():
                logger.info(" ---**--- {}: {}".format(type(template_vars.__dict__[item]), item))

            pickled_result = pickle.dumps(result, pickle.HIGHEST_PROTOCOL)
            logger.info("pickled result length:", len(pickled_result))
            Redis.set(key, pickled_result)
            Redis.expire(key, 60 * 60)

        with Bench("Rendering template"):
            rendered_template = render_template("heatmap.html", **result)
    else:
        rendered_template = render_template("empty_collection.html", **{'tool': 'Heatmap'})

    return rendered_template
def ephewas():
    logger.info("In ephewas, request.form is:", request.form)
    # We are going to get additional user input for the analysis
    ephewasO = ephewas_analysis.EPheWAS()  # Start R, load the package and pointers and create the analysis
    return render_template("ephewas_analysis.html", **request.form)  # Display the form using the template
def marker_regression_page():
    initial_start_vars = request.form
    logger.debug("Marker regression called with initial_start_vars:", initial_start_vars.items())
    temp_uuid = initial_start_vars['temp_uuid']
    wanted = (
        'trait_id', 'dataset', 'method', 'trimmed_markers', 'selected_chr',
        'chromosomes', 'mapping_scale', 'score_type', 'suggestive',
        'significant', 'num_perm', 'permCheck', 'perm_output', 'num_bootstrap',
        'bootCheck', 'bootstrap_results', 'LRSCheck', 'maf', 'manhattan_plot',
        'control_marker', 'control_marker_db', 'do_control', 'genofile',
        'pair_scan', 'startMb', 'endMb', 'graphWidth', 'lrsMax',
        'additiveCheck', 'showSNP', 'showGenes', 'viewLegend',
        'haplotypeAnalystCheck', 'mapmethod_rqtl_geno', 'mapmodel_rqtl_geno'
    )
    start_vars = {}
    for key, value in initial_start_vars.iteritems():
        if key in wanted or key.startswith('value:'):
            start_vars[key] = value
    logger.debug("Marker regression called with start_vars:", start_vars)

    version = "v3"
    key = "marker_regression:{}:".format(version) + json.dumps(start_vars, sort_keys=True)
    logger.info("key is:", pf(key))
    with Bench("Loading cache"):
        result = None  # Just for testing
        #result = Redis.get(key)

    #logger.info("************************ Starting result *****************")
    #logger.info("result is [{}]: {}".format(type(result), result))
    #logger.info("************************ Ending result ********************")

    if result:
        logger.info("Cache hit!!!")
        with Bench("Loading results"):
            result = pickle.loads(result)
    else:
        logger.info("Cache miss!!!")
        with Bench("Total time in MarkerRegression"):
            template_vars = marker_regression.MarkerRegression(start_vars, temp_uuid)

        if template_vars.mapping_method != "gemma" and template_vars.mapping_method != "plink":
            template_vars.js_data = json.dumps(template_vars.js_data,
                                               default=json_default_handler,
                                               indent=" ")

        result = template_vars.__dict__

    if result['pair_scan']:
        with Bench("Rendering template"):
            img_path = result['pair_scan_filename']
            logger.info("img_path:", img_path)
            initial_start_vars = request.form
            logger.info("initial_start_vars:", initial_start_vars)
            imgfile = open(TEMPDIR + img_path, 'rb')
            imgdata = imgfile.read()
            imgB64 = imgdata.encode("base64")
            bytesarray = array.array('B', imgB64)
            result['pair_scan_array'] = bytesarray
            rendered_template = render_template("pair_scan_results.html", **result)
    else:
        #for item in template_vars.__dict__.keys():
        #    logger.info(" ---**--- {}: {}".format(type(template_vars.__dict__[item]), item))

        gn1_template_vars = marker_regression_gn1.MarkerRegression(result).__dict__

        #pickled_result = pickle.dumps(result, pickle.HIGHEST_PROTOCOL)
        #logger.info("pickled result length:", len(pickled_result))
        #Redis.set(key, pickled_result)
        #Redis.expire(key, 1*60)

        with Bench("Rendering template"):
            if (gn1_template_vars['mapping_method'] == "gemma") or (gn1_template_vars['mapping_method'] == "plink"):
                gn1_template_vars.pop('qtlresults', None)
            print("TEMPLATE KEYS:", list(gn1_template_vars.keys()))
            rendered_template = render_template("marker_regression_gn1.html", **gn1_template_vars)

    # with Bench("Rendering template"):
    #     if result['pair_scan'] == True:
    #         img_path = result['pair_scan_filename']
    #         logger.info("img_path:", img_path)
    #         initial_start_vars = request.form
    #         logger.info("initial_start_vars:", initial_start_vars)
    #         imgfile = open(TEMPDIR + '/' + img_path, 'rb')
    #         imgdata = imgfile.read()
    #         imgB64 = imgdata.encode("base64")
    #         bytesarray = array.array('B', imgB64)
    #         result['pair_scan_array'] = bytesarray
    #         rendered_template = render_template("pair_scan_results.html", **result)
    #     else:
    #         rendered_template = render_template("marker_regression.html", **result)
    #         rendered_template = render_template("marker_regression_gn1.html", **gn1_template_vars)

    return rendered_template
def push_data_to_es(img_uuid, img_list):
    logger.info("Adding data to index..")
def run_analysis(self, requestform):
    logger.info("Starting CTL analysis on dataset")
    self.trait_db_list = [trait.strip() for trait in requestform['trait_list'].split(',')]
    self.trait_db_list = [x for x in self.trait_db_list if x]

    logger.debug("strategy:", requestform.get("strategy"))
    strategy = requestform.get("strategy")
    logger.debug("nperm:", requestform.get("nperm"))
    nperm = int(requestform.get("nperm"))
    logger.debug("parametric:", requestform.get("parametric"))
    parametric = bool(requestform.get("parametric"))
    logger.debug("significance:", requestform.get("significance"))
    significance = float(requestform.get("significance"))

    # Get the name of the .geno file belonging to the first phenotype
    datasetname = self.trait_db_list[0].split(":")[1]
    dataset = data_set.create_dataset(datasetname)

    genofilelocation = locate(dataset.group.name + ".geno", "genotype")
    parser = genofile_parser.ConvertGenoFile(genofilelocation)
    parser.process_csv()
    logger.debug("dataset group: ", dataset.group)

    # Create a genotype matrix
    individuals = parser.individuals
    markers = []
    markernames = []
    for marker in parser.markers:
        markernames.append(marker["name"])
        markers.append(marker["genotypes"])

    genotypes = list(itertools.chain(*markers))
    logger.debug(len(genotypes) / len(individuals), "==", len(parser.markers))

    rGeno = r_t(ro.r.matrix(r_unlist(genotypes),
                            nrow=len(markernames),
                            ncol=len(individuals),
                            dimnames=r_list(markernames, individuals),
                            byrow=True))

    # Create a phenotype matrix
    traits = []
    for trait in self.trait_db_list:
        logger.debug("retrieving data for", trait)
        if trait != "":
            ts = trait.split(':')
            gt = create_trait(name=ts[0], dataset_name=ts[1])
            gt = retrieve_sample_data(gt, dataset, individuals)
            for ind in individuals:
                if ind in list(gt.data.keys()):
                    traits.append(gt.data[ind].value)
                else:
                    traits.append("-999")

    rPheno = r_t(ro.r.matrix(r_as_numeric(r_unlist(traits)),
                             nrow=len(self.trait_db_list),
                             ncol=len(individuals),
                             dimnames=r_list(self.trait_db_list, individuals),
                             byrow=True))
    logger.debug(rPheno)

    # Use data frames to store the objects
    rPheno = r_data_frame(rPheno, check_names=False)
    rGeno = r_data_frame(rGeno, check_names=False)

    # Debug: Print the genotype and phenotype files to disk
    #r_write_table(rGeno, "~/outputGN/geno.csv")
    #r_write_table(rPheno, "~/outputGN/pheno.csv")

    # Perform the CTL scan
    res = self.r_CTLscan(rGeno, rPheno, strategy=strategy, nperm=nperm,
                         parametric=parametric, nthreads=6)

    # Get significant interactions
    significant = self.r_CTLsignificant(res, significance=significance)

    # Create an image for output
    self.results = {}
    self.results['imgurl1'] = webqtlUtil.genRandStr("CTLline_") + ".png"
    self.results['imgloc1'] = GENERATED_IMAGE_DIR + self.results['imgurl1']
    self.results['ctlresult'] = significant
    # Store the user-specified parameters for the output page
    self.results['requestform'] = requestform

    # Create the lineplot
    r_png(self.results['imgloc1'], width=1000, height=600, type='cairo-png')
    self.r_lineplot(res, significance=significance)
    r_dev_off()

    n = 2  # We start from 2, since R starts from 1 :)
    for trait in self.trait_db_list:
        # Create the QTL-like CTL plots
        self.results['imgurl' + str(n)] = webqtlUtil.genRandStr("CTL_") + ".png"
        self.results['imgloc' + str(n)] = GENERATED_IMAGE_DIR + self.results['imgurl' + str(n)]
        r_png(self.results['imgloc' + str(n)], width=1000, height=600, type='cairo-png')
        self.r_plotCTLobject(res, (n - 1), significance=significance, main='Phenotype ' + trait)
        r_dev_off()
        n = n + 1

    # Flush any output from R
    sys.stdout.flush()

    # Create the interactive graph for cytoscape visualization (nodes and edges)
    if not isinstance(significant, ri.RNULLType):
        for x in range(len(significant[0])):
            logger.debug(significant[0][x], significant[1][x], significant[2][x])  # Debug to console
            tsS = significant[0][x].split(':')  # Source
            tsT = significant[2][x].split(':')  # Target
            gtS = create_trait(name=tsS[0], dataset_name=tsS[1])  # Retrieve source info from the DB
            gtT = create_trait(name=tsT[0], dataset_name=tsT[1])  # Retrieve target info from the DB
            self.addNode(gtS)
            self.addNode(gtT)
            self.addEdge(gtS, gtT, significant, x)
            # Update the trait names for the displayed table
            significant[0][x] = "{} ({})".format(gtS.symbol, gtS.name)
            significant[2][x] = "{} ({})".format(gtT.symbol, gtT.name)

    self.elements = json.dumps(self.nodes_list + self.edges_list)
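# addNode/addEdge are not defined in this excerpt. A minimal sketch, assuming
# self.nodes_list and self.edges_list hold cytoscape.js-style elements (the
# {"data": {...}} wrapper is cytoscape.js's documented element format); the
# exact fields, the "name:dataset" ids, and reading significant[1][x] as the
# marker column are assumptions:
def addNode(self, gt):
    # One node per trait, keyed the same way the table rows are ("name:dataset")
    node_id = "{}:{}".format(gt.name, gt.dataset.name)
    self.nodes_list.append({"data": {"id": node_id, "symbol": gt.symbol}})

def addEdge(self, gtS, gtT, significant, x):
    # One edge per significant interaction, from source trait to target trait
    self.edges_list.append({"data": {
        "source": "{}:{}".format(gtS.name, gtS.dataset.name),
        "target": "{}:{}".format(gtT.name, gtT.dataset.name),
        "marker": significant[1][x]}})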
def run_rqtl_geno(vals, samples, dataset, method, model, permCheck, num_perm, perm_strata_list,
                  do_control, control_marker, manhattan_plot, pair_scan, cofactors):
    ## Get pointers to some common R functions
    r_library = ro.r["library"]            # Map the library function
    r_c = ro.r["c"]                        # Map the c function
    plot = ro.r["plot"]                    # Map the plot function
    png = ro.r["png"]                      # Map the png function
    dev_off = ro.r["dev.off"]              # Map the device off function

    print(r_library("qtl"))                # Load R/qtl

    ## Get pointers to some R/qtl functions
    scanone = ro.r["scanone"]              # Map the scanone function
    scantwo = ro.r["scantwo"]              # Map the scantwo function
    calc_genoprob = ro.r["calc.genoprob"]  # Map the calc.genoprob function

    crossname = dataset.group.name
    #try:
    #    generate_cross_from_rdata(dataset)
    #    read_cross_from_rdata = ro.r["generate_cross_from_rdata"]  # Map the local read_cross_from_rdata function
    #    genofilelocation = locate(crossname + ".RData", "genotype/rdata")
    #    cross_object = read_cross_from_rdata(genofilelocation)
    #except:
    generate_cross_from_geno(dataset)
    GENOtoCSVR = ro.r["GENOtoCSVR"]        # Map the local GENOtoCSVR function
    crossfilelocation = TMPDIR + crossname + ".cross"
    if dataset.group.genofile:
        genofilelocation = locate(dataset.group.genofile, "genotype")
    else:
        genofilelocation = locate(dataset.group.name + ".geno", "genotype")
    cross_object = GENOtoCSVR(genofilelocation, crossfilelocation)  # TODO: Add the SEX if that is available

    if manhattan_plot:
        cross_object = calc_genoprob(cross_object)
    else:
        cross_object = calc_genoprob(cross_object, step=1, stepwidth="max")

    pheno_string = sanitize_rqtl_phenotype(vals)
    cross_object = add_phenotype(cross_object, pheno_string, "the_pheno")  # Add the phenotype

    # Scan for QTLs
    marker_covars = create_marker_covariates(control_marker, cross_object)  # Create the additive covariate markers
    if cofactors != "":
        cross_object, trait_covars = add_cofactors(cross_object, dataset, cofactors, samples)  # Create the covariates from selected traits
        ro.r('all_covars <- cbind(marker_covars, trait_covars)')
    else:
        ro.r('all_covars <- marker_covars')
    covars = ro.r['all_covars']

    # "pheno_col" maps to R/qtl's pheno.col argument; the original mixed
    # "pheno" and "pheno_col", which only worked via R's partial matching
    if pair_scan:
        if do_control == "true":
            logger.info("Using covariate")
            result_data_frame = scantwo(cross_object, pheno_col="the_pheno", addcovar=covars,
                                        model=model, method=method, n_cluster=16)
        else:
            logger.info("No covariates")
            result_data_frame = scantwo(cross_object, pheno_col="the_pheno",
                                        model=model, method=method, n_cluster=16)

        pair_scan_filename = webqtlUtil.genRandStr("scantwo_") + ".png"
        png(file=TEMPDIR + pair_scan_filename)
        plot(result_data_frame)
        dev_off()

        return process_pair_scan_results(result_data_frame)
    else:
        if do_control == "true" or cofactors != "":
            logger.info("Using covariate")
            result_data_frame = scanone(cross_object, pheno_col="the_pheno", addcovar=covars,
                                        model=model, method=method)
        else:
            logger.info("No covariates")
            result_data_frame = scanone(cross_object, pheno_col="the_pheno",
                                        model=model, method=method)

        if num_perm > 0 and permCheck == "ON":  # Do permutation (if requested by user)
            if len(perm_strata_list) > 0:  #ZS: The strata list would only be populated if "Stratified" was checked before mapping
                cross_object, strata_ob = add_perm_strata(cross_object, perm_strata_list)
                if do_control == "true" or cofactors != "":
                    perm_data_frame = scanone(cross_object, pheno_col="the_pheno", addcovar=covars,
                                              n_perm=int(num_perm), perm_strata=strata_ob,
                                              model=model, method=method)
                else:
                    perm_data_frame = scanone(cross_object, pheno_col="the_pheno",
                                              n_perm=int(num_perm), perm_strata=strata_ob,
                                              model=model, method=method)
            else:
                if do_control == "true" or cofactors != "":
                    perm_data_frame = scanone(cross_object, pheno_col="the_pheno", addcovar=covars,
                                              n_perm=int(num_perm), model=model, method=method)
                else:
                    perm_data_frame = scanone(cross_object, pheno_col="the_pheno",
                                              n_perm=int(num_perm), model=model, method=method)

            # Set the suggestive/significant thresholds for the web interface
            perm_output, suggestive, significant = process_rqtl_perm_results(num_perm, perm_data_frame)
            the_scale = check_mapping_scale(genofilelocation)
            return perm_output, suggestive, significant, process_rqtl_results(result_data_frame, dataset.group.species), the_scale
        else:
            the_scale = check_mapping_scale(genofilelocation)
            return process_rqtl_results(result_data_frame, dataset.group.species), the_scale
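# For reference, a hedged sketch of a full call to run_rqtl_geno. Every value
# below is illustrative: "hk" and "normal" are standard R/qtl scanone
# method/model options, and the trait values, sample names and dataset object
# would come from the mapping page, not from here:
def run_rqtl_geno_example(vals, samples, dataset):
    perm_output, suggestive, significant, results, scale = run_rqtl_geno(
        vals, samples, dataset,
        method="hk", model="normal",    # R/qtl scanone method/model
        permCheck="ON", num_perm=1000,  # permutations enabled
        perm_strata_list=[],            # only populated for stratified permutations
        do_control="false", control_marker="",
        manhattan_plot=True, pair_scan=False,
        cofactors="")                   # "" means no trait covariates
    return results, scale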
def __init__(self, start_vars, temp_uuid):
    helper_functions.get_species_dataset_trait(self, start_vars)

    self.temp_uuid = temp_uuid  # Needed to pass temp_uuid to the GN1 mapping code (marker_regression_gn1.py)

    self.json_data = {}
    self.json_data['lodnames'] = ['lod.hk']

    self.samples = []  # Want only ones with values
    self.vals = []

    all_samples_ordered = self.dataset.group.all_samples_ordered()
    primary_sample_names = list(all_samples_ordered)

    for sample in self.dataset.group.samplelist:
        # sample is actually the name of an individual
        in_trait_data = False
        for item in self.this_trait.data:
            if self.this_trait.data[item].name == sample:
                value = start_vars['value:' + self.this_trait.data[item].name]
                self.samples.append(self.this_trait.data[item].name)
                self.vals.append(value)
                in_trait_data = True
                break
        if not in_trait_data:
            value = start_vars.get('value:' + sample)
            if value:
                self.samples.append(sample)
                self.vals.append(value)

    self.mapping_method = start_vars['method']
    if start_vars['manhattan_plot'] == "True":
        self.manhattan_plot = True
    else:
        self.manhattan_plot = False

    self.maf = start_vars['maf']  # Minor allele frequency
    self.suggestive = ""
    self.significant = ""
    self.pair_scan = False  # Initializing this since it is checked in views to determine which template to use
    self.score_type = "LRS"  #ZS: LRS or LOD
    self.mapping_scale = "physic"
    self.num_perm = 0
    self.perm_output = []
    self.bootstrap_results = []  #ZS: This is passed to GN1 code for single chr mapping

    self.selected_chr = -1
    if "selected_chr" in start_vars:
        if int(start_vars['selected_chr']) != -1:  #ZS: Needs to be -1 if showing full map; there's probably a better way to fix this
            self.selected_chr = int(start_vars['selected_chr']) + 1
        else:
            self.selected_chr = int(start_vars['selected_chr'])
    if "startMb" in start_vars:
        self.startMb = start_vars['startMb']
    if "endMb" in start_vars:
        self.endMb = start_vars['endMb']
    if "graphWidth" in start_vars:
        self.graphWidth = start_vars['graphWidth']
    if "lrsMax" in start_vars:
        self.lrsMax = start_vars['lrsMax']
    if "haplotypeAnalystCheck" in start_vars:
        self.haplotypeAnalystCheck = start_vars['haplotypeAnalystCheck']
    if "startMb" in start_vars:  #ZS: This is to ensure showGenes, Legend, etc are checked the first time you open the mapping page, since startMb will only not be set during the first load
        if "permCheck" in start_vars:
            self.permCheck = "ON"
        else:
            self.permCheck = False
        self.num_perm = int(start_vars['num_perm'])
        self.LRSCheck = start_vars['LRSCheck']
        if "showSNP" in start_vars:
            self.showSNP = start_vars['showSNP']
        else:
            self.showSNP = False
        if "showGenes" in start_vars:
            self.showGenes = start_vars['showGenes']
        else:
            self.showGenes = False
        if "viewLegend" in start_vars:
            self.viewLegend = start_vars['viewLegend']
        else:
            self.viewLegend = False
    else:
        try:
            if int(start_vars['num_perm']) > 0:
                self.num_perm = int(start_vars['num_perm'])
        except:
            self.num_perm = 0
        if self.num_perm > 0:
            self.permCheck = "ON"
        else:
            self.permCheck = False
        self.showSNP = "ON"
        self.showGenes = "ON"
        self.viewLegend = "ON"

    self.dataset.group.get_markers()
    if self.mapping_method == "gemma":
        self.score_type = "-log(p)"
        self.manhattan_plot = True
        with Bench("Running GEMMA"):
            marker_obs = gemma_mapping.run_gemma(self.dataset, self.samples, self.vals)
        results = marker_obs
    elif self.mapping_method == "rqtl_plink":
        results = self.run_rqtl_plink()
    elif self.mapping_method == "rqtl_geno":
        self.score_type = "LOD"
        self.mapping_scale = "morgan"
        self.control_marker = start_vars['control_marker']
        self.do_control = start_vars['do_control']
        self.dataset.group.genofile = start_vars['genofile']
        self.method = start_vars['mapmethod_rqtl_geno']
        self.model = start_vars['mapmodel_rqtl_geno']
        if start_vars['pair_scan'] == "true":
            self.pair_scan = True
        # run_rqtl_geno (above) also expects samples, perm_strata_list and
        # cofactors; this constructor does not collect strata or trait
        # cofactors, so empty defaults are passed through, and the mapping
        # scale it returns replaces the "morgan" default set above. The three
        # branches match the three return shapes of run_rqtl_geno.
        rqtl_args = (self.vals, self.samples, self.dataset, self.method, self.model,
                     self.permCheck, self.num_perm, [], self.do_control,
                     self.control_marker, self.manhattan_plot, self.pair_scan, "")
        if self.pair_scan:
            results = rqtl_mapping.run_rqtl_geno(*rqtl_args)
        elif self.permCheck and self.num_perm > 0:
            self.perm_output, self.suggestive, self.significant, results, self.mapping_scale = rqtl_mapping.run_rqtl_geno(*rqtl_args)
        else:
            results, self.mapping_scale = rqtl_mapping.run_rqtl_geno(*rqtl_args)
    elif self.mapping_method == "reaper":
        if "startMb" in start_vars:  #ZS: Check if first time page loaded, so it can default to ON
            if "additiveCheck" in start_vars:
                self.additiveCheck = start_vars['additiveCheck']
            else:
                self.additiveCheck = False
            if "bootCheck" in start_vars:
                self.bootCheck = "ON"
            else:
                self.bootCheck = False
            self.num_bootstrap = int(start_vars['num_bootstrap'])
        else:
            self.additiveCheck = "ON"
            try:
                if int(start_vars['num_bootstrap']) > 0:
                    self.bootCheck = "ON"
                    self.num_bootstrap = int(start_vars['num_bootstrap'])
                else:
                    self.bootCheck = False
                    self.num_bootstrap = 0
            except:
                self.bootCheck = False
                self.num_bootstrap = 0

        self.control_marker = start_vars['control_marker']
        self.do_control = start_vars['do_control']
        self.dataset.group.genofile = start_vars['genofile']
        logger.info("Running qtlreaper")
        results, self.json_data, self.perm_output, self.suggestive, self.significant, self.bootstrap_results = qtlreaper_mapping.gen_reaper_results(
            self.this_trait, self.dataset, self.samples, self.json_data, self.num_perm,
            self.bootCheck, self.num_bootstrap, self.do_control, self.control_marker,
            self.manhattan_plot)
    elif self.mapping_method == "plink":
        self.score_type = "-log(p)"
        self.manhattan_plot = True
        results = plink_mapping.run_plink(self.this_trait, self.dataset, self.species, self.vals, self.maf)
        #results = self.run_plink()
    elif self.mapping_method == "pylmm":
        logger.debug("RUNNING PYLMM")
        self.dataset.group.genofile = start_vars['genofile']
        if self.num_perm > 0:
            self.run_permutations(str(temp_uuid))
        results = self.gen_data(str(temp_uuid))
    else:
        logger.debug("RUNNING NOTHING")

    if self.pair_scan == True:
        self.qtl_results = []
        highest_chr = 1  # This is needed in order to convert the highest chr to X/Y
        for marker in results:
            if marker['chr1'] > 0 or marker['chr1'] == "X" or marker['chr1'] == "X/Y":
                if marker['chr1'] > highest_chr or marker['chr1'] == "X" or marker['chr1'] == "X/Y":
                    highest_chr = marker['chr1']
                if 'lod_score' in marker.keys():
                    self.qtl_results.append(marker)

        self.trimmed_markers = results

        # These lists must exist before the loop below appends to them
        self.json_data['chr1'] = []
        self.json_data['chr2'] = []
        self.json_data['Mb'] = []
        self.json_data['markernames'] = []
        for qtl in self.qtl_results:  # the original wrapped this in enumerate(), which broke the item lookups
            self.json_data['chr1'].append(str(qtl['chr1']))
            self.json_data['chr2'].append(str(qtl['chr2']))
            self.json_data['Mb'].append(qtl['Mb'])
            self.json_data['markernames'].append(qtl['name'])

        self.js_data = dict(
            json_data=self.json_data,
            this_trait=self.this_trait.name,
            data_set=self.dataset.name,
            maf=self.maf,
            manhattan_plot=self.manhattan_plot,
            mapping_scale=self.mapping_scale,
            qtl_results=self.qtl_results,
        )
    else:
        self.cutoff = 2
        self.qtl_results = []
        highest_chr = 1  # This is needed in order to convert the highest chr to X/Y
        for marker in results:
            if marker['chr'] > 0 or marker['chr'] == "X" or marker['chr'] == "X/Y":
                if marker['chr'] > highest_chr or marker['chr'] == "X" or marker['chr'] == "X/Y":
                    highest_chr = marker['chr']
                if ('lod_score' in marker.keys()) or ('lrs_value' in marker.keys()):
                    self.qtl_results.append(marker)

        self.trimmed_markers = trim_markers_for_table(results)

        if self.mapping_method != "gemma":
            self.json_data['chr'] = []
            self.json_data['pos'] = []
            self.json_data['lod.hk'] = []
            self.json_data['markernames'] = []

            self.json_data['suggestive'] = self.suggestive
            self.json_data['significant'] = self.significant

            # Need to convert the QTL objects that qtl reaper returns into a json serializable dictionary
            for index, qtl in enumerate(self.qtl_results):
                #if index < 40:
                #    logger.debug("lod score is:", qtl['lod_score'])
                if qtl['chr'] == highest_chr and highest_chr != "X" and highest_chr != "X/Y":
                    #logger.debug("changing to X")
                    self.json_data['chr'].append("X")
                else:
                    self.json_data['chr'].append(str(qtl['chr']))
                self.json_data['pos'].append(qtl['Mb'])
                if 'lrs_value' in qtl.keys():
                    self.json_data['lod.hk'].append(str(qtl['lrs_value']))
                else:
                    self.json_data['lod.hk'].append(str(qtl['lod_score']))
                self.json_data['markernames'].append(qtl['name'])

            # Get chromosome lengths for drawing the interval map plot
            chromosome_mb_lengths = {}
            self.json_data['chrnames'] = []
            for key in self.species.chromosomes.chromosomes.keys():
                self.json_data['chrnames'].append([self.species.chromosomes.chromosomes[key].name,
                                                   self.species.chromosomes.chromosomes[key].mb_length])
                chromosome_mb_lengths[key] = self.species.chromosomes.chromosomes[key].mb_length

            # logger.debug("json_data:", self.json_data)

            self.js_data = dict(
                result_score_type=self.score_type,
                json_data=self.json_data,
                this_trait=self.this_trait.name,
                data_set=self.dataset.name,
                maf=self.maf,
                manhattan_plot=self.manhattan_plot,
                mapping_scale=self.mapping_scale,
                chromosomes=chromosome_mb_lengths,
                qtl_results=self.qtl_results,
                num_perm=self.num_perm,
                perm_results=self.perm_output,
            )
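# trim_markers_for_table is not shown in this excerpt. A minimal sketch,
# assuming it simply caps the results table at the strongest markers; the sort
# key and the cutoff of 200 rows are assumptions, not the project's actual
# values:
def trim_markers_for_table_sketch(results, max_rows=200):
    """Hedged sketch: keep the top markers by LOD/LRS so the table stays small."""
    def score(marker):
        # Markers carry either 'lod_score' or 'lrs_value', as checked above
        return float(marker.get('lod_score', marker.get('lrs_value', 0)))
    return sorted(results, key=score, reverse=True)[:max_rows]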
        and snap_id <= %s
        and parsing_schema_name = '%s'
        group by sql_id) sqt,
        dba_hist_sqltext st
        where st.sql_id(+) = sqt.sql_id
        and st.dbid(+) = %s
        order by nvl(sqt.bget, -1) desc, sqt.sql_id)
        where rownum < 65
        and (rownum <= 10 or logical_read_Per_Total > 1)
    ''' % (beg_snap, end_snap, dbid, dbid, inst_num, inst_num, dbid, inst_num, beg_snap, end_snap, owner, dbid)
    engine = create_engine(DB_CONN_STRING_1, poolclass=NullPool)
    return pd.read_sql_query(sql, engine)


if __name__ == '__main__':
    logger.info('Starting')
    # Load the card-present data
    delDirectory()
    dbid = getDBInfo('1', -1, -1, -1).values
    inst_num = getDBInfo('2', -1, -1, -1).values
    if not dbid[0][0] or dbid[0][0] == 'null' or not inst_num[0][0] or inst_num[0][0] == 'null' or inst_num[0][0] == 0:
        logger.info('Failed to get dbid: %s' % dbid[0][0])
        logger.info('Failed to get inst_num: %s' % inst_num[0][0])
    else:
        logger.info('Successfully got dbid: %s' % dbid[0][0])
        logger.info('Successfully got inst_num: %s' % inst_num[0][0])
        end_snap = getDBInfo('3', dbid[0][0], inst_num[0][0], -1).values
        beg_snap = getDBInfo('4', dbid[0][0], inst_num[0][0], end_snap[0][0]).values
        logger.info('Successfully got beg_snap: %s' % beg_snap[0][0])
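# The query above splices its parameters into the SQL with %-formatting. A
# hedged alternative using SQLAlchemy's bound parameters (text() with :name
# placeholders is its documented API); the query and parameter names below are
# illustrative, not the script's real ones:
from sqlalchemy import create_engine, text
from sqlalchemy.pool import NullPool
import pandas as pd

def read_snap_range(conn_string, dbid, inst_num, beg_snap, end_snap):
    """Run a parameterized AWR query and return a DataFrame."""
    engine = create_engine(conn_string, poolclass=NullPool)
    sql = text("""select snap_id from dba_hist_snapshot
                  where dbid = :dbid and instance_number = :inst_num
                  and snap_id between :beg_snap and :end_snap""")
    return pd.read_sql_query(sql, engine,
                             params={"dbid": dbid, "inst_num": inst_num,
                                     "beg_snap": beg_snap, "end_snap": end_snap})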