Example #1
def sharing_info_page():
    """Info page displayed when the user clicks the "Info" button next to the dataset selection"""
    logger.info("In sharing_info_page")
    logger.error(request.url)
    fd = webqtlFormData.webqtlFormData(request.args)
    template_vars = SharingInfoPage.SharingInfoPage(fd)
    return template_vars
Example #2
def ctl_results():
    logger.info("In ctl, request.form is:", request.form)
    logger.error(request.url)
    ctl = ctl_analysis.CTL()                                  # Start R, load the package and pointers and create the analysis
    ctlA = ctl.run_analysis(request.form)                     # Start the analysis; the returned ctlA object runs in a separate long-running thread
    result = ctl.process_results(ctlA)                        # After the analysis is finished store the result
    return render_template("ctl_results.html", **result)      # Display them using the template
Example #3
def wgcna_results():
    logger.info("In wgcna, request.form is:", request.form)
    logger.error(request.url)
    wgcna = wgcna_analysis.WGCNA()                                # Start R, load the package and pointers and create the analysis
    wgcnaA = wgcna.run_analysis(request.form)                     # Start the analysis; the returned wgcnaA object runs in a separate long-running thread
    result = wgcna.process_results(wgcnaA)                        # After the analysis is finished store the result
    return render_template("wgcna_results.html", **result)        # Display them using the template
Example #4
def corr_scatter_plot_page():
    logger.error(request.url)
    template_vars = corr_scatter_plot.CorrScatterPlot(request.args)
    template_vars.js_data = json.dumps(template_vars.js_data,
                                       default=json_default_handler,
                                       indent="   ")
    return render_template("corr_scatterplot.html", **template_vars.__dict__)
Example #5
def parse_inputs(arguments):
    '''
        Parse the command-line arguments:
            1. fetch arguments such as the iteration count, the file path,
               and the list of field names and data types;
            2. split each argument to extract the values of count, path,
               field name and data type;
            3. store the values in the corresponding variables.
    '''
    try:
        lists_after_splitting_arguments = []
        count = 100
        path = None
        field_and_data_type_list = []

        # splitting arguments after main.py to fetch value of count and path in list
        for argument in arguments[1:]:
            lists_after_splitting_arguments.append(argument.split("="))

        # assigning value to count, path and list of field name and data type
        for list_of_argument in lists_after_splitting_arguments:
            if list_of_argument[0] == 'count':
                count = int(list_of_argument[1]) if list_of_argument[1].isdigit() else 100
            elif list_of_argument[0] == 'path':
                path = list_of_argument[1]
            else:
                field_and_data_type_list += list_of_argument

        return count, path, field_and_data_type_list

    except Exception as e:
        logger.error(e)
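For reference, parse_inputs expects key=value arguments after the script name. A hypothetical invocation and result (the file name data.xlsx is invented):

import sys

# python main.py count=50 path=data.xlsx first_name=str age=int
count, path, fields = parse_inputs(sys.argv)
# count  -> 50
# path   -> 'data.xlsx'
# fields -> ['first_name', 'str', 'age', 'int']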
Example #6
    def read_xls_data(self):
        '''
            Read xls data from the location given in the path variable,
            fetch each field name and its data type from the raw strings,
            and convert them to a list of tuples; in short, turn the raw
            data into processed data.
        '''
        try:
            if self.path:
                logger.info("Reading data from >>> {}".format(self.path))
                raw_data = pd.read_excel(self.path, usecols="A:B")
                processed_data = []

                field_name = raw_data.get('fieldname')
                data_type = raw_data.get('dataType')

                # fetching field name and data type from raw data
                if field_name is not None and data_type is not None and field_name.size == data_type.size:
                    field_name_list = raw_data['fieldname'].tolist()
                    data_type_list = raw_data['dataType'].tolist()

                    for position in range(0, len(field_name_list)):
                        # append each column name with its data type as a tuple
                        processed_data.append((field_name_list[position], data_type_list[position]))

                else:
                    logger.info("Invalid input excel file")

                return processed_data

            else:
                return []

        except Exception as e:
            logger.error(e)
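For context, read_xls_data assumes a sheet whose first two columns are headed fieldname and dataType. A minimal sketch of building such a file with pandas (the file name fields.xlsx is invented; writing requires an Excel engine such as openpyxl):

import pandas as pd

# Hypothetical fixture matching the two columns read_xls_data expects.
fixture = pd.DataFrame({
    'fieldname': ['first_name', 'age'],
    'dataType': ['str', 'int'],
})
fixture.to_excel('fields.xlsx', index=False)  # read back with usecols="A:B"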
Example #7
	def open(self):
		try:
			self._conn = pymysql.connect(host=self._host, port=self._port, user=self._user, passwd=self._passwd,
										 db=self._database, charset='utf8', autocommit=True)
		except Exception as exception:
			self._conn=None
			logger.error("Failed to connect to the database: {0!s}".format(exception))
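A connection opened this way should also be released; a minimal close counterpart, assuming the same _conn attribute (not shown in the original example):

	def close(self):
		if self._conn is not None:
			try:
				self._conn.close()
			finally:
				self._conn = None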
Example #8
def saveToExcel(df):
    df_cols = []
    import re
    now = datetime.datetime.now()
    m = re.match(r'.*(\d+)', CONF_ORDER_TIME)
    days = 1
    if m:
        days = int(m.groups()[0])
    oneday = datetime.timedelta(days=days)
    target_date = now - oneday
    target_date = target_date.strftime("%Y%m%d")
    if not os.path.exists('%s/report' % CUR_PATH):
        os.mkdir('%s/report' % CUR_PATH)
    fileName = '%s/report/eqdetail-%s.xlsx' % (CUR_PATH,target_date)
    report_col_dict = {} 
    for k, v in CONF_EQDETAIL_DICT.items():
        report_col_dict[k.lower()] = v    
    for k in df.columns:
        if k in report_col_dict:
            df_cols.append(report_col_dict[k])
        else:
            #df_cols.append('unTitled')
            df_cols.append('unTitled-%s' % k)
            logger.error('Error: title field missing, field name: %s' % k)
    df.columns = df_cols
    excel = df
    # print fileName
    if os.path.exists(fileName):
        os.remove(fileName)
    excel.to_excel(fileName)
Example #9
def create_temp_trait():
    logger.error(request.url)
    print("REQUEST.FORM:", request.form)
    #template_vars = submit_trait.SubmitTrait(request.form)

    doc = docs.Docs("links")
    return render_template("links.html", **doc.__dict__)
Example #10
def export():
    logger.info("request.form:", request.form)
    logger.error(request.url)
    svg_xml = request.form.get("data", "Invalid data")
    filename = request.form.get("filename", "manhattan_plot_snp")
    response = Response(svg_xml, mimetype="image/svg+xml")
    response.headers["Content-Disposition"] = "attachment; filename=%s"%filename
    return response
Example #11
def gsearchact():
    logger.error(request.url)
    result = gsearch.GSearch(request.args).__dict__
    result_type = request.args['type']
    if result_type == "gene":
        return render_template("gsearch_gene.html", **result)
    elif result_type == "phenotype":
        return render_template("gsearch_pheno.html", **result)
Example #12
def export_traits_csv():
    """CSV file consisting of the traits from the search result page"""
    logger.info("In export_traits_csv")
    logger.info("request.form:", request.form)
    logger.error(request.url)
    csv_data = export_traits.export_search_results_csv(request.form)

    return Response(csv_data,
                    mimetype='text/csv',
                    headers={"Content-Disposition":"attachment;filename=trait_list.csv"})
Example #13
def export_mapping_results():
    logger.info("request.form:", request.form)
    logger.error(request.url)
    file_path = request.form.get("results_path")
    with open(file_path, "r") as results_file:
        results_csv = results_file.read()
    response = Response(results_csv,
                        mimetype='text/csv',
                        headers={"Content-Disposition":"attachment;filename=mapping_results.csv"})

    return response
Example #14
def loading_page():
    logger.error(request.url)
    initial_start_vars = request.form
    logger.debug("Marker regression called with initial_start_vars:", initial_start_vars.items())
    #temp_uuid = initial_start_vars['temp_uuid']
    wanted = (
        'temp_uuid',
        'trait_id',
        'dataset',
        'method',
        'trimmed_markers',
        'selected_chr',
        'chromosomes',
        'mapping_scale',
        'score_type',
        'suggestive',
        'significant',
        'num_perm',
        'permCheck',
        'perm_output',
        'num_bootstrap',
        'bootCheck',
        'bootstrap_results',
        'LRSCheck',
        'covariates',
        'maf',
        'use_loco',
        'manhattan_plot',
        'control_marker',
        'control_marker_db',
        'do_control',
        'genofile',
        'pair_scan',
        'startMb',
        'endMb',
        'graphWidth',
        'lrsMax',
        'additiveCheck',
        'showSNP',
        'showGenes',
        'viewLegend',
        'haplotypeAnalystCheck',
        'mapmethod_rqtl_geno',
        'mapmodel_rqtl_geno'
    )
    start_vars_container = {}
    start_vars = {}
    for key, value in initial_start_vars.items():
        if key in wanted or key.startswith('value:'):
            start_vars[key] = value

    start_vars_container['start_vars'] = start_vars
    rendered_template = render_template("loading.html", **start_vars_container)

    return rendered_template
Example #15
def get_face_group_data():
    try:
        table = dynamodb.Table(DYNAMO_TBL_FACEGROUPS)
        response = table.scan()
    except Exception as e:
        logger.error(
            'Error occurred fetching face data from DynamoDB: {}'.format(
                str(e)))
        raise Exception('Error occurred fetching face data from DynamoDB:', e)

    return response
Example #16
def get_result_from_db():
    try:
        table = dynamodb.Table('Results')
        response = table.scan()
    except Exception as e:
        logger.error(
            'Error occurred fetching result data from DynamoDB: {}'.format(
                str(e)))
        raise Exception('Error occurred fetching result data from DynamoDB:',
                        e)

    return response
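Note that a single scan() call returns at most 1 MB of data, so the calls above may truncate large tables. A sketch of draining a table page by page via LastEvaluatedKey (the helper name scan_all is invented):

def scan_all(table):
    '''Scan an entire DynamoDB table, following pagination markers.'''
    items = []
    response = table.scan()
    items.extend(response.get('Items', []))
    # LastEvaluatedKey is present in the response while more pages remain.
    while 'LastEvaluatedKey' in response:
        response = table.scan(ExclusiveStartKey=response['LastEvaluatedKey'])
        items.extend(response.get('Items', []))
    return items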
Example #17
def tmp_page(img_path):
    logger.info("In tmp_page")
    logger.info("img_path:", img_path)
    logger.error(request.url)
    initial_start_vars = request.form
    logger.info("initial_start_vars:", initial_start_vars)
    import base64
    with open(GENERATED_IMAGE_DIR + img_path, 'rb') as imgfile:
        imgdata = imgfile.read()
    imgB64 = base64.b64encode(imgdata)
    bytesarray = array.array('B', imgB64)
    return render_template("show_image.html",
                           img_base64=bytesarray)
Example #18
def export_pdf():
    import cairosvg
    logger.info("request.form:", request.form)
    logger.error(request.url)
    svg_xml = request.form.get("data", "Invalid data")
    logger.info("svg_xml:", svg_xml)
    filename = request.form.get("filename", "interval_map_pdf")
    filepath = GENERATED_IMAGE_DIR+filename
    pdf_file = cairosvg.svg2pdf(bytestring=svg_xml)
    response = Response(pdf_file, mimetype="application/pdf")
    response.headers["Content-Disposition"] = "attachment; filename=%s"%filename
    return response
Example #19
    def __init__(self, iteration_count=100, file_path=None):
        '''
            Initialization method to set the count and path parameters.
        '''
        try:
            logger.info("Script initiated, starting process....")
            self.iteration_count = iteration_count
            if file_path and os.path.exists(file_path):
                self.path = file_path
            else:
                self.path = None
        except Exception as e:
            logger.error(e)
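Hypothetical usage, assuming the class that owns this initializer and the read_xls_data method above is named DataGenerator (the name is invented):

generator = DataGenerator(iteration_count=200, file_path='fields.xlsx')
fields = generator.read_xls_data()  # list of (fieldname, dataType) tuples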
Example #20
def show_trait_page():
    logger.error(request.url)
    template_vars = show_trait.ShowTrait(request.args)
    #logger.info("js_data before dump:", template_vars.js_data)
    template_vars.js_data = json.dumps(template_vars.js_data,
                                       default=json_default_handler,
                                       indent="   ")
    # Sorting the keys (sort_keys=True) messes up the ordered dictionary, so don't do that

    #logger.info("js_data after dump:", template_vars.js_data)
    #logger.info("show_trait template_vars:", pf(template_vars.__dict__))
    return render_template("show_trait.html", **template_vars.__dict__)
Example #21
def network_graph_page():
    logger.info("In network_graph, request.form is:", pf(request.form))
    logger.error(request.url)
    start_vars = request.form
    traits = [trait.strip() for trait in start_vars['trait_list'].split(',')]
    if traits[0] != "":
        template_vars = network_graph.NetworkGraph(start_vars)
        template_vars.js_data = json.dumps(template_vars.js_data,
                                           default=json_default_handler,
                                           indent="   ")

        return render_template("network_graph.html", **template_vars.__dict__)
    else:
        return render_template("empty_collection.html", **{'tool':'Network Graph'})
Example #22
def index_page():
    logger.info("Sending index_page")
    logger.error(request.url)
    params = request.args
    if 'import_collections' in params:
        import_collections = params['import_collections']
        if import_collections == "true":
            g.cookie_session.import_traits_to_user()
    if USE_GN_SERVER:
        # The menu is generated using GN_SERVER
        return render_template("index_page.html", gn_server_url = GN_SERVER_URL, version=GN_VERSION)
    else:
        # Old style static menu (OBSOLETE)
        return render_template("index_page_orig.html", version=GN_VERSION)
Example #23
def corr_matrix_page():
    logger.info("In corr_matrix, request.form is:", pf(request.form))
    logger.error(request.url)

    start_vars = request.form
    traits = [trait.strip() for trait in start_vars['trait_list'].split(',')]
    if traits[0] != "":
        template_vars = show_corr_matrix.CorrelationMatrix(start_vars)
        template_vars.js_data = json.dumps(template_vars.js_data,
                                           default=json_default_handler,
                                           indent="   ")

        return render_template("correlation_matrix.html", **template_vars.__dict__)
    else:
        return render_template("empty_collection.html", **{'tool':'Correlation Matrix'})
Example #24
def heatmap_page():
    logger.info("In heatmap, request.form is:", pf(request.form))
    logger.error(request.url)

    start_vars = request.form
    temp_uuid = uuid.uuid4()

    traits = [trait.strip() for trait in start_vars['trait_list'].split(',')]
    if traits[0] != "":
        version = "v5"
        key = "heatmap:{}:".format(version) + json.dumps(start_vars, sort_keys=True)
        logger.info("key is:", pf(key))
        with Bench("Loading cache"):
            result = Redis.get(key)

        if result:
            logger.info("Cache hit!!!")
            with Bench("Loading results"):
                result = pickle.loads(result)

        else:
            logger.info("Cache miss!!!")

            template_vars = heatmap.Heatmap(request.form, temp_uuid)
            template_vars.js_data = json.dumps(template_vars.js_data,
                                               default=json_default_handler,
                                               indent="   ")

            result = template_vars.__dict__

            for item in template_vars.__dict__.keys():
                logger.info("  ---**--- {}: {}".format(type(template_vars.__dict__[item]), item))

            pickled_result = pickle.dumps(result, pickle.HIGHEST_PROTOCOL)
            logger.info("pickled result length:", len(pickled_result))
            Redis.set(key, pickled_result)
            Redis.expire(key, 60*60)

        with Bench("Rendering template"):
            rendered_template = render_template("heatmap.html", **result)

    else:
        rendered_template = render_template("empty_collection.html", **{'tool':'Heatmap'})

    return rendered_template
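The cache branch above is a get-or-compute pattern; a minimal generic sketch of the same idea (the helper name cached_compute is invented), assuming the same Redis client and pickle serialization used in the example:

def cached_compute(key, compute, ttl=60*60):
    '''Return the cached value for key, computing and caching it on a miss.'''
    cached = Redis.get(key)
    if cached:
        return pickle.loads(cached)
    result = compute()
    Redis.set(key, pickle.dumps(result, pickle.HIGHEST_PROTOCOL))
    Redis.expire(key, ttl)
    return result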
Example #25
def save_face_group_data(img_uuid, image_list):
    try:
        response = dynamo_client.put_item(
            Item={
                'uuid': {
                    'S': img_uuid,
                },
                'image_list': {
                    'S': str(image_list),
                },
            },
            ReturnConsumedCapacity='TOTAL',
            TableName=DYNAMO_TBL_FACEGROUPS,
        )
    except Exception as e:
        logger.error('Error occurred while storing face data: {}'.format(
            str(e)))
        raise Exception('Error occurred while storing face data:', e)
Example #26
def export_trait_csv():
    """CSV file consisting of the sample data from the trait data and analysis page"""
    logger.info("In export_trait_csv")
    logger.info("request.form:", request.form)
    logger.error(request.url)
    sample_data = export_trait_data.export_sample_table(request.form)

    logger.info("sample_data - type: %s -- size: %s" % (type(sample_data), len(sample_data)))

    buff = StringIO.StringIO()
    writer = csv.writer(buff)
    for row in sample_data:
        writer.writerow(row)
    csv_data = buff.getvalue()
    buff.close()

    return Response(csv_data,
                    mimetype='text/csv',
                    headers={"Content-Disposition":"attachment;filename=sample_data.csv"})
Example #27
def getSubDate(date_time, date_type):
    '''Slice a component (year/month/day/hour) out of a date string'''
    if not date_time:
        return '-'
    if str(date_time) == 'nan':
        return '-'
    try:        
        if date_type == 'year':
            return date_time[:4]
        if date_type == 'month':
            return date_time[4:6]
        if date_type == 'day':
            return date_time[6:8]
        if date_type == 'hour':
            return date_time[8:10]
    except Exception as e:
        logger.error('Error: failed to slice the date, details: %s' % str(e))
        logger.error('Input parameters date_time:%s, date_type:%s' % (date_time, date_type))
        return 'Invalid date, cannot slice'
    return date_time
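The slicing above assumes a YYYYMMDDHH-style string; for example:

getSubDate('2021071509', 'year')   # -> '2021'
getSubDate('2021071509', 'month')  # -> '07'
getSubDate('2021071509', 'day')    # -> '15'
getSubDate(None, 'hour')           # -> '-'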
Example #28
def handle_generic_exceptions(e):
    import werkzeug
    err_msg = str(e)
    now = datetime.datetime.utcnow()
    time_str = now.strftime('%l:%M%p UTC %b %d, %Y')
    # get the stack trace and send it to the logger
    exc_type, exc_value, exc_traceback = sys.exc_info()
    formatted_lines = [f"{request.url} ({time_str})"] + \
        traceback.format_exc().splitlines()

    _message_templates = {
        werkzeug.exceptions.NotFound: ("404: Not Found: "
                                       f"{time_str}: {request.url}"),
        werkzeug.exceptions.BadRequest: ("400: Bad Request: "
                                         f"{time_str}: {request.url}"),
        werkzeug.exceptions.RequestTimeout: ("408: Request Timeout: "
                                             f"{time_str}: {request.url}")
    }
    # Default to the lengthy stack trace!
    logger.error(_message_templates.get(exc_type, formatted_lines))
    # Handle random animations
    # Use a cookie to have one animation on refresh
    animation = request.cookies.get(err_msg[:32])
    if not animation:
        animation = random.choice([
            fn for fn in os.listdir("./wqflask/static/gif/error")
            if fn.endswith(".gif")
        ])

    resp = make_response(
        render_template("error.html",
                        message=err_msg,
                        stack=formatted_lines,
                        error_image=animation,
                        version=GN_VERSION))

    # logger.error("Set cookie %s with %s" % (err_msg, animation))
    resp.set_cookie(err_msg[:32], animation)
    return resp
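A handler like this only runs once it is registered with the Flask application; a minimal sketch, assuming an application object named app:

app.register_error_handler(Exception, handle_generic_exceptions)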
Example #29
def compare_faces(src_image, target_image):
    # logger.info("src_image: ", src_image, "target_image: ", target_image)
    try:
        response = rek_client.compare_faces(
            SourceImage={'S3Object': {
                'Bucket': s3_bucket,
                'Name': src_image
            }},
            TargetImage={
                'S3Object': {
                    'Bucket': s3_bucket,
                    'Name': target_image
                }
            })
        return response
    except Exception as e:
        logger.error(
            'Error occurred while comparing faces using Rekognition: {}'.format(
                str(e)))
        raise Exception(
            'Error occurred while comparing faces using Rekognition:', e)
Example #30
def handle_bad_request(e):
    err_msg = str(e)
    logger.error(err_msg)
    logger.error(request.url)
    # get the stack trace and send it to the logger
    exc_type, exc_value, exc_traceback = sys.exc_info()
    logger.error(traceback.format_exc())
    now = datetime.datetime.utcnow()
    time_str = now.strftime('%l:%M%p UTC %b %d, %Y')
    formatted_lines = [request.url + " (" + time_str + ")"
                       ] + traceback.format_exc().splitlines()

    # Handle random animations
    # Use a cookie to have one animation on refresh
    animation = request.cookies.get(err_msg[:32])
    if not animation:
        animations = [
            fn for fn in os.listdir("./wqflask/static/gif/error")
            if fn.endswith(".gif")
        ]
        animation = random.choice(animations)

    resp = make_response(
        render_template("error.html",
                        message=err_msg,
                        stack=formatted_lines,
                        error_image=animation,
                        version=GN_VERSION))

    # logger.error("Set cookie %s with %s" % (err_msg, animation))
    resp.set_cookie(err_msg[:32], animation)
    return resp
Example #31
def export_perm_data():
    """CSV file consisting of the permutation data for the mapping results"""
    logger.error(request.url)
    num_perm = int(request.form['num_perm'])
    perm_data = json.loads(request.form['perm_results'])

    buff = StringIO.StringIO()
    writer = csv.writer(buff)
    writer.writerow(["Suggestive LRS (p=0.63) = " + str(perm_data[int(num_perm*0.37-1)])])
    writer.writerow(["Significant LRS (p=0.05) = " + str(perm_data[int(num_perm*0.95-1)])])
    writer.writerow(["Highly Significant LRS (p=0.01) = " + str(perm_data[int(num_perm*0.99-1)])])
    writer.writerow("")
    writer.writerow([str(num_perm) + " Permutations"])
    writer.writerow("")
    for item in perm_data:
        writer.writerow([item])
    csv_data = buff.getvalue()
    buff.close()

    return Response(csv_data,
                    mimetype='text/csv',
                    headers={"Content-Disposition":"attachment;filename=perm_data.csv"})
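The row indices above are order statistics of the ascending permutation results: the p=0.63 suggestive threshold sits at the 37th percentile, p=0.05 at the 95th, and p=0.01 at the 99th. A worked check with a hypothetical num_perm of 2000:

num_perm = 2000  # hypothetical permutation count
assert int(num_perm*0.37-1) == 739   # suggestive, p=0.63
assert int(num_perm*0.95-1) == 1899  # significant, p=0.05
assert int(num_perm*0.99-1) == 1979  # highly significant, p=0.01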
Example #32
def search_page():
    logger.info("in search_page")
    logger.error(request.url)
    if 'info_database' in request.args:
        logger.info("Going to sharing_info_page")
        template_vars = sharing_info_page()
        if template_vars.redirect_url:
            logger.info("Going to redirect")
            return flask.redirect(template_vars.redirect_url)
        else:
            return render_template("data_sharing.html", **template_vars.__dict__)
    else:
        result = None
        if USE_REDIS:
            with Bench("Trying Redis cache"):
                key = "search_results:v1:" + json.dumps(request.args, sort_keys=True)
                logger.debug("key is:", pf(key))
                result = Redis.get(key)
                if result:
                    logger.info("Redis cache hit on search results!")
                    result = pickle.loads(result)
        else:
            logger.info("Skipping Redis cache (USE_REDIS=False)")

        logger.info("request.args is", request.args)
        the_search = search_results.SearchResultPage(request.args)
        result = the_search.__dict__

        logger.debugf("result", result)

        if USE_REDIS:
            Redis.set(key, pickle.dumps(result, pickle.HIGHEST_PROTOCOL))
            Redis.expire(key, 60*60)

        if result['search_term_exists']:
            return render_template("search_result_page.html", **result)
        else:
            return render_template("search_error.html")
Example #33
def export_trait_excel():
    """Excel file consisting of the sample data from the trait data and analysis page"""
    logger.info("In export_trait_excel")
    logger.info("request.form:", request.form)
    logger.error(request.url)
    sample_data = export_trait_data.export_sample_table(request.form)

    logger.info("sample_data - type: %s -- size: %s" % (type(sample_data), len(sample_data)))

    buff = StringIO.StringIO()
    workbook = xlsxwriter.Workbook(buff, {'in_memory': True})
    worksheet = workbook.add_worksheet()
    for i, row in enumerate(sample_data):
        worksheet.write(i, 0, row[0])
        worksheet.write(i, 1, row[1])
        if len(row) > 2:
            worksheet.write(i, 2, row[2])
    workbook.close()
    excel_data = buff.getvalue()
    buff.close()

    return Response(excel_data,
                    mimetype='application/vnd.ms-excel',
                    headers={"Content-Disposition":"attachment;filename=sample_data.xlsx"})
Example #34
def handle_bad_request(e):
    err_msg = str(e)
    logger.error(err_msg)
    logger.error(request.url)
    # get the stack trace and send it to the logger
    exc_type, exc_value, exc_traceback = sys.exc_info()
    logger.error(traceback.format_exc())
    now = datetime.datetime.utcnow()
    time_str = now.strftime('%l:%M%p UTC %b %d, %Y')
    formatted_lines = [request.url + " ("+time_str+")"]+traceback.format_exc().splitlines()

    # Handle random animations
    # Use a cookie to have one animation on refresh
    animation = request.cookies.get(err_msg[:32])
    if not animation:
        animations = [fn for fn in os.listdir("./wqflask/static/gif/error") if fn.endswith(".gif")]
        animation = random.choice(animations)

    resp = make_response(
        render_template("error.html",
                        message=err_msg,
                        stack=formatted_lines,
                        error_image=animation,
                        version=GN_VERSION))

    # logger.error("Set cookie %s with %s" % (err_msg, animation))
    resp.set_cookie(err_msg[:32], animation)
    return resp
Example #35
def submit_trait_form():
    logger.error(request.url)
    species_and_groups = get_species_groups()
    return render_template("submit_trait.html", **{'species_and_groups' : species_and_groups, 'gn_server_url' : GN_SERVER_URL, 'version' : GN_VERSION})
Example #36
def ctl_setup():
    logger.info("In ctl, request.form is:", request.form)             # We are going to get additional user input for the analysis
    logger.error(request.url)
    return render_template("ctl_setup.html", **request.form)          # Display them using the template
Example #37
def get_temp_data():
    logger.error(request.url)
    temp_uuid = request.args['key']
    return flask.jsonify(temp_data.TempData(temp_uuid).get_all())
Example #38
def docedit():
    logger.error(request.url)
    doc = docs.Docs(request.args['entry'])
    return render_template("docedit.html", **doc.__dict__)
Example #39
def generated_file(filename):
    logger.error(request.url)
    return send_from_directory(GENERATED_IMAGE_DIR, filename)
Example #40
def corr_compute_page():
    logger.info("In corr_compute, request.form is:", pf(request.form))
    logger.error(request.url)
    #fd = webqtlFormData.webqtlFormData(request.form)
    template_vars = show_corr_results.CorrelationResults(request.form)
    return render_template("correlation_page.html", **template_vars.__dict__)
Example #41
def marker_regression_page():
    initial_start_vars = request.form
    logger.debug("Marker regression called with initial_start_vars:", initial_start_vars.items())
    logger.error(request.url)
    temp_uuid = initial_start_vars['temp_uuid']
    wanted = (
        'trait_id',
        'dataset',
        'method',
        'mapping_results_path',
        'trimmed_markers',
        'selected_chr',
        'chromosomes',
        'mapping_scale',
        'plotScale',
        'score_type',
        'suggestive',
        'significant',
        'num_perm',
        'permCheck',
        'perm_output',
        'num_bootstrap',
        'bootCheck',
        'bootstrap_results',
        'LRSCheck',
        'covariates',
        'maf',
        'use_loco',
        'manhattan_plot',
        'control_marker',
        'control_marker_db',
        'do_control',
        'genofile',
        'pair_scan',
        'startMb',
        'endMb',
        'graphWidth',
        'lrsMax',
        'additiveCheck',
        'showSNP',
        'showGenes',
        'viewLegend',
        'haplotypeAnalystCheck',
        'mapmethod_rqtl_geno',
        'mapmodel_rqtl_geno'
    )
    start_vars = {}
    for key, value in initial_start_vars.items():
        if key in wanted or key.startswith('value:'):
            start_vars[key] = value
    logger.debug("Marker regression called with start_vars:", start_vars)

    version = "v3"
    key = "marker_regression:{}:".format(version) + json.dumps(start_vars, sort_keys=True)
    logger.info("key is:", pf(key))
    with Bench("Loading cache"):
        result = None # Just for testing
        #result = Redis.get(key)

    #logger.info("************************ Starting result *****************")
    #logger.info("result is [{}]: {}".format(type(result), result))
    #logger.info("************************ Ending result ********************")

    if result:
        logger.info("Cache hit!!!")
        with Bench("Loading results"):
            result = pickle.loads(result)
    else:
        logger.info("Cache miss!!!")
        with Bench("Total time in MarkerRegression"):
            template_vars = marker_regression.MarkerRegression(start_vars, temp_uuid)

        if template_vars.mapping_method != "gemma" and template_vars.mapping_method != "plink":
            template_vars.js_data = json.dumps(template_vars.js_data,
                                               default=json_default_handler,
                                               indent="   ")

        json_filename = webqtlUtil.genRandStr("") + ".json"
        with open(GENERATED_TEXT_DIR + "/" + json_filename, "w") as json_file:
            json_file.write(template_vars.js_data)

        csv_filename = webqtlUtil.genRandStr("") + ".csv"
        with open(GENERATED_TEXT_DIR + "/" + csv_filename, "w") as csv_file:
            writer = csv.writer(csv_file)
            writer.writerow(("Locus", "Chr", "Mb", "LOD"))
            for row in qtl_results:
                score = row["lod_score"] if "lod_score" in row else row["lrs_value"]
                writer.writerow((row["name"], row["chr"], row["Mb"], score))


        result = template_vars.__dict__

        if result['pair_scan']:
            with Bench("Rendering template"):
                img_path = result['pair_scan_filename']
                logger.info("img_path:", img_path)
                initial_start_vars = request.form
                logger.info("initial_start_vars:", initial_start_vars)
                import base64
                with open(TEMPDIR + img_path, 'rb') as imgfile:
                    imgdata = imgfile.read()
                imgB64 = base64.b64encode(imgdata)
                bytesarray = array.array('B', imgB64)
                result['pair_scan_array'] = bytesarray
                rendered_template = render_template("pair_scan_results.html", **result)
        else:
            #for item in template_vars.__dict__.keys():
            #    logger.info("  ---**--- {}: {}".format(type(template_vars.__dict__[item]), item))

            gn1_template_vars = marker_regression_gn1.MarkerRegression(result).__dict__
            #pickled_result = pickle.dumps(result, pickle.HIGHEST_PROTOCOL)
            #logger.info("pickled result length:", len(pickled_result))
            #Redis.set(key, pickled_result)
            #Redis.expire(key, 1*60)

            with Bench("Rendering template"):
                if (gn1_template_vars['mapping_method'] == "gemma") or (gn1_template_vars['mapping_method'] == "plink"):
                    gn1_template_vars.pop('qtlresults', None)
                print("TEMPLATE KEYS:", list(gn1_template_vars.keys()))
                rendered_template = render_template("marker_regression_gn1.html", **gn1_template_vars)

    return rendered_template
Example #42
def help():
    logger.error(request.url)
    doc = docs.Docs("help")
    return render_template("docs.html", **doc.__dict__)