Example 1
def nemesis():
    search = Search()
    if search.validate_on_submit():
        results = Superheroes.query.filter(
            Superheroes.nemesis == search.nemesis.data.upper()).all()
        # Swap the stored power IDs (p1, p2, p3) for their power names so
        # the template can show readable labels.
        for x in results:
            p1 = int(x.__dict__['p1'])
            p1id = Powers.query.filter(Powers.id == p1).first()
            x.__dict__['p1'] = p1id.power
            p2 = int(x.__dict__['p2'])
            p2id = Powers.query.filter(Powers.id == p2).first()
            x.__dict__['p2'] = p2id.power
            p3 = int(x.__dict__['p3'])
            p3id = Powers.query.filter(Powers.id == p3).first()
            x.__dict__['p3'] = p3id.power
        return render_template("show.html", superherodata=results)
    return render_template("searchnemesis.html", search=search)


#publisher, name, alterego, power, team, sidekick, nemesis
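
Note: the loop above, which swaps the stored power IDs for power names, reappears almost verbatim in Examples 4 and 5 below. A minimal sketch of a shared helper that could replace it, assuming the same Superheroes and Powers models (the helper name is illustrative):

def resolve_power_names(heroes):
    # Replace the numeric power IDs stored in p1, p2 and p3 with the
    # corresponding power names, in place, and return the same list.
    for hero in heroes:
        for col in ('p1', 'p2', 'p3'):
            row = Powers.query.filter(
                Powers.id == int(hero.__dict__[col])).first()
            if row is not None:
                hero.__dict__[col] = row.power
    return heroes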
Example 2
def search():
    form = Search()
    if form.validate_on_submit():
        search = lookup(form.search.data)
        return render_template("search.html", form=form, search=search)

    return render_template("search.html", form=form)
Example 3
def name():
    search = Search()
    if search.validate_on_submit():
        print(search.name.data)
        results = Superheroes.query.filter(
            Superheroes.name == search.name.data.upper()).all()
        return render_template("show.html", superherodata=results)
    return render_template("searchname.html", search=search)
Example 4
def power():
    search = Search()
    if search.validate_on_submit():
        #pquery=Powers.query.filter(Powers.power.in_(search.power.data.upper())).first()
        pquery = Powers.query.filter(
            Powers.power == search.power.data.upper()).first()
        # Heroes that have the matched power in their first, second or
        # third power slot, respectively.
        p1q = Superheroes.query.filter(Superheroes.p1 == pquery.id).all()
        p2q = Superheroes.query.filter(Superheroes.p2 == pquery.id).all()
        p3q = Superheroes.query.filter(Superheroes.p3 == pquery.id).all()
        #results=Superheroes.query.filter(Superheroes.p1==pquery.id, Superheroes.p2==pquery.id, Superheroes.p3==pquery.id).all()
        # Swap stored power IDs for power names on each matched hero; the
        # same loop runs for each of the three result lists.
        for x in p1q:
            p1 = int(x.__dict__['p1'])
            p1id = Powers.query.filter(Powers.id == p1).first()
            x.__dict__['p1'] = p1id.power
            p2 = int(x.__dict__['p2'])
            p2id = Powers.query.filter(Powers.id == p2).first()
            x.__dict__['p2'] = p2id.power
            p3 = int(x.__dict__['p3'])
            p3id = Powers.query.filter(Powers.id == p3).first()
            x.__dict__['p3'] = p3id.power
        for x in p2q:
            p1 = int(x.__dict__['p1'])
            p1id = Powers.query.filter(Powers.id == p1).first()
            x.__dict__['p1'] = p1id.power
            p2 = int(x.__dict__['p2'])
            p2id = Powers.query.filter(Powers.id == p2).first()
            x.__dict__['p2'] = p2id.power
            p3 = int(x.__dict__['p3'])
            p3id = Powers.query.filter(Powers.id == p3).first()
            x.__dict__['p3'] = p3id.power
        for x in p3q:
            p1 = int(x.__dict__['p1'])
            p1id = Powers.query.filter(Powers.id == p1).first()
            x.__dict__['p1'] = p1id.power
            p2 = int(x.__dict__['p2'])
            p2id = Powers.query.filter(Powers.id == p2).first()
            x.__dict__['p2'] = p2id.power
            p3 = int(x.__dict__['p3'])
            p3id = Powers.query.filter(Powers.id == p3).first()
            x.__dict__['p3'] = p3id.power
        return render_template("showpowers.html", p1=p1q, p2=p2q, p3=p3q)
    return render_template("searchpower.html", search=search)
Example 5
def sidekick():
    search = Search()
    if search.validate_on_submit():
        results = Superheroes.query.filter(
            Superheroes.sidekick == search.sidekick.data.upper()).all()
        # Swap the stored power IDs (p1, p2, p3) for their power names so
        # the template can show readable labels.
        for x in results:
            p1 = int(x.__dict__['p1'])
            p1id = Powers.query.filter(Powers.id == p1).first()
            x.__dict__['p1'] = p1id.power
            p2 = int(x.__dict__['p2'])
            p2id = Powers.query.filter(Powers.id == p2).first()
            x.__dict__['p2'] = p2id.power
            p3 = int(x.__dict__['p3'])
            p3id = Powers.query.filter(Powers.id == p3).first()
            x.__dict__['p3'] = p3id.power
        return render_template("show.html", superherodata=results)
    return render_template("searchsidekick.html", search=search)
Example 6
def fileResult(process_id, ref_genome, file_name):
    form = Search()
    intervalform = intervalForm()
    fileform = fileForm(CombinedMultiDict((request.files, request.form)))

    if request.method == 'POST' and fileform.validate_on_submit():
        # check if the post request has the file part
        if 'file' not in request.files:
            return home("No selected file")

        result = request.form
        ref_genome = result['reference genome']

        file = request.files['file']

        if file.filename == '':
            return home("No selected file")

        if ref_genome == "":
            return home("No reference genome")

        if file:
            file_name = secure_filename(file.filename)
            process_id = str(random.randint(0, sys.maxsize))
            # Save the upload under SERVER_PATH/uploads/ as the process id
            # followed by whatever comes after the first '.' in the name.
            file.save(app.config["SERVER_PATH"] + "/uploads/" +
                      str(process_id) + file_name.split(".", 1)[-1])
            return redirect("/search/{}/{}/{}".format(process_id, ref_genome,
                                                      file_name))

    print("INITIATING SEARCH")
    start_total = time.time()
    start_task = time.time()

    out = utility.file_search(process_id, ref_genome, file_name)

    if isinstance(out, str):  # parsing error occurred
        return home(out)
    else:
        result_df = out

    end_task = time.time()
    print("#####")
    print("SEARCH COMPLETED ({} seconds)".format(end_task - start_task))

    # make array of links for pages
    start_task = time.time()

    # out = utility.retrieve_metadata(ref_genome) #generic function to handle data information gathering
    # if isinstance(out, str): # parsing error occured
    #     return home(out)
    # else:
    #     metadata_df = out

    # result_df = pd.merge(result_df,
    #                      metadata_df,
    #                      left_on ="name",
    #                      right_on ="NAME",
    #                      how ="left")
    # print(result_df)
    result_df.sort_values("overlaps", inplace=True)

    end_task = time.time()
    print("########")
    print("DATA SOURCE INFOMATION COLLECTED ({} seconds)".format(end_task -
                                                                 start_task))

    #Pagination
    # determining results displayed on current page

    current_page = 1
    number_pages = int(math.ceil(len(result_df.index) / 10.0))
    if current_page < 7 or number_pages < 10:
        if number_pages < 10:
            displayed_page_numbers = list(range(1, number_pages + 1))
        else:
            displayed_page_numbers = list(range(1, 11))
    else:
        if number_pages < (current_page + 4):
            displayed_page_numbers = list(
                range(current_page - 5, number_pages + 1))
        else:
            displayed_page_numbers = list(
                range(current_page - 5, current_page + 5))

    # EDIT BELOW
    current_page_results = result_df.iloc[(current_page * 10 -
                                           10):(current_page * 10)]
    print("Pagination", displayed_page_numbers, current_page, number_pages)

    end_total = time.time()
    print("##########")
    print("TOTAL SEARCH TIME ELAPSED {}".format(end_total - start_total))
    print("####################")
    print("RENDERING RESULTS {}-{}".format(current_page * 10 - 10,
                                           current_page * 10))
    print("########################################")
    # [0 filename, 1 total region, 2 overlap, 3 short name, 4 long name, 5 description, 6 short description, 7 ID]
    result_df.reset_index(inplace=True)
    print(result_df.columns)

    result_df = result_df.fillna("").sort_values(
        "combo_score", ascending=False)  # FIX ME SHOULD this be asc
    results = result_df[[
        "FILEID", "SIZE", "overlaps", "SHORTNAME", "LONGNAME", "LONGINFO",
        "SHORTINFO", "index", "combo_score"
    ]].to_numpy()
    return render_template('file_result.html',
                           form=fileform,
                           results=results.tolist(),
                           searchtime=end_total - start_total,
                           file_name=file_name,
                           ref_genome=ref_genome,
                           numresults=len(result_df.index),
                           source=ref_genome,
                           page=1,
                           pageNums=displayed_page_numbers)
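
The page-window calculation above is duplicated in Example 7 and only ever runs with current_page = 1. A standalone sketch of the same windowing logic (the function name is illustrative):

import math

def page_window(current_page, total_results, per_page=10):
    # Show up to ten page links: pages 1-10 near the start, otherwise a
    # ten-page window running from five pages before the current page to
    # four pages after it, clipped at the last page.
    number_pages = int(math.ceil(total_results / float(per_page)))
    if current_page < 7 or number_pages < 10:
        if number_pages < 10:
            return list(range(1, number_pages + 1))
        return list(range(1, 11))
    if number_pages < current_page + 4:
        return list(range(current_page - 5, number_pages + 1))
    return list(range(current_page - 5, current_page + 5))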
Example 7
def intervalResult(ref_genome, chrom, lower, upper):
    form = Search()
    intervalform = intervalForm()
    fileform = fileForm(CombinedMultiDict((request.files, request.form)))

    current_interval = [
        str(chrom) + ":" + str(lower) + "-" + str(upper), ref_genome
    ]
    if session.get('intervals', None) is None:
        session["intervals"] = [[chrom, lower, upper, ref_genome]]

    print("Result received:", session.get('intervals', None))
    print("#")

    if form.validate_on_submit():
        out = utility.parse_interval_input(form.Input.data)
        if isinstance(out, str):  # parsing error occurred
            return home(error=out)
        else:
            session['intervals'] = [[
                int(i[0].split(":")[0]),
                int(i[0].split(":")[1].split("-")[0]),
                int(i[0].split(":")[1].split("-")[1]), i[1]
            ] for i in out]
            return redirect("/result/{}/{}".format(out[0][1], out[0][0]))

    print("INITIATING SEARCH")
    start_total = time.time()
    start_task = time.time()

    out = utility.interval_search(ref_genome, chrom, lower, upper)

    if isinstance(out, str):  # parsing error occurred
        return render_template('home.html',
                               fileform=fileform,
                               intervalform=intervalform,
                               error=out)
    else:
        result_df = out

    end_task = time.time()
    print("#####")
    print("SEARCH COMPLETED ({} seconds)".format(end_task - start_task))

    # make array of links for pages
    start_task = time.time()

    # out = utility.retrieve_metadata(ref_genome) #generic function to handle data information gathering
    # if isinstance(out, str): # parsing error occured
    #     return render_template('home.html', fileform = fileform, intervalform = intervalform, error = out)
    # else:
    #     metadata_df = out

    # result_df = pd.merge(result_df,
    #                      metadata_df,
    #                      left_on ="name",
    #                      right_on ="NAME",
    #                      how ="left")
    print(result_df)
    result_df.sort_values("overlaps", ascending=False, inplace=True)

    end_task = time.time()
    print("########")
    print("DATA SOURCE INFOMATION COLLECTED ({} seconds)".format(end_task -
                                                                 start_task))

    #Pagination
    # determining results displayed on current page

    current_page = 1
    number_pages = int(math.ceil(len(result_df.index) / 10.0))
    if current_page < 7 or number_pages < 10:
        if number_pages < 10:
            displayed_page_numbers = list(range(1, number_pages + 1))
        else:
            displayed_page_numbers = list(range(1, 11))
    else:
        if number_pages < (current_page + 4):
            displayed_page_numbers = list(
                range(current_page - 5, number_pages + 1))
        else:
            displayed_page_numbers = list(
                range(current_page - 5, current_page + 5))

    # EDIT BELOW
    current_page_results = result_df.iloc[(current_page * 10 -
                                           10):(current_page * 10)]
    print("Pagination", displayed_page_numbers, current_page, number_pages)

    end_total = time.time()
    print("##########")
    print("TOTAL SEARCH TIME ELAPSED {}".format(end_total - start_total))
    print("####################")
    print("RENDERING RESULTS {}-{}".format(current_page * 10 - 10,
                                           current_page * 10))
    print("########################################")
    # [0 filename, 1 total region, 2 overlap, 3 short name, 4 long name, 5 description, 6 short description, 7 ID]
    result_df.reset_index(inplace=True)
    print(result_df.columns)
    result_df = result_df.fillna("")
    results = result_df[[
        "FILEID", "size", "overlaps", "SHORTNAME", "LONGNAME", "LONGINFO",
        "SHORTINFO", "index"
    ]].to_numpy()

    print(session.get('intervals', None))
    return render_template(
        'interval_result.html',
        current_interval=current_interval,
        current_interval_split=[chrom, lower, upper, ref_genome],
        form=form,
        results=results.tolist(),
        searchtime=end_total - start_total,
        sessionIntervals=session['intervals'],
        numresults=len(result_df.index),
        source=ref_genome,
        page=1,
        pageNums=displayed_page_numbers)
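
For reference, the list comprehension in the validate_on_submit branch expects utility.parse_interval_input to return pairs of an interval string ("chrom:lower-upper") and a genome label. A small illustration of that unpacking on a hypothetical parsed value:

out = [["1:1000-2000", "hg38"], ["2:500-800", "hg38"]]  # hypothetical parsed intervals
intervals = [[
    int(i[0].split(":")[0]),
    int(i[0].split(":")[1].split("-")[0]),
    int(i[0].split(":")[1].split("-")[1]), i[1]
] for i in out]
print(intervals)  # [[1, 1000, 2000, 'hg38'], [2, 500, 800, 'hg38']]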