Example #1
def generate_output(steps):
    import output

    cursor = conn.cursor()
    # All shooter objects are echoed in the output alongside the matrix.
    all_shooters = cursor.execute("""
        SELECT object_id, name, object_type
        FROM OBJECTS
        WHERE object_type = 'shooter'
        """).fetchall()

    rows = cursor.execute("""
        SELECT step, x, y, object_type, object_id
        FROM MATRIX
        ORDER BY step
        """).fetchall()

    # One slot per step; each slot collects the objects present at that step.
    matrix = [None] * (steps + 1)
    for row in rows:
        obj = {"x": row["x"], "y": row["y"], "o": row["object_type"][0]}
        if matrix[row["step"]] is None:
            matrix[row["step"]] = []
        matrix[row["step"]].append(obj)

    result = calculate_result()
    data = {"shooters": all_shooters, "matrix": matrix, "result": result}
    output.write_output(PATH, data, max_x, max_y)
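The snippet assumes module-level globals (conn, PATH, max_x, max_y) and rows that support name-based access such as row["step"]. With the standard-library sqlite3, that access style comes from setting the connection's row_factory; a minimal sketch of the assumed setup (the database path is hypothetical):

import sqlite3

conn = sqlite3.connect("game.db")  # hypothetical path
conn.row_factory = sqlite3.Row     # rows become name-addressable: row["x"], row["step"]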
Example #2
def manager():
    """Controls the program execution.

    Allows the user to select between analysis and verification. 

    If analysis is chosen the user_input module is called to get the input
    from the user. The mlob routine is run to generate the internal forces.
    Finally the output module is passed the user input and program output
    to write the output to the user.

    If the verification is chosen the verification routine is run which compares
    AREMA Tb 15-1-16 values to the program calculated values. Both values are
    reported for each span length specified in the AREMA table and the relative
    error between the table value and program value.

    Args:
        None

    Returns:
        None
    """
    while True:
        option = user_option()

        if option == "a" or option == "A" or option == "Analysis":
            (axle_spacing, axle_wt, space_to_trailing_load, distributed_load,
             span_length1, span_length2, num_nodes) = get_input()

            # Copy the axle lists so the echoed user input is unaffected by
            # any later mutation during the analysis.
            uias = list(axle_spacing)  # user_input_axle_spacing
            uiaw = list(axle_wt)       # user_input_axle_wt

            start = timeit.default_timer()

            (node_loc, V_max1, M_corr1, V_max1_axle, M_max1, V_corr1,
             M_max1_axle, V_max2, M_corr2, V_max2_axle, M_max2, V_corr2,
             M_max2_axle, Rmax_pier, Rmax_pier_axle, span1_begin,
             span2_begin) = analyze_vehicle(axle_spacing, axle_wt,
                                            span_length1, span_length2,
                                            num_nodes, space_to_trailing_load,
                                            distributed_load)

            stop = timeit.default_timer()

            analysis_time = stop - start

            write_output(uias, uiaw, span_length1, span_length2, num_nodes,
                         space_to_trailing_load, distributed_load, node_loc,
                         V_max1, M_corr1, V_max1_axle, M_max1, V_corr1,
                         M_max1_axle, V_max2, M_corr2, V_max2_axle, M_max2,
                         V_corr2, M_max2_axle, Rmax_pier, Rmax_pier_axle,
                         analysis_time, span1_begin, span2_begin)
        elif option == "v" or option == "V" or option == "Verify":
            user_verification()
        else:
            print "Invalid command."
def main():
    raw_content = parsing.parse_content()["concepts"]
    templated_concepts = [
        templating.apply_template("templates/concept.html", **concept)
        for concept in raw_content]
    concatenated_concepts = "".join(templated_concepts)
    content = templating.apply_template("templates/page.html",
                                        content=concatenated_concepts)
    output.write_output(content)
Example #4
def main():
    raw_content = parsing.parse_content()["concepts"]
    templated_concepts = [
        templating.apply_template("templates/concept.html", **concept)
        for concept in raw_content
    ]
    concatenated_concepts = "".join(templated_concepts)
    content = templating.apply_template("templates/page.html",
                                        content=concatenated_concepts)
    output.write_output(content)
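Neither example shows templating.apply_template itself. A plausible minimal version, assuming the templates are plain text files with {name}-style placeholders filled via str.format (a hypothetical reconstruction, not the project's actual helper):

def apply_template(path, **fields):
    # Read the template and substitute {name}-style placeholders.
    with open(path, encoding="utf-8") as f:
        return f.read().format(**fields)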
Example #5
def main():
    parser = ArgumentParser()
    parser.add_argument("file", type=str, help="the text file database")
    parser.add_argument("person",
                        type=str,
                        help="the person to make the family tree for")
    args = parser.parse_args()

    fam = read_input(args.file)

    output_file = "fam"
    write_output(fam, fam[args.person], output_file)
    system("inkscape {0}.svg --export-pdf {0}.pdf".format(output_file))
Example #6
def main():
    """
    Main Function
    :return: None
    """
    start_time = time.time()
    flows = loader.load_flows(par.Year, par.Month, par.Day, par.HourStart,
                              par.MinuteStart, par.HourEnd, par.MinuteEnd)
    print "loaded " + str(len(flows)) + " flows",
    print("--- %s seconds ---" % (time.time() - start_time))

    flows = loader.preprocess_dataf(flows)
    pivot_flows = loader.preprocess_for_pivots(flows)
    print "preprocessed " + str(len(flows)) + " flows",
    print("--- %s seconds ---" % (time.time() - start_time))

    if not par.OnlyPivots:
        G_Complete = graphs.multidigraph_from_dataf(flows)
    G = graphs.multidigraph_from_dataf(pivot_flows)
    print "graph created",
    print("--- %s seconds ---" % (time.time() - start_time))

    nodes_pivots, edges_pivots = pivdet.detect_pivoting(G)
    print "pivot detection over, " + str(len(nodes_pivots)) + " pivots",
    print("--- %s seconds ---" % (time.time() - start_time))

    if par.OnlyPivots:
        risks_pivots = pivdet.find_malicious_pivoting(G, nodes_pivots,
                                                      edges_pivots)
    else:
        risks_pivots = pivdet.find_malicious_pivoting(G_Complete, nodes_pivots,
                                                      edges_pivots)
    print "malicious pivots detection over",
    print("--- %s seconds ---" % (time.time() - start_time))

    output.write_output("pivot_detection", G, nodes_pivots, edges_pivots,
                        risks_pivots)
    if len(nodes_pivots) > 0:
        if par.OnlyPivots:
            graphs.draw_graph(G, nodes_pivots, edges_pivots)
        else:
            graphs.draw_graph(G_Complete, nodes_pivots, edges_pivots)

    print "End, ",
    print("--- %s seconds ---" % (time.time() - start_time))

    return None
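Each stage above repeats the same elapsed-time print; a small helper would keep the stage reports in one place (a sketch; report_stage is an invented name, not part of the original module):

import time

def report_stage(message, start_time):
    # One-line stage report with elapsed seconds since start_time.
    print("%s --- %s seconds ---" % (message, time.time() - start_time))

Usage would then read, e.g., report_stage("graph created", start_time).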
Example #7
def run():
    parser = argparse.ArgumentParser(description="TREC-COVID document ranker CLI")
    parser.add_argument("-v", "--verbose", help="Increase output verbosity", action="store_true", default=False)
    parser.add_argument("-cp", "--compute_pickle", help="Compute mapping from internal lucene id's to external docid's", action="store_true", default=False)
    parser.add_argument("-n", "--n_queries", help="Naximum number of queries to run", type=int, default=999)
    parser.add_argument("-m", "--model", help="which model used in ranking from {bm25, tf_idf}", default="bm25")
    parser.add_argument("-d", "--doc_at_a_time", help="Use document_at_a_time algorithm", action="store_true", default=False)
    parser.add_argument("-k", "--k_docs", help="Numer of documents to retrieve", type=int, default=100)
    parser.add_argument("-r", "--rerank", help="Which rerank model to use 'rocchio', or 'ide'", default="none")
    args = parser.parse_args()
    global verbose
    verbose = args.verbose
    model = args.model
    doc_at_a_time = args.doc_at_a_time
    k = args.k_docs
    rerank = args.rerank

    index_reader = IndexReader(LUCENE_INDEX)
    searcher = SimpleSearcher(LUCENE_INDEX)
    models = Models(index_reader, QRELFILE)
    trec_index = Index(index_reader, searcher)

    if not os.path.exists('output'):
        os.makedirs('output')

    if args.compute_pickle:
        print("Computing id index dict")
        docidx_docid = {
            docidx: (trec_index.get_docid_from_index(docidx),
                     trec_index.get_n_of_words_in_inverted_list_doc(docidx))
            for docidx in range(trec_index.get_max_docindex())}
        with open('blob/mapping.pickle', 'wb') as handle:
            pickle.dump(docidx_docid, handle, protocol=pickle.HIGHEST_PROTOCOL)
    # The mapping is always (re)loaded from the pickle afterwards.
    with open('blob/mapping.pickle', 'rb') as handle:
        print("Loading id index dict")
        docidx_docid = pickle.load(handle)
    print("Finished initializing id index dict")

    topics = parse_topics(TOPICSFILE)

    if model == "bm25":
        rankfun = score_bm25
    elif model == "tf_idf":
        rankfun = score_tf_idf
    else:
        print("Model should be 'tf_idf' or 'bm25' (default)!")
        sys.exit(1)

    t = time.localtime()
    current_time = time.strftime("%H:%M", t)
    rankfile = "output/ranking-{0}-{1}.txt".format(model, current_time)
    resultfile = "output/results-{0}-{1}.json".format(model, current_time)

    if doc_at_a_time:
        # The with-statement closes the file automatically, even on error.
        with open(rankfile, 'w') as outfile:
            for idx in range(1, min(args.n_queries + 1, len(topics) + 1)):
                for i, (score, docid) in enumerate(
                        document_at_a_time(topics[str(idx)]["query"],
                                           trec_index, models, k,
                                           docidx_docid), 1):
                    outfile.write(write_output(idx, docid, i, score,
                                               "document_at_a_time"))
    else:
        with open(rankfile, 'w') as outfile:
            for idx in range(1, min(args.n_queries + 1, len(topics) + 1)):
                for i, (score, docid) in enumerate(
                        get_docs_and_score_query(topics[str(idx)]["query"],
                                                 rankfun, trec_index, models,
                                                 idx, k, docidx_docid,
                                                 rerank=rerank), 1):
                    outfile.write(write_output(idx, docid, i, score,
                                               "score_query"))

    results = pytrec_evaluation(rankfile, QRELFILE)
    with open(resultfile, 'w') as outjson:
        json.dump(results, outjson)
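write_output here returns one formatted line per retrieved document. A plausible formatter, assuming the standard six-column TREC run format (query id, the literal Q0, docid, rank, score, run tag); the project's actual implementation may differ:

def write_output(query_id, docid, rank, score, tag):
    # Standard TREC run-file line: qid Q0 docid rank score tag
    return "{0} Q0 {1} {2} {3} {4}\n".format(query_id, docid, rank, score, tag)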