Example #1
    def run_batch(self):
        """
        Runs lmc program with given inputs (if any), until all inputs have been used.
        Checks the outputs to ensure they match those expected (--checker used).
        Plots a graph of input against cycles taken (--graph used).
        """
        if len(self.potential_values) == 0:
            self.run_once()

        # run the program repeatedly for multiple input values
        while len(self.potential_values) > 0:
            self.run_program()

        if self.checker:
            if len(self.unexpected_outputs) == 0:
                print("The program passed all tests.")
            else:
                print("The program failed %d tests." % len(self.unexpected_outputs))
                for inputs, expected_outputs, outputs in self.unexpected_outputs:
                    inputs = ', '.join(str(val) for val in inputs)
                    expected_outputs = ', '.join(str(val) for val in expected_outputs)
                    outputs = ', '.join(str(val) for val in outputs)
                    print("inputs: %s, expected_output: %s, output: %s" % (inputs, expected_outputs, outputs))
        self.write_feedback()

        if self.has_graph:
            plot.plot_graph(self.inputs_and_cycles)
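
# A minimal sketch of what the plot helper used above might look like,
# assuming self.inputs_and_cycles maps each input value to the cycles the
# LMC program took; the dict layout is an assumption, not the original API.
import matplotlib.pyplot as plt

def plot_graph(inputs_and_cycles):
    inputs = sorted(inputs_and_cycles)
    cycles = [inputs_and_cycles[val] for val in inputs]
    plt.plot(inputs, cycles, marker='o')
    plt.xlabel('input value')
    plt.ylabel('cycles taken')
    plt.show()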
Example #2
	def spider_closed(self, spider):
		spider.logger.info('Spider closed')
		self.graph.write_graph_to_file("my_graph.json")
		plot.plot_graph(self.graph)

# Should we allow for spelling errors or should we check for them? 
# at what point should we start implementing a GUI?
# the user should control how much the graph grows.
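
# For context: spider_closed is not called directly; in Scrapy it is wired to
# the spider_closed signal. A typical hookup looks like this (the spider class
# and name are hypothetical; the spider_closed method above belongs on it):
import scrapy
from scrapy import signals

class MovieSpider(scrapy.Spider):
    name = 'movie_spider'

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = super().from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.spider_closed,
                                signal=signals.spider_closed)
        return spider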
Example #3
    def spider_closed(self, spider):
        spider.logger.info('Spider closed')
        self.graph.write_graph_to_file(self.a_file, self.f_file)
        plot.plot_graph(self.graph)


# the graph could scale nodes in terms of popularity, both for films and actors
# need to create a user interface that calls these methods so the user doesn't have to all the time
# CLI takes two arguments for name and title, even though many names and titles have more than two words
# CLI should take one argument and split it up accordingly if need be
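
# A sketch of the single-argument CLI suggested in the note above, assuming
# argparse; the argument name and the split rule are hypothetical.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('query',
                    help='actor name or film title, quoted as one argument')
args = parser.parse_args()
words = args.query.split()  # break the single argument up as needed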
Example #4
def pso():
    swarm = Swarm()
    particle0 = Particle(my_function.min_val, my_function.max_val)
    swarm.particles.append(particle0)
    swarm.global_best_position = particle0.position
    particle0.best_value = my_function.my_function(particle0.position[0], particle0.position[1])
    swarm.global_best_value = particle0.best_value

    for _ in range(max_particle - 1):  # particle0 was already added above
        particle = Particle(my_function.min_val, my_function.max_val)
        swarm.particles.append(particle)
        value = my_function.my_function(particle.position[0], particle.position[1])
        particle.best_value = value
        if value < swarm.global_best_value:
            swarm.global_best_value = value
            swarm.global_best_position = particle.position

    if to_plot:
        plot_graph(None, my_function.function_name, 'viridis', None)
        plot_graph(swarm.particles, 'initial state', 'Pastel1', swarm.global_best_position)

    # position(k+1) = position(k) + v(k+1)
    # v(k+1) = w(k+1) * v(k) +
    #           cp(k+1) * rand * (best_position(k) - position(k)) +
    #           cg(k+1) * rand * (global_best_position - position(k))
    for it in range(max_iteration):
        for particle in swarm.particles:
            v_kplus1 = w(it) * particle.prev_v + \
                       cp(it) * np.random.random() * (particle.best_position - particle.position) + \
                       cg(it) * np.random.random() * (swarm.global_best_position - particle.position)
            next_position = particle.position + v_kplus1
            # evaluate at the new position, which is what best_position stores
            value = my_function.my_function(next_position[0], next_position[1])
            if value < particle.best_value:
                particle.best_value = value
                particle.best_position = next_position
            particle.position = next_position
            particle.prev_v = v_kplus1

        for particle in swarm.particles:
            value = my_function.my_function(particle.position[0], particle.position[1])

            if value < swarm.global_best_value:
                swarm.global_best_value = value
                swarm.global_best_position = particle.position

        if to_plot and it % max(1, max_iteration // 10) == 0:  # max(1, ...) guards max_iteration < 10
            plot_graph(swarm.particles, 'iteration ' + str(it + 1), 'Pastel1', swarm.global_best_position)

    if to_plot:
        plot_graph(swarm.particles, 'final state', 'Pastel1', swarm.global_best_position)

    print('x = {0}'.format(swarm.global_best_position[0]))
    print('y = {0}'.format(swarm.global_best_position[1]))
    print('Global best = {0}'.format(swarm.global_best_value))

    analytic_xy, analytic_best = my_function.analytic_result()
    print('\nAnalytic result:')
    print('x = {0}'.format(analytic_xy[0]))
    print('y = {0}'.format(analytic_xy[1]))
    print('Global best = {0}'.format(analytic_best))
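
# The snippet above assumes a Particle class and coefficient schedules w, cp
# and cg that are not shown. A minimal sketch of plausible versions; the
# linearly decaying inertia weight and the constant cognitive/social weights
# are assumptions, not the original code. Particle is assumed to initialize
# best_position, since the update loop reads it.
import numpy as np

class Particle:
    def __init__(self, min_val, max_val):
        self.position = np.random.uniform(min_val, max_val, size=2)
        self.prev_v = np.zeros(2)
        self.best_position = self.position.copy()
        self.best_value = float('inf')

def w(it):
    return 0.9 - 0.5 * it / max_iteration  # inertia decays from 0.9 toward 0.4

def cp(it):
    return 2.0  # cognitive coefficient

def cg(it):
    return 2.0  # social coefficient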
Example #5
# Imports needed by this excerpt (not shown in the original):
import numpy as np
from pandas import read_csv
from sklearn.ensemble import RandomForestRegressor

def run():
    print("Random Forest Regressor started...")
    dir_path = ""
    train_file_path = dir_path + "train.csv"
    train_file = read_csv(train_file_path, skiprows=1, header=None)

    train_file = train_file.drop(train_file.columns[0], axis=1)
    train_file = train_file.values

    # Build lag features: each sample stacks the 8 raw features from the
    # current row and the 4 preceding rows (5 time steps x 8 features).
    train_X_temp = train_file[5:50000, :-1]
    train_Y = train_file[6:50001, -1]
    train_X = np.zeros((train_X_temp.shape[0], 8 * 5))
    for i in range(train_X_temp.shape[0]):
        for j in range(5):
            for k in range(8):
                train_X[i][j * 8 + k] = train_X_temp[i - j][k]

    test_file_name = dir_path + "test2.csv"
    test_file = read_csv(test_file_name, skiprows=1, header=None)
    test_file = test_file.values
    test_X = np.array(test_file[:, :-1])
    test_y = test_file[:, -1]

    mean_array = []
    variance_array = []
    factors = []
    for i in range(2, 11):
        factors.append(i)
        factor_scores = []  # collect scores for this division factor only
        for j in range(1, 40):
            model = RandomForestRegressor(n_jobs=-1,
                                          max_features=len(train_X[0]) // i)
            estimators = [10]
            scores = []
            prediction = []
            for n in estimators:
                model.set_params(n_estimators=n)
                model.fit(train_X, train_Y)
                scores.append(model.score(test_X, test_y))
                factor_scores.append(scores[0])
                prediction.append(model.predict(test_X))
        mean_array.append(sum(factor_scores) / len(factor_scores))
        variance_array.append(np.var(factor_scores))
    print "Mean array:\n", mean_array
    print "Variance array:\n", variance_array

    plot_graph(factors, variance_array, "Feature division factor",
               "Variance of accuracy scores")
    plot_graph(factors, mean_array, "Feature division factor",
               "Mean of accuracy scores")
Example #6
def index_post():
    if request.form['submit'] == 'Submit':
        try:
            query1 = str(request.form['Query1'])
            query2 = str(request.form['Query2'])
            # tsearch(query1, query2)
            p1, n1, p2, n2 = classifier()
            plot_graph(query1, query2, p1, n1, p2, n2)

            return render_template('home.html',
                                   q1=query1.upper(),
                                   q2=query2.upper(),
                                   p1=p1,
                                   n1=n1,
                                   p2=p2,
                                   n2=n2)
        except Exception:
            return render_template('home.html',
                                   error="Sorry! No Tweets Found,Enter Again")
Example #7
def plot_coverage(title, region_start, region_end, alignment_filename):
    if not validate_alignment(alignment_filename):
        return None
    data = read_file(alignment_filename)[1:]  # skip the header row
    # int() rather than eval(): start/end coordinates are plain integers
    starts = [int(row[0]) for row in data]
    ends = [int(row[1]) for row in data]
    
    counts = coverage(sorted(starts), sorted(ends), region_start, region_end)
    
    alignment_filename = alignment_filename.replace('.csv', '.svg')
    svg_file = plot.plot_graph(region_start, region_end, counts, title, alignment_filename)
    
    if len(counts) == 0:
        minimum_coverage, maximum_coverage, average_coverage = 0, 0, 0
    else:
        minimum_coverage = min(counts)
        maximum_coverage = max(counts)
        average_coverage = sum(counts) / float(len(counts))
    return (minimum_coverage, maximum_coverage, round(average_coverage, 2))
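
# The coverage helper used above is not shown. A plausible sketch, assuming
# half-open [start, end) read intervals: a position is covered by every read
# that has started at or before it and has not yet ended.
import bisect

def coverage(sorted_starts, sorted_ends, region_start, region_end):
    counts = []
    for pos in range(region_start, region_end):
        started = bisect.bisect_right(sorted_starts, pos)
        ended = bisect.bisect_right(sorted_ends, pos)
        counts.append(started - ended)
    return counts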
Example #8
def evaluate(
    airsim_traj_path,
    meshroom_traj_path,
    airsim_ply_path,
    meshroom_ply_path,
    crop_bbox_path,
    alignment_matrix_path,
):
    for path in (airsim_traj_path, meshroom_traj_path, airsim_ply_path,
                 meshroom_ply_path, crop_bbox_path, alignment_matrix_path):
        assert os.path.isfile(path), f"File not found: '{path}'"

    # TODO update TanksAndTemples's python_toolbox/evaluation/run.py and move
    # the main "evaluation portion" of the code to the TanksAndTemples class.
    os.makedirs(EVALUATION_OUT_FOLDER, exist_ok=False)

    # Load reconstruction and corresponding ground truth
    print(meshroom_ply_path)
    pcd = o3d.io.read_point_cloud(meshroom_ply_path)
    print(airsim_ply_path)
    gt_pcd = o3d.io.read_point_cloud(airsim_ply_path)

    traj = read_trajectory(meshroom_traj_path)  # generated .log file
    gt_traj = read_trajectory(airsim_traj_path)  # reference .log file
    gt_trans = np.loadtxt(
        alignment_matrix_path)  # alignment matrix (<scene>_trans.txt)

    transformation = trajectory_alignment(None, traj, gt_traj, gt_trans)

    # Refine alignment by using the actual ground-truth pointcloud
    vol = o3d.visualization.read_selection_polygon_volume(crop_bbox_path)

    # Registration refinement in 3 iterations
    r2 = registration_vol_ds(pcd, gt_pcd, transformation, vol, DTAU, DTAU * 80,
                             20)
    r3 = registration_vol_ds(pcd, gt_pcd, r2.transformation, vol, DTAU / 2,
                             DTAU * 20, 20)
    r = registration_unif(pcd, gt_pcd, r3.transformation, vol, 2 * DTAU, 20)

    # Histograms and P/R/F1
    precision, recall, fscore, *histograms_data = EvaluateHisto(
        SCENE,
        filename_mvs=EVALUATION_OUT_FOLDER,
        source=pcd,
        target=gt_pcd,
        trans=r.transformation,
        crop_volume=vol,
        voxel_size=DTAU / 2,
        threshold=DTAU,
        plot_stretch=5,
    )

    # XXX ^^^^ move to a specific function

    print("==============================")
    print("evaluation result : %s" % SCENE)
    print("==============================")
    print("distance tau : %.3f" % DTAU)
    print("precision : %.4f" % precision)
    print("recall : %.4f" % recall)
    print("f-score : %.4f" % fscore)
    print("==============================")

    # Plotting
    edges_source, cum_source, edges_target, cum_target = histograms_data
    plot_graph(
        SCENE,
        mvs_outpath=EVALUATION_OUT_FOLDER,
        fscore=fscore,
        edges_source=edges_source,
        cum_source=cum_source,
        edges_target=edges_target,
        cum_target=cum_target,
        plot_stretch=5,
        dist_threshold=DTAU,
    )
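
# evaluate() also relies on imports and module-level constants outside this
# excerpt; the constant values below are placeholders, not original settings.
import os
import numpy as np
import open3d as o3d

SCENE = "scene_name"
DTAU = 0.01  # distance threshold tau used for precision/recall/F-score
EVALUATION_OUT_FOLDER = "./evaluation"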
Example #9
from graph import Graph
from plot import plot_graph
from graphRandomizer import generate_Gnl_graph, generate_Gnp_graph

if __name__ == "__main__":
    print("Generate G(n, l):")
    G_nl = generate_Gnl_graph(6, 10)
    plot_graph(G_nl)

    print("Generate G(n, p):")
    G_np = generate_Gnp_graph(8, 0.3)
    plot_graph(G_np)
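
# generate_Gnp_graph is not shown above; the idea behind a G(n, p) graph is
# that each of the n*(n-1)/2 possible edges is included independently with
# probability p. A sketch returning an adjacency dict (the real module
# returns its own Graph type, so the name here is marked as a sketch):
import random

def generate_Gnp_graph_sketch(n, p):
    adjacency = {v: [] for v in range(n)}
    for u in range(n):
        for v in range(u + 1, n):
            if random.random() < p:
                adjacency[u].append(v)
                adjacency[v].append(u)
    return adjacency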
Example #10
def run_evaluation(dataset_dir, traj_path, ply_path, out_dir):
    scene = os.path.basename(os.path.normpath(dataset_dir))

    if scene not in scenes_tau_dict:
        print(dataset_dir, scene)
        raise Exception("invalid dataset-dir, not in scenes_tau_dict")

    print("")
    print("===========================")
    print("Evaluating %s" % scene)
    print("===========================")

    dTau = scenes_tau_dict[scene]
    # put the crop-file, the GT file, the COLMAP SfM log file and
    # the alignment of the according scene in a folder of
    # the same scene name in the dataset_dir
    colmap_ref_logfile = os.path.join(dataset_dir, scene + "_COLMAP_SfM.log")
    alignment = os.path.join(dataset_dir, scene + "_trans.txt")
    gt_filen = os.path.join(dataset_dir, scene + ".ply")
    cropfile = os.path.join(dataset_dir, scene + ".json")
    map_file = os.path.join(dataset_dir, scene + "_mapping_reference.txt")

    make_dir(out_dir)

    # Load reconstruction and corresponding GT
    print(ply_path)
    pcd = o3d.io.read_point_cloud(ply_path)
    print(gt_filen)
    gt_pcd = o3d.io.read_point_cloud(gt_filen)

    gt_trans = np.loadtxt(alignment)
    traj_to_register = read_trajectory(traj_path)
    gt_traj_col = read_trajectory(colmap_ref_logfile)

    trajectory_transform = trajectory_alignment(map_file, traj_to_register,
                                                gt_traj_col, gt_trans, scene)

    # Refine alignment by using the actual GT and MVS pointclouds
    vol = o3d.visualization.read_selection_polygon_volume(cropfile)
    # big point clouds will be downsampled to speed up alignment
    dist_threshold = dTau

    # Registration refinement in 3 iterations
    r2 = registration_vol_ds(pcd, gt_pcd, trajectory_transform, vol, dTau,
                             dTau * 80, 20)
    r3 = registration_vol_ds(pcd, gt_pcd, r2.transformation, vol, dTau / 2.0,
                             dTau * 20, 20)
    r = registration_unif(pcd, gt_pcd, r3.transformation, vol, 2 * dTau, 20)

    # Histograms and P/R/F1
    plot_stretch = 5
    [
        precision,
        recall,
        fscore,
        edges_source,
        cum_source,
        edges_target,
        cum_target,
    ] = EvaluateHisto(
        pcd,
        gt_pcd,
        r.transformation,
        vol,
        dTau / 2.0,
        dTau,
        out_dir,
        plot_stretch,
        scene,
    )
    print("==============================")
    print("evaluation result : %s" % scene)
    print("==============================")
    print("distance tau : %.3f" % dTau)
    print("precision : %.4f" % precision)
    print("recall : %.4f" % recall)
    print("f-score : %.4f" % fscore)
    print("==============================")

    # Plotting
    plot_graph(
        scene,
        fscore,
        dist_threshold,
        edges_source,
        cum_source,
        edges_target,
        cum_target,
        plot_stretch,
        out_dir,
    )
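
# A hypothetical invocation; the paths are placeholders. dataset_dir must
# contain <scene>.ply, <scene>.json, <scene>_trans.txt, <scene>_COLMAP_SfM.log
# and <scene>_mapping_reference.txt, as loaded above.
run_evaluation(dataset_dir="data/Barn",
               traj_path="reconstruction/Barn_COLMAP.log",
               ply_path="reconstruction/Barn.ply",
               out_dir="evaluation/Barn")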
Example #11
	users = cur.fetchall()
	con.close()
	return users

def clear_data():
	con = sql.connect("database.db")
	cur = con.cursor()
	cur.execute("DELETE FROM users")
	con.commit()
	con.close()

def reset_data():
	con = sql.connect('database.db')
	cur = con.cursor()
	cur.execute("DROP TABLE IF EXISTS users")
	con.commit()
	con.close()

if __name__ == '__main__':
	print("Create table? (1 = yes): ")
	choice = int(input())
	if choice == 1:
		createtbl()
	d = getPaths.main()
	for i in d[:5]:
		insert_db(i[0], i[1])
	print(get_data())
	plot.plot_graph(g)  # g: graph object built elsewhere in the original script
	reset_data()
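
# The helpers createtbl and insert_db are not part of this excerpt; a minimal
# sketch consistent with the calls above (the column names are assumptions,
# and the excerpt itself assumes `import sqlite3 as sql` at module top):
import sqlite3 as sql

def createtbl():
	con = sql.connect("database.db")
	con.execute("CREATE TABLE IF NOT EXISTS users (name TEXT, path TEXT)")
	con.commit()
	con.close()

def insert_db(name, path):
	con = sql.connect("database.db")
	con.execute("INSERT INTO users VALUES (?, ?)", (name, path))
	con.commit()
	con.close()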
Example #12
'''
This is the entry point for the program; everything happens through here.
'''

import fetch
import input
import plot
import simplify

data = 0
while data == 0:
    # Get the input from the user
    symbol = input.get_input()

    # Fetch the data from Google Finance
    if symbol:
        data = fetch.get_data(symbol)

# Simplify the data from Google
simplified_data = simplify.simplify_data(data)

# Plot the graph of the stock price over time
plot.plot_graph(simplified_data)
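
# plot.plot_graph is not shown; assuming simplified_data is a sequence of
# (timestamp, price) pairs, a minimal matplotlib sketch would be:
import matplotlib.pyplot as plt

def plot_graph(simplified_data):
    times, prices = zip(*simplified_data)
    plt.plot(times, prices)
    plt.xlabel('time')
    plt.ylabel('price')
    plt.show()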
Example #13
from handleInput import check_type_of_input, BadInputException
from graph import Graph
from plot import plot_graph

if __name__ == "__main__":
    with open('input/input.txt', 'r') as graph_input:
        try:
            graph_input = graph_input.readlines()
            input_type = check_type_of_input(graph_input)
            graph = Graph.create_graph_representation(input_type, graph_input)

            plot_graph(graph)

        except BadInputException:
            print(BadInputException)