Example #1
def build_detail_site(data, label_func, j2_env, linestyles, batch=False):
    for (name, runs) in data.items():
        print("Building '%s'" % name)
        all_runs = runs.keys()
        label = label_func(name)
        data = {"normal": [], "scatter": []}

        for plottype in args.plottype:
            xn, yn = plot_variants[plottype]
            data["normal"].append(
                create_plot(runs, xn, yn, convert_linestyle(linestyles),
                            j2_env))
            if args.scatter:
                data["scatter"].append(
                    create_plot(runs, xn, yn, convert_linestyle(linestyles),
                                j2_env, "Scatterplot ", "bubble"))

        # create png plot for summary page
        data_for_plot = {}
        for k in runs.keys():
            data_for_plot[k] = prepare_data(runs[k], 'k-nn', 'qps')
        plot.create_plot(
            data_for_plot, False, False, True, 'k-nn', 'qps',
            args.outputdir + get_algorithm_name(name, batch) + ".png",
            linestyles, batch)
        with open(args.outputdir + get_algorithm_name(name, batch) + ".html",
                  "w") as text_file:
            text_file.write(
                j2_env.get_template("detail_page.html").render(title=label,
                                                               plot_data=data,
                                                               args=args,
                                                               batch=batch))
Example #2
def build_detail_site(data, label_func, j2_env, linestyles, batch=False):
    for (name, runs) in data.items():
        print("Building '%s'" % name)
        all_runs = runs.keys()
        label = label_func(name)
        data = {"normal": [], "scatter": []}

        for plottype in args.plottype:
            xn, yn = plot_variants[plottype]
            data["normal"].append(create_plot(
                runs, xn, yn, convert_linestyle(linestyles), j2_env))
            if args.scatter:
                data["scatter"].append(
                    create_plot(runs, xn, yn, convert_linestyle(linestyles),
                                j2_env, "Scatterplot ", "bubble"))

        # create png plot for summary page
        data_for_plot = {}
        for k in runs.keys():
            data_for_plot[k] = prepare_data(runs[k], 'k-nn', 'qps')
        plot.create_plot(
            data_for_plot, False,
            False, True, 'k-nn', 'qps',
            args.outputdir + get_algorithm_name(name, batch) + ".png",
            linestyles, batch)
        output_path = "".join([args.outputdir,
                               get_algorithm_name(name, batch),
                               ".html"])
        with open(output_path, "w") as text_file:
            text_file.write(j2_env.get_template("detail_page.html").
                            render(title=label, plot_data=data,
                                   args=args, batch=batch))
Example #3
def optimize(
    filename='sodium-chloride-example.npz',
    dim=3,
    temp=120,
    width=0,
    tol_opt=1 / 500,
):

    print('optimize')
    parameters, positions, types, boxsize = load.load_input_file(filename)
    width = boxsize[1] / 1000
    particles = create_particle_object(parameters, positions, types)
    print(parameters)
    box = system.Box(dim, boxsize, particles, temp)
    # return system.Box(dim, boxsize, particles, temp)

    if width == 0:  # if no width specified
        width = boxsize[0] / 5000
    r_skin_LJ = 2 * n_reuse_nblist * np.linalg.norm(width * np.ones(3))
    save_system_history = True

    box.compute_LJneighbourlist(r_cut_LJ, r_skin_LJ)
    box.compute_LJ_potential(r_cut_LJ, r_skin_LJ)
    box.optimize(n_opt_max, n_steps_opt, tol_opt, n_reuse_nblist, n_skip,
                 width, save_system_history, r_cut_LJ, r_skin_LJ)
    box.simulate(n_steps_sim, n_reuse_nblist, n_skip, width,
                 save_system_history, r_cut_LJ, r_skin_LJ)

    pos_history = box.pos_history
    pot_history = np.asarray(box.pot_history)

    plot.create_plot(boxsize[1], pos_history, pot_history, r_cut_LJ,
                     n_steps_opt, n_skip)
    print('running time:', time.time() - start)
Example #4
def makePlot(self, home, away, betData, dbResult):
    if len(betData) > 0:
        if len(betData[0]) == 3 and None not in betData[0]:
            winH = list(map(lambda x: x[0], betData))
            remisX = list(map(lambda x: x[2], betData))
            winA = list(map(lambda x: x[1], betData))
            plot.create_plot(home,
                             away,
                             winH,
                             winA,
                             remisX,
                             result=dbResult)
        elif len(betData[0]) == 2 or None in betData[0]:
            winH = list(map(lambda x: x[0], betData))
            winA = list(map(lambda x: x[1], betData))
            plot.create_plot(home, away, winH, winA, result=dbResult)
Example #5
def index():
    """
    the main page of the app

    POST:
            validate input symbol and create heroku plot

    GET:
            load search bar
    """

    if request.method == 'POST' and 'symbol' in request.form:
        symbl = request.form['symbol'].upper()
        if 'WIKI/' + symbl in tickers['quandl code'].values:
            try:
                name = tickers.loc[tickers['quandl code'] ==
                                   'WIKI/' + symbl, 'name'].values[0]
                script, div = create_plot(symbl, name)
                return render_template('index.html', place_holder='Input Stock Symbol...',
                                       plot_script=script, plot_div=div)
            except:
                return render_template('index.html',
                                       place_holder='An error has occurred.')
        else:
            return render_template('index.html',
                                   place_holder='Stock Symbol not recognized. Please try again...')
    else:
        return render_template('index.html', place_holder='Input Stock Symbol...')
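
Example #5 (and its close duplicate, Example #7) unpacks the return value of create_plot into a script/div pair before embedding both in the template, which matches Bokeh's components helper. A minimal sketch of such a helper under that assumption; the price series, axis labels, and the function body are placeholders, not the project's actual code:

from bokeh.embed import components
from bokeh.plotting import figure


def create_plot(symbol, name):
    # Placeholder series; the real project presumably pulls prices for `symbol`
    # from the Quandl WIKI dataset, as the 'WIKI/' prefix in the view suggests.
    days = [1, 2, 3, 4]
    closes = [10.0, 10.4, 10.1, 10.6]
    p = figure(title=name, x_axis_label="day", y_axis_label="close")
    p.line(days, closes)
    # components() returns the (script, div) pair that the view embeds.
    return components(p)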
Example #6
def main():
    parser = argparse.ArgumentParser(
        description=
        'Calculate and render probabilities of matchups for the show "Are You The One?".'
    )
    parser.add_argument(
        'season',
        type=str,
        help='the name of a season available in the "seasons" folder, like "s1"'
    )

    parser.add_argument('--profile',
                        dest='profile',
                        action='store_true',
                        help='if provided, profiles the code')

    args = parser.parse_args()
    print(f'Reading season {args.season}')
    season_data = seasons.read_season(args.season)

    print(f'Preparing season')
    season = seasons.convert_season(season_data)

    print(f'Simulating season')
    with timer.start(args.profile) as t:
        output = simulation.simulate(season)
    print(f'Simulation completed in {t.elapsed} seconds')

    print(f'Plotting season')
    fig = plot.create_plot(season, output)
    fig.show()
Example #7
def index():
    """
    the main page of the app

    POST:
            validate input symbol and create heroku plot

    GET:
            load search bar
    """

    if request.method == 'POST' and 'symbol' in request.form:
        symbl = request.form['symbol'].upper()
        if 'WIKI/' + symbl in tickers['quandl code'].values:
            try:
                name = tickers.loc[tickers['quandl code'] == 'WIKI/' + symbl,
                                   'name'].values[0]
                script, div = create_plot(symbl, name)
                return render_template('index.html',
                                       place_holder='Input Stock Symbol...',
                                       plot_script=script,
                                       plot_div=div)
            except:
                return render_template('index.html',
                                       place_holder='An error has occurred.')
        else:
            return render_template(
                'index.html',
                place_holder='Stock Symbol not recognized. Please try again...'
            )
    else:
        return render_template('index.html',
                               place_holder='Input Stock Symbol...')
Example #8
def simulate(filename='sodium-chloride-example.npz'):
    print('simulate')
    parameters, positions, types, boxsize = load.load_input_file(filename)
    width = boxsize[1] / 1000
    particles = create_particle_object(parameters, positions, types)
    box = system.Box(dim, boxsize, particles, temp)

    box.compute_LJneighbourlist(r_cut_LJ, r_skin_LJ)
    box.compute_LJ_potential(r_cut_LJ, r_skin_LJ)
    box.simulate(n_steps_sim, n_reuse_nblist, n_skip, width,
                 save_system_history, r_cut_LJ, r_skin_LJ)

    pos_history = box.pos_history
    pot_history = np.asarray(box.pot_history)

    plot.create_plot(boxsize[1], pos_history, pot_history, r_cut_LJ,
                     n_steps_opt, n_skip)
    print('running time:', time.time() - start)
Example #9
def index():
    if request.method == 'POST':
        print("Posted")
        bar = None
        content_title = "NEW SAMPLE PLOT"
        content = request.form.to_dict()
        print(content)
        bar = create_plot(content)
        return render_template('index.html', plot=bar,
                               comment='This is your gantt chart:')
    else:
        return render_template('index.html', plot=None,
                               comment='Your chart will appear here:')
Example #10
def main():
    print "K-Means algorithm illustrated through the iris dataset"
    print "The algorithm uses random initialization and iterates until no iris"
    print "switches clusters."
    print "If matplotlib is installed, the resulting clusters are illustrated"
    print "in a graphical manner."
    print

    # import the data
    irii = import_csv()

    # create and initialize the algorithm
    kmeans = KMeans(3, irii, euclidean_similarity)

    # run the algorithm
    num_iterations = kmeans.run()
    print "K-Means ran in %d iterations" % (num_iterations)
    print

    print "SSE Values for each cluster:"
    for cluster_num, sse in enumerate(kmeans.sses()):
        num_members = len(kmeans.clusters[cluster_num])
        print "Cluster %d (%d members): %f" % (cluster_num, num_members, sse)

    print

    # create a plot
    try:
        print "Plotting sepal length vs sepal width."
        print "Colors indicate the cluster, as identified by k-means across all"
        print "attributes. The symbol indicates the 'correct' group, as"
        print "determined by the name of the IRIS. The black + symbols indicate"
        print "the centroids."
        create_plot(kmeans)
    except:
        print "There was a plotting error. Do you have matplotlib installed?"
Example #11
def run(args):
    if not os.path.exists('outputs'):
        os.mkdir('outputs')

    # Select the optimization criterion/task
    LearnerClass = Learner_Clustering
    criterion = MCL()
    # Prepare dataloaders
    train_loader, eval_loader = cxr(batch_sz=args["batch_size"],
                                    num_workers=args["workers"],
                                    mask=args["mask"])
    # Prepare the model
    if args["num_classes"] < 0:  # Use ground-truth number of classes/clusters
        args["num_classes"] = train_loader.num_classes
    model = LearnerClass.create_model(args["model_type"], args["model"],
                                      args["num_classes"])
    model = torch.nn.DataParallel(model)

    # GPU
    if args["use_gpu"]:
        model = model.cuda()
        criterion = criterion.cuda()
        torch.cuda.manual_seed(1)
    print('Criterion:', criterion)

    optim_args = {'lr': args["lr"]}
    #optimizer = torch.optim.__dict__[args["optimizer"]](model.parameters(), **optim_args)
    #optimizer = torch.optim.Adam(model.parameters(),lr=0.0002, betas=(0.9, 0.999))
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args["lr"],
                                 betas=(0.9, 0.999))
    scheduler = MultiStepLR(optimizer, milestones=args["schedule"], gamma=0.1)
    learner = LearnerClass(model, criterion, optimizer, scheduler)
    all_loss_history = []
    train_acc_history = []
    train_auc_history = []
    val_acc_history = []
    val_auc_history = []
    for epoch in range(args["start_epoch"], args["epochs"]):
        best_perm_acc, best_perm_auc, loss_history, avg_best_acc, avg_best_auc = train(
            epoch, train_loader, learner, args)
        train_acc_history.append(avg_best_acc)
        train_auc_history.append(avg_best_auc)
        all_loss_history = all_loss_history + loss_history
        #KPI = 0
        val_acc, val_auc = evaluate(eval_loader, model, args, best_perm_auc,
                                    best_perm_acc)
        val_acc_history.append(val_acc)
        val_auc_history.append(val_auc)
    print("==========================Summary==================")
    #print("model", args["model"])
    print("lr", args["lr"])
    print("loss", args["loss"])
    print("number of epoch", args["epochs"])
    print("weight_decay", args["weight_decay"])
    print("mask", args["mask"])
    print("train_auc_history", train_auc_history)
    print("val_auc_history", val_auc_history)
    print("train_acc_history", train_acc_history)
    print("val_acc_history", val_acc_history)
    create_plot(all_loss_history, args, train_acc_history, val_acc_history,
                train_auc_history, val_auc_history)
    torch.save(
        model, "model/" + args["loss"] + "_mask" + str(args["mask"]) + "_wd" +
        str(args["weight_decay"]) + '.pt')
            args["mask"]) + "_" + args["loss"] + "_lr" + str(
                args["lr"]) + "_wd" + str(
                    args["weight_decay"]) + "_drop_rate" + str(
                        args["drop_rate"]) + "_" + str(auroc_avg) + ".pkl"
        torch.save(model.state_dict(), model_saved_path)
    print("==========================Summary==================")
    print("number of epoch", args["epoch"])
    print("model", args["model"])
    print("batch size", args["batch_size"])
    print("mask", args["mask"])
    print("learning rate: ", args["lr"])
    print("scheduler: ", args["scheduler"])
    print("weight_decay", args["weight_decay"])
    print("loss function", args["loss"])
    print("drop rate: ", args["drop_rate"])
    print("train_auc_history", train_auc_history)
    print("val_auc_history", val_auc_history)
    print("train_acc_history", train_acc_history)
    print("val_acc_history", val_acc_history)
    print("train_perc_history", train_perc_history)
    print("val_perc_history", val_perc_history)
    print("train_recall_history", train_recall_history)
    print("val_recall_history", val_recall_history)
    create_plot(loss_history, args, train_acc_history, val_acc_history,
                train_auc_history, val_auc_history)
    #all_thresholds
    best_t_index = val_auc_history.index(max(val_auc_history))
    #print("best_t_index",best_t_index)
    best_t = all_thresholds[best_t_index]
    #print("best_t", best_t)
Example #13
def index():
    bar = create_plot()
    return render_template("index.html", grapJSON=grapJSON)
Example #14
        <h2>Plots for %(id)s</h2>""" % { "id" : ds_name }
    for plottype in args.plottype:
        xn, yn = plot_variants[plottype]
        print "Processing '%s' with %s" % (ds, plottype)
        output_str += create_plot(ds_name, runs, xn, yn, linestyles)
    if args.scatter:
        output_str += """
        <hr />
        <h2>Scatterplots for %(id)s""" % { "id" : ds_name }
        for plottype in args.plottype:
            xn, yn = plot_variants[plottype]
            print "Processing scatterplot '%s' with %s" % (ds, plottype)
            output_str += create_plot(ds, runs, xn, yn, linestyles, "Scatterplot ", "bubble")
    # create png plot for summary page
    plot.create_plot(runs, True, False,
            False, True, 'k-nn', 'qps',  args.outputdir + ds + ".png",
            create_linestyles(all_algos))
    output_str += """
        </div>
    </div>
    </body>
</html>
"""
    with open(args.outputdir + ds + ".html", "w") as text_file:
        text_file.write(output_str)

# Build a website for each algorithm
# Build website. TODO Refactor with dataset code.
for (algo, runs) in all_runs_by_algorithm.items():
    all_data = runs.keys()
    output_str = get_html_header(algo)
Example #15
def load_tree(filename):
    """
    filename: str, load a previously generated decision tree from the file
    """
    import pickle
    fr = open(filename, 'rb')
    #print(fr.readlines())
    return pickle.load(fr)


def get_lenses(filename='lenses.txt'):
    fr = open(filename)
    lenses = [inst.strip().split('\t') for inst in fr.readlines()]
    feature_name = ['age', 'prescript', 'astigmatic', 'tear_rate']
    assert len(lenses[0]) == 5
    labels = [inst[4] for inst in lenses]
    lenses = [inst[:4] for inst in lenses]
    return lenses, labels, feature_name


if __name__ == '__main__':
    #feature_name = ['no surfacing', 'flippers']
    #my_tree = retrieve_tree(0)
    #save_tree(my_tree, 'classify_tree.pkl')
    #print(load_tree('classify_tree.pkl'))
    #print(classify(my_tree, feature_name, [1, 1]))

    lenses, labels, feature_name = get_lenses()
    lenses_tree = create_tree(lenses, labels, feature_name)
    create_plot(lenses_tree)
Example #16
df = pd.read_csv("/data/home/cs224n/fake_news_data/merged.csv")

# Set `y`
Y = [1 if label == 'REAL' else 0 for label in df.label]

# Drop the `label` column
df.drop("label", axis=1)

print('extract configuration from input texts ...')
X = df['summarized_text']

config = fit_input_text(X)
config['num_target_tokens'] = 2

print('configuration extracted from input texts ...')

classifier = LstmClassifier(config)

Xtrain, Xtest, Ytrain, Ytest = train_test_split(X,
                                                Y,
                                                test_size=0.2,
                                                random_state=42)

print('training size: ', len(Xtrain))
print('testing size: ', len(Xtest))

print('start fitting ...')
history = classifier.fit(Xtrain, Ytrain, Xtest, Ytest)

create_plot(history, "LSTM RNN")
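
Example #16 hands the fit history and a chart title to create_plot. A plausible sketch of that helper, assuming a Keras-style History object and matplotlib; the metric keys ('accuracy', 'val_accuracy') are assumptions about how LstmClassifier compiles its model:

import matplotlib.pyplot as plt


def create_plot(history, title):
    # Keras History objects keep per-epoch metrics in the history.history dict.
    plt.figure()
    plt.plot(history.history["accuracy"], label="train accuracy")
    plt.plot(history.history["val_accuracy"], label="validation accuracy")
    plt.title(title)
    plt.xlabel("epoch")
    plt.ylabel("accuracy")
    plt.legend()
    plt.savefig(title.replace(" ", "_") + ".png")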
Example #17
def cvrp(data, set, vehicles, strategy):
    """Solve the CVRP problem."""
    # Instantiate the data problem.
    data, name = modelData.create_data_model(data, set, vehicles)

    # Create the routing index manager.
    manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),
                                           data['num_vehicles'], data['depot'])

    # Create Routing Model.
    routing = pywrapcp.RoutingModel(manager)

    # initial plot
    plot.create_initial_plot(data['points'], name)

    # Create and register a transit callback.
    def distance_callback(from_index, to_index):
        """Returns the distance between the two nodes."""
        # Convert from routing variable Index to distance matrix NodeIndex.
        from_node = manager.IndexToNode(from_index)
        to_node = manager.IndexToNode(to_index)
        return data['distance_matrix'][from_node][to_node]

    transit_callback_index = routing.RegisterTransitCallback(distance_callback)

    # Define cost of each arc.
    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)

    # Add Capacity constraint.
    def demand_callback(from_index):
        """Returns the demand of the node."""
        # Convert from routing variable Index to demands NodeIndex.
        from_node = manager.IndexToNode(from_index)
        return data['demands'][from_node]

    demand_callback_index = routing.RegisterUnaryTransitCallback(
        demand_callback)
    routing.AddDimensionWithVehicleCapacity(
        demand_callback_index,
        0,  # null capacity slack
        data['vehicle_capacities'],  # vehicle maximum capacities
        True,  # start cumul to zero
        'Capacity')

    # Setting solution heuristic.
    search_parameters = pywrapcp.DefaultRoutingSearchParameters()

    if strategy == 'first-solution':
        search_parameters.first_solution_strategy = (
            routing_enums_pb2.FirstSolutionStrategy.AUTOMATIC)
    elif strategy == 'local-search':
        search_parameters.local_search_metaheuristic = (
            routing_enums_pb2.LocalSearchMetaheuristic.AUTOMATIC)
        search_parameters.time_limit.seconds = 20
    else:
        search_parameters.local_search_metaheuristic = (
            routing_enums_pb2.LocalSearchMetaheuristic.AUTOMATIC)
        search_parameters.time_limit.seconds = 25
        # search_parameters.local_search_metaheuristic = (
        #     routing_enums_pb2.LocalSearchMetaheuristic.TABU_SEARCH)
        # search_parameters.time_limit.seconds = 20

    # Solve the problem.
    assignment = routing.SolveWithParameters(search_parameters)

    # Print solution.
    if assignment:
        vehicle_distance, vehicle_load, text = print.print_solution(
            data, manager, routing, assignment)
    routes = getRoutes.get_routes(manager, routing, assignment,
                                  data['num_vehicles'])
    # Display the routes.
    route_arr = print.print_routes(routes)
    plot.create_plot(data['points'], routes, vehicle_distance, name)
    return vehicle_distance, vehicle_load, text, name, route_arr