import os

import bokeh
import pandas as pd
from bokeh.embed import components
from bokeh.models import ColumnDataSource, GMapOptions
from bokeh.plotting import gmap
from flask import jsonify, render_template, request

# make_plot, get_option, and get_coords are app-local helpers (sketches of
# the ones not shown appear below).


def bokehplot():
    # Embed the Bokeh figure as a <script>/<div> pair and hand both to the
    # Jinja2 template, along with the Bokeh version so the template can load
    # the matching BokehJS build.
    figure = make_plot()
    fig_script, fig_div = components(figure)
    return render_template(
        "bokeh.html.j2",
        fig_script=fig_script,
        fig_div=fig_div,
        bkversion=bokeh.__version__,
    )
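# make_plot() is defined elsewhere in this app and is not shown; a minimal
# sketch of what it is assumed to return (any Bokeh figure works with
# components(); the data and styling here are placeholders):

from bokeh.plotting import figure


def make_plot():
    # Hypothetical stand-in: a simple line plot sized like the map figure
    # used later in this app.
    fig = figure(title="Example", plot_width=500, plot_height=400)
    fig.line([1, 2, 3, 4], [4, 1, 3, 2], line_width=2)
    return fig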
def option():
    if request.method == 'GET':
        route_n = request.args.get('route_n', '')
        option = get_option(route_n)

        # Each route option maps to one CSV; a lookup table replaces the
        # five identical elif branches.
        route_files = {f"Route{i}": f"./data/df{i}out.csv" for i in range(1, 6)}
        df = pd.read_csv(route_files[option])
        coords, lat_mean, lon_mean = get_coords(df)

        figure = make_plot(df)
        fig_script, fig_div = components(figure)

        # Center the Google map on the route's mean position.
        map_options = GMapOptions(lat=lat_mean, lng=lon_mean,
                                  map_type="roadmap", zoom=14)
        # Do not commit API keys to source control; the environment-variable
        # name below is a placeholder for wherever the key is configured.
        google_api = os.environ["GOOGLE_MAPS_API_KEY"]
        fig_map = gmap(google_api, map_options, title="Route Taken",
                       plot_height=400, plot_width=500)
        fig_map.xaxis.axis_label = 'Longitude'
        fig_map.yaxis.axis_label = 'Latitude'

        # Overlay the recorded GPS points on the map.
        source = ColumnDataSource(coords)
        fig_map.circle(x="lon", y="lat", size=5, fill_color="blue",
                       fill_alpha=0.8, source=source)
        script2, div2 = components(fig_map)

        return render_template(
            "location2.html.j2",
            option=option,
            route_n=route_n,
            fig_script=fig_script,
            fig_div=fig_div,
            bkversion=bokeh.__version__,
            df=df,
            script2=script2,
            div2=div2,
        )
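# get_coords() is also defined elsewhere; a plausible sketch, assuming the
# route CSVs carry "lat"/"lon" columns (the column names are an assumption,
# inferred from the circle glyph's x="lon", y="lat" above):


def get_coords(df):
    # Pull the coordinate columns into a dict that ColumnDataSource accepts,
    # plus the mean position used to center the map.
    coords = {"lat": df["lat"].tolist(), "lon": df["lon"].tolist()}
    return coords, df["lat"].mean(), df["lon"].mean()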
def bokehplot():
    # JSON variant of the route above: the rendered plot HTML and the
    # underlying DataFrame are returned together in one payload.
    figure = make_plot()
    fig_script, fig_div = components(figure)
    template = render_template(
        "bokeh.html.j2",
        fig_script=fig_script,
        fig_div=fig_div,
        bkversion=bokeh.__version__,
    )
    response_object = {
        'state': 'success',
        'message': 'dataframe plot successfully processed',
        'data': [{
            # NOTE: `df` is not defined inside this function in the
            # original; it must already be in scope (e.g. the module-level
            # DataFrame that make_plot() draws from).
            'data': df.to_json(),
            'plot_html': template,
        }],
    }
    return jsonify(response_object)
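# A quick client-side check of the JSON variant above, assuming the route is
# mounted at /bokehplot on a local dev server (URL and port are assumptions):

import pandas as pd
import requests

resp = requests.get("http://localhost:5000/bokehplot")
payload = resp.json()
df_round_trip = pd.read_json(payload["data"][0]["data"])  # undoes df.to_json()
plot_html = payload["data"][0]["plot_html"]               # rendered template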
import matplotlib.pyplot as plt
import pandas as pd

# `util` here is the project's local plotting-helper module (a sketch of the
# axis helpers follows this function).


def plot(results_path, fig_path):
    # NOTE: the signature is reconstructed; the original fragment begins
    # inside the pd.read_csv(...) call.
    data = pd.read_csv(results_path.joinpath("trial_accuracy_means.csv"))\
        .groupby('model').get_group('exp')\
        .sort_values('trial')  # DataFrame.sort was removed; use sort_values

    trials = data['trial']
    acc = data['median'] * 100

    fig, ax = plt.subplots()
    ax.plot(trials, acc, 'k.')
    ax.set_xlim(1, 200)
    ax.set_ylim(70, 100)
    ax.set_xlabel("Trial", fontsize=14)
    ax.set_ylabel("Percent correct", fontsize=14)

    util.clear_right(ax)
    util.clear_top(ax)
    util.outward_ticks(ax)

    fig.set_figheight(3.5)
    fig.set_figwidth(4.5)

    plt.draw()
    plt.tight_layout()

    # Save the figure in both raster and vector formats.
    pths = [fig_path.joinpath("trial_accuracy.%s" % ext)
            for ext in ('png', 'pdf')]
    for pth in pths:
        util.save(pth, close=False)
    return pths


if __name__ == "__main__":
    util.make_plot(plot)
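# The util axis helpers above aren't shown; a minimal sketch of what they are
# assumed to do, using standard matplotlib spine/tick calls:


def clear_right(ax):
    # Hide the right spine and keep y ticks on the left only.
    ax.spines['right'].set_visible(False)
    ax.yaxis.set_ticks_position('left')


def clear_top(ax):
    # Hide the top spine and keep x ticks on the bottom only.
    ax.spines['top'].set_visible(False)
    ax.xaxis.set_ticks_position('bottom')


def outward_ticks(ax):
    # Point tick marks outward from the axes.
    ax.tick_params(direction='out')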
import argparse as arg

import torch

# Net, DepthLoss, util, and getTrainingValidationTestingData are
# project-local (their definitions are not part of this fragment).


def main(device=torch.device('cuda:0')):
    # CLI arguments
    parser = arg.ArgumentParser(
        description='We all know what we are doing. Fighting!')
    parser.add_argument("--datasize", "-d", default="small", type=str,
                        help="data size to use: small, medium, or total")
    args = parser.parse_args()

    # Data loaders
    datasize = args.datasize
    pathname = "data/nyu.zip"
    tr_loader, va_loader, te_loader = getTrainingValidationTestingData(
        datasize, pathname, batch_size=util.config("unet.batch_size"))

    # Model, loss function, and optimizer
    model = Net().to(device)  # move the model to the training device
    learning_rate = util.config("unet.learning_rate")
    criterion = DepthLoss(0.1)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    number_of_epochs = 10

    # Attempt to restore the latest checkpoint, if one exists.
    print("Loading unet...")
    model, start_epoch, stats = util.restore_checkpoint(
        model, util.config("unet.checkpoint"))

    running_va_loss = []
    running_va_acc = []
    running_tr_loss = []
    running_tr_acc = []

    # Evaluate the (possibly restored) model once so the running curves
    # start from `start_epoch`. The training-set evaluation was commented
    # out in the original even though tr_acc/tr_loss are appended below,
    # so it is restored here.
    tr_acc, tr_loss = util.evaluate_model(model, tr_loader, device)
    va_acc, va_loss = util.evaluate_model(model, va_loader, device)
    running_va_acc.append(va_acc)
    running_va_loss.append(va_loss)
    running_tr_acc.append(tr_acc)
    running_tr_loss.append(tr_loss)

    # Loop over the entire dataset multiple times.
    # TODO: replace the fixed epoch count with patience-based early stopping
    # (the original kept a curr_patience/prev_val_loss sketch in comments).
    epoch = start_epoch
    while epoch < number_of_epochs:
        # Train model
        util.train_epoch(tr_loader, model, criterion, optimizer, device)

        # Evaluate model
        tr_acc, tr_loss = util.evaluate_model(model, tr_loader, device)
        va_acc, va_loss = util.evaluate_model(model, va_loader, device)
        running_va_acc.append(va_acc)
        running_va_loss.append(va_loss)
        running_tr_acc.append(tr_acc)
        running_tr_loss.append(tr_loss)

        # Save model parameters
        util.save_checkpoint(model, epoch + 1,
                             util.config("unet.checkpoint"), stats)
        epoch += 1

    print("Finished Training")
    util.make_plot(running_tr_loss, running_tr_acc,
                   running_va_loss, running_va_acc)
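# DepthLoss is project-local and not shown here; a minimal sketch, assuming
# the constructor argument weights an image-gradient matching term against an
# L1 depth error (the actual definition may differ):

import torch
import torch.nn as nn


class DepthLoss(nn.Module):
    def __init__(self, grad_weight=0.1):
        super().__init__()
        self.grad_weight = grad_weight

    def forward(self, pred, target):
        # Pointwise depth error.
        l1 = torch.mean(torch.abs(pred - target))
        # Penalize differences in local depth gradients (x and y directions).
        dx_p = pred[..., :, 1:] - pred[..., :, :-1]
        dx_t = target[..., :, 1:] - target[..., :, :-1]
        dy_p = pred[..., 1:, :] - pred[..., :-1, :]
        dy_t = target[..., 1:, :] - target[..., :-1, :]
        grad = torch.mean(torch.abs(dx_p - dx_t)) + torch.mean(torch.abs(dy_p - dy_t))
        return l1 + self.grad_weight * grad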
def main(device=torch.device('cuda:0')):
    """Train CNN and show training plots."""
    # Data loaders. (An earlier variant branched on check_for_augmented_data
    # and used get_train_val_test_loaders(task="target", ...); kept here
    # only as a note.)
    # pathname = "data/nyu_depth.zip"
    pathname = "data/nyu_small.zip"
    tr_loader, va_loader, te_loader = getTrainingValidationTestingData(
        pathname, batch_size=util.config("unet.batch_size"))

    # Model, loss function, and optimizer
    model = Net().to(device)  # move the model to the training device
    learning_rate = util.config("unet.learning_rate")
    criterion = DepthLoss(0.1)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    number_of_epochs = 10

    # Attempt to restore the latest checkpoint, if one exists.
    print("Loading unet...")
    model, start_epoch, stats = util.restore_checkpoint(
        model, util.config("unet.checkpoint"))

    running_va_loss = []
    running_va_acc = []
    running_tr_loss = []
    running_tr_acc = []

    # Evaluate the (possibly restored) model once before training; as in
    # the variant above, the training-set evaluation is restored because
    # tr_acc/tr_loss are appended below.
    tr_acc, tr_loss = util.evaluate_model(model, tr_loader, device)
    va_acc, va_loss = util.evaluate_model(model, va_loader, device)
    running_va_acc.append(va_acc)
    running_va_loss.append(va_loss)
    running_tr_acc.append(tr_acc)
    running_tr_loss.append(tr_loss)

    # Loop over the entire dataset multiple times.
    # TODO: replace the fixed epoch count with patience-based early stopping.
    epoch = start_epoch
    while epoch < number_of_epochs:
        # Train model (`device` added to match the call in the variant above)
        util.train_epoch(tr_loader, model, criterion, optimizer, device)

        # Evaluate model
        tr_acc, tr_loss = util.evaluate_model(model, tr_loader, device)
        va_acc, va_loss = util.evaluate_model(model, va_loader, device)
        running_va_acc.append(va_acc)
        running_va_loss.append(va_loss)
        running_tr_acc.append(tr_acc)
        running_tr_loss.append(tr_loss)

        # Save model parameters
        util.save_checkpoint(model, epoch + 1,
                             util.config("unet.checkpoint"), stats)
        epoch += 1

    print("Finished Training")
    util.make_plot(running_tr_loss, running_tr_acc,
                   running_va_loss, running_va_acc)
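# The checkpoint helpers used by both training variants are not shown; a
# plausible sketch, assuming checkpoints are dicts saved with torch.save
# under the configured directory (the file layout is an assumption):

import os

import torch


def save_checkpoint(model, epoch, checkpoint_dir, stats):
    # Persist weights plus bookkeeping so training can resume at `epoch`.
    os.makedirs(checkpoint_dir, exist_ok=True)
    state = {"state_dict": model.state_dict(), "epoch": epoch, "stats": stats}
    torch.save(state, os.path.join(checkpoint_dir,
                                   "epoch=%d.checkpoint.pth.tar" % epoch))


def restore_checkpoint(model, checkpoint_dir):
    # Load the most recent checkpoint if one exists; otherwise start fresh
    # from epoch 0 with empty stats.
    files = sorted(os.listdir(checkpoint_dir)) if os.path.isdir(checkpoint_dir) else []
    if not files:
        return model, 0, []
    state = torch.load(os.path.join(checkpoint_dir, files[-1]),
                       map_location="cpu")
    model.load_state_dict(state["state_dict"])
    return model, state["epoch"], state["stats"]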