def start_test_function(dfid, algorithms, options):
    if dfid is not None and algorithms is not None:
        # test() returns (task, scores): scores is either a DataFrame or a
        # (s1, s2, pipeline) tuple, where s1 = accuracy or RMSE and
        # s2 = f1 or R2 depending on the task.
        res = test(dfid, algorithms, options)
        print(res)
        if isinstance(res[1], pd.DataFrame):
            first_score = res[1].iloc[:1]
            second_score = res[1].iloc[1:]
            bars = {'first': [], 'second': []}
            titles = []
            if res[0] == 'classification':
                titles = ['Accuracy Score:', 'F1 Score:']
            else:
                titles = ['RMSE Score:', 'R2 Score:']
            for col in first_score:
                # Note: the explicit x values were removed from the bars.
                bars['first'].append(
                    go.Bar(y=first_score[col], name=col.split('-')[0]))
                bars['second'].append(
                    go.Bar(y=second_score[col], name=col.split('-')[0]))
            return [
                html.Div([
                    html.H2('Test Results from DataFrame ' + str(dfid)),
                    html.H4(titles[0]),
                    dbc.Table.from_dataframe(first_score,
                                             striped=True,
                                             bordered=True,
                                             hover=True),
                    html.H4(titles[1]),
                    dbc.Table.from_dataframe(second_score,
                                             striped=True,
                                             bordered=True,
                                             hover=True),
                    html.Div(
                        dbc.Row([
                            dbc.Col(
                                dcc.Graph(figure=go.Figure(
                                    data=bars['first'],
                                    layout=go.Layout(
                                        xaxis=dict(title='Datasets'),
                                        yaxis=dict(title=titles[0]))))),
                            dbc.Col(
                                dcc.Graph(figure=go.Figure(
                                    data=bars['second'],
                                    layout=go.Layout(
                                        xaxis=dict(title='Datasets'),
                                        yaxis=dict(title=titles[1]))))),
                        ], align="center"))
                ])
            ]
        else:
            if res[0] is None:
                # test() reported an error message instead of scores.
                return [html.P(res[1], style={'color': 'red'})]
            else:
                s1, s2, pipeline = res[1]
                if res[0] == 'classification':
                    text = 'Accuracy: ' + str(s1) + ' f1_score: ' + str(s2)
                else:
                    text = 'RMSE: ' + str(s1) + ' r2_score: ' + str(s2)
                return [
                    html.Div([
                        html.P('Results for DataFrame ' + str(dfid) +
                               ' using algorithm: ' + str(algorithms) +
                               ' with running time: ' +
                               str(options[algorithms]['time']) + ' ' +
                               str(options[algorithms]['type'])),
                        html.P(text),
                        html.P(pipeline)
                    ])
                ]
    else:
        raise PreventUpdate
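# A minimal sketch (not part of the original code) of how start_test_function
# could be registered as a Dash callback. The app object, layout, and the
# component ids ('df-id', 'algorithms-store', 'options-store', 'run-test',
# 'test-output') are hypothetical placeholders chosen for illustration.
import dash
import dash_bootstrap_components as dbc
from dash import dcc, html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate

app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])

app.layout = html.Div([
    dcc.Input(id='df-id', type='number'),
    dcc.Store(id='algorithms-store'),
    dcc.Store(id='options-store'),
    dbc.Button('Run test', id='run-test'),
    html.Div(id='test-output'),
])


@app.callback(Output('test-output', 'children'),
              Input('run-test', 'n_clicks'),
              State('df-id', 'value'),
              State('algorithms-store', 'data'),
              State('options-store', 'data'))
def run_test_callback(n_clicks, dfid, algorithms, options):
    # Delegate to the function above; it raises PreventUpdate itself when
    # dfid or algorithms is missing.
    if not n_clicks:
        raise PreventUpdate
    return start_test_function(dfid, algorithms, options)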
def main():
    if len(sys.argv) > 1:
        args = parse_args()
        print('------ Experiments Parameters ------')
        for k, v in args.__dict__.items():
            print(k, ':', v)
    else:
        print('Please provide some parameters for experiments')
        sys.exit()

    # Create the data loaders.
    train_loader, val_loader, test_loader = load_GTSRB(args.train_dir,
                                                       args.test_dir)

    # Choose the model.
    if args.model == 'lenet':
        model = lenet().to(args.device)
    else:
        model = models.resnet18().to(args.device)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, args.num_classes)
        model = model.to(args.device)

    # Set the optimizer.
    opt = optim.SGD(model.parameters(),
                    lr=args.lr,
                    momentum=args.momentum,
                    weight_decay=args.weight_decay)

    # Set the learning-rate scheduler.
    scheduler = lr_scheduler.StepLR(opt, step_size=args.step,
                                    gamma=args.lr_decay)

    # Plain run. Note: "binaly" is kept as spelled in the original argument
    # and file names; it means "binary".
    if not args.binaly:
        if args.hist:
            save_train_file = 'hist_train_results.json'
            save_test_file = 'hist_test_results.json'
            save_log_file = 'hist_log.csv'
        else:
            save_train_file = 'train_results.json'
            save_test_file = 'test_results.json'
            save_log_file = 'log.csv'

        # Train & validation.
        logs = []
        for epoch in range(args.epochs):
            print('Epoch[{}/{}]'.format(epoch + 1, args.epochs))
            train_acc, train_loss = train(model=model,
                                          loader=train_loader,
                                          opt=opt,
                                          binaly=args.binaly)
            val_acc, val_loss = validation(model=model,
                                           loader=val_loader,
                                           scheduler=scheduler,
                                           binaly=args.binaly)
            log = {
                'epoch': epoch,
                'train_loss': train_loss,
                'validation_loss': val_loss,
                'train_acc': train_acc,
                'validation_acc': val_acc
            }
            logs.append(log)
            scheduler.step()

        # Keep the metrics of the last epoch.
        results = {}
        results['train_acc'] = train_acc
        results['train_loss'] = train_loss
        results['val_acc'] = val_acc
        results['val_loss'] = val_loss

        df = pd.DataFrame(logs)
        df.to_csv('./log/' + save_log_file, index=False)
        with open(os.path.join(args.save_dir, args.model, save_train_file),
                  'w') as f:
            json.dump(results, f, indent=4)

        # Test.
        model.load_state_dict(
            torch.load(
                os.path.join(args.model_dir, args.model, 'trained_model')))
        test_acc, _ = test(model=model, loader=test_loader,
                           binaly=args.binaly)
        results = {}
        results['test_acc'] = test_acc
        with open(os.path.join(args.save_dir, args.model, save_test_file),
                  'w') as f:
            json.dump(results, f, indent=4)

    # Binary ("binaly") run.
    else:
        if args.hist:
            save_train_file = 'hist_binaly_train_results.json'
            save_test_file = 'hist_binaly_test_results.json'
            save_log_file = 'hist_binaly_log.csv'
        else:
            save_train_file = 'binaly_train_results.json'
            save_test_file = 'binaly_test_results.json'
            save_log_file = 'binaly_log.csv'

        # Train & validation (binary).
        logs = []
        for epoch in range(args.epochs):
            print('Epoch[{}/{}]'.format(epoch + 1, args.epochs))
            train_acc, train_loss = train(model=model,
                                          loader=train_loader,
                                          opt=opt,
                                          binaly=args.binaly)
            val_acc, val_loss = validation(model=model,
                                           loader=val_loader,
                                           scheduler=scheduler,
                                           binaly=args.binaly)
            log = {
                'epoch': epoch,
                'train_loss': train_loss,
                'validation_loss': val_loss,
                'train_acc': train_acc,
                'validation_acc': val_acc
            }
            logs.append(log)

        # Keep the metrics of the last epoch.
        results_binaly = {}
        results_binaly['train_binaly_acc'] = train_acc
        results_binaly['train_binaly_loss'] = train_loss
        results_binaly['val_binaly_acc'] = val_acc
        results_binaly['val_binaly_loss'] = val_loss

        df = pd.DataFrame(logs)
        df.to_csv('./log/' + save_log_file, index=False)
        with open(os.path.join(args.save_dir, args.model, save_train_file),
                  'w') as f:
            json.dump(results_binaly, f, indent=4)

        # Test (binary).
        model.load_state_dict(
            torch.load(
                os.path.join(args.model_dir, args.model,
                             'binaly_trained_model')))
        test_acc, _ = test(model=model, loader=test_loader,
                           binaly=args.binaly)
        results_binaly = {}
        results_binaly['test_binaly_acc'] = test_acc
        with open(os.path.join(args.save_dir, args.model, save_test_file),
                  'w') as f:
            json.dump(results_binaly, f, indent=4)
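# A minimal sketch of the parse_args() that main() above relies on. The
# argument names are taken from the attributes main() reads; the defaults and
# help strings here are hypothetical placeholders, not the original values.
import argparse


def parse_args():
    parser = argparse.ArgumentParser(description='GTSRB experiments')
    parser.add_argument('--train_dir', type=str, default='./data/train')
    parser.add_argument('--test_dir', type=str, default='./data/test')
    parser.add_argument('--model', type=str, default='lenet',
                        choices=['lenet', 'resnet18'])
    parser.add_argument('--device', type=str, default='cuda')
    parser.add_argument('--num_classes', type=int, default=43)
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--weight_decay', type=float, default=1e-6)
    parser.add_argument('--step', type=int, default=10)
    parser.add_argument('--lr_decay', type=float, default=0.1)
    parser.add_argument('--epochs', type=int, default=30)
    parser.add_argument('--binaly', action='store_true',
                        help='run the binary ("binaly") variant')
    parser.add_argument('--hist', action='store_true',
                        help='use the histogram ("hist") variant')
    parser.add_argument('--save_dir', type=str, default='./results')
    parser.add_argument('--model_dir', type=str, default='./models')
    return parser.parse_args()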
def main(args):
    print(vars(args))
    print('model is', args.model)

    # Convert the raw .mat files to .npy and build the hand-crafted features.
    mat2npy(save_dir=NPY_PATH)
    prepare_local_feature(patch_size=args.patch_size,
                          num_nearest_neurons=args.patch_nearest_num,
                          head_neuron=args.head_neuron,
                          tail_neuron=args.tail_neuron,
                          data_root=NPY_PATH,
                          save_root=FEATURE_PATH)
    prepare_hist_pairwise_feature(num_bins=args.bin_num,
                                  num_edge=args.edge_num,
                                  head_neuron=args.head_neuron,
                                  tail_neuron=args.tail_neuron,
                                  file_root=NPY_PATH,
                                  save_root=FEATURE_PATH)

    train_loader = torch.utils.data.DataLoader(
        MyDataSet(root=FEATURE_PATH,
                  mode='train',
                  patch_size=args.patch_size,
                  num_bins=args.bin_num,
                  num_edges=args.edge_num,
                  patch_nearest=args.patch_nearest_num),
        batch_size=args.batch_size,
        shuffle=True)
    val_loader = torch.utils.data.DataLoader(
        MyDataSet(root=FEATURE_PATH,
                  mode='val',
                  patch_size=args.patch_size,
                  num_bins=args.bin_num,
                  num_edges=args.edge_num,
                  patch_nearest=args.patch_nearest_num),
        batch_size=args.batch_size,
        shuffle=True)

    model = ClassifierNetwork(bin_num=args.bin_num,
                              edge_num=args.edge_num).cuda(args.gpu)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)

    train_logger_path = './logs/{}_bins_{}_edges_{}_lr_{}_wd_{}/train'.format(
        args.model, args.bin_num, args.edge_num, args.lr, args.weight_decay)
    val_logger_path = './logs/{}_bins_{}_edges_{}_lr_{}_wd_{}/val'.format(
        args.model, args.bin_num, args.edge_num, args.lr, args.weight_decay)
    if os.path.exists(train_logger_path):
        shutil.rmtree(train_logger_path)
    if os.path.exists(val_logger_path):
        shutil.rmtree(val_logger_path)
    logger_train = Logger(train_logger_path)
    logger_val = Logger(val_logger_path)

    best_acc = 0
    best_epoch = args.start_epoch  # defined even if no epoch improves best_acc

    # Optionally resume from a checkpoint.
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            best_acc = checkpoint['best_acc']
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))

    for epoch in range(args.start_epoch, args.epochs):
        # print('learning rate is {}\n'.format(optimizer.param_groups[0]['lr']))

        # Train for one epoch.
        train(train_loader, model, criterion, optimizer, epoch, logger_train,
              args)

        # Evaluate for one epoch.
        val_loss, val_acc = validate(val_loader, model, criterion, epoch,
                                     logger_val, args)

        # Adjust the learning rate if a scheduler is used.
        # scheduler.step(val_loss)

        # Save a checkpoint.
        model_save_dir = './trained_model/'
        if not os.path.exists(model_save_dir):
            os.mkdir(model_save_dir)
        model_save_path = os.path.join(
            model_save_dir, '{}_bins_{}_edges_{}_epochs_{}.pth.tar'.format(
                args.model, args.bin_num, args.edge_num, args.epochs))

        # Keep only the best-accuracy checkpoint.
        is_best = val_acc > best_acc
        best_acc = max(val_acc, best_acc)
        if is_best:
            best_epoch = epoch
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'best_acc': best_acc,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict()
                }, model_save_path)

    print('best top1 accuracy is', best_acc)
    print('best top1 accuracy epoch is', best_epoch)

    print('-' * 20)
    print('Testing')
    test(args=args, model_save_dir='./trained_model')
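# A minimal sketch of the argument parsing and entry point that main(args)
# above expects. The argument names mirror the attributes main() reads; all
# defaults, as well as the NPY_PATH / FEATURE_PATH values, are hypothetical.
import argparse

NPY_PATH = './data/npy'
FEATURE_PATH = './data/features'


def get_args():
    parser = argparse.ArgumentParser(description='Classifier network training')
    parser.add_argument('--model', type=str, default='classifier')
    parser.add_argument('--patch_size', type=int, default=16)
    parser.add_argument('--patch_nearest_num', type=int, default=8)
    parser.add_argument('--head_neuron', type=int, default=0)
    parser.add_argument('--tail_neuron', type=int, default=100)
    parser.add_argument('--bin_num', type=int, default=32)
    parser.add_argument('--edge_num', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--weight_decay', type=float, default=1e-4)
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--start_epoch', type=int, default=0)
    parser.add_argument('--resume', type=str, default='',
                        help='path to a checkpoint to resume from')
    return parser.parse_args()


if __name__ == '__main__':
    main(get_args())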
def main():
    args = sys.argv

    train_loader, test_loader = load_cifar10(batch_size=batch_size)

    # Train on plain CIFAR-10.
    if args[1] == 'train1':
        train_errs, train_losses = [], []
        model = resnet20().to(device)
        opt = optim.SGD(model.parameters(),
                        lr=1e-1,
                        momentum=0.9,
                        weight_decay=0.0005)
        train_err, train_loss = train(epochs=epochs,
                                      model=model,
                                      loader=train_loader,
                                      opt=opt)
        results = {}
        results['train_error'] = train_err
        results['train_loss'] = train_loss

    # Train with 1-8 bit mixed inputs.
    if args[1] == 'train2':
        train_errs, train_losses = [], []
        model = resnet20().to(device)
        opt = optim.SGD(model.parameters(),
                        lr=1e-1,
                        momentum=0.9,
                        weight_decay=0.0005)
        train_err, train_loss = train_1_8bit(epochs=epochs,
                                             model=model,
                                             loader=train_loader,
                                             opt=opt)
        results = {}
        results['train_error'] = train_err
        results['train_loss'] = train_loss

    # Test the pixel-quantized model.
    if args[2] == 'test':
        test_errs = []
        model = resnet20().to(device)
        model.load_state_dict(torch.load(path + 'pixel_quantized_model'))
        model.eval()
        test_err, _ = test(model=model, loader=test_loader)
        results = {}
        results['test_error'] = test_err
        with open(save_path, 'w') as f:
            json.dump(results, f, indent=4)

    # Test the pixel-quantized model at bit depths 1-8.
    if args[2] == 'test_1_8':
        test_errs = []
        model = resnet20().to(device)
        model.load_state_dict(torch.load(path + 'pixel_quantized_model'))
        model.eval()
        for i in range(1, 9):
            test_err, _ = test_1_8bit(model=model,
                                      loader=test_loader,
                                      bitdepth=i)
            test_errs.append(test_err)
        results = {}
        results['test_errors'] = test_errs
        with open(save_path_1_8, 'w') as f:
            json.dump(results, f, indent=4)

    # Test the models trained on 1-8 bit dithered inputs.
    if args[2] == 'test3':
        test_errs = []
        model = resnet20().to(device)
        for i in range(1, 9):
            model.load_state_dict(
                torch.load(dither_path + '{}bit_dither'.format(i)))
            model.eval()
            test_err, _ = test(model=model, loader=test_loader)
            test_errs.append(test_err)
        results = {}
        results['test_errors'] = test_errs
        with open(save_path_3, 'w') as f:
            json.dump(results, f, indent=4)
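# A minimal sketch of the module-level configuration that main() above reads.
# Only the names are taken from the usages in main(); every path and
# hyper-parameter value here is a hypothetical placeholder.
import torch

batch_size = 128
epochs = 200
device = 'cuda' if torch.cuda.is_available() else 'cpu'

path = './models/'                # directory holding 'pixel_quantized_model'
dither_path = './models/dither/'  # directory holding the '{n}bit_dither' checkpoints
save_path = './results/test_results.json'
save_path_1_8 = './results/test_1_8_results.json'
save_path_3 = './results/test3_results.json'

if __name__ == '__main__':
    # Example invocation (script name is hypothetical):
    #   python cifar10_quantize.py train1 test
    main()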
def main():
    args = sys.argv

    train_loader, test_loader = load_GTSRB(root_dir=root_dir,
                                           batch_size=batch_size,
                                           test_split=test_split)

    # Plain run. Note: "binaly" is the spelling used throughout this script
    # and in the saved file names; it means "binary".
    if args[1] != 'binaly':
        binaly = False

        # Train.
        model = lenet().to(device)
        num_ftrs = model.fc3.in_features
        model.fc3 = nn.Linear(num_ftrs, num_classes)
        model = model.to(device)
        opt = optim.SGD(model.parameters(),
                        lr=1e-3,
                        momentum=0.9,
                        weight_decay=1e-6)
        train_err, train_loss = train(epochs=epochs,
                                      model=model,
                                      loader=train_loader,
                                      opt=opt,
                                      binaly=binaly)
        results = {}
        results['train_error'] = train_err
        results['train_loss'] = train_loss
        with open(save_path + 'train_results.json', 'w') as f:
            json.dump(results, f, indent=4)

        # Test.
        model = models.resnet18().to(device)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)
        model = model.to(device)
        model.load_state_dict(torch.load(model_path + 'trained_model'))
        model.eval()
        test_err, _ = test(model=model, loader=test_loader, binaly=binaly)
        results = {}
        results['test_error'] = test_err
        with open(save_path + 'test_results.json', 'w') as f:
            json.dump(results, f, indent=4)

    # Binary ("binaly") run.
    else:
        binaly = True

        # Train (binary).
        model = models.resnet18().to(device)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)
        model = model.to(device)
        opt = optim.SGD(model.parameters(),
                        lr=1e-4,
                        momentum=0.9,
                        weight_decay=1e-6)
        train_err, train_loss = train(epochs=epochs,
                                      model=model,
                                      loader=train_loader,
                                      opt=opt,
                                      binaly=binaly)
        results_binaly = {}
        results_binaly['train_binaly_error'] = train_err
        results_binaly['train_binaly_loss'] = train_loss
        with open(save_path + 'binaly_train_results.json', 'w') as f:
            json.dump(results_binaly, f, indent=4)

        # Test (binary).
        model = models.resnet18().to(device)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)
        model = model.to(device)
        model.load_state_dict(torch.load(model_path + 'binaly_trained_model'))
        model.eval()
        test_err, _ = test(model=model, loader=test_loader, binaly=binaly)
        results_binaly = {}
        results_binaly['test_binaly_error'] = test_err
        with open(save_path + 'binaly_test_results.json', 'w') as f:
            json.dump(results_binaly, f, indent=4)
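# A minimal sketch of the module-level configuration this main() depends on.
# Names come from the usages above; every value is a hypothetical placeholder.
import torch

root_dir = './data/GTSRB'
batch_size = 64
test_split = 0.2
num_classes = 43          # GTSRB has 43 traffic-sign classes
epochs = 30
device = 'cuda' if torch.cuda.is_available() else 'cpu'

save_path = './results/'  # prefix for the *_results.json files
model_path = './models/'  # prefix for the saved model checkpoints

if __name__ == '__main__':
    # Example invocation (script name is hypothetical):
    #   python gtsrb_main.py binaly     (any other first argument runs the plain branch)
    main()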