# Assumes PyQt5 imports at module level:
#   from PyQt5.QtWidgets import QLabel
#   from PyQt5.QtGui import QPixmap
#   from PyQt5.QtCore import Qt

def test(self):
    # Hide the currently displayed layout images
    for label in (self.m1, self.m2, self.m3, self.m4, self.m5, self.m6):
        label.hide()

# (body of the method that rebuilds the image grid)
if hasattr(self, 'm1'):
    self.test()  # hide the previous labels before creating new ones

# (filename, x, y) for each estimated/real layout pair, laid out as a 3x2 grid
placements = [
    ('estimated_layout.png', 0, 70),
    ('real_layout.png', 0, 520),
    ('estimated_layout2.png', 450, 70),
    ('real_layout2.png', 450, 520),
    ('estimated_layout3.png', 950, 70),
    ('real_layout3.png', 950, 520),
]
for idx, (fname, x, y) in enumerate(placements, start=1):
    label = QLabel(self)
    # Fit each image inside 500x500 while preserving its aspect ratio
    label.setPixmap(QPixmap(fname).scaled(
        500, 500, Qt.KeepAspectRatio, Qt.SmoothTransformation))
    label.adjustSize()
    label.move(x, y)
    label.show()
    setattr(self, 'm{}'.format(idx), label)  # keeps the self.m1..self.m6 names
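# A minimal, hypothetical harness for the snippet above (the class name
# LayoutViewer and the window size are assumptions; the original excerpt does
# not show the enclosing widget class):
#
#   import sys
#   from PyQt5.QtWidgets import QApplication, QWidget
#
#   class LayoutViewer(QWidget):
#       ...  # test() and the grid-building code above go here
#
#   app = QApplication(sys.argv)
#   viewer = LayoutViewer()
#   viewer.resize(1450, 1030)  # columns at x = 0/450/950, rows at y = 70/520
#   viewer.show()
#   sys.exit(app.exec_())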
import argparse
import os
import pickle

import numpy as np
import torch

# StimGenerator, STPNet, STPRNN, OptimizedRNN, test() and
# compute_confusion_matrix() are assumed to be defined elsewhere in this
# repository; their imports are not part of this excerpt.


def main():
    # Training settings
    parser = argparse.ArgumentParser(description='Models of change detection')
    parser.add_argument('--image-set', type=str, default='A', metavar='I',
                        help='image set to train on: A, B, C, D (default: A)')
    parser.add_argument('--model', type=str, default='STPNet', metavar='M',
                        help='model to train: STPNet, RNN, or STPRNN (default: STPNet)')
    parser.add_argument('--model-path', type=str, default='',
                        help='path to saved model')
    parser.add_argument('--noise-std', type=float, default=0.0, metavar='N',
                        help='standard deviation of noise (default: 0.0)')
    parser.add_argument('--syn-tau', type=float, default=6.0, metavar='N',
                        help='STPNet recovery time constant (default: 6.0)')
    parser.add_argument('--hidden-dim', type=int, default=16, metavar='N',
                        help='hidden dimension of model (default: 16)')
    parser.add_argument('--seq-length', type=int, default=50000, metavar='N',
                        help='length of each trial (default: 50000)')
    parser.add_argument('--delay-dur', type=int, default=500, metavar='N',
                        help='delay duration (default: 500 ms)')
    parser.add_argument('--batch-size', type=int, default=128, metavar='N',
                        help='number of test trial batches (default: 128)')
    parser.add_argument('--omit-frac', type=float, default=0.0, metavar='O',
                        help='fraction of omitted flashes (default: 0.0)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    # Set random seed
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    # Create test stimulus generator
    test_generator = StimGenerator(image_set=args.image_set,
                                   seed=args.seed,
                                   batch_size=args.batch_size,
                                   seq_length=args.seq_length,
                                   delay_dur=args.delay_dur,
                                   omit_frac=args.omit_frac)

    # Get input dimension of feature vector
    input_dim = len(test_generator.feature_dict[0])

    # Create model
    if args.model == 'STPNet':
        model = STPNet(input_dim=input_dim,
                       hidden_dim=args.hidden_dim,
                       syn_tau=args.syn_tau,
                       noise_std=args.noise_std).to(device)
    elif args.model == 'STPRNN':
        model = STPRNN(input_dim=input_dim,
                       hidden_dim=args.hidden_dim,
                       syn_tau=args.syn_tau,
                       noise_std=args.noise_std).to(device)
    elif args.model == 'RNN':
        model = OptimizedRNN(input_dim=input_dim,
                             hidden_dim=args.hidden_dim,
                             noise_std=args.noise_std).to(device)
    else:
        raise ValueError("Model not found")

    # Load saved parameters
    model.load_state_dict(torch.load(args.model_path)['state_dict'])

    # Test model
    dprime, hr, far, input, hidden, output, pred, image, labels, omit = test(
        args, device, test_generator, model)

    # Save results
    results_dict = {}
    results_dict['dprime'] = dprime
    results_dict['hr'] = hr
    results_dict['far'] = far
    results_dict['input'] = input.cpu().numpy()
    results_dict['hidden'] = hidden.cpu().numpy()
    results_dict['output'] = output.cpu().numpy()
    results_dict['pred'] = pred.cpu().numpy()
    results_dict['image'] = image
    results_dict['labels'] = labels.cpu().numpy()

    # Compute confusion matrix
    response_matrix, total_matrix, confusion_matrix = compute_confusion_matrix(
        test_generator.num_images, labels, image, pred,
        test_generator.image_steps + test_generator.delay_steps)
    results_dict['response_matrix'] = response_matrix
    results_dict['total_matrix'] = total_matrix
    results_dict['confusion_matrix'] = confusion_matrix

    # Compute omitted flash results
    if args.omit_frac > 0:
        shift = 3
        results_dict['omit'] = omit
        # Non-change time steps (label 0), excluding image index 8 and
        # omitted flashes
        all_flashes = np.where(
            (labels.cpu().numpy().squeeze() == 0) & (image != 8) & (omit == 0))
        omitted_flashes = np.where(omit)
        # Shift the omission mask right by `shift` steps to index the flashes
        # that immediately follow an omission
        post_omitted_flashes = np.where(
            np.pad(omit, ((0, 0), (shift, 0)), mode='constant')[:, :-shift])
        # Fraction of time steps in each group on which the model responded
        results_dict['all_flashes'] = (
            pred[all_flashes[0], all_flashes[1]].sum().float() /
            len(all_flashes[0])).item()
        results_dict['omitted_flashes'] = (
            pred[omitted_flashes[0], omitted_flashes[1]].sum().float() /
            len(omitted_flashes[0])).item()
        results_dict['post_omitted_flashes'] = (
            pred[post_omitted_flashes[0], post_omitted_flashes[1]].sum().float() /
            len(post_omitted_flashes[0])).item()

    # Save results (protocol 2 keeps the pickle readable from Python 2)
    save_path = './RESULT/' + args.model
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    with open(os.path.join(save_path, "_".join(
            [args.model, args.image_set, str(args.seed)]) + '.pkl'), 'wb') as f:
        pickle.dump(results_dict, f, protocol=2)


if __name__ == '__main__':
    main()
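# Example invocation (hypothetical script name and checkpoint path; the
# checkpoint must contain a 'state_dict' entry matching the chosen model):
#
#   python test.py --model STPNet --image-set A --seed 1 \
#       --model-path ./models/STPNet_A_1.pt --omit-frac 0.05
#
# This would pickle the results to ./RESULT/STPNet/STPNet_A_1.pkl.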
# (earlier arguments such as --datadir, --pre_model, --hidden_units, --lrate,
# --epochs and --gpu are added to the parser above this excerpt)
parser.add_argument('--output', action='store', dest='output', type=int,
                    help='number of output classes')
parser.add_argument('--print_every', action='store', default=20,
                    dest='print_every', type=int,
                    help='print losses and accuracy every this many batches')
parser.add_argument('--train', action='store_true', dest='train', default=False,
                    help='run training (omit the flag to skip it)')
parser.add_argument('--save_dir', action='store', dest='save', default=None,
                    help='checkpoint file name')
results = parser.parse_args()

if results.train:
    # Load data into image loaders (train, test, validation)
    trainloader, testloader, validloader, classes = utilities.load_transform(
        results.datadir)
    # Build the network and optimizer from the chosen pretrained architecture
    model, optimizer = utilities.network(results.pre_model, results.hidden_units,
                                         results.output, results.lrate,
                                         results.gpu)
    utilities.train(trainloader, validloader, model, optimizer,
                    results.epochs, results.print_every, results.gpu)
    utilities.test(model, testloader, results.gpu)
    saved_dict = {'arch': results.pre_model,
                  'hidden_units': results.hidden_units,
                  'class_labels': classes,
                  'epochs': results.epochs,
                  'learning_rate': results.lrate,
                  'output_classes': results.output,
                  'model_state_dict': model.state_dict(),
                  'optimizer': optimizer.state_dict()}
    utilities.save_checkpoint(saved_dict, results.save)
    print("The checkpoint was saved in {}".format(results.save))

'''
The average validation accuracy: 87.019
Accuracy of the network on the test images: 83.57 %
'''
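# Example invocation (hypothetical script name, data directory, and flag
# spellings for the options defined before this excerpt):
#
#   python train.py --datadir ./flowers --pre_model vgg16 --hidden_units 512 \
#       --output 102 --lrate 0.001 --epochs 5 --print_every 20 \
#       --gpu --train --save_dir checkpoint.pth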
            alpha=a, log_epoch=2)  # end of a call whose opening is not in this excerpt
        saver_path = os.path.join(
            saver_dir_models,
            'checkpoint_{}_epochs_{}_alpha_{}'.format(type(i).__name__, epochs, a))
        torch.cuda.empty_cache()
        # Persist the trained model; the context manager guarantees the file
        # is actually closed
        with open(saver_path, "wb") as f:
            pickle.dump(i, f)
        # Evaluate fairness/performance metrics on the validation and test sets
        metric_val, metric_test = test(data_val, data_test, i,
                                       output_val, output_test,
                                       sensitive_val, sensitive_test,
                                       threshold, model_AIF, k,
                                       dataloader_val, dataloader_test,
                                       prot, un_gr, pr_gr)
        # Write one spreadsheet row: model name in column 0, validation
        # metrics in columns 1-8, alpha in the last column, and test metrics
        # in the columns in between
        for column, _ in enumerate(columns):
            if column == 0:
                name = type(i).__name__
                sheets.write(row, column, name)
            elif 0 < column < 9:
                sheets.write(row, column, metric_val[column - 1])
            elif column == len(columns) - 1:
                sheets.write(row, column, a)
            else:
                sheets.write(row, column, metric_test[column - 9])
        wb.save(saver_dir_results)
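# The row layout above implies a `columns` header list of this shape
# (hypothetical metric names; only the positions are fixed by the code):
#
#   METRICS = ['acc', 'bal_acc', 'AOD', 'EOD', 'SPD', 'DI', 'TI', 'consistency']
#   columns = (['model'] + [m + '_val' for m in METRICS]
#              + [m + '_test' for m in METRICS] + ['alpha'])
#
# i.e. 8 validation metrics in columns 1-8, 8 test metrics in columns 9-16,
# and the alpha value in the final column.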