Example No. 1
def main(args):
    config = getconfig(args)
    config['testdir'] = args.testdir
    print("loading data.")
    vocab, c2i, i2c = get_vocab_from_file(args.testdir + "/vocab.txt")
    print("Vocab size is", len(vocab))
    s_t, e_t, n_t = get_input_data(args.testdir + "/out.txt",
                                   args.testdir + "/out_y.txt",
                                   args.testdir + "/out_names.txt", c2i)
    test_data = ToyDataset(s_t, e_t, n_t)
    print("Done.")

    ## make data generator
    test_dataloader = torch.utils.data.DataLoader(
        test_data,
        pin_memory=True,
        batch_size=config['batch_size'],
        collate_fn=mycollate,
        num_workers=0,
        shuffle=False)

    model = DockRegressor(config['vocab_size'],
                          config['emb_size'],
                          max_len=config['max_len']).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

    epoch_start = 0
    if args.ct:
        print("Continuing from save.")
        pt = torch.load(args.logdir + "/autosave.model.pt")
        model.load_state_dict(pt['state_dict'])
        optimizer.load_state_dict(pt['optim_state_dict'])

    test_model(model, optimizer, test_dataloader, config)
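test_model itself is not shown in this snippet. For orientation, here is a minimal sketch of what such an evaluation loop could look like, assuming mycollate yields (inputs, targets, names) batches and a regression objective; the name and signature below are hypothetical:

import torch
import torch.nn.functional as F

def test_model_sketch(model, dataloader, device='cpu'):
    # Hypothetical stand-in for the test_model called above (definition not shown).
    model.eval()
    total, batches = 0.0, 0
    with torch.no_grad():  # no gradients needed at test time
        for x, y, _names in dataloader:  # assumed batch layout from mycollate
            pred = model(x.to(device))
            total += F.mse_loss(pred, y.to(device)).item()
            batches += 1
    return total / max(batches, 1)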
Example No. 2
def main(config):
    if config['mode'] == 'test':
        phases = ['train', 'test']
    else:
        phases = ['train', 'val']
    dataloaders, ques_vocab, ans_vocab = load_datasets(config, phases)

    # add model parameters to config
    config['model']['params']['vocab_size'] = len(ques_vocab)
    config['model']['params']['output_size'] = len(
        ans_vocab) - 1  # -1 so the model never predicts '<unk>'
    config['model']['params'][
        'exatract_img_features'] = 'preprocess' in config['data'][
            'images'] and config['data']['images']['preprocess']
    # features dir: currently always the test embeddings dir
    config['model']['params']['features_dir'] = os.path.join(
        config['data']['dir'], config['data']['test']['emb_dir'])
    if config['model']['type'] == 'vqa':
        model = VQAModel(mode=config['mode'], **config['model']['params'])
    elif config['model']['type'] == 'san':
        model = SANModel(mode=config['mode'], **config['model']['params'])
    else:
        raise ValueError("Unknown model type: %s" % config['model']['type'])
    print(model)
    criterion = nn.CrossEntropyLoss()

    if config['optim']['class'] == 'sgd':
        optimizer = optim.SGD(
            filter(lambda p: p.requires_grad, model.parameters()),
            **config['optim']['params'])
    elif config['optim']['class'] == 'rmsprop':
        optimizer = optim.RMSprop(
            filter(lambda p: p.requires_grad, model.parameters()),
            **config['optim']['params'])
    else:
        optimizer = optim.Adam(
            filter(lambda p: p.requires_grad, model.parameters()),
            **config['optim']['params'])

    best_acc = 0
    startEpoch = 0
    if 'reload' in config['model']:
        pathForTrainedModel = os.path.join(config['save_dir'],
                                           config['model']['reload'])
        if os.path.exists(pathForTrainedModel):
            print("=> loading checkpoint/model found at '{0}'".format(
                pathForTrainedModel))
            checkpoint = torch.load(pathForTrainedModel)
            startEpoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            # optimizer.load_state_dict(checkpoint['optimizer'])
    if config['use_gpu']:
        model = model.cuda()

    print('config mode ', config['mode'])
    save_dir = os.path.join(os.getcwd(), config['save_dir'])

    if config['mode'] == 'train':
        if 'scheduler' in config['optim'] and config['optim'][
                'scheduler'].lower() == 'CustomReduceLROnPlateau'.lower():
            print('CustomReduceLROnPlateau')
            exp_lr_scheduler = CustomReduceLROnPlateau(
                optimizer, config['optim']['scheduler_params']
                ['maxPatienceToStopTraining'],
                config['optim']['scheduler_params']['base_class_params'])
        else:
            # Decay LR by a factor of gamma every step_size epochs
            print('lr_scheduler.StepLR')
            exp_lr_scheduler = lr_scheduler.StepLR(optimizer,
                                                   step_size=7,
                                                   gamma=0.1)

        print("begin training")
        model = train_model(model,
                            dataloaders,
                            criterion,
                            optimizer,
                            exp_lr_scheduler,
                            save_dir,
                            num_epochs=config['optim']['n_epochs'],
                            use_gpu=config['use_gpu'],
                            best_accuracy=best_acc,
                            start_epoch=startEpoch)
    elif config['mode'] == 'test':
        outputfile = os.path.join(save_dir, config['mode'] + ".json")
        test_model(model,
                   dataloaders['test'],
                   VQADataset.ans_vocab,
                   outputfile,
                   use_gpu=config['use_gpu'])
    else:
        print("Invalid config mode %s !!" % config['mode'])
Example No. 3
# Set model and load weights
model = UNet3D(3, 3).to(device)
model.load_state_dict(torch.load(MODEL))

# Set optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
#optimizer = torch.optim.Adam(model.parameters())

# Set criterion
criterion = LossFunction().to(device)

test_loss = []
# Iterate over videos.
for video_step, video_loader in enumerate(val_loader):
    # Iterate over frames.
    for sample_step, sample in enumerate(video_loader):

        # Send data to device
        y, x = sample['y'].to(device), sample['x'].to(device)

        # Test model with sample
        loss = test_model(model, {'x': x, 'y': y}, criterion, optimizer)
        test_loss.append(loss)

        if sample_step == 0:
            # log.log_images(x, y, '<PATH>/{}_'.format(n_samples))
            pass

# Logs after test
log.log_time('Total Loss: {:.6f}\tAvg Loss: {:.6f}'
             .format(np.sum(test_loss), np.average(test_loss)))
Example No. 4
        if args.optimizer == 'adam':
            optimizer = optim.Adam(model.parameters(), lr=args.lr)
        elif args.optimizer == 'sgd':
            optimizer = optim.SGD(model.parameters(), lr=args.lr)
        elif args.optimizer == 'sgdmom':
            optimizer = optim.SGD(model.parameters(),
                                  lr=args.lr,
                                  momentum=args.momentum)
        else:
            optimizer = optim.SGD(model.parameters(),
                                  lr=args.lr,
                                  momentum=args.momentum,
                                  weight_decay=0.0005)

        # gamma=1 leaves the LR constant (StepLR would otherwise decay it by gamma every step_size epochs)
        exp_lr_scheduler = lr_scheduler.StepLR(optimizer,
                                               step_size=args.step_size,
                                               gamma=1)

        # TODO: Store the parameters and use them to initialise next time.
        model = train_model(model,
                            dataloaders,
                            criterion,
                            optimizer,
                            exp_lr_scheduler,
                            args.save_dir,
                            num_epochs=args.epochs,
                            use_gpu=args.use_gpu)

    test_model(model, dataloaders['test'], use_gpu=args.use_gpu)
Example No. 5
def main():
    print(torch.__version__)
    # To split the data into train and validation sets automatically:
    # this function moves a fraction of the training images to the validation folder, preserving the class folder names.
    if(not os.path.exists(vars.val_dir)):
        utils.create_validation_data(vars.train_dir, vars.val_dir, vars.val_split_ratio, 'jpg')


    def handler(signum, frame):
        print('Signal handler called with signal', signum)
        print('Training will finish after this epoch')
        vars.stop_training = True
        #raise OSError("Couldn't open vars.device!")

    signal.signal(signal.SIGINT, handler) # only in python version >= 3.2

    print("Start Time: ", strftime("%Y-%m-%d %H:%M:%S", localtime()))
    print("Active Mode: " + vars.mode)
    plt.ion()   # interactive mode
    ######################################################################
    # Load Data
    # Data augmentation and normalization for training
    # Just normalization for validation

    data_transforms = {
        'train': transforms.Compose([
            transforms.Resize([vars.input_size, vars.input_size]),
            #transforms.ColorJitter(0.1, 0.1, 0.1, 0.01),
            #transforms.RandomRotation(5),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'val': transforms.Compose([
            transforms.Resize([vars.input_size, vars.input_size]),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'test': transforms.Compose([
            transforms.Resize([vars.input_size, vars.input_size]),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }


    # image_dataset_train = {'train': datasets.ImageFolder(os.path.join(vars.data_dir, 'train'), data_transforms['train'])}
    # image_dataset_test = {'val': datasets.ImageFolder(os.path.join(vars.data_dir, 'val'), data_transforms['val'])}
    # image_dataset_train.update(image_dataset_test)
    # The line below is equivalent to the three commented lines above!
    image_datasets = {x: datasets.ImageFolder(os.path.join(vars.data_dir, x), data_transforms[x])
                    for x in ['train', 'val']}

    vars.dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=vars.batch_size, shuffle=True, num_workers=0)
                for x in ['train', 'val']}

    vars.dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
    vars.class_names = image_datasets['train'].classes

    # Get a batch of training data
    inputs, classes = next(iter(vars.dataloaders['train']))

    # Make a grid from batch
    out = torchvision.utils.make_grid(inputs)

    #utils.imshow(out, title=[vars.class_names[x] for x in classes])


    ######################################################################
    # Finetuning the convnet
    # Load a pretrained model and reset final fully connected layer.

    # model, input_size = initialize_model(model_name, num_classes, feature_extract, use_pretrained=True)
    if(vars.model_name.find('vgg') != -1):
        model = models.vgg11_bn(pretrained=vars.pre_trained)
        num_ftrs = model.classifier[6].in_features
        model.classifier[6] = nn.Linear(num_ftrs, len(vars.class_names))
    elif(vars.model_name == 'resnet152'):
        model = models.resnet152(pretrained=vars.pre_trained)	
    elif(vars.model_name == 'resnet50'):
        model = models.resnet50(pretrained=vars.pre_trained)
    elif(vars.model_name == 'resnet18'):
        model = models.resnet18(pretrained=vars.pre_trained)
    elif(vars.model_name == 'googlenet'):
        model = models.googlenet(pretrained=vars.pre_trained)
    elif(vars.model_name == 'darknet53'):
        model = darknet.darknet53(1000)
        optimizer = optim.SGD(model.parameters(), lr = vars.learning_rate, momentum=0.9)
        if(vars.pre_trained):
            #model.load_state_dict( torch.load('D:\\Projects\\_Python\\car Detection\\model_best.pth.tar') )	
            checkpoint = torch.load('D:\\Projects\\_Python\\car Detection\\model_best.pth.tar')
            start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if vars.device.type == 'cuda':
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(vars.device)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
    elif(vars.model_name == 'car3conv'):
        model = mymodels.car3conv()
    elif(vars.model_name == 'car5conv'):
        model = mymodels.car5conv()	
    elif(vars.model_name == 'car2conv'):
        model = mymodels.car2conv()		
    elif(vars.model_name == 'mymodel'):
        model = mymodels.MyModel()
    elif(vars.model_name == 'mymodel2'):
        model = mymodels.MyModel2()
    else:
        return hogmain.main(None)
        

    if(vars.model_name != 'vgg11'):
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, len(vars.class_names))

    model.to(vars.device)

    if(vars.mode == 'test'):#test
        #model.load_state_dict(torch.load("D:\\Projects\\Python\\Zeitoon Detection\"))
        model.load_state_dict(torch.load(vars.test_model))

        # log_dir = '{}-{}-{}-batch-{}'.format(vars.model_name, 'SGD', 'cuda', vars.batch_size)

        # if(vars.pre_trained):
        #	 log_dir = log_dir + '-pretrained'
        # if not os.path.exists(log_dir):
        #	 os.mkdir(log_dir)

        log_file = open(".\\Time-{}-{}.log".format(vars.model_name, vars.batch_size),"w")	
        for dev in ['cuda', 'cpu']:
            vars.device = torch.device(dev)
            model = model.to(vars.device)
            #run model on one batch to allocate required memory on device (and have more exact results)
            inputs = inputs.to(vars.device)
            outputs = model(inputs)

            s = test_model(model, vars.criterion, 'val', 100)
            log_file.write(s)
            #log_file.write('\n' + '-'*80)
        
        log_file.write(summary(model, input_size=(3, vars.input_size, vars.input_size), batch_size=-1, device=vars.device.type))
        log_file.close() 


        print(summary(model, input_size=(3, vars.input_size, vars.input_size), batch_size=-1, device=vars.device.type))
    else:
        print(summary(model, input_size=(3, vars.input_size, vars.input_size), batch_size=-1, device=vars.device.type))

        #model.load_state_dict(torch.load("C:\\Projects\\Python\\Car Detection\\darknet53-SGD-cuda-batch-32\\ep7-acc97.83-loss0.0667.pth"))

        optimizer = optim.SGD(model.parameters(), lr = vars.learning_rate, momentum=0.9)
        #optimizer = optim.Adam(model.parameters(), lr=0.05)
        # Decay LR by a factor of 0.6 every 6 epochs
        exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size = vars.scheduler_step_size, gamma = vars.scheduler_gamma)
        model = model.to(vars.device)
        model = train_model(model, vars.criterion, optimizer, exp_lr_scheduler, vars.num_epochs)
        visualize_model(model)
        plt.ioff()
        plt.show()
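The backbone selection above is a long if/elif chain; for the plain torchvision cases, the same dispatch can be sketched with a dict (custom models such as darknet53 and mymodels would keep their special handling; the helper name is hypothetical):

import torch.nn as nn
from torchvision import models

def build_backbone(name, num_classes, pretrained=True):
    # Name-to-constructor dispatch for the torchvision branches of the chain above.
    backbones = {
        'resnet18': models.resnet18,
        'resnet50': models.resnet50,
        'resnet152': models.resnet152,
        'googlenet': models.googlenet,
    }
    model = backbones[name](pretrained=pretrained)
    model.fc = nn.Linear(model.fc.in_features, num_classes)  # reset the final layer
    return model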
Example No. 6
def main(args):
	global network
	train_set = utils.get_dataset(vars.train_dir) #Load dataset
	val_set = utils.get_dataset(vars.val_dir) 

	# Get a list of image paths and their labels
	train_img_list, train_labels = utils.get_image_paths_and_labels(train_set)
	assert len(train_img_list)>0, 'The training set should not be empty'

	val_img_list, val_labels = utils.get_image_paths_and_labels(val_set)

	#utils.augment_images(train_img_list, 4) # only needs to be called once, to generate several augmented images per input image (don't forget to set validation_set_split_ratio = 0)
	
	if(os.path.exists('train_descs.npy')):
		train_descs = np.load('train_descs.npy')
	else:
		train_descs = hogutils.get_hog_desc(train_img_list, False)
		np.save('train_descs.npy', train_descs)

	if(os.path.exists('val_descs.npy')):
		val_descs = np.load('val_descs.npy')
	else:
		val_descs = hogutils.get_hog_desc(val_img_list, False)
		np.save('val_descs.npy', val_descs)

	train_labels = np.array(train_labels, dtype=np.int64)
	val_labels = np.array(val_labels, dtype=np.int64)
	# Shuffle data
	rand = np.random.RandomState(10)
	shuffle = rand.permutation(len(train_labels))	
	train_descs, train_labels = train_descs[shuffle], train_labels[shuffle]

	##############################################################################
	if(vars.model_name == 'mlp-torch'):
		model = torch.nn.Sequential(
				torch.nn.Linear(2025, 128),
				torch.nn.ReLU(),
				torch.nn.Linear(128, 64),
				torch.nn.ReLU(),
				torch.nn.Linear(64, 5),
			)

		train_dataset = Data.TensorDataset(torch.from_numpy(train_descs), torch.from_numpy(train_labels))
		val_dataset = Data.TensorDataset(torch.from_numpy(val_descs), torch.from_numpy(val_labels))
		datasets = {'train': train_dataset, 'val': val_dataset}

		vars.dataloaders = {x: Data.DataLoader(datasets[x], batch_size=vars.batch_size, shuffle=True, num_workers=0)
				for x in ['train', 'val']}

		vars.dataset_sizes = {x: len(datasets[x]) for x in ['train', 'val']}
		#vars.class_names = datasets['train'].classes

		optimizer = optim.SGD(model.parameters(), lr = vars.learning_rate, momentum=0.9)
		#optimizer = optim.Adam(model.parameters(), lr=0.05)
		# Decay LR by a factor of 0.6 every 6 epochs
		exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size = vars.scheduler_step_size, gamma = vars.scheduler_gamma)
		model = model.to(vars.device)
		model = train_model(model, vars.criterion, optimizer, exp_lr_scheduler, vars.num_epochs)

		log_file = open(".\\Time-{}-{}.log".format(vars.model_name, vars.batch_size),"w")	
		for dev in ['cuda', 'cpu']:
			vars.device = torch.device(dev)
			model = model.to(vars.device)
			#run model on one batch to allocate required memory on device (and have more exact results)
			inputs, classes = next(iter(vars.dataloaders['train']))
			inputs = inputs.to(vars.device)
			outputs = model(inputs)

			s = test_model(model, vars.criterion, 'val', 100)
			log_file.write(s)
			#log_file.write('\n' + '-'*80)
		
		#log_file.write(summary(model, input_size=(3, vars.input_size, vars.input_size), batch_size=-1, device=vars.device.type))
		log_file.close() 
	elif (vars.model_name == 'svm'):
		print('Training SVM model ...')
		model = svmInit()
		svmTrain(model, train_descs, train_labels)
		model.save('svm_model.xml')
		print('Evaluating model ... ')
		svmEvaluate(model, None, train_descs, train_labels)
		t0 = time.time()
		svmEvaluate(model, None, val_descs, val_labels)
		time_elapsed = time.time()-t0
		print('Test completed over {} samples in {:.2f}s'.format(len(val_labels), time_elapsed))
		print('Test time per sample {:.3f}ms'.format(time_elapsed * 1000 / len(val_labels)))
	elif (vars.model_name == 'knn'):
		print('Training KNN model ...')
		model = cv2.ml.KNearest_create()
		model.setDefaultK(5)
		model.setIsClassifier(True)
		model.train(train_descs, cv2.ml.ROW_SAMPLE, train_labels)
		model.save('knn.xml')
		print('Evaluating model ... ')
		svmEvaluate(model, None, train_descs, train_labels)
		t0 = time.time()
		svmEvaluate(model, None, val_descs, val_labels)
		time_elapsed = time.time()-t0
		print('Test completed over {} samples in {:.2f}s'.format(len(val_labels), time_elapsed))
		print('Test time per sample {:.3f}ms'.format(time_elapsed * 1000 / len(val_labels)))
	elif(vars.model_name == 'bayes'):
		print('Training Bayes model ...')
		model = cv2.ml.NormalBayesClassifier_create()
		model.train(train_descs, cv2.ml.ROW_SAMPLE, train_labels)
		model.save('bayes.xml')
		print('Evaluating model ... ')
		svmEvaluate(model, None, train_descs, train_labels)
		t0 = time.time()
		svmEvaluate(model, None, val_descs, val_labels)
		time_elapsed = time.time()-t0
		print('Test completed over {} samples in {:.2f}s'.format(len(val_labels), time_elapsed))
		print('Test time per sample {:.3f}ms'.format(time_elapsed * 1000 / len(val_labels)))

	elif(vars.model_name == 'mlp-keras'):
		train_labels = to_categorical(train_labels)
		if (len(val_labels) > 0):
			val_labels = to_categorical(val_labels)


		network.add(layers.Dense(128, activation='relu', input_shape=(2025,)))
		network.add(layers.Dense(64, activation='relu'))
		network.add(layers.Dense(5, activation='softmax'))
		
		opt = keras.optimizers.SGD(lr=0.05, momentum=0.5, decay=1e-3, nesterov=False)
		#opt = keras.optimizers.RMSprop(lr=0.001, decay=1e-6)
		#opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
		#opt = keras.optimizers.Nadam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-10, schedule_decay=0.004)

		network.summary()

		network.reset_states()
		network.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])		
		#saves the model weights after each epoch if the validation loss decreased
		now = datetime.now() # current date and time
		checkpointer = ModelCheckpoint(filepath='best_model_' + now.strftime("%Y%m%d") + '.hdf5', verbose=1, save_best_only=True)
		
		manageTrainEvents = ManageTrainEvents()
		history = network.fit(train_descs, train_labels, validation_data=(val_descs, val_labels), 
				epochs=vars.num_epochs, batch_size=vars.batch_size, callbacks=[checkpointer, manageTrainEvents])

		network.save('Rec_' + now.strftime("%Y%m%d-%H%M") + '.hdf5')
		#Plot loss and accuracy
		acc = history.history['acc']
		val_acc = history.history['val_acc']
		loss = history.history['loss']
		val_loss = history.history['val_loss']
		utils.plot_graphs(loss, val_loss, acc, val_acc, True)
		#Evaluate on test dataset
		print("\nComputing test accuracy")
		test_loss, test_acc = network.evaluate(val_descs, val_labels)
		print('test_acc:', test_acc)
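Because the labels above are one-hot encoded with to_categorical, categorical_crossentropy is the matching loss; with raw integer labels, sparse_categorical_crossentropy skips that step. A self-contained sketch with toy data of the same 2025-dim HOG shape (stand-in values, not the real descriptors):

import numpy as np
from tensorflow import keras
from tensorflow.keras import layers

x = np.random.rand(32, 2025).astype('float32')   # toy HOG descriptors
y_int = np.random.randint(0, 5, size=32)         # toy integer labels

net = keras.Sequential([
    layers.Dense(128, activation='relu', input_shape=(2025,)),
    layers.Dense(64, activation='relu'),
    layers.Dense(5, activation='softmax'),
])
# Integer labels with sparse_categorical_crossentropy: no to_categorical needed.
net.compile(optimizer='sgd', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
net.fit(x, y_int, epochs=1, batch_size=8, verbose=0)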
Example No. 7
def main(config):
    logfile = join(config['save_dir'], 'log')
    log(config, logfile)
    if config['mode'] == 'test':
        phases = ['test']
    else:
        phases = ['train', 'val']
    dataloaders, review_vocab, summary_vocab = load_datasets(
        config, phases, logfile)

    # Create Model
    model = build_model(config, review_vocab, summary_vocab, logfile)

    if config['mode'] == 'train':
        # Select Optimizer
        if config['optim']['class'] == 'sgd':
            optimizer = optim.SGD(
                filter(lambda p: p.requires_grad, model.parameters()),
                **config['optim']['params'])
        elif config['optim']['class'] == 'rmsprop':
            optimizer = optim.RMSprop(
                filter(lambda p: p.requires_grad, model.parameters()),
                **config['optim']['params'])
        else:
            optimizer = optim.Adam(
                filter(lambda p: p.requires_grad, model.parameters()),
                **config['optim']['params'])
        # Reload model from checkpoint if provided
        model, optimizer, best_fscore, start_epoch = reload(
            config, model, optimizer, logfile)
        log(optimizer, logfile)
        criterion = nn.CrossEntropyLoss()
        patience = config['optim']['scheduler']['patience']
        factor = config['optim']['scheduler']['factor']
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                                   mode='min',
                                                   patience=patience,
                                                   factor=factor,
                                                   threshold=0.05,
                                                   threshold_mode='rel',
                                                   verbose=True)
        log(scheduler, logfile)
        log("Begin Training...", logfile)
        model = train_model(model,
                            dataloaders,
                            criterion,
                            optimizer,
                            scheduler,
                            config['save_dir'],
                            num_epochs=config['training']['n_epochs'],
                            use_gpu=config['use_gpu'],
                            best_fscore=best_fscore,
                            start_epoch=start_epoch,
                            logfile=logfile)
    elif config['mode'] == 'test':
        # Reload model from checkpoint if provided
        model, _, _, _ = reload(config, model, logfile=logfile)
        log('Testing on {}...'.format(config['data']['test']['jsonfile']),
            logfile)
        test_model(model,
                   dataloaders['test'],
                   config['outputfile'],
                   use_gpu=config['use_gpu'],
                   logfile=logfile)
    else:
        log("Invalid config mode %s !!" % config['mode'], logfile)
Example No. 8
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 collate_fn=my_collate_fn)

    model = NamedEntityRecog(word_vocab.size(),
                             args.word_embed_dim,
                             args.word_hidden_dim,
                             alphabet.size(),
                             args.char_embedding_dim,
                             args.char_hidden_dim,
                             args.feature_extractor,
                             label_vocab.size(),
                             args.dropout,
                             pretrain_embed=pretrain_word_embedding,
                             use_char=args.use_char,
                             use_crf=args.use_crf,
                             use_gpu=use_gpu)
    if use_gpu:
        model = model.cuda()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
    model.load_state_dict(torch.load(model_name))
    test_model(test_dataloader, model, word_vocab, label_vocab, test_pred_file,
               use_gpu)

    print()
    print()
    print("Compressed sentence is \n")
    print(prediction(test_pred_file))
Example No. 9
                
                model = model.to(device)
                
                if adam or finetune:
                    print("Using adam")
                    opt = optim.Adam(model.parameters())
                else:
                    opt = optim.SGD(model.parameters(), lr=LR, momentum=0.9)
                scheduler = lr_scheduler.StepLR(opt, step_size=BATCH_SIZE//2, gamma=0.1)

                train_model(model, loss, metric, opt, scheduler, dataloaders[o], device, dataset_sizes[o], 
                            inepoch_plot=False, loss_name="Batch DICE Loss", metric_name="Batch DICE", model_name=basename + "-" + o, num_epochs=NEPOCHS, 
                            save=True, savefolder=model_folder)

                print("Testing train result...")
                test_model(model, loss, metric, dataloaders[o], dataset_sizes[o], device,
                        loss_name="Batch DICE Loss", metric_name="Batch DICE")
        # Only test and show results
        else: 
            print("Loading saved result and testing full volume with {}...".format(basename))
            plt.title("Loss in all orientations")
            models = get_models(bias, e2d, res, small, bn, dunet, model_folder=model_folder)
            for i, o in enumerate(orientations):
                path = os.path.join(model_folder, basename + "-" + o + ".pkl")
                loaded_results = TrainResults.load(path)
                
                loaded_results.plot(show=False, loss_only=True, o=o)

                print(o)
                if notest: continue
               
                models[o] = models[o].to(device)
Example No. 10
    # Reshape data to 4 dims
    # Data Order [FLAIR, IAM, T1W]
    test_data = [
        np.expand_dims(test_data[:, :, :, i], axis=3)
        for i in range(data_chn_num)
    ]
    ''' Train Networks'''
    train_config = TrainConfig(args)

    # U-Net (only FLAIR)
    test_model(
        train_config,
        test_data[0],
        test_trgt,
        net_depth=3,
        SALIENCY=False,
        DILATION=False,
        restore_dir=restore_weights_path +
        'UNet_depth3_FLAIR_20190116-0142_UNet_depth3_new_basic_ep80_0/train_models.h5',
        net_type='UNet_depth3_FLAIR',
        label_list=label_list,
        affine_list=affine_list)

    # U-Net (only IAM)
    K.clear_session()
    sess = tf.Session(config=config)
    K.set_session(sess)
    test_model(
        train_config,
        test_data[1],
        test_trgt,
        net_depth=3,
Example No. 11
    num_ftrs = model.fc.in_features
    model.fc = nn.Linear(num_ftrs, len(vars.class_names))

model.to(vars.device)
print(
    summary(model,
            input_size=(3, vars.input_size, vars.input_size),
            batch_size=-1,
            device=vars.device.type))

if (vars.mode == 'test'):  #test
    #model.load_state_dict(torch.load("D:\\Projects\\Python\\Zeitoon Detection\"))
    model.load_state_dict(torch.load(vars.test_model))
    model = model.to(vars.device)

    test_model(model, vars.criterion, 'test')
else:
    optimizer = optim.SGD(model.parameters(),
                          lr=0.001 if vars.pre_trained else 0.06,
                          momentum=0.9)
    #optimizer = optim.Adam(model.parameters(), lr=0.05)
    # Decay LR by a factor of 0.6 every 6 epochs
    exp_lr_scheduler = lr_scheduler.StepLR(
        optimizer, step_size=10 if vars.pre_trained else 6, gamma=0.6)
    model = model.to(vars.device)
    model = train_model(model, vars.criterion, optimizer, exp_lr_scheduler,
                        vars.num_epochs)
    visualize_model(model)

# ######################################################################
# # ConvNet as fixed feature extractor
Example No. 12
    # try:
    #     s = sys.argv[1]
    # except IndexError:
    #     s = ""
    # create_interactions(s)


    epochs = 1

    for i in range(0, 10):
        # Generate new data
        create_interactions(str(i))
        # Load
        model = load_model(name="model")
        model.compile(loss='mse', optimizer=RMSprop())
        # Train
        train_filename = "/ssd/train_extra.csv{}".format(i)
        model, losses = train_model(model, epochs, train_filename, nb_epoch=2)
        # Test
        print("MSE", losses[-1])
        test_filename = "/ssd/test_extra.csv{}".format(i)
        m = test_model(model, test_filename)
        # Save model
        save_model(model, name="model")
        # if m > 0.93:
        #     break



Example No. 13
    # Similar to our train script, but we do this k times
    for k, datasets in enumerate(iterate_folds(fold_sets)):
        train, val, test = datasets
        model = BeatNet(downbeats=args.downbeats)
        if cuda_device is not None:
            model.cuda(args.cuda_device)

        output_file = make_fold_output_name(args.output_file, k)

        train_loader, val_loader, test_loader = make_data_loaders(
            (train, val, test), batch_size=args.batch_size)

        train_loop(model,
                   train_loader,
                   val_loader=val_loader,
                   num_epochs=args.num_epochs,
                   cuda_device=cuda_device,
                   output_file=output_file,
                   davies_stopping_condition=args.davies_stopping_condition,
                   fold=k)

        if args.output_file is not None:
            save_model(model, output_file)

        if args.dataset_output_file is not None:
            save_dir = make_fold_output_name(args.dataset_output_file, k)
            save_datasets((train, val, test), save_dir)

        test_model(model, test_loader, cuda_device=cuda_device)
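make_fold_output_name is project-specific and not shown; a plausible sketch, assuming it tags the fold index onto the file name (hypothetical implementation):

import os

def make_fold_output_name(output_file, k):
    # e.g. 'beatnet.pt' -> 'beatnet.fold2.pt' for fold k=2 (assumed naming scheme)
    root, ext = os.path.splitext(output_file)
    return '{}.fold{}{}'.format(root, k, ext)

print(make_fold_output_name('beatnet.pt', 2))  # beatnet.fold2.pt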
Example No. 14
        transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),
        # transforms.Normalize((0.485, 0.456, 0.406),(0.485, 0.456, 0.406)),
    ])
    """
    Shape of images is (batch, CLASSES, 256, 256)
    Shape of masks is (batch, 256, 256)
    """
    # LOSS FUNCTION
    loss_fn = nn.CrossEntropyLoss()

    # LOADING THE DATASET INTO TRAINLOADER
    trainset = SegDataset(image_path, mask_path, transform=preprocess)
    train_loader = DataLoader(trainset,
                              BATCH_SIZE,
                              num_workers=NUM_WORKERS,
                              pin_memory=PIN_MEM,
                              shuffle=True)

    # Load the model & Optimizer
    model = SegNet()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)

    if args.train:
        print("Train Mode.")
        # Train Model
        train_model(model, optimizer, train_loader, loss_fn, device, EPOCHS)
    else:
        print("Test Mode")
        # Test model on batch
        test_model(model, train_loader, device)
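The shape note in this snippet matches what nn.CrossEntropyLoss expects for segmentation: (N, C, H, W) logits against (N, H, W) integer class maps, with no one-hot encoding. A minimal self-contained check:

import torch
import torch.nn as nn

loss_fn = nn.CrossEntropyLoss()
logits = torch.randn(2, 5, 256, 256)        # (batch, CLASSES, H, W) raw scores
masks = torch.randint(0, 5, (2, 256, 256))  # (batch, H, W) integer labels
print(loss_fn(logits, masks))               # scalar loss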
Example No. 15
            print('new best f1 on dev set:', best_f1)
            early_stop = 0
            torch.save(model.state_dict(), model_name)
        else:
            early_stop += 1

        epoch_end = time.time()
        cost_time = epoch_end - epoch_begin
        print('train {}th epoch cost {}m {}s'.format(epoch + 1, int(cost_time / 60), int(cost_time % 60)))
        print('Training Loss after epoch no. %s is %s' % (epoch + 1, trainloss))
        print('Validation Loss after epoch no. %s is %s' % (epoch + 1, vloss))
        print()

        if early_stop > args.patience:
            print('early stop')
            break

    train_end = time.time()
    train_cost = train_end - train_begin
    hour = int(train_cost / 3600)
    minute = int((train_cost % 3600) / 60)
    second = int(train_cost % 3600 % 60)
    print()
    print()
    print('train end', '-' * 50)
    print('train total cost {}h {}m {}s'.format(hour, minute, second))
    print('-' * 50)

    model.load_state_dict(torch.load(model_name))
    test_model(test_dataloader, model, word_vocab, label_vocab, test_pred_file, score_file, eval_script, use_gpu)
Example No. 16
def main():
    ops = train_model(TRAIN_ITERS, 10000, L_RATE, BATCH_SIZE, True, 
                      LATENT_DIM, TAU, INF_LAYERS, GEN_LAYERS, CKPT_PATH)
    test_model(ops, 10000, L_RATE, BATCH_SIZE, LATENT_DIM, 
               TAU, INF_LAYERS, GEN_LAYERS, CKPT_PATH)
Example No. 17
def test(args, config):
    logger = logging.getLogger("BugLoc")
    logger.info('testing ...')
    test_model(args, config)