('relu2', nn.ReLU(inplace=True)), ('dropout2', nn.Dropout()), ('fc3', nn.Linear(1000, 102)), ('output', nn.LogSoftmax(dim=1)) ])) model.classifier = classifier # Set parameters for training model criterion = nn.NLLLoss() optimizer = optim.Adam(model.classifier.parameters(), lr=learn_rate) # Train a model with a pre-trained network and compute the accuracy model.train() f.train(epochs, trainloader, valloader, model, criterion, optimizer) # Validate the model model.eval() print("\nValidating the trained model on a different dataset. The accuracy result is below:") f.validation(testloader, model, criterion) # Save the model checkpoint = {'filepath': data_dir, 'model': models.vgg16(pretrained=True), 'classifier': classifier, 'optimizer': optimizer.state_dict(), 'state_dict': model.state_dict(), 'class_to_idx': train_data.class_to_idx, 'criterion': criterion, 'epochs': epochs} torch.save(checkpoint, 'checkpoint.pth')
# Per-epoch history buffers for the test metrics (the matching
# epoch_train_* lists are initialized earlier in the script, outside
# this fragment).
epoch_test_losses = []
epoch_test_scores = []
# One lambda weight per epoch; constant 1.0 here, indexed by epoch - 1 below.
lamdaValue = np.ones((opt.epochs))
savePath, date_method, save_model_path = save_path(opt)
writer = SummaryWriter(os.path.join(savePath, date_method, 'logfile'))

# start training
for epoch in range(1, opt.epochs + 1):
    # train, test model
    train_losses, train_Target_scores = train(
        [targetCNN, sourceImagenet, sourcePlaces], device, train_loader,
        optimizer, epoch, lamdaValue[epoch - 1], opt)
    test_total_loss, test_total_score = validation(
        [targetCNN, sourceImagenet, sourcePlaces], device, optimizer,
        valid_loader, lamdaValue[epoch - 1], opt)
    scheduler.step()

    # save results (epoch means of the per-batch values)
    epoch_train_losses.append(np.mean(train_losses))
    epoch_train_scores.append(np.mean(train_Target_scores))
    epoch_test_losses.append(np.mean(test_total_loss))
    epoch_test_scores.append(np.mean(test_total_score))

    # plot average of each epoch loss value
    writer.add_scalar('Loss/train', np.mean(train_losses), epoch)
    writer.add_scalar('Loss/test', np.mean(test_total_loss), epoch)
    writer.add_scalar('scores/train', np.mean(train_Target_scores), epoch)
    # FIX: previously logged the raw test_total_score object; log the
    # epoch mean, consistent with every other scalar above and with the
    # value stored in epoch_test_scores.
    writer.add_scalar('scores/test', np.mean(test_total_score), epoch)
def test_valid(self):
    """Check that a well-formed point set passes validation."""
    # Fixture: pairs of line-segment endpoints.
    segment_points = [
        [[1, 2], [5, 6]],
        [[10, 8], [-3, -4]],
        [[4, 5], [-2, -4]],
    ]
    result = validation((150, 50), (300, 150), segment_points, 56, 50)
    self.assertEqual(result, 'Данные валидны')
# record training process savePath, date_method, save_model_path = save_path(opt) train_logger = Logger(os.path.join(savePath, date_method, 'train.log'), ['epoch', 'loss', 'acc', 'lr']) val_logger = Logger(os.path.join(savePath, date_method, 'val.log'), ['epoch', 'loss', 'acc', 'best_acc', 'lr']) writer = SummaryWriter(os.path.join(savePath, date_method,'logfile')) # start training best_acc = 0 for epoch in range(1, opt.epochs + 1): # train, test model train_losses, train_scores, = train([StudentModel, smartModel, perturbation_model], device, train_loader, optimizer, epoch, opt) test_losses, test_scores = validation([StudentModel, smartModel, perturbation_model], device, optimizer, valid_loader, opt) scheduler.step() # plot average of each epoch loss value train_logger.log({ 'epoch': epoch, 'loss': train_losses.avg, 'acc': train_scores.avg, 'lr': optimizer.param_groups[0]['lr'] }) if best_acc < test_scores.avg: best_acc = test_scores.avg torch.save({'state_dict': studentModel.state_dict()}, os.path.join(save_model_path, 'student_best.pth')) val_logger.log({ 'epoch': epoch, 'loss': test_losses.avg,
def test_not_valid(self):
    """Check that an out-of-bounds point set is rejected by validation."""
    # Fixture: pairs of line-segment endpoints expected to fail the check.
    segment_points = [
        [[1, 2], [5, 6]],
        [[10, 8], [-3, -4]],
        [[50, 80], [90, 100]],
    ]
    result = validation((1024, 768), (600, 400), segment_points, 58, 80)
    self.assertEqual(result, 'Валидация не пройдена')