def train_itr(self, config, reporter):
    self.build_net(config)

    print('\n[Phase 3] : Training model')
    print('| Training Epochs = ' + str(self.num_epochs))
    print('| Initial Learning Rate = ' + str(self.args.lr))
    print('| Optimizer = ' + str(self.optim_type))

    elapsed_time = 0
    for epoch in range(self.start_epoch, self.start_epoch + self.num_epochs):
        start_time = time.time()

        self.train(epoch)
        self.test(epoch, reporter)

        epoch_time = time.time() - start_time
        elapsed_time += epoch_time
        print('| Elapsed time : %d:%02d:%02d' % (cf.get_hms(elapsed_time)))
    self.test_pruned_from_path()

    print('* Test results : Acc@1 = %.2f%%' % (self.best_acc))
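
All of these snippets format elapsed time through cf.get_hms, which is not shown here. A minimal sketch consistent with the '%d:%02d:%02d' format string (an assumption, not the project's actual helper):

def get_hms(seconds):
    # Split a duration in seconds into (hours, minutes, seconds) for printing.
    h = int(seconds / 3600)
    m = int((seconds % 3600) / 60)
    s = int(seconds % 60)
    return h, m, s
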
Example #2
            'acc': acc,
            'epoch': epoch,
        }
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        save_point = './checkpoint/' + args.dataset + os.sep
        if not os.path.isdir(save_point):
            os.mkdir(save_point)
        torch.save(state, save_point + file_name + '.t7')
        best_acc = acc


print('\n[Phase 3] : Training model')
print('| Training Epochs = ' + str(num_epochs))
print('| Initial Learning Rate = ' + str(args.lr))
print('| Optimizer = ' + str(optim_type))

elapsed_time = 0
for epoch in range(start_epoch, start_epoch + num_epochs):
    start_time = time.time()

    train(epoch)
    test(epoch)

    epoch_time = time.time() - start_time
    elapsed_time += epoch_time
    print('| Elapsed time : %d:%02d:%02d' % (cf.get_hms(elapsed_time)))

print('\n[Phase 4] : Testing model')
print('* Test results : Acc@1 = %.2f%%' % (best_acc))
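
The indented fragment at the top of this example is the tail of a best-accuracy checkpoint save. A minimal sketch of the full pattern, with a hypothetical helper name and the rest of the state dict reconstructed as an assumption:

import os
import torch

def save_best_checkpoint(net, acc, epoch, dataset, file_name):
    # Hypothetical helper illustrating the checkpoint pattern above (assumption).
    state = {
        'net': net,
        'acc': acc,
        'epoch': epoch,
    }
    if not os.path.isdir('checkpoint'):
        os.mkdir('checkpoint')
    save_point = './checkpoint/' + dataset + os.sep
    if not os.path.isdir(save_point):
        os.mkdir(save_point)
    torch.save(state, save_point + file_name + '.t7')
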
Example #3
    else:
        print('Error : Network should be [ResNet34]')
        sys.exit(0)

    net.init_weights()
    net.to(device)

    # Training
    print('\n[Phase 3] : Training model')
    print('| Training Epochs = ' + str(args.num_epochs))
    print('| Initial Learning Rate = ' + str(args.lr))

    optimizer = optim.SGD(net.parameters(), lr=cf.learning_rate(args.lr, 1), momentum=0.9, weight_decay=args.wd)

    elapsed_time = 0
    for epoch in range(1, args.num_epochs + 1):
        start_time = time.time()
        set_learning_rate(optimizer, cf.learning_rate(args.lr, epoch))
        train(net, trainloader, optimizer, epoch)
        test(net, testloader, epoch)

        epoch_time = time.time() - start_time
        elapsed_time += epoch_time
        print('| Elapsed time : %d:%02d:%02d' %(cf.get_hms(elapsed_time)))
        log_file.write('| Elapsed time : %d:%02d:%02d\n' %(cf.get_hms(elapsed_time)))
        log_file.flush()

    print('\n[Phase 4] : Testing model')
    print('* Test results : Acc@1 = %.2f%%' %(best_acc))
    log_file.write('* Test results : Acc@1 = %.2f%%\n' %(best_acc))
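
cf.learning_rate(args.lr, epoch) and set_learning_rate are used above but not defined in this snippet. A plausible step-decay schedule and the usual way to update an optimizer's learning rate, as a sketch under assumptions rather than the project's exact code:

def learning_rate(init, epoch):
    # Hypothetical step decay: scale the base LR down by 10x at epochs 60, 120, 160.
    if epoch > 160:
        factor = 3
    elif epoch > 120:
        factor = 2
    elif epoch > 60:
        factor = 1
    else:
        factor = 0
    return init * (0.1 ** factor)

def set_learning_rate(optimizer, lr):
    # Apply the new learning rate to every parameter group of the optimizer.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
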
Example #4
print('\n[Phase 3] : Training model')
print('| Training Epochs = ' + str(cf.num_epochs))
print('| Initial Learning Rate = ' + str(cf.lr))
print('| Optimizer = ' + str(cf.optim_type))
print('| Batch-size = ' + str(cf.batch_size))
elapsed_time = 0

for epoch in range(cf.start_epoch, cf.start_epoch+cf.num_epochs):
    start_time = time.time()
    train(epoch)
    test(epoch)
    printCurves(epoch)
    epoch_time = time.time() - start_time
    elapsed_time += epoch_time
    print('|Epoch Elapsed time : %d:%02d:%02d'  %(cf.get_hms(elapsed_time)))

with open(os.path.join(prediction_path, 'trainingLoss.txt'), "wb") as fp:
    pickle.dump(trainLoss, fp)
with open(os.path.join(prediction_path, 'trainingAccuracy.txt'), "wb") as fp:
    pickle.dump(trainAcc, fp)
with open(os.path.join(prediction_path, 'validationAccuracy.txt'), "wb") as fp:
    pickle.dump(testAcc, fp)

print('\n[Phase 4] : Evaluating model')
start_time = time.time()
final_evaluation(cf.num_epochs)
epoch_time = time.time() - start_time
print('|Final Evaluation Elapsed time : %d:%02d:%02d' % (cf.get_hms(epoch_time)))
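
The curves pickled above can be read back with pickle.load for later plotting; a brief usage sketch, assuming the same prediction_path:

import os
import pickle

with open(os.path.join(prediction_path, 'trainingLoss.txt'), "rb") as fp:
    train_loss_history = pickle.load(fp)
with open(os.path.join(prediction_path, 'validationAccuracy.txt'), "rb") as fp:
    val_acc_history = pickle.load(fp)
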

Example #5

            test_loss += loss.item()*targets.size(0)
            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += predicted.eq(targets.data).cpu().sum()
            nll_loss += nll(m(outputs), targets).item()*targets.size(0)
        # Save checkpoint when best model
        acc = 100.*correct/total
        best_test_acc = acc
        best_nll_loss = nll_loss/total
        print("\n| Best Test Acc: Epoch #%d\t\t\tLoss: %.4f NLLLoss: %.4f Acc@1: %.2f%%" %(epoch, test_loss/total, best_nll_loss, acc))
        filep.write("\n| Best Test Acc: Epoch #%d\t\t\tLoss: %.4f NLLLoss: %.4f Acc@1: %.2f%%" %(epoch, test_loss/total, best_nll_loss, acc)+'\n')

print('\n[Phase 3] : Training model')
print('| Training Epochs = ' + str(num_epochs))
print('| Initial Learning Rate = ' + str(args.lr))
print('| Optimizer = ' + str(args.optimizer))

elapsed_time = 0
for epoch in range(start_epoch, start_epoch+num_epochs):
    start_time = time.time()
    train(epoch)
    test(epoch)
    print("\n| Best Test Acc:  Acc@1: %.2f%%" %(best_test_acc))
    filep.write("\n| Best Test Acc:  Acc@1: %.2f%%" %(best_test_acc)+'\n')
    epoch_time = time.time() - start_time
    elapsed_time += epoch_time
    print('| Elapsed time : %d:%02d:%02d'  %(cf.get_hms(elapsed_time)))
    
filep.close()
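
In the test fragment above, m and nll are a LogSoftmax and an NLLLoss created elsewhere in the script; the usual pairing, shown as a sketch with mean reduction over the batch (which matches the multiplication by targets.size(0) above):

import torch.nn as nn

m = nn.LogSoftmax(dim=1)   # turn raw logits into log-probabilities
nll = nn.NLLLoss()         # mean negative log-likelihood over the batch
# nll(m(outputs), targets) is equivalent to nn.CrossEntropyLoss()(outputs, targets)
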
    
Example #6
net = checkpoint['net']

def visActivations():
    cnn_layer = cf.viz_filter
    filter_pos = cf.viz_filter

    for i, path in enumerate(paths):
        for j in range(num_classes):
            for k in cnn_layer:
                grad_cam = GradCam(net, target_layer=k)
                original_image = Image.open(path).convert('RGB')
                original_image = original_image.resize((resize, resize), Image.LANCZOS)
                prep_img = preprocess_image(original_image)
                cam = grad_cam.generate_cam(prep_img, j)
                save_class_activation_images(plots_path, original_image, cam, 'testImage_layer_'+str(k)+'_class_'+str(j)+'_actual_'+str(labels[i])+'_'+str(i))

    # Fully connected layer is not needed

    pretrained_model = net.features
    for i in cnn_layer:
        layer_vis = CNNLayerVisualization(pretrained_model, i, filter_pos)
        layer_vis.visualise_layer_without_hooks(plots_path)

elapsed_time = 0
start_time = time.time()
visActivations()
epoch_time = time.time() - start_time
print('|Visualizations generation time : %d:%02d:%02d' % (cf.get_hms(epoch_time)))
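
preprocess_image is assumed in this example; for Grad-CAM pipelines it typically turns the PIL image into a normalized (1, C, H, W) tensor. A sketch, with ImageNet statistics as an assumption:

from torchvision import transforms

def preprocess_image(pil_img):
    # Hypothetical helper: PIL image -> normalized 4-D tensor ready for the network.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    return transform(pil_img).unsqueeze(0)
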


Example #7
    def train(self):
        elapsed_time = 0

        for self.curr_epoch in range(1, self.num_epochs + 1):
            self.model.train()
            self.model.training = True
            self.optimizer = optim.SGD(self.model.parameters(),
                                       lr=cf.learning_rate(
                                           self.learning_rate,
                                           self.curr_epoch),
                                       momentum=0.9,
                                       weight_decay=5e-4)
            train_loss = 0
            train_correct = 0
            total = 0
            time_start = time.time()

            print('\n=> Training Epoch #%d, LR=%.4f' %
                  (self.curr_epoch,
                   cf.learning_rate(self.learning_rate, self.curr_epoch)))
            for self.curr_batch, (x, y) in enumerate(self.train_loader):
                x, y = x.to(self.device), y.to(self.device)
                # perturb data during noisy training
                if self.training_type == 'noisy':
                    x = self.adversary.perturb(x, self.device, self.variance)
                x, y = Variable(x), Variable(y)
                self.optimizer.zero_grad()
                outputs = self.model(x)
                total += y.size(0)
                loss = self.criterion(outputs, y)
                train_loss += loss
                _, pred = torch.max(outputs.data, 1)
                train_correct += pred.eq(y.data).cpu().sum()
                loss.backward()
                self.optimizer.step()

                # add training on adversarial perturbation during adv training
                if self.training_type == 'adversarial':
                    delta = self.adversary.get_adversarial_examples(
                        self.model, x, y).to(self.device)
                    x, y = x.to(self.device), y.to(self.device)
                    x, y = Variable(x), Variable(y)
                    outcome = self.model(x + delta)

                    _, pred = torch.max(outcome.data, 1)
                    train_correct += pred.eq(y.data).cpu().sum()
                    total += y.size(0)
                    loss = self.criterion(outcome, y)
                    train_loss += loss
                    self.optimizer.zero_grad()
                    loss.backward()
                    self.optimizer.step()

                sys.stdout.write('\r')
                sys.stdout.write(
                    '| Epoch [%3d/%3d] Iter[%3d/%3d]\t\tLoss: %.4f Acc@1: %.3f%%'
                    % (self.curr_epoch, self.num_epochs, self.curr_batch,
                       (len(self.train_dataset) // self.train_batch_size) + 1,
                       train_loss.item(), 100. * train_correct / total))
                sys.stdout.flush()

            train_acc = 100. * train_correct / total

            with torch.no_grad():
                # testing
                self.model.eval()
                self.training = False
                test_loss = 0.
                test_correct = 0
                total = 0
                for self.curr_batch, (x, y) in enumerate(self.test_loader):
                    x_var, y_var = Variable(x), Variable(y)
                    x_var, y_var = x_var.to(self.device), y_var.to(self.device)
                    outcome = self.model(x_var)
                    loss = self.criterion(outcome, y_var)
                    test_loss += loss
                    _, pred = torch.max(outcome.data, 1)
                    test_correct += pred.eq(y_var.data).cpu().sum()
                    total += y_var.size(0)

                test_acc = 100. * test_correct / total
                print(
                    "\n| Validation Epoch #%d\t\t\tLoss: %.4f Acc@1: %.2f%%" %
                    (self.curr_epoch, test_loss.item(), test_acc))

            time_epoch = time.time() - time_start
            elapsed_time += time_epoch
            print('| Elapsed time : %d:%02d:%02d' % (cf.get_hms(elapsed_time)))
            self.write_tb(train_loss.item(), train_acc, test_loss.item(),
                          test_acc)
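
The loop above assumes an adversary object with perturb(x, device, variance) and get_adversarial_examples(model, x, y) methods. A minimal FGSM-style sketch of such an interface, offered as an assumption rather than the original class:

import torch
import torch.nn.functional as F

class SimpleAdversary:
    """Hypothetical adversary matching the interface used above (assumption)."""

    def __init__(self, epsilon=8 / 255):
        self.epsilon = epsilon  # perturbation budget, chosen here as an example

    def perturb(self, x, device, variance):
        # Gaussian noise injection for 'noisy' training.
        return x + torch.randn_like(x, device=device) * variance

    def get_adversarial_examples(self, model, x, y):
        # One-step FGSM: sign of the input gradient, scaled by epsilon.
        x_adv = x.clone().detach().requires_grad_(True)
        loss = F.cross_entropy(model(x_adv), y)
        loss.backward()
        return self.epsilon * x_adv.grad.sign()
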
Example #8
            }
            if not os.path.isdir(get_checkpoint_path(args)):
                os.mkdir(get_checkpoint_path(args))
            save_point = "./checkpoint/" + args.dataset + os.sep + args.id + os.sep

            if not os.path.isdir(save_point):
                os.mkdir(save_point)
            torch.save(state, get_checkpoint_file(args, file_name))
            best_acc = acc


logbook.write_message("Phase 3 : Training model")
logbook.write_message("| Training Epochs = " + str(num_epochs))
logbook.write_message("| Initial Learning Rate = " + str(args.lr))
logbook.write_message("| Optimizer = " + str(optim_type))

elapsed_time = 0
for epoch in range(start_epoch, start_epoch + num_epochs):
    start_time = time.time()

    train(epoch)
    test(epoch)

    epoch_time = time.time() - start_time
    elapsed_time += epoch_time
    logbook.write_message("| Elapsed time : %d:%02d:%02d" % (cf.get_hms(elapsed_time)))

logbook.write_message("\n[Phase 4] : Testing model")
logbook.write_message("* Test results : Acc@1 = %.2f%%" % (best_acc))

Example #9
def visActivations():
    cnn_layer = cf.viz_filter
    filter_pos = cf.viz_filter

    for i, path in enumerate(paths):
        for j in range(num_classes):
            for k in cnn_layer:
                grad_cam = GradCam(net, target_layer=k)
                original_image = Image.open(path).convert('RGB')
                original_image = original_image.resize((resize, resize),
                                                       Image.LANCZOS)
                prep_img = preprocess_image(original_image)
                cam = grad_cam.generate_cam(plots_path, prep_img, j)
                save_class_activation_images(
                    plots_path, original_image, cam,
                    'testImage_layer_' + str(k) + '_class_' + str(j) +
                    '_actual_' + str(labels[i]) + '_' + str(i))

    pretrained_model = net.layers
    for i in cnn_layer:
        layer_vis = BCNNLayerVisualization(pretrained_model, i, filter_pos)
        layer_vis.visualise_layer_without_hooks(plots_path)


elapsed_time = 0
start_time = time.time()
visActivations()
epoch_time = time.time() - start_time
print('|Visualizations generation time : %d:%02d:%02d' %
      (cf.get_hms(epoch_time)))