Example #1
def main():
    global args, best_prec1
    args = parser.parse_args()

    conf_name = args.data
    conf_name += '_ortho' if args.ortho else '' 
    conf_name += ('_' + args.net_type) if args.net_type != 'default' else ''
    
    args.checkpoint = os.path.join(args.checkpoint, conf_name, 'dynamic_checkpoint')

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    model = models.DynamicNet(extractor_type=args.net_type).cuda()
    weight_generator = models.WeightGenerator().cuda()

    print('==> Reading from model checkpoint..')
    assert os.path.isfile(args.model), 'Error: no model checkpoint found!'
    checkpoint = torch.load(args.model)
    model.load_state_dict(checkpoint['state_dict'])
    print("=> loaded model checkpoint '{}' (epoch {})"
            .format(args.model, checkpoint['epoch']))
    cudnn.benchmark = True

    normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                     std=[0.5, 0.5, 0.5])

    train_dataset = loader.EpisodicLoader(
        args.data,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
    )

    novel_dataset = loader.ImageLoader(
        args.data, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]),
        train=True, num_classes=200, 
        num_train_sample=args.num_sample, 
        novel_only=True)

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        loader.ImageLoader(args.data, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]), num_classes=200, novel_only=args.test_novel_only),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
 
    novel_loader = torch.utils.data.DataLoader(
        novel_dataset, batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True) 


    print('    Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(weight_generator.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=4, gamma=0.94)

    novel_weights_validation = get_novel_weights(novel_loader, model, weight_generator)

    title = 'Episodic training of weight generator'
    if args.resume:
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(args.resume), 'Error: no checkpoint found!'
        checkpoint = torch.load(args.resume)
        args.start_epoch = checkpoint['epoch']
        best_prec1 = checkpoint['best_prec1']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})"
                .format(args.resume, checkpoint['epoch']))
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])

    if args.evaluate:
        validate(val_loader, model, novel_weights_validation, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        lr = optimizer.param_groups[0]['lr']
        print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, lr))
        # train for one epoch
        train_loss, train_acc = train(train_loader, model, weight_generator, criterion, optimizer, epoch)
        # step the schedule after the epoch's optimizer updates (PyTorch >= 1.1 ordering)
        scheduler.step()

        # evaluate on validation set
        test_loss, test_acc = validate(val_loader, model, novel_weights_validation, criterion)

        # append logger file
        logger.append([lr, train_loss, test_loss, train_acc, test_acc])

        # remember best prec@1 and save checkpoint
        is_best = test_acc > best_prec1
        best_prec1 = max(test_acc, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer' : optimizer.state_dict(),
        }, is_best, checkpoint=args.checkpoint)

    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))

    print('Best acc:')
    print(best_prec1)
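The main() snippets in this listing all reference a module-level parser and best_prec1 that the page omits, plus project-local models and loader modules. Below is a minimal sketch of that shared preamble; the module layout is inferred from the calls above, and every flag default is illustrative (per-example flags such as --ortho, --net-type, --aug, --dataset and --foptions would be declared alongside).

import argparse
import os

import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms

import loader  # project-local: ImageLoader, EpisodicLoader
import models  # project-local: Net, DynamicNet, WeightGenerator
from utils import Logger, mkdir_p, save_checkpoint, savefig  # assumed helpers

parser = argparse.ArgumentParser(description='few-shot imprinting experiments')
parser.add_argument('--data', default='CUB_200_2011', help='dataset root')
parser.add_argument('--checkpoint', default='checkpoints', help='output dir')
parser.add_argument('--model', default='', help='pretrained model checkpoint')
parser.add_argument('--resume', default='', help='checkpoint to resume from')
parser.add_argument('--evaluate', action='store_true')
parser.add_argument('--test-novel-only', action='store_true')
parser.add_argument('--num-sample', default=1, type=int)
parser.add_argument('--batch-size', default=64, type=int)
parser.add_argument('--workers', default=4, type=int)
parser.add_argument('--epochs', default=90, type=int)
parser.add_argument('--start-epoch', default=0, type=int)
parser.add_argument('--lr', default=0.001, type=float)
parser.add_argument('--momentum', default=0.9, type=float)
parser.add_argument('--weight-decay', default=1e-4, type=float)

best_prec1 = 0  # updated inside main() through the global declaration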
Example #2
def main():
    global args, best_prec1
    args = parser.parse_args()

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    model = models.Net(num_classes=200, norm=False, scale=False).cuda()

    cudnn.benchmark = True

    # Data loading code
    normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    train_dataset = loader.ImageLoader(args.data,
                                       transforms.Compose([
                                           transforms.Resize(256),
                                           transforms.RandomCrop(224),
                                           transforms.RandomHorizontalFlip(),
                                           transforms.ToTensor(),
                                           normalize,
                                       ]),
                                       train=True,
                                       num_classes=200,
                                       num_train_sample=args.num_sample)

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        sampler=train_dataset.get_balanced_sampler(),
        num_workers=args.workers,
        pin_memory=True)

    val_loader = torch.utils.data.DataLoader(loader.ImageLoader(
        args.data,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]),
        num_classes=200,
        novel_only=args.test_novel_only),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    extractor_params = list(map(id, model.extractor.parameters()))
    classifier_params = filter(lambda p: id(p) not in extractor_params,
                               model.parameters())

    optimizer = torch.optim.SGD([{
        'params': model.extractor.parameters()
    }, {
        'params': classifier_params,
        'lr': args.lr * 10
    }],
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=4,
                                                gamma=0.94)

    title = 'AllJoint'
    if args.resume:
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint found!'
        checkpoint = torch.load(args.resume)
        args.start_epoch = checkpoint['epoch']
        best_prec1 = checkpoint['best_prec1']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            args.resume, checkpoint['epoch']))
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
            'Valid Acc.'
        ])

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        lr = optimizer.param_groups[1]['lr']
        print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, lr))
        # train for one epoch
        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, epoch)
        # step the schedule after the epoch's optimizer updates (PyTorch >= 1.1 ordering)
        scheduler.step()

        # evaluate on validation set
        test_loss, test_acc = validate(val_loader, model, criterion)

        # append logger file
        logger.append([lr, train_loss, test_loss, train_acc, test_acc])

        # remember best prec@1 and save checkpoint
        is_best = test_acc > best_prec1
        best_prec1 = max(test_acc, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint=args.checkpoint)

    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))

    print('Best acc:')
    print(best_prec1)
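The optimizer in Example #2 is the detail worth isolating: it builds two parameter groups so the freshly initialized classifier trains at ten times the pretrained extractor's learning rate, which is also why the epoch loop reads optimizer.param_groups[1]['lr'] when logging. A standalone illustration with toy modules (the Linear layers below are stand-ins, not the repository's networks):

import torch
import torch.nn as nn

# Toy stand-ins for the pretrained extractor and the new classifier.
extractor = nn.Linear(8, 16)
classifier = nn.Linear(16, 4)
base_lr = 0.001

optimizer = torch.optim.SGD(
    [
        {'params': extractor.parameters()},                      # inherits base lr
        {'params': classifier.parameters(), 'lr': base_lr * 10}  # 10x lr
    ],
    lr=base_lr, momentum=0.9, weight_decay=1e-4)

for i, group in enumerate(optimizer.param_groups):
    print('group %d: lr = %g' % (i, group['lr']))
# group 0: lr = 0.001
# group 1: lr = 0.01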
Example #3
def main():
    global args, best_prec1
    args = parser.parse_args()

    conf_name = args.data
    conf_name += '_ortho' if args.ortho else '' 
    conf_name += ('_pre_' + args.net_type) if args.net_type != 'default' else ''
    
    args.checkpoint = os.path.join(args.checkpoint, conf_name, 'imprint_checkpoint')
    print(args.data)
    print(args.checkpoint)

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    model = models.Net(extractor_type=args.net_type).cuda()


    print('==> Reading from model checkpoint..')
    assert os.path.isfile(args.model), 'Error: no model checkpoint found!'
    checkpoint = torch.load(args.model)
    args.start_epoch = checkpoint['epoch']
    best_prec1 = checkpoint['best_prec1']
    model.load_state_dict(checkpoint['state_dict'])
    print("=> loaded model checkpoint '{}' (epoch {})"
            .format(args.model, checkpoint['epoch']))
    cudnn.benchmark = True


    # Optional sanity check: how close to orthogonal are the classifier rows?
    # import numpy as np
    # import matplotlib.pyplot as plt
    #
    # W = model.classifier.fc.weight.data
    # print(W.size())
    # d_list = []
    # for i in range(W.size(0)):
    #     for j in range(i + 1, W.size(0)):
    #         d_list.append(torch.dot(W[i], W[j]).item())
    # d_list = np.array(d_list)
    # np.save('ortho_dotprod_hist.npy', d_list)
    # plt.hist(d_list, bins=200, range=(-0.25, 0.25), density=True)
    # plt.title(r'Dot product histogram $\sigma$ = {:.2f}'.format(np.std(d_list)))
    # plt.show()

    # Data loading code
    normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                     std=[0.5, 0.5, 0.5])

    novel_transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ]) if not args.aug else transforms.Compose([
        transforms.Resize(256),
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])

    novel_dataset = loader.ImageLoader(
        args.data,
        novel_transforms,
        train=True, num_classes=200, 
        num_train_sample=args.num_sample, 
        novel_only=True, aug=args.aug)

    novel_loader = torch.utils.data.DataLoader(
        novel_dataset, batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        loader.ImageLoader(args.data, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]), num_classes=200, novel_only=args.test_novel_only),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    imprint(novel_loader, model)
    test_acc = validate(val_loader, model)

    save_checkpoint({
            'state_dict': model.state_dict(),
            'best_prec1': test_acc,
        }, checkpoint=args.checkpoint)
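The imprint(novel_loader, model) call is the step these scripts are named after: in the imprinted-weights scheme (Qi et al., CVPR 2018), the classifier weight for each novel class is set to the L2-normalized mean embedding of its few support images. A minimal sketch, assuming model.extract(x) returns feature vectors and that the novel labels in the loader already index the right rows of the classifier's weight matrix; the repository's actual imprint() may differ in detail.

import torch
import torch.nn.functional as F

def imprint_sketch(novel_loader, model):
    """Set each novel class weight to the normalized mean of its embeddings.

    Assumes model.extract(x) yields feature vectors and that target labels
    index rows of model.classifier.fc.weight directly (both assumptions).
    """
    model.eval()
    feats, labels = [], []
    with torch.no_grad():
        for images, target in novel_loader:
            f = model.extract(images.cuda())
            feats.append(F.normalize(f, p=2, dim=1).cpu())
            labels.append(target)
    feats = torch.cat(feats)
    labels = torch.cat(labels)

    weight = model.classifier.fc.weight.data
    for c in labels.unique():
        proto = feats[labels == c].mean(dim=0)  # class prototype
        weight[int(c)] = F.normalize(proto, p=2, dim=0).to(weight.device)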
Example #4
def main():
    global args, best_prec1
    args = parser.parse_args()

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    model = models.Net().cuda()

    print('==> Reading from model checkpoint..')
    assert os.path.isfile(
        args.model), 'Error: no model checkpoint found!'
    checkpoint = torch.load(args.model)
    args.start_epoch = checkpoint['epoch']
    best_prec1 = checkpoint['best_prec1']
    model.load_state_dict(checkpoint['state_dict'])
    print("=> loaded model checkpoint '{}' (epoch {})".format(
        args.model, checkpoint['epoch']))
    cudnn.benchmark = True

    # Data loading code
    normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    novel_transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(), normalize
    ]) if not args.aug else transforms.Compose([
        transforms.Resize(256),
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])

    novel_dataset = loader.ImageLoader(args.data,
                                       novel_transforms,
                                       train=True,
                                       num_classes=200,
                                       num_train_sample=args.num_sample,
                                       novel_only=True,
                                       aug=args.aug)

    novel_loader = torch.utils.data.DataLoader(novel_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(loader.ImageLoader(
        args.data,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]),
        num_classes=200,
        novel_only=args.test_novel_only),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    imprint(novel_loader, model)
    test_acc = validate(val_loader, model)

    save_checkpoint(
        {
            'state_dict': model.state_dict(),
            'best_prec1': test_acc,
        },
        checkpoint=args.checkpoint)

    print('Test acc:')
    print(test_acc)
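Both imprinting-only examples end with test_acc = validate(val_loader, model), a single-accuracy variant of the validate() that the training examples call with a criterion. A simplified top-1 stand-in for that call (hypothetical; the real function presumably also reports loss and prec@5):

import torch

def validate_sketch(val_loader, model):
    """Return top-1 accuracy (%) over a loader; a simplified stand-in."""
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for images, target in val_loader:
            output = model(images.cuda())
            pred = output.argmax(dim=1).cpu()
            correct += (pred == target).sum().item()
            total += target.size(0)
    return 100.0 * correct / total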
Example #5
def main():

    global args, best_prec1
    args = parser.parse_args()
    print(args)

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # keep a copy of the launch script next to the checkpoints
    fileSh = 'script/imprint_ft.sh'
    print('*' * 10, 'Save .sh in', args.checkpoint, '*' * 10)
    shutil.copy(fileSh, args.checkpoint)
    torch.cuda.synchronize()

    model = models.Net().to(device)

    print('==> Reading from model checkpoint..')
    assert os.path.isfile(
        args.model), 'Error: no model checkpoint found!'
    checkpoint = torch.load(args.model)
    model.load_state_dict(checkpoint['state_dict'])
    print("=> loaded model checkpoint '{}' (epoch {})".format(
        args.model, checkpoint['epoch']))
    cudnn.benchmark = True
    # model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
    # print(range(torch.cuda.device_count()))
    # model = model.module

    # Data loading code - Normalize to (-1,1) = [(0,1)-0.5 / 0.5]
    normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    novel_dataset = loader.ImageLoader(args.data,
                                       transforms.Compose([
                                           transforms.Resize(256),
                                           transforms.CenterCrop(224),
                                           transforms.ToTensor(),
                                           normalize,
                                       ]),
                                       train=True,
                                       num_classes=200,
                                       num_train_sample=args.num_sample,
                                       novel_only=True)

    novel_loader = torch.utils.data.DataLoader(novel_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=args.workers,
                                               pin_memory=True)

    train_dataset = loader.ImageLoader(args.data,
                                       transforms.Compose([
                                           transforms.Resize(256),
                                           transforms.RandomCrop(224),
                                           transforms.RandomHorizontalFlip(),
                                           transforms.ToTensor(),
                                           normalize,
                                       ]),
                                       train=True,
                                       num_classes=200,
                                       num_train_sample=args.num_sample)

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        sampler=train_dataset.get_balanced_sampler(),
        num_workers=args.workers,
        pin_memory=True)

    val_loader = torch.utils.data.DataLoader(loader.ImageLoader(
        args.data,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]),
        num_classes=200,
        novel_only=args.test_novel_only),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # imprint weights first
    imprint(novel_loader, model)

    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().to(device)

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=4,
                                                gamma=0.94)

    title = 'Imprinting + FT'
    if args.resume:
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint found!'
        checkpoint = torch.load(args.resume)
        args.start_epoch = checkpoint['epoch']
        best_prec1 = checkpoint['best_prec1']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            args.resume, checkpoint['epoch']))
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
            'Valid Acc.'
        ])

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        lr = optimizer.param_groups[0]['lr']
        print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, lr))
        # train for one epoch
        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, epoch)
        # step the schedule after the epoch's optimizer updates (PyTorch >= 1.1 ordering)
        scheduler.step()

        # evaluate on validation set
        test_loss, test_acc = validate(val_loader, model, criterion)

        # append logger file
        logger.append([lr, train_loss, test_loss, train_acc, test_acc])

        # remember best prec@1 and save checkpoint
        is_best = test_acc > best_prec1
        best_prec1 = max(test_acc, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint=args.checkpoint)

    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))

    print('Best acc:')
    print(best_prec1)
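Examples #2 and #5 draw training batches through train_dataset.get_balanced_sampler() rather than shuffle=True, and the reason is the data imbalance: novel classes contribute only num_sample images each, so uniform shuffling would let base classes dominate every batch. A plausible reconstruction of such a sampler using torch's WeightedRandomSampler (hypothetical; it assumes access to the dataset's per-item class labels):

import torch
from torch.utils.data import WeightedRandomSampler

def balanced_sampler(labels):
    """Oversample rare classes so each class is drawn roughly uniformly."""
    labels = torch.as_tensor(labels, dtype=torch.long)
    class_counts = torch.bincount(labels).float()
    item_weights = 1.0 / class_counts[labels]  # rarer class -> higher weight
    return WeightedRandomSampler(item_weights,
                                 num_samples=len(labels),
                                 replacement=True)

# usage (dataset.labels is an assumed attribute):
# loader = torch.utils.data.DataLoader(dataset, batch_size=64,
#                                      sampler=balanced_sampler(dataset.labels))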
Example #6
def main():
    global args, best_prec1
    args = parser.parse_args()

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    print('dataset:', args.dataset)
    if args.dataset == 'EPIC':
        with open(args.foptions, 'rb') as handle:
            options = pickle.load(handle)
            if options['root'][0] == '.':
                # update relative path
                options['root'] = 'basemodel' + options['root'][1:]
            options['num_crops'] = 1
    else:
        options = None

    model = models.Net(options=options).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    extractor_params = list(map(id, model.extractor.parameters()))
    classifier_params = filter(lambda p: id(p) not in extractor_params,
                               model.parameters())

    optimizer = torch.optim.SGD([{
        'params': model.extractor.parameters()
    }, {
        'params': classifier_params,
        'lr': args.lr * 10
    }],
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=30,
                                                gamma=0.1)
    # optionally resume from a checkpoint
    # title = 'CUB'
    title = args.dataset

    if args.resume:
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint found!'
        checkpoint = torch.load(args.resume)
        args.start_epoch = checkpoint['epoch']
        best_prec1 = checkpoint['best_prec1']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            args.resume, checkpoint['epoch']))
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
            'Valid Acc.'
        ])

    cudnn.benchmark = True
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    # Data loading code
    normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    if args.dataset == 'CUB':
        print('Using dataset CUB')
        train_dataset = loader.ImageLoader(
            args.data,
            transforms.Compose([
                transforms.RandomResizedCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]),
            train=True)

        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=args.batch_size,
                                                   shuffle=True,
                                                   num_workers=args.workers,
                                                   pin_memory=True)

        val_loader = torch.utils.data.DataLoader(loader.ImageLoader(
            args.data,
            transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                normalize,
            ])),
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=args.workers,
                                                 pin_memory=True)

    elif args.dataset == 'EPIC':
        print('Using dataset EPIC')
        train_dataset, val_dataset, train_loader, val_loader = get_datasets_and_dataloaders(
            options, device=options['device'])

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    print('# train_loader:', len(train_loader))
    print('# val_loader:', len(val_loader))

    print('Training from epoch {} to {}'.format(args.start_epoch, args.epochs))

    for epoch in range(args.start_epoch, args.epochs):
        lr = optimizer.param_groups[1]['lr']
        print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, lr))
        # train for one epoch
        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, epoch)
        # step the schedule after the epoch's optimizer updates (PyTorch >= 1.1 ordering)
        scheduler.step()

        # evaluate on validation set
        test_loss, test_acc = validate(val_loader, model, criterion)

        # append logger file
        logger.append([lr, train_loss, test_loss, train_acc, test_acc])

        # remember best prec@1 and save checkpoint
        is_best = test_acc > best_prec1
        best_prec1 = max(test_acc, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint=args.checkpoint)

    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))

    print('Best acc:')
    print(best_prec1)
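Finally, the checkpoint plumbing shared by every example (mkdir_p, save_checkpoint, Logger, savefig) follows the pattern of the widely copied pytorch-classification training utilities. Minimal sketches of the two simplest helpers as they are used above (the originals may differ in detail):

import errno
import os
import shutil

import torch

def mkdir_p(path):
    """mkdir -p: create a directory tree, tolerating one that already exists."""
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise

def save_checkpoint(state, is_best=False, checkpoint='checkpoint',
                    filename='checkpoint.pth.tar'):
    """Serialize training state; keep a separate copy of the best epoch."""
    filepath = os.path.join(checkpoint, filename)
    torch.save(state, filepath)
    if is_best:
        shutil.copyfile(filepath,
                        os.path.join(checkpoint, 'model_best.pth.tar'))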