Example #1
def main():
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomSizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
                                               batch_size=args.train_batch,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.test_batch,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    elif args.arch.startswith('resnext'):
        model = models.__dict__[args.arch](
            baseWidth=args.base_width,
            cardinality=args.cardinality,
        )
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

    if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()

    cudnn.benchmark = True
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    # Resume
    title = 'ImageNet-' + args.arch
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint file found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
            'Valid Acc.'
        ])

    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(val_loader, model, criterion, start_epoch,
                                   use_cuda)
        print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
        return

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        print('\nEpoch: [%d | %d] LR: %f' %
              (epoch + 1, args.epochs, state['lr']))

        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, epoch, use_cuda)
        test_loss, test_acc = test(val_loader, model, criterion, epoch,
                                   use_cuda)

        # append logger file
        logger.append(
            [state['lr'], train_loss, test_loss, train_acc, test_acc])

        # save model
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint=args.checkpoint)

    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))

    print('Best acc:')
    print(best_acc)
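
A note that applies to most snippets on this page: transforms.RandomSizedCrop and transforms.Scale are old torchvision names that were later deprecated and removed; current torchvision spells them RandomResizedCrop and Resize. A minimal modern equivalent of the two pipelines above:

from torchvision import transforms

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
train_tf = transforms.Compose([
    transforms.RandomResizedCrop(224),  # replaces RandomSizedCrop
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    normalize,
])
val_tf = transforms.Compose([
    transforms.Resize(256),             # replaces Scale
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])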
Example #2
def main():
    global args, best_prec1
    args = parser.parse_args()

    args.distributed = args.world_size > 1

    if args.distributed:
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size)

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

    if not args.distributed:
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    else:
        model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(model)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomSizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=(train_sampler is None),
                                               num_workers=args.workers,
                                               pin_memory=False,
                                               sampler=train_sampler)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=False)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best)
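
The key detail in the distributed branch above is train_sampler.set_epoch(epoch): DistributedSampler shards the dataset across processes, and set_epoch reseeds its shuffle so each epoch sees a different ordering. A self-contained sketch of that pattern, assuming a modern torchrun/env:// launch rather than the explicit world_size/dist_url flags this older snippet uses:

import torch
import torch.distributed as dist
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.distributed import DistributedSampler

dist.init_process_group(backend='nccl', init_method='env://')  # assumed launcher
dataset = TensorDataset(torch.randn(256, 3, 32, 32),
                        torch.randint(0, 10, (256,)))
sampler = DistributedSampler(dataset)  # shards the dataset across ranks
loader = DataLoader(dataset, batch_size=32, sampler=sampler)  # no shuffle= with a sampler
for epoch in range(3):
    sampler.set_epoch(epoch)  # different shuffle every epoch
    for images, labels in loader:
        pass  # training step goes here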
Example #3
#val_idx = np.logical_and(coin_toss >= 0.6, coin_toss < 0.8)
#test_idx = (coin_toss >= 0.8)

np.random.seed(224610)
train_idx = (coin_toss < 0.8)
val_idx = (coin_toss >= 0.8)

#batch_size=512 # for Alexnet
batch_size = 32  # For vgg11
#batch_size=2 # For resnet18

train_style_loader = StyleLoader(
    img_dir,
    train[train_idx],
    transforms.Compose([
        transforms.RandomSizedCrop(224),
        transforms.Lambda(
            lambda image:  # Random transforms
            ImageEnhance.Contrast(image).enhance(np.random.random())),
        transforms.Lambda(lambda image: ImageEnhance.Sharpness(image).enhance(
            np.random.random())),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ]))

train_loader = torch.utils.data.DataLoader(train_style_loader,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           num_workers=5,
                                           pin_memory=False)
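
The two ImageEnhance lambdas above hand-roll augmentations that torchvision now ships as built-ins. A sketch with ColorJitter and RandomAdjustSharpness (torchvision >= 0.9); note the factor ranges are not a drop-in match for the enhance(np.random.random()) calls above:

from torchvision import transforms

augment = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.ColorJitter(contrast=0.5),  # contrast factor drawn from [0.5, 1.5]
    transforms.RandomAdjustSharpness(sharpness_factor=0.5),  # applied with p=0.5
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])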
Example #4
import os
import time

import torch
from torchvision import models, transforms
from torch import optim, nn
from torch.autograd import Variable
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader

# define image transforms to do data augmentation
data_transforms = {
    'train':
    transforms.Compose([
        transforms.RandomSizedCrop(299),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]),
    'val':
    transforms.Compose([
        transforms.Scale(320),
        transforms.CenterCrop(299),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
}

# define data folder using ImageFolder to get images and classes from folder
root = '/media/sherlock/Files/kaggle_dog_vs_cat/'
data_folder = {
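The snippet is cut off at data_folder = {; a plausible continuation, with hypothetical subfolder names, pairs each split with its transform:

# Hypothetical continuation -- the actual folder layout is not shown above.
data_folder = {
    'train': ImageFolder(os.path.join(root, 'data/train'),
                         data_transforms['train']),
    'val': ImageFolder(os.path.join(root, 'data/val'),
                       data_transforms['val'])
}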
Example #5
def main():
    global args, best_prec1
    args = parser.parse_args()

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        #        if args.arch == 'resnet50':
        #            import resnet_model
        #            model = resnet_model.resnet50_new(pretrained=True)
        #            print('save resnet50 to resnet50.weights')
        #            model.saveas_darknet_weights('resnet50.weights')
        if args.arch == 'resnet50-pytorch':
            model = models.resnet50(pretrained=True)
        elif args.arch == 'resnet50-darknet':
            from darknet import Darknet
            model = Darknet('cfg/resnet50.cfg')
            print('load weights from resnet50.weights')
            model.load_weights('resnet50.weights')
        elif args.arch == 'resnet50-caffe2darknet':
            from darknet import Darknet
            model = Darknet('resnet50-caffe2darknet.cfg')
            print('load weights from resnet50-caffe2darknet.weights')
            model.load_weights('resnet50-caffe2darknet.weights')
        elif args.arch == 'vgg16-pytorch2darknet':
            from darknet import Darknet
            model = Darknet('vgg16-pytorch2darknet.cfg')
            print('load weights from vgg16-pytorch2darknet.weights')
            model.load_weights('vgg16-pytorch2darknet.weights')
        elif args.arch == 'resnet50-pytorch2caffe':
            from caffenet import CaffeNet
            model = CaffeNet('resnet50-pytorch2caffe.prototxt')
            print('load weights resnet50-pytorch2caffe.caffemodel')
            model.load_weights('resnet50-pytorch2caffe.caffemodel')
        elif args.arch == 'resnet50-pytorch2caffe.nobn':
            from caffenet import CaffeNet
            model = CaffeNet('resnet50-pytorch2caffe.nobn.prototxt')
            print('load weights resnet50-pytorch2caffe.nobn.caffemodel')
            model.load_weights('resnet50-pytorch2caffe.nobn.caffemodel')
        elif args.arch == 'resnet50-darknet2caffe':
            from caffenet import CaffeNet
            model = CaffeNet('resnet50-darknet2caffe.prototxt')
            print('load weights resnet50-darknet2caffe.caffemodel')
            model.load_weights('resnet50-darknet2caffe.caffemodel')
        elif args.arch == 'resnet50-kaiming':
            from caffenet import CaffeNet
            model = CaffeNet('ResNet-50-deploy.prototxt')
            print('load weights from ResNet-50-model.caffemodel')
            model.load_weights('ResNet-50-model.caffemodel')
        elif args.arch == 'resnet50-kaiming-dk':
            from darknet import Darknet
            model = Darknet('ResNet-50-model.cfg')
            print('load weights from ResNet-50-model.weights')
            model.load_weights('ResNet-50-model.weights')
        elif args.arch == 'resnet18-caffe':
            from caffenet import CaffeNet
            model = CaffeNet('cfg/resnet-18.prototxt')
            print('load weights from resnet-18.caffemodel')
            model.load_weights('resnet-18.caffemodel')
        elif args.arch == 'resnet18-darknet':
            from darknet import Darknet
            model = Darknet('resnet-18.cfg')
            print('load weights from resnet-18.weights')
            model.load_weights('resnet-18.weights')
        elif args.arch == 'resnet50-test':
            from darknet import Darknet
            model = Darknet('test/ResNet-50-model.cfg')
            print('load weights from test/ResNet-50-model.weights')
            model.load_weights('test/ResNet-50-model.weights')
        else:
            model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        if args.arch.startswith('mobilenet'):
            model = Net()
            print(model)
        else:
            model = models.__dict__[args.arch]()

    if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
        if args.arch != 'vgg16-pytorch2darknet':
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    if args.arch == 'resnet50-test' or args.arch == 'resnet50-kaiming' or args.arch == 'resnet50-kaiming-dk':
        normalize = transforms.Normalize(mean=[0.0, 0.0, 0.0],
                                         std=[1.0, 1.0, 1.0])
    elif args.arch == 'resnet18-darknet' or args.arch == 'resnet18-caffe':
        normalize = transforms.Normalize(
            mean=[104 / 255.0, 117 / 255.0, 123 / 255.0], std=[1.0, 1.0, 1.0])
    else:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])

    train_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomSizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best)
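
The per-arch normalize chain above grows with every new conversion format. One idiomatic alternative is a lookup table with a default, sketched here with the same statistics and arch keys as the snippet:

from torchvision import transforms

NORM_STATS = {  # arch name -> (mean, std)
    'resnet50-test': ([0.0, 0.0, 0.0], [1.0, 1.0, 1.0]),
    'resnet50-kaiming': ([0.0, 0.0, 0.0], [1.0, 1.0, 1.0]),
    'resnet50-kaiming-dk': ([0.0, 0.0, 0.0], [1.0, 1.0, 1.0]),
    'resnet18-darknet': ([104 / 255.0, 117 / 255.0, 123 / 255.0], [1.0, 1.0, 1.0]),
    'resnet18-caffe': ([104 / 255.0, 117 / 255.0, 123 / 255.0], [1.0, 1.0, 1.0]),
}
arch = 'resnet18-caffe'  # stands in for args.arch
mean, std = NORM_STATS.get(arch, ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]))
normalize = transforms.Normalize(mean=mean, std=std)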
Example #6
def main():
    if args.gpu is not None:
        print('Using GPU %d' % args.gpu)
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    else:
        print('CPU mode')

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_transform = transforms.Compose([
        transforms.RandomSizedCrop(227),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])

    val_transform = transforms.Compose([
        #transforms.Scale(256),
        #transforms.CenterCrop(227),
        transforms.RandomSizedCrop(227),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    # DataLoader initialize
    train_data = DataLoader(args.pascal_path,
                            'trainval',
                            transform=train_transform)
    train_loader = torch.utils.data.DataLoader(dataset=train_data,
                                               batch_size=args.batch,
                                               shuffle=True,
                                               num_workers=CORES)

    val_data = DataLoader(args.pascal_path,
                          'test',
                          transform=val_transform,
                          random_crops=args.crops)
    val_loader = torch.utils.data.DataLoader(dataset=val_data,
                                             batch_size=args.batch,
                                             shuffle=False,
                                             num_workers=CORES)

    N = len(train_data.names)
    iter_per_epoch = N // args.batch  # integer division so range() below gets an int
    # Network initialize
    #net = Network(groups = 2)
    net = Network(num_classes=21)
    if args.gpu is not None:
        net.cuda()

    if args.model is not None:
        net.load(args.model, args.fc)

    if args.freeze is not None:
        # Freeze layers up to conv4
        for i, (name, param) in enumerate(net.named_parameters()):
            if 'conv' in name or 'features' in name:
                param.requires_grad = False

    criterion = nn.MultiLabelSoftMarginLoss()
    optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,
                                       net.parameters()),
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=0.0001)

    if not os.path.exists(args.checkpoint):
        os.makedirs(args.checkpoint + '/train')
        os.makedirs(args.checkpoint + '/test')


#    logger_test = None
    logger_train = Logger(args.checkpoint + '/train')
    logger_test = Logger(args.checkpoint + '/test')

    ############## TRAINING ###############
    print('Start training: lr %f, batch size %d' % (args.lr, args.batch))
    print('Checkpoint: ' + args.checkpoint)

    # Train the Model
    steps = args.iter_start
    for epoch in range(iter_per_epoch * args.iter_start, args.epochs):
        adjust_learning_rate(optimizer,
                             epoch,
                             init_lr=args.lr,
                             step=80,
                             decay=0.1)

        mAP = []
        for i, (images, labels) in enumerate(train_loader):
            images = Variable(images)
            labels = Variable(labels)
            if args.gpu is not None:
                images = images.cuda()
                labels = labels.cuda()

            # Forward + Backward + Optimize
            optimizer.zero_grad()
            outputs = net(images)

            mAP.append(compute_mAP(labels.data, outputs.data))

            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            loss = loss.cpu().data.numpy()

            if steps % 100 == 0:
                print('[%d/%d] %d), Loss: %.3f, mAP %.2f%%' % (
                    epoch + 1, args.epochs, steps, loss,
                    100 * np.mean(mAP[-20:])))

            if steps % 20 == 0:
                logger_train.scalar_summary('mAP', np.mean(mAP[-20:]), steps)
                logger_train.scalar_summary('loss', loss, steps)
                data = images.cpu().data.numpy().transpose([0, 2, 3, 1])
                logger_train.image_summary('input', data[:10], steps)

            steps += 1

        if epoch % 5 == 0:
            net.save(args.checkpoint, epoch + 1)
            print('Saved: ' + args.checkpoint)

        if epoch % 5 == 0:
            test(net, criterion, logger_test, val_loader, steps)

        if os.path.exists(args.checkpoint + '/stop.txt'):
            # break without using CTRL+C
            break
Example #7
def main(args):

    if os.path.isfile(args.rnn_save_path):
        print('The rnn model file %s already exists' % (args.rnn_save_path))
        sys.exit(1)

    # Figure out the datatype we will use; this will determine whether we run on
    # CPU or on GPU. Run on GPU by adding the command-line flag --use_gpu
    dtype = torch.FloatTensor
    if args.use_gpu:
        dtype = torch.cuda.FloatTensor

    # Image preprocessing
    # For normalization, see https://github.com/pytorch/vision#models
    train_transform = T.Compose([
        T.Scale(256),
        T.RandomSizedCrop(224),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
    ])

    # Build data loader
    train_dset = MultiLabelImageFolder(args.train_dir, args.train_labels_file, args.label_list_file, \
        transform=train_transform, target_transform = transform_target_to_1_0_vect)

    train_loader = DataLoader(train_dset,
                              batch_size=args.batch_size,
                              num_workers=args.num_workers,
                              shuffle=True)

    val_transform = T.Compose([
        T.Scale(224),
        T.CenterCrop(224),
        T.ToTensor(),
        T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
    ])

    val_dset = MultiLabelImageFolder(args.val_dir, args.val_labels_file, args.label_list_file, \
        transform=val_transform, target_transform = transform_target_to_1_0_vect)

    val_loader = DataLoader(val_dset,
                            batch_size=args.batch_size,
                            num_workers=args.num_workers)

    # Build the models
    encoder = EncoderCNN(dtype, model_type='densenet')
    decoder = DecoderBinaryRNN(args.lstm_hidden_size, encoder.output_size, 17)
    for param in encoder.parameters():
        param.requires_grad = False
    for param in decoder.parameters():
        param.requires_grad = True

    if torch.cuda.is_available():
        encoder.cuda()
        decoder.cuda()

    # Loss and Optimizer
    loss_fn = nn.MultiLabelSoftMarginLoss().type(dtype)

    decay_rate = 1.6
    learning_rate = decay_rate * args.lr1
    best_f2 = -np.inf

    # Train the Models (just rnn)
    for epoch in range(args.num_epochs1):
        print('Epoch [%d/%d]' % (epoch, args.num_epochs1))

        learning_rate /= decay_rate
        optimizer = torch.optim.Adam(decoder.parameters(), lr=learning_rate)
        run_epoch(encoder,
                  decoder,
                  loss_fn,
                  train_loader,
                  optimizer,
                  args.save_loss_path,
                  is_cnn_training=False)

        # Save the models
        f2 = check_f2(nn.Sequential(encoder, decoder),
                      val_loader,
                      dtype,
                      recomp_thresh=True)
        print('Val f2: %f' % (f2))
        if f2 > best_f2:
            best_f2 = f2
            print('found a new best!')
            torch.save(decoder.state_dict(), args.rnn_save_path)
            torch.save(encoder.state_dict(), args.cnn_save_path)
            np.save(args.save_thresholds_path,
                    label_thresholds,
                    allow_pickle=False)

    for param in encoder.parameters():
        param.requires_grad = True

    decay_rate = 1.4
    learning_rate = args.lr2 * decay_rate
    # Train the Model (cnn and rnn)
    for epoch in range(args.num_epochs2):
        print('Epoch [%d/%d]' % (epoch, args.num_epochs2))

        learning_rate /= decay_rate
        optimizer = torch.optim.Adam(list(decoder.parameters()) +
                                     list(encoder.parameters()),
                                     lr=learning_rate)
        run_epoch(encoder,
                  decoder,
                  loss_fn,
                  train_loader,
                  optimizer,
                  args.save_loss_path,
                  is_cnn_training=True)

        # Save the models
        f2 = check_f2(nn.Sequential(encoder, decoder),
                      val_loader,
                      dtype,
                      recomp_thresh=True)
        print('Val f2: %f' % (f2))
        if f2 > best_f2:
            best_f2 = f2
            print('found a new best!')
            torch.save(decoder.state_dict(), args.rnn_save_path)
            torch.save(encoder.state_dict(), args.cnn_save_path)
            np.save(args.save_thresholds_path,
                    label_thresholds,
                    allow_pickle=False)
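
Both training stages above rebuild the Adam optimizer every epoch to decay the learning rate, which also discards Adam's running moment estimates. A sketch of the same geometric decay with a single optimizer and a scheduler (the nn.Linear stands in for the decoder):

import torch
import torch.nn as nn

decoder = nn.Linear(8, 4)  # stand-in for DecoderBinaryRNN
optimizer = torch.optim.Adam(decoder.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=1 / 1.6)
for epoch in range(5):
    optimizer.step()   # placeholder for run_epoch(...)
    scheduler.step()   # lr *= 1/1.6, matching the decay_rate above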
Example #8
import torch.nn as nn
from PIL import Image
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.autograd import Variable

# Data Normalization - channel=(channel-mean)/std
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
# Data augmentation
train_data = datasets.ImageFolder(
    '/Users/xiechangrun/Desktop/data/train',
    transforms.Compose([
        transforms.Scale(256),
        transforms.RandomSizedCrop(227),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ]))

# batch size
batch_s = 128
n_iters = 3000
num_classes = len(train_data.classes)
num_epochs = n_iters / (len(train_data) / batch_s)
num_epochs = int(num_epochs)

# load initial picture
im = Image.open(train_data.imgs[2000][0])
plt.imshow(im)
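
As a sanity check on the epoch arithmetic: assuming the usual 25,000-image Kaggle dogs-vs-cats training split, len(train_data) / batch_s is 25000 / 128, about 195 iterations per epoch, so 3000 iterations works out to num_epochs = int(3000 / 195) = 15.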
Example #9
def main():
    global args, best_prec1
    args = parser.parse_args()
    print(model_names)
    # Load pretrained model
    print("=> using pre-trained model '{}'".format(args.arch))
    model = models.__dict__[args.arch](pretrained=True)

    if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_loader = torch.utils.data.DataLoader(
        torchvision.datasets.ImageNet(root=args.data,
                                      split='train',
                                      download=None,
                                      transform=transforms.Compose([
                                          transforms.RandomSizedCrop(224),
                                          transforms.RandomHorizontalFlip(),
                                          transforms.ToTensor(),
                                          normalize,
                                      ])),
        batch_size=args.batch_size // args.batch_split,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True)

    val_loader = torch.utils.data.DataLoader(torchvision.datasets.ImageNet(
        root=args.data,
        split='val',
        download=None,
        transform=transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=32,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    for n, m in model.named_modules():
        if n == 'module.fc':
            print("Found head")

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    lr = args.lr / args.batch_split / args.num_specialist if args.average else args.lr / args.batch_split
    optimizer = torch.optim.SGD(model.parameters(),
                                lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    specialist_modules = []
    found_head = False
    for n, module in model.named_modules():
        if isinstance(module, nn.modules.batchnorm.BatchNorm2d):
            specialist_modules.append(module)
        if isinstance(module, nn.modules.batchnorm.BatchNorm1d):
            specialist_modules.append(module)
        if n == 'module.fc' and args.spawn_head:
            specialist_modules.append(module)
            found_head = True

    if args.spawn_head:
        assert found_head

    assert len(specialist_modules) > 1
    print("Found {} convertible units".format(len(specialist_modules)))

    specialist_param = []
    per_specialist_param = [[] for _ in range(args.num_specialist)]
    for m in specialist_modules:
        ensemble_util.convert_specialist(m, args.num_specialist, args.std)
        for s in range(args.num_specialist):
            for p in m.specialist_modules[s].parameters():
                per_specialist_param[s].append(p)

    for s in range(args.num_specialist):
        specialist_param += per_specialist_param[s]

    spec_lr = args.spec_lr / args.batch_split

    specialist_optimizer = []
    if not args.per_spec_optim:
        specialist_optimizer = torch.optim.SGD(specialist_param,
                                               spec_lr,
                                               momentum=args.momentum,
                                               weight_decay=args.weight_decay)
    else:
        for s in range(args.num_specialist):
            specialist_optimizer.append(
                torch.optim.SGD(per_specialist_param[s],
                                spec_lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay))

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    filename = 'arch_' + str(args.arch)
    filename += '_spec_' + str(args.num_specialist)
    filename += '_std_' + str(args.std)
    filename += '_lr_' + str(args.lr)
    filename += '_speclr_' + str(args.spec_lr)
    filename += '_step_' + str(args.steps)
    filename += '_specstep_' + str(args.spec_steps)
    filename += '_head_' + str(args.spawn_head)
    filename += '_avr_' + str(args.average)
    filename += '_anneal_' + str(args.anneal)
    filename += '_specoptim_' + str(args.per_spec_optim)
    filename += '_' + str(args.id)

    log_file_name = filename + '_performance.txt'
    filename += '_checkpoint.pth.tar'
    print(filename)

    log_file = open(log_file_name, 'w')

    steps = args.steps.split(",") if args.steps != "" else []
    specialist_steps = args.spec_steps.split(
        ",") if args.spec_steps != "" else []
    print(steps)
    print(specialist_steps)

    for epoch in range(args.start_epoch, args.epochs):
        print("Adjust meta LR")
        adjust_learning_rate(lr, optimizer, epoch, steps, args.anneal)

        print("Adjust specialist LR")
        if not args.per_spec_optim:
            adjust_learning_rate(spec_lr, specialist_optimizer, epoch,
                                 specialist_steps, args.anneal)
        else:
            for o in specialist_optimizer:
                adjust_learning_rate(spec_lr, o, epoch, specialist_steps,
                                     args.anneal)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, specialist_optimizer,
              specialist_modules, args.batch_split, args.per_spec_optim, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)
        print('Epoch:\t{}\tPrecision\t{}'.format(epoch, prec1), file=log_file)
        log_file.flush()

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
            },
            is_best,
            filename=filename)
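
args.batch_split above divides both the DataLoader batch size and the learning rates, which suggests gradient accumulation inside train() (the function itself is not shown). A self-contained sketch of that assumed pattern:

import torch
import torch.nn as nn

model = nn.Linear(10, 2)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
batch_split = 4  # one optimizer step per batch_split micro-batches
loader = [(torch.randn(8, 10), torch.randint(0, 2, (8,))) for _ in range(8)]

optimizer.zero_grad()
for i, (x, y) in enumerate(loader):
    loss = criterion(model(x), y) / batch_split  # average over the virtual batch
    loss.backward()                              # gradients accumulate
    if (i + 1) % batch_split == 0:
        optimizer.step()
        optimizer.zero_grad()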
Example #10
def main():
    global args, best_prec1
    args = parser.parse_args()

    # create model
    if args.arch != "VGG_FACE":
        if args.pretrained:
            print("=> using pre-trained model '{}'".format(args.arch))
            model = models.__dict__[args.arch](pretrained=True)
            # Don't update non-classifier learned features in the pretrained networks
            for param in model.parameters():
                param.requires_grad = False
            # Replace the last fully-connected layer
            # Parameters of newly constructed modules have requires_grad=True by default
            # Final dense layer needs to replaced with the previous out chans, and number of classes
            # in this case -- resnet 101 - it's 2048 with two classes (cats and dogs)
            model.fc = nn.Linear(2048, 7)

        else:
            print("=> creating model '{}'".format(args.arch))
            model = models.__dict__[args.arch]()

        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            #model.cuda()
            model = torch.nn.DataParallel(model).cuda()
    else:
        import VGG_FACE
        model = VGG_FACE.VGG_FACE
        model.load_state_dict(torch.load('VGG_FACE.pth'))
        for param in model.parameters():
            param.requires_grad = False
        list_model = list(model.children())
        del list_model[-1]  # delete softmax
        list_model[-1] = torch.nn.Sequential(
            VGG_FACE.Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x),
            torch.nn.Linear(4096, 64))
        list_model.append(nn.ReLU())
        list_model.append(nn.Dropout(0.5))
        list_model.append(torch.nn.Sequential(
            VGG_FACE.Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x),
            torch.nn.Linear(64, 7)))
        model = nn.Sequential(*list_model)
        model = torch.nn.DataParallel(model).cuda()

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.evaluate, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    testdir = os.path.join(args.data, 'test')

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    train_loader = data.DataLoader(
        datasets.ImageFolder(traindir,
                             transforms.Compose([
                                 transforms.Scale(240),
                                 transforms.RandomSizedCrop(224),
                                 transforms.RandomHorizontalFlip(),
                                 transforms.ToTensor(),
                                 normalize,
                             ])),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True)

    val_loader = data.DataLoader(
        datasets.ImageFolder(valdir,
                             transforms.Compose([
                                 transforms.Scale(240),
                                 transforms.CenterCrop(224),
                                 transforms.ToTensor(),
                                 normalize,
                             ])),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True)

    test_loader = data.DataLoader(
        TestImageFolder(testdir,
                        transforms.Compose([
                            transforms.Scale(240),
                            transforms.CenterCrop(224),
                            transforms.ToTensor(),
                            normalize,
                        ])),
        batch_size=1,
        shuffle=False,
        num_workers=1,
        pin_memory=False)

    if args.test:
        print("Testing the model and generating a output csv for submission")
        test(test_loader, train_loader.dataset.class_to_idx, model)
        return
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    #optimizer = optim.Adam(model.module.fc.parameters(), args.lr, weight_decay=args.weight_decay)
    optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                  model.parameters()),
                           args.lr,
                           weight_decay=args.weight_decay)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best Accuracy and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
        }, is_best)
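
One fragile spot above: nn.Linear(2048, 7) hard-codes the ResNet-50/101 feature width and silently breaks for, say, resnet18 (512 features). Reading the width off the existing head avoids that:

import torch.nn as nn
import torchvision.models as models

model = models.resnet101(pretrained=True)
for param in model.parameters():
    param.requires_grad = False
model.fc = nn.Linear(model.fc.in_features, 7)  # new head trains by default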
Example #11
def main():
    
    global best_prec1
    
    print(models.__dict__)
    
    model = models.__dict__['resnet18'](num_classes=7)
    model.cuda()
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['model'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    data_dir = '/data/cenj/places365_train'
    traindir = os.path.join(data_dir, 'train')
    valdir = os.path.join(data_dir, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    class ImageFolderWithPaths(datasets.ImageFolder):
        """Custom dataset that includes image file paths. Extends
        torchvision.datasets.ImageFolder
        """

        # override the __getitem__ method. this is the method dataloader calls
        def __getitem__(self, index):
            # this is what ImageFolder normally returns 
            original_tuple = super(ImageFolderWithPaths, self).__getitem__(index)
            # the image file path
            path = self.imgs[index][0]
            # make a new tuple that includes original and the path
            tuple_with_path = (original_tuple + (path,))
            return tuple_with_path

    train_dataset = ImageFolderWithPaths(traindir, transforms.Compose([
            transforms.RandomSizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    val_dataset = ImageFolderWithPaths(valdir, transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    k = train_dataset.classes
    print(k)

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    params = list(model.parameters())

    optimizer = torch.optim.SGD(params, args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return
    
    accuracies_list = []
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        if epoch != 0 and epoch % 5 == 0:
            print("=> loading checkpoint '{}'".format('Conv_vgg_best_150obj_joint_conv'+str(args.count)+str(args.threshold)+'.pth.tar'))
            checkpoint = torch.load('Conv_vgg_best_150obj_joint_conv'+str(args.count)+str(args.threshold)+'.pth.tar')
            model.load_state_dict(checkpoint['model'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
              
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)
        accuracies_list.append("%.2f"%prec1.tolist())
        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'model': model.state_dict(),
            'best_prec1': best_prec1,
        }, is_best)
        print("The best accuracy obtained during training is = {}".format(best_prec1))
Example #12
	my_model.cuda()
	loss_bce.cuda()

BASE_LR = lr
EPOCH_DECAY = 30
DECAY_WEIGHT = 0.1
optimizer = optim.RMSprop([{'params': my_model.fc.parameters(), 'lr': fc_ori_lr_weight * lr}],
						  lr=lr, weight_decay=1e-5)
#########################################

######## define transform #############
mean=(0.485, 0.456, 0.406)
std=(0.229, 0.224, 0.225)
img_transform_train= transforms.Compose([
	transforms.Resize(size=img_size),
	transforms.RandomSizedCrop(crop_img_size),
	transforms.RandomHorizontalFlip(),
	transforms.ColorJitter(0.4, 0.4, 0.4),
	transforms.ToTensor(),
	transforms.Normalize(mean=mean, std=std)
])

def ten_crop(img):
    imgs = Fe.ten_crop(img,crop_img_size)
    return torch.stack([Fe.normalize(Fe.to_tensor(x), mean=mean, std=std) for x in imgs],0)
img_transform_test = transforms.Compose([
        transforms.Resize(size=(img_size,img_size)),
        transforms.Lambda(ten_crop)
    ])

img_transform ={
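The ten_crop transform above turns each test sample into a stack of 10 crops, so batches arrive as [B, 10, C, H, W]. The usual evaluation pattern (a sketch, not part of this snippet) folds the crops into the batch and averages the logits:

import torch
import torch.nn as nn

model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 224 * 224, 10))  # stand-in model
inputs = torch.randn(2, 10, 3, 224, 224)       # [B, ncrops, C, H, W]
bs, ncrops, c, h, w = inputs.size()
logits = model(inputs.view(-1, c, h, w))       # fold crops into the batch dim
logits = logits.view(bs, ncrops, -1).mean(1)   # average predictions over the crops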
Example #13
                writer.add_scalar('data/valacc', epoch_acc, epoch)

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss,
                                                       epoch_acc))

    writer.export_scalars_to_json("./all_scalars.json")
    writer.close()
    return model


if __name__ == '__main__':

    data_transforms = {
        'train':
        transforms.Compose([
            transforms.RandomSizedCrop(48),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ]),
        'val':
        transforms.Compose([
            transforms.Scale(64),
            transforms.CenterCrop(48),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5],
                                 [0.5, 0.5, 0.5])  # initialize the training and test sets
        ]),
    }

    data_dir = './train_val_data/'  # location of the dataset
Example #14
import config
from models import *
from utils import *

use_cuda = torch.cuda.is_available()
best_acc = 0  # best val accuracy
best_epoch = 0
start_epoch = 0  # start from epoch 0 or last checkpoint epoch

train_path = "./data/train/"
val_path = "./data/val/"

# Data
print('==> Preparing data..')
transform = transforms.Compose([
    transforms.RandomSizedCrop(224),  # randomly crop the image, then resize to 224x224
    transforms.RandomHorizontalFlip(),  # randomly flip the image horizontally
    transforms.ToTensor(),  # convert pixels from [0,255] to a [C,H,W] tensor in [0,1]
    transforms.Normalize((0.4914, 0.4822, 0.4465),
                         (0.2023, 0.1994, 0.2010)),  # per-channel (R,G,B) mean and std
])

# trainset is the dataset of all required data, read according to transform
# torch.utils.data.DataLoader wraps the dataset into an iterator
trainset = torchvision.datasets.ImageFolder(train_path, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=64,
                                          shuffle=True,
                                          num_workers=2)

valset = torchvision.datasets.ImageFolder(val_path, transform=transform)
Example #15
def main(args):
    # Figure out the datatype we will use; this will determine whether we run on
    # CPU or on GPU. Run on GPU by adding the command-line flag --use_gpu
    dtype = torch.FloatTensor
    if args.use_gpu:
        dtype = torch.cuda.FloatTensor

    # Use the torchvision.transforms package to set up a transformation to use
    # for our images at training time. The train-time transform will incorporate
    # data augmentation and preprocessing. At training time we will perform the
    # following preprocessing on our images:
    # (1) Resize the image so its smaller side is 256 pixels long
    # (2) Take a random 224 x 224 crop to the scaled image
    # (3) Horizontally flip the image with probability 1/2
    # (4) Convert the image from a PIL Image to a Torch Tensor
    # (5) Normalize the image using the mean and standard deviation of each
    #     color channel computed on the ImageNet dataset.
    train_transform = T.Compose([
        T.Scale(256),
        T.RandomSizedCrop(224),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
    ])

    # You load data in PyTorch by first constructing a Dataset object which
    # knows how to load individual data points (images and labels) and apply a
    # transform. The Dataset object is then wrapped in a DataLoader, which iterates
    # over the Dataset to construct minibatches. The num_workers flag to the
    # DataLoader constructor is the number of background threads to use for loading
    # data; this allows dataloading to happen off the main thread. You can see the
    # definition for the base Dataset class here:
    # https://github.com/pytorch/pytorch/blob/master/torch/utils/data/dataset.py
    #
    # and you can see the definition for the DataLoader class here:
    # https://github.com/pytorch/pytorch/blob/master/torch/utils/data/dataloader.py#L262
    #
    # The torchvision package provides an ImageFolder Dataset class which knows
    # how to read images off disk, where the image from each category are stored
    # in a subdirectory.
    #
    # You can read more about the ImageFolder class here:
    # https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py
    train_dset = ImageFolder(args.train_dir, transform=train_transform)
    train_loader = DataLoader(train_dset,
                              batch_size=args.batch_size,
                              num_workers=args.num_workers,
                              shuffle=True)

    # Set up a transform to use for validation data at test-time. For validation
    # images we will simply resize so the smaller edge has 224 pixels, then take
    # a 224 x 224 center crop. We will then construct an ImageFolder Dataset object
    # for the validation data, and a DataLoader for the validation set.
    val_transform = T.Compose([
        T.Scale(224),
        T.CenterCrop(224),
        T.ToTensor(),
        T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
    ])
    val_dset = ImageFolder(args.val_dir, transform=val_transform)
    val_loader = DataLoader(val_dset,
                            batch_size=args.batch_size,
                            num_workers=args.num_workers)

    # Now that we have set up the data, it's time to set up the model.
    # For this example we will finetune a ResNet-18 model which has been
    # pretrained on ImageNet. We will first reinitialize the last layer of the
    # model, and train only the last layer for a few epochs. We will then finetune
    # the entire model on our dataset for a few more epochs.

    # First load the pretrained ResNet-18 model; this will download the model
    # weights from the web the first time you run it.
    model = torchvision.models.resnet18(pretrained=True)

    # Reinitialize the last layer of the model. Each pretrained model has a
    # slightly different structure, but from the ResNet class definition
    # we see that the final fully-connected layer is stored in model.fc:
    # https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py#L111
    num_classes = len(train_dset.classes)
    model.fc = nn.Linear(model.fc.in_features, num_classes)
    model.conv1 = nn.Conv2d(3,
                            64,
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            bias=False)

    # Cast the model to the correct datatype, and create a loss function for
    # training the model.
    model.type(dtype)
    loss_fn = nn.CrossEntropyLoss().type(dtype)

    # First we want to train only the reinitialized last layer for a few epochs.
    # During this phase we do not need to compute gradients with respect to the
    # other weights of the model, so we set the requires_grad flag to False for
    # all model parameters, then set requires_grad=True for the parameters in the
    # last layer only.
    for param in model.parameters():
        param.requires_grad = False
    for param in model.fc.parameters():
        param.requires_grad = True
    for param in model.conv1.parameters():
        param.requires_grad = True

    # Construct a single Optimizer object for the reinitialized layers only.
    # Note: assigning two optimizers back to back would overwrite the first
    # and leave model.fc untrained, so fc and conv1 share one optimizer.
    optimizer = torch.optim.Adam(list(model.fc.parameters()) +
                                 list(model.conv1.parameters()), lr=1e-3)

    # Update only the reinitialized layers for a few epochs.
    for epoch in range(args.num_epochs1):
        # Run an epoch over the training data.
        print('Starting epoch %d / %d' % (epoch + 1, args.num_epochs1))
        run_epoch(model, loss_fn, train_loader, optimizer, dtype)

        # Check accuracy on the train and val sets.
        train_acc = check_accuracy(model, train_loader, dtype)
        val_acc = check_accuracy(model, val_loader, dtype)
        print('Train accuracy: ', train_acc)
        print('Val accuracy: ', val_acc)
        print()

    # Now we want to finetune the entire model for a few epochs. To do this we
    # will need to compute gradients with respect to all model parameters, so
    # we flag all parameters as requiring gradients.
    for param in model.parameters():
        param.requires_grad = True

    # Construct a new Optimizer that will update all model parameters. Note the
    # small learning rate.
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)

    # Train the entire model for a few more epochs, checking accuracy on the
    # train and validation sets after each epoch.
    for epoch in range(args.num_epochs2):
        print('Starting epoch %d / %d' % (epoch + 1, args.num_epochs2))
        run_epoch(model, loss_fn, train_loader, optimizer, dtype)

        train_acc = check_accuracy(model, train_loader, dtype)
        val_acc = check_accuracy(model, val_loader, dtype)
        print('Train accuracy: ', train_acc)
        print('Val accuracy: ', val_acc)
        print()
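
A variant of the stage-one setup above that keeps everything in one optimizer: parameter groups, each with its own learning rate (a sketch with illustrative values):

import torch
import torch.nn as nn
import torchvision

model = torchvision.models.resnet18(pretrained=True)
model.fc = nn.Linear(model.fc.in_features, 10)
optimizer = torch.optim.Adam([
    {'params': model.fc.parameters(), 'lr': 1e-3},     # fresh head: larger lr
    {'params': model.conv1.parameters(), 'lr': 1e-4},  # reused layer: smaller lr
])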
Example #16
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--batchSz', type=int, default=128)
    parser.add_argument('--nEpochs', type=int, default=300)
    parser.add_argument('--no-cuda', action='store_true')
    parser.add_argument('--save')
    parser.add_argument('--lr', type=float, default=0.1)
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--opt',
                        type=str,
                        default='sgd',
                        choices=('sgd', 'adam', 'rmsprop'))
    args = parser.parse_args()

    args.cuda = not args.no_cuda and torch.cuda.is_available()
    #args.save = args.save or 'work/lr/lr0.5'
    setproctitle.setproctitle(args.save)

    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    #if os.path.exists(args.save):
    #    shutil.rmtree(args.save)
    #os.makedirs(args.save, exist_ok=True)

    normMean = [0.485, 0.456, 0.406]
    normStd = [0.229, 0.224, 0.225]
    normTransform = transforms.Normalize(normMean, normStd)

    trainTransform = transforms.Compose([
        transforms.RandomCrop(64, padding=8),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(), normTransform
    ])
    testTransform = transforms.Compose([transforms.ToTensor(), normTransform])
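    # Note: trainTransform and testTransform above are defined but never used;
    # the DataLoaders below build their own transform pipelines inline.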

    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
    trainLoader = torch.utils.data.DataLoader(dset.ImageFolder(
        '/datadisk/A/liubaoen/data/TINYIMAGENET_1000_80/train/',
        transforms.Compose([
            transforms.Scale(72),
            transforms.RandomSizedCrop(64),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])),
                                              batch_size=args.batchSz,
                                              shuffle=True,
                                              num_workers=16,
                                              pin_memory=True)

    valLoader = torch.utils.data.DataLoader(dset.ImageFolder(
        '/datadisk/A/liubaoen/data/TINYIMAGENET_1000_80/val/',
        transforms.Compose([
            transforms.Scale(72),
            transforms.CenterCrop(64),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])),
                                            batch_size=args.batchSz,
                                            shuffle=False,
                                            num_workers=16,
                                            pin_memory=True)

    #net = resnet.ResNet18()
    net = torch.load(os.path.join(args.save, 'latest.pth'))
    print('  + Number of params: {}'.format(
        sum([p.data.nelement() for p in net.parameters()])))
    #if args.cuda:
    #    net = net.cuda()
    #    net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
    #    cudnn.benchmark = True

    if args.opt == 'sgd':
        optimizer = optim.SGD(net.parameters(),
                              args.lr,
                              momentum=0.9,
                              weight_decay=0.0005)
    elif args.opt == 'adam':
        optimizer = optim.Adam(net.parameters(), weight_decay=1e-4)
    elif args.opt == 'rmsprop':
        optimizer = optim.RMSprop(net.parameters(), weight_decay=1e-4)

    trainF = open(os.path.join(args.save, 'train.csv'), 'a')
    testF = open(os.path.join(args.save, 'test.csv'), 'a')
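    # This run resumes the model loaded from latest.pth above (hence the model
    # construction and .cuda() lines are left commented out): the epoch loop
    # starts at 151 and the CSV logs are opened in append mode so new rows
    # extend the existing files.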

    for epoch in range(151, args.nEpochs + 1):
        adjust_opt(args.opt, optimizer, args.lr, epoch)
        train(args, epoch, net, trainLoader, optimizer, trainF)
        test(args, epoch, net, valLoader, optimizer, testF)
        torch.save(net, os.path.join(args.save, 'latest.pth'))
        os.system('./plot.py {} &'.format(args.save))

    trainF.close()
    testF.close()
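Note: adjust_opt is called in the epoch loop above but not shown. For SGD it is typically a step learning-rate schedule; a plausible sketch follows, with assumed epoch breakpoints (150/225 for a 300-epoch run) that are not taken from the original:

def adjust_opt(optAlg, optimizer, base_lr, epoch):
    # Only SGD uses a hand-tuned schedule here; Adam/RMSprop keep their defaults.
    if optAlg != 'sgd':
        return
    # Assumed step schedule: 10x drops at epochs 150 and 225.
    if epoch < 150:
        lr = base_lr
    elif epoch < 225:
        lr = base_lr * 0.1
    else:
        lr = base_lr * 0.01
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr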
Example no. 17
def main():
    global args, best_prec1
    args = parser.parse_args()
    print(args)
    # create model
    print("=> creating model '{}'".format(args.arch))
    if args.arch.lower().startswith('wideresnet'):
        # a customized resnet model with last feature map size as 14x14 for better class activation mapping
        model = wideresnet.resnet50(num_classes=args.num_classes)
    else:
        model = models.__dict__[args.arch](num_classes=args.num_classes)

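    # For AlexNet and VGG, parallelize only the convolutional features; their
    # large fully connected layers are kept on a single GPU.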
    if args.arch.lower().startswith('alexnet') or args.arch.lower().startswith(
            'vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
    print(model)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomSizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
            }, is_best, args.arch.lower())
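Note: adjust_learning_rate and save_checkpoint are not shown here. The stock PyTorch ImageNet example defines them roughly as below; the checkpoint filenames are assumptions, and args is the module-level namespace populated in main():

import shutil
import torch

def adjust_learning_rate(optimizer, epoch):
    # Decay the initial learning rate by 10x every 30 epochs.
    lr = args.lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

def save_checkpoint(state, is_best, prefix=''):
    # Save the latest state every epoch; copy it aside when it is the best so far.
    filename = prefix + '_checkpoint.pth.tar'
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, prefix + '_model_best.pth.tar')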
Example no. 18
def main():
    global args, best_prec1
    args = parser.parse_args()

    # create model
    
    if 'cifar' in args.arch:
        print("CIFAR model: fixing args.lastout as 8")
        args.lastout += 1

    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = resnext_models[args.arch](
            pretrained=True, numlayers=args.numlayers, expansion=args.xp,
            x=args.x, d=args.d, upgroup=bool(args.ug), downgroup=bool(args.dg),
            secord=bool(args.secord), soadd=args.soadd, att=bool(args.att),
            lastout=args.lastout, dilpat=args.dp, deform=args.df,
            fixx=args.fixx, sqex=args.sqex, ratt=args.ratt,
            nocompete=args.labelnocompete)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = resnext_models[args.arch](
            numlayers=args.numlayers, expansion=args.xp,
            x=args.x, d=args.d, upgroup=bool(args.ug), downgroup=bool(args.dg),
            secord=bool(args.secord), soadd=args.soadd, att=bool(args.att),
            lastout=args.lastout, dilpat=args.dp, deform=args.df,
            fixx=args.fixx, sqex=args.sqex, ratt=args.ratt,
            nocompete=args.labelnocompete)
        #print("args.df: {}".format(args.df))

    # get the number of model parameters
    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))
    
    if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            
            #print(type(checkpoint))
            
            model.load_state_dict(checkpoint['state_dict'])
            
            # For fine-tuning: restart the epoch count and replace the
            # classifier head with one sized for the new label set. topfeature
            # is assumed to be the final feature width: cardinality (x) *
            # base width (d) * 8 * expansion (xp).
            if args.finetune:
                args.start_epoch = 0
                print("start_epoch is", args.start_epoch)
                topfeature = int(args.x * args.d * 8 * args.xp)
                model.fc = nn.Linear(topfeature, args.nclass)

            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    if args.ds == "dir":
        traindir = os.path.join(args.data, 'train')
        valdir = os.path.join(args.data, 'val')
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])

        train_loader = torch.utils.data.DataLoader(
            datasets.ImageFolder(traindir, transforms.Compose([
                transforms.RandomSizedCrop(args.lastout*32),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ])),
            batch_size=args.batch_size, shuffle=True,
            num_workers=args.workers, pin_memory=True)
        
        if args.evaluate == 2:
            
            val_loader = torch.utils.data.DataLoader(
                datasets.ImageFolder(valdir, transforms.Compose([
                    transforms.Scale((args.lastout+args.evalmodnum)*32),
                    transforms.CenterCrop(args.lastout*32),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(),
                    normalize,
                ])),
                batch_size=args.batch_size, shuffle=False,
                num_workers=args.workers, pin_memory=True)
        
        elif args.evaluate == 3:
            
            val_loader = torch.utils.data.DataLoader(
                datasets.ImageFolder(valdir, transforms.Compose([
                    transforms.Scale((args.lastout+args.evalmodnum)*32),
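                    # Two-stage crop: first a random square at the rescaled
                    # size, then a random crop at the network's input resolution.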
                    transforms.RandomCrop((args.lastout+args.evalmodnum)*32),
                    transforms.RandomCrop(args.lastout*32),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(),
                    normalize,
                ])),
                batch_size=args.batch_size, shuffle=False,
                num_workers=args.workers, pin_memory=True)
            
        else:
            
            val_loader = torch.utils.data.DataLoader(
                datasets.ImageFolder(valdir, transforms.Compose([
                    transforms.Scale((args.lastout+1)*32),
                    transforms.CenterCrop(args.lastout*32),
                    transforms.ToTensor(),
                    normalize,
                ])),
                batch_size=args.batch_size, shuffle=False,
                num_workers=args.workers, pin_memory=True)
        
    elif args.ds in ["CIFAR10","CIFAR100"]:
        normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
                                         std=[x / 255.0 for x in [63.0, 62.1, 66.7]])
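        # Commonly used CIFAR per-channel statistics, given on a 0-255 scale
        # and divided by 255 to match ToTensor's [0, 1] output range.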
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
            ])
        
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            normalize
            ])
        
        if args.ds == "CIFAR10":
            
            train_loader = torch.utils.data.DataLoader(
                datasets.CIFAR10('../data', train=True, download=True,
                             transform=transform_train),
                             batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
            val_loader = torch.utils.data.DataLoader(
                datasets.CIFAR10('../data', train=False, transform=transform_test),
                batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
        else:
            
            train_loader = torch.utils.data.DataLoader(
                datasets.CIFAR100('../data', train=True, download=True,
                             transform=transform_train),
                             batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
            val_loader = torch.utils.data.DataLoader(
                datasets.CIFAR100('../data', train=False, transform=transform_test),
                batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
        
    else:
        print "Unrecognized Dataset. Halt."
        return 0
        
        
    # define loss function (criterion) and optimizer
    #criterion = nn.CrossEntropyLoss().cuda()
    if 'L1' in args.arch or args.L1 == 1:
        criterion = nn.L1Loss(size_average=True).cuda()
    else:
        criterion = nn.CrossEntropyLoss().cuda()

        
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=bool(args.nes))
    #optimizer = torch.optim.Adam(model.parameters(), args.lr)
    
    if args.evaluate == 2:
        NUM_MULTICROP = 2
        for i in range(NUM_MULTICROP):
            test_output(val_loader, model, 'Result_{0}_{1}_{2}'.format(args.evaluate, i, args.evalmodnum))
        return
    
    elif args.evaluate == 3:
        NUM_MULTICROP = 8
        for i in range(NUM_MULTICROP):
            # Reset the val loader so this pass draws a fresh set of random crops.
            val_loader = torch.utils.data.DataLoader(
                datasets.ImageFolder(valdir, transforms.Compose([
                    transforms.Scale((args.lastout+args.evalmodnum)*32),
                    transforms.RandomCrop((args.lastout+args.evalmodnum)*32),
                    transforms.RandomCrop(args.lastout*32),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(),
                    normalize,
                ])),
                batch_size=args.batch_size, shuffle=False,
                num_workers=args.workers, pin_memory=True)
            # Test
            test_output(val_loader, model, args.evaltardir+'Result_{0}_{1}_{2}'.format(args.evaluate, i, args.evalmodnum))
        return
    
    elif args.evaluate == 1:
        test_output(val_loader, model, 'Result_00')
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        for i in range(args.tl):
            train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
        }, is_best)
        print('Current best accuracy:', best_prec1)
    print('Global best accuracy:', best_prec1)
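Note: test_output, used by the multi-crop evaluation modes above, is defined elsewhere. This sketch assumes it dumps one row of softmax scores per image so that scores from several crops can be averaged offline; the file format and contents are assumptions, not taken from the original:

import torch
import torch.nn.functional as F

def test_output(loader, model, out_path):
    # One pass over the (possibly randomly cropped) validation set, writing
    # one row of class probabilities per image.
    model.eval()
    with torch.no_grad(), open(out_path, 'w') as f:
        for x, _ in loader:
            probs = F.softmax(model(x.cuda()), dim=1)
            for row in probs.cpu():
                f.write(' '.join('%.6f' % float(v) for v in row) + '\n')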