Code example #1
import keras
import numpy as np
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator

import net  # project-local module that provides PyramidNet

# num_classes, train_batch_size, test_batch_size, config, data_shape,
# weight_decay, epochs, num_train and mean are defined earlier in the
# source file this excerpt was taken from.
lr = 0.1  # initial learning rate

(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
train_gen = ImageDataGenerator(
    horizontal_flip=True,
    width_shift_range=0.1,
    height_shift_range=0.1,
    shear_range=0.1,
    zoom_range=0.1,
).flow(x_train, y_train, batch_size=train_batch_size)
test_gen = ImageDataGenerator().flow(x_test, y_test, batch_size=test_batch_size)

reduce_lr_epoch = [150, 225]
testnet = net.PyramidNet(config, data_shape, num_classes, weight_decay, 'channels_last')
for epoch in range(epochs):
    print('-'*20, 'epoch', epoch, '-'*20)
    train_acc = []
    train_loss = []
    test_acc = []
    # reduce learning rate
    if epoch in reduce_lr_epoch:
        lr = lr * 0.1
        print('reduce learning rate =', lr, 'now')
    # train one epoch
    for step in range(num_train // train_batch_size):
        # get a batch and subtract the per-channel mean
        images, labels = next(train_gen)
        images = images - mean
        # train_one_batch can also accept your own session
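        # --- The excerpt ends here. A minimal sketch of how the loop might
        # --- continue, assuming net.PyramidNet exposes train_one_batch(images,
        # --- labels, lr) (named in the comment above) and a test_one_batch
        # --- counterpart, and that num_test holds the test-set size; everything
        # --- beyond the train_one_batch name is an assumption.
        loss, acc = testnet.train_one_batch(images, labels, lr)  # assumed signature
        train_loss.append(loss)
        train_acc.append(acc)
    print('train loss =', np.mean(train_loss), 'train acc =', np.mean(train_acc))
    # evaluate after each epoch
    for step in range(num_test // test_batch_size):
        images, labels = next(test_gen)
        images = images - mean
        test_acc.append(testnet.test_one_batch(images, labels))  # assumed helper
    print('test acc =', np.mean(test_acc))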
Code example #2
# Imports for this excerpt; parser, best_err1, train, validate,
# adjust_learning_rate and save_checkpoint are defined at module level in the
# source file.
import os

import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from tensorboard_logger import configure  # assumed logging backend

import PYRM  # project-local PyramidNet implementation


def main():
    global args, best_err1
    args = parser.parse_args()
    if args.tensorboard: configure("runs_WHOI/%s" % (args.expname))

    # Data loading code
    normalize = transforms.Normalize(
        mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
        std=[x / 255.0 for x in [63.0, 62.1, 66.7]])
    # normalize = transforms.Normalize(mean=[x/255.0 for x in [125.3]],
    # std=[x/255.0 for x in [63.0]])

    if args.augment:
        transform_train = transforms.Compose([
            transforms.Resize(36),  # transforms.Scale was deprecated in favor of Resize
            transforms.RandomCrop(32),
            # transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
    else:
        # no augmentation: deterministic resize and center crop only
        transform_train = transforms.Compose([
            transforms.Resize(36),
            transforms.CenterCrop(32),
            transforms.ToTensor(),
            normalize,
        ])
    transform_test = transforms.Compose([
        transforms.Resize(36),
        transforms.CenterCrop(32),
        transforms.ToTensor(),
        normalize,
    ])

    kwargs = {'num_workers': 1, 'pin_memory': True}

    numberofclass = 89  # changed to 89
    if args.dataset == 'cifar100':
        train_loader = torch.utils.data.DataLoader(datasets.CIFAR100(
            '../data', train=True, download=True, transform=transform_train),
                                                   batch_size=args.batchsize,
                                                   shuffle=True,
                                                   **kwargs)
        val_loader = torch.utils.data.DataLoader(datasets.CIFAR100(
            '../data', train=False, transform=transform_test),
                                                 batch_size=args.batchsize,
                                                 shuffle=True,
                                                 **kwargs)
        numberofclass = 100  # CIFAR-100 has 100 classes
    elif args.dataset == 'cifar10':
        train_loader = torch.utils.data.DataLoader(datasets.CIFAR10(
            '../data', train=True, download=True, transform=transform_train),
                                                   batch_size=args.batchsize,
                                                   shuffle=True,
                                                   **kwargs)
        val_loader = torch.utils.data.DataLoader(datasets.CIFAR10(
            '../data', train=False, transform=transform_test),
                                                 batch_size=args.batchsize,
                                                 shuffle=True,
                                                 **kwargs)
        numberofclass = 10
    elif args.dataset == 'WebVision':
        train_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
            '/media/ouc/30bd7817-d3a1-4e83-b7d9-5c0e373ae434/LiuJing/WebVision/info/train',
            transform=transform_train),
                                                   batch_size=args.batchsize,
                                                   shuffle=True,
                                                   **kwargs)
        val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
            '/media/ouc/30bd7817-d3a1-4e83-b7d9-5c0e373ae434/LiuJing/WebVision/info/val',
            transform=transform_test),
                                                 batch_size=args.batchsize,
                                                 shuffle=True,
                                                 **kwargs)
        numberofclass = 1000
    else:
        raise Exception('unknown dataset: {}'.format(args.dataset))

    print('Training PyramidNet-{} on {} dataset:'.format(
        args.depth, args.dataset.upper()))

    # create model
    # model = RN.ResNet(args.depth, 10, bottleneck=args.bottleneck) # for ResNet
    model = PYRM.PyramidNet(args.depth,
                            args.alpha,
                            numberofclass,
                            bottleneck=args.bottleneck)  # for PyramidNet

    # get the number of model parameters

    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    # for training on multiple GPUs.
    # Use CUDA_VISIBLE_DEVICES=0,1 to specify which GPUs to use
    #model = torch.nn.DataParallel(model).cuda()
    model = model.cuda()

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_err1 = checkpoint['best_err1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    print(model)

    cudnn.benchmark = True

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        err1 = validate(val_loader, model, criterion, epoch)

        # remember best err1 and save checkpoint
        is_best = err1 <= best_err1
        best_err1 = min(err1, best_err1)
        print('Current best accuracy (error):', best_err1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_err1': best_err1,
            }, is_best)
    print('Best accuracy (error):', best_err1)
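The loop above calls adjust_learning_rate and save_checkpoint, which live at module level in the original file. A minimal sketch of the usual shape of these helpers in PyramidNet training scripts, assuming step decay at 50% and 75% of the epoch budget and the standard checkpoint file names (both are assumptions, not the project's confirmed settings):

import shutil

def adjust_learning_rate(optimizer, epoch):
    # assumed schedule: divide the base LR by 10 at 50% and 75% of the budget
    lr = args.lr * (0.1 ** (epoch // (args.epochs * 0.5))) \
                 * (0.1 ** (epoch // (args.epochs * 0.75)))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr


def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    # assumed layout: save the latest state, copy it when it is the best so far
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth.tar')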
Code example #3
# Same module-level imports as in code example #2, plus the project-local
# helpers validate, plot_and_save_confusion_matrix and
# compute_precision_recall_f1_score.
def main():
    global args, best_err1, numberofclass
    args = parser.parse_args()
    if args.tensorboard: configure("runs_WHOI/%s" % (args.expname))

    # Data loading code
    normalize = transforms.Normalize(
        mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
        std=[x / 255.0 for x in [63.0, 62.1, 66.7]])
    # normalize = transforms.Normalize(mean=[x/255.0 for x in [125.3]],
    # std=[x/255.0 for x in [63.0]])

    transform_test = transforms.Compose([
        transforms.Resize(36),  # transforms.Scale was deprecated in favor of Resize
        transforms.CenterCrop(32),
        transforms.ToTensor(),
        normalize,
    ])

    kwargs = {'num_workers': 1, 'pin_memory': True}

    if args.dataset == 'cifar100':
        val_loader = torch.utils.data.DataLoader(datasets.CIFAR100(
            '../data', train=False, transform=transform_test),
                                                 batch_size=args.batchsize,
                                                 shuffle=True,
                                                 **kwargs)
        numberofclass = 100
    elif args.dataset == 'cifar10':
        val_loader = torch.utils.data.DataLoader(datasets.CIFAR10(
            '../data', train=False, transform=transform_test),
                                                 batch_size=args.batchsize,
                                                 shuffle=True,
                                                 **kwargs)
        numberofclass = 10
    elif args.dataset == 'WHOI':
        val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
            '/media/ouc/30bd7817-d3a1-4e83-b7d9-5c0e373ae434/LiuJing/2014',
            transform=transform_test),
                                                 batch_size=args.batchsize,
                                                 shuffle=True,
                                                 **kwargs)
        numberofclass = 103
    elif args.dataset == 'plankton':
        val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
            '/media/ouc/30bd7817-d3a1-4e83-b7d9-5c0e373ae434/DuAngAng/oceans-2018/codes/plankton-set/test',
            transform=transform_test),
                                                 batch_size=args.batchsize,
                                                 shuffle=True,
                                                 **kwargs)
        numberofclass = 121
    else:
        raise Exception('unknown dataset: {}'.format(args.dataset))

    print('Evaluating PyramidNet-{} on {} dataset:'.format(
        args.depth, args.dataset.upper()))

    # create model
    # model = RN.ResNet(args.depth, numberofclass, bottleneck=args.bottleneck) # for ResNet
    model = PYRM.PyramidNet(args.depth,
                            args.alpha,
                            numberofclass,
                            bottleneck=args.bottleneck)  # for PyramidNet

    # get the number of model parameters

    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    # for training on multiple GPUs.
    # Use CUDA_VISIBLE_DEVICES=0,1 to specify which GPUs to use
    #model = torch.nn.DataParallel(model).cuda()
    model = model.cuda()

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {}, the best err1)".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    print(model)

    cudnn.benchmark = True

    # define loss function (criterion)
    criterion = nn.CrossEntropyLoss().cuda()

    # evaluate on validation set
    err1, cm = validate(val_loader, model, criterion, numberofclass)

    # plot confusion matrix and save the fig
    plot_and_save_confusion_matrix(cm, numberofclass, normalize=True)

    # compute average precision, recall and F1 score
    average_precision, average_recall, average_f1_score = compute_precision_recall_f1_score(
        cm)

    print("Accuracy (error): {}".format(err1))
    print("Precision: {}".format(average_precision))
    print("Recall: {}".format(average_recall))
    print("F1-score: {}".format(average_f1_score))
Code example #4
# Imports for this excerpt; parser, model_names, train, validate,
# adjust_learning_rate, save_checkpoint, best_err1 and best_err5 are defined
# at module level in the source file.
import os

import torch
import torch.nn as nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from tensorboard_logger import configure  # assumed logging backend

import RN    # project-local ResNet
import PRN   # project-local pre-activation ResNet
import PYRM  # project-local PyramidNet


def main():
    global args, best_err1, best_err5
    args = parser.parse_args()
    if args.tensorboard: configure("runs/%s" % (args.expname))

    args.distributed = args.world_size > 1
    if args.distributed:
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size)

    if args.dataset.startswith('cifar'):
        normalize = transforms.Normalize(
            mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
            std=[x / 255.0 for x in [63.0, 62.1, 66.7]])
        if args.augment:
            transform_train = transforms.Compose([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ])
        else:
            transform_train = transforms.Compose([
                transforms.ToTensor(),
                normalize,
            ])
        transform_test = transforms.Compose([transforms.ToTensor(), normalize])

        if args.dataset == 'cifar100':
            train_loader = torch.utils.data.DataLoader(
                datasets.CIFAR100('../data',
                                  train=True,
                                  download=True,
                                  transform=transform_train),
                batch_size=args.batch_size,
                shuffle=True,
                num_workers=args.workers,
                pin_memory=True)
            val_loader = torch.utils.data.DataLoader(
                datasets.CIFAR100('../data',
                                  train=False,
                                  transform=transform_test),
                batch_size=args.batch_size,
                shuffle=True,
                num_workers=args.workers,
                pin_memory=True)
            numberofclass = 100
        elif args.dataset == 'cifar10':
            train_loader = torch.utils.data.DataLoader(
                datasets.CIFAR10('../data',
                                 train=True,
                                 download=True,
                                 transform=transform_train),
                batch_size=args.batch_size,
                shuffle=True,
                num_workers=args.workers,
                pin_memory=True)
            val_loader = torch.utils.data.DataLoader(
                datasets.CIFAR10('../data',
                                 train=False,
                                 transform=transform_test),
                batch_size=args.batch_size,
                shuffle=True,
                num_workers=args.workers,
                pin_memory=True)
            numberofclass = 10
        else:
            raise Exception('unknown dataset: {}'.format(args.dataset))

    elif args.dataset == 'imagenet':
        traindir = os.path.join(args.data, 'train')
        valdir = os.path.join(args.data, 'val')
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])

        train_dataset = datasets.ImageFolder(
            traindir,
            transforms.Compose([
                transforms.RandomResizedCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]))

        if args.distributed:
            train_sampler = torch.utils.data.distributed.DistributedSampler(
                train_dataset)
        else:
            train_sampler = None

        train_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=args.batch_size,
            shuffle=(train_sampler is None),
            num_workers=args.workers,
            pin_memory=True,
            sampler=train_sampler)

        val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
            valdir,
            transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                normalize,
            ])),
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=args.workers,
                                                 pin_memory=True)
        numberofclass = 1000

    else:
        raise Exception('unknown dataset: {}'.format(args.dataset))

    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.net_type))
        try:
            model = models.__dict__[str(args.net_type)](pretrained=True)
        except (KeyError, TypeError):
            print('unknown model')
            print('torchvision provides the following pretrained models:',
                  model_names)
            return
    else:
        print("=> creating model '{}'".format(args.net_type))
        if args.net_type == 'resnet':
            model = RN.ResNet(args.dataset, args.depth, numberofclass,
                              args.bottleneck)  # for ResNet
        elif args.net_type == 'preresnet':
            model = PRN.PreResNet(args.dataset, args.depth, numberofclass,
                                  args.bottleneck)  # for Pre-activation ResNet
        elif args.net_type == 'pyramidnet':
            model = PYRM.PyramidNet(args.dataset, args.depth, args.alpha,
                                    numberofclass,
                                    args.bottleneck)  # for PyramidNet
        else:
            raise Exception('unknown network architecture: {}'.format(
                args.net_type))

    if not args.distributed:
        if args.net_type.startswith('alexnet') or args.net_type.startswith(
                'vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    else:
        model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(model)

    print(model)
    print('the number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_err1 = checkpoint['best_err1']
            best_err5 = checkpoint['best_err5']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    print(args.epochs)

    for epoch in range(args.start_epoch, args.epochs + 1):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        err1, err5 = validate(val_loader, model, criterion, epoch)

        # remember best prec@1 and save checkpoint
        is_best = err1 <= best_err1
        best_err1 = min(err1, best_err1)
        if is_best:
            best_err5 = err5
        print('Current best accuracy (top-1 and 5 error):', best_err1,
              best_err5)
        save_checkpoint(
            {
                'epoch': epoch,
                'arch': args.net_type,
                'state_dict': model.state_dict(),
                'best_err1': best_err1,
                'best_err5': best_err5,
                'optimizer': optimizer.state_dict(),
            }, is_best)
    print('Best accuracy (top-1 and 5 error):', best_err1, best_err5)
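validate here returns top-1 and top-5 error. A minimal sketch of the error computation that return value implies, modeled on the standard PyTorch ImageNet-example accuracy helper; the function name is an assumption:

import torch

def topk_error(output, target, ks=(1, 5)):
    # output: (batch, classes) logits; target: (batch,) class indices
    maxk = max(ks)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()                                       # (maxk, batch)
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    errs = []
    for k in ks:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        errs.append((100.0 * (1.0 - correct_k / target.size(0))).item())
    return errs  # [err1, err5] for the default ks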
Code example #5
File: try_test.py  Project: liujing1003/benchmark
# Same module-level imports and helpers as in code example #2.
def main():
    global args, best_err1
    args = parser.parse_args()
    if args.tensorboard: configure("runs_WHOI/%s"%(args.expname))
    
    # Data loading code
    normalize = transforms.Normalize(mean=[x/255.0 for x in [125.3, 123.0, 113.9]],
                                     std=[x/255.0 for x in [63.0, 62.1, 66.7]])
    # normalize = transforms.Normalize(mean=[x/255.0 for x in [125.3]],
    #                                  std=[x/255.0 for x in [63.0]])
    
    transform_test = transforms.Compose([
        transforms.Resize(36),
        transforms.CenterCrop(32),
        transforms.ToTensor(),
        normalize,
    ])

    kwargs = {'num_workers': 1, 'pin_memory': True}

    numberofclass = 89  # changed to 89

    if args.dataset == 'WHOI':
        val_loader = torch.utils.data.DataLoader(
            datasets.ImageFolder('/media/ouc/30bd7817-d3a1-4e83-b7d9-5c0e373ae434/LiuJing/WHOI-MVCO/TestSet', transform=transform_test),
            batch_size=args.batchsize, shuffle=True, **kwargs)
        numberofclass = 89
    else:
        raise Exception('unknown dataset: {}'.format(args.dataset))

    print('Evaluating PyramidNet-{} on {} dataset:'.format(args.depth, args.dataset.upper()))
    
    # create model
    # model = RN.ResNet(args.depth, 10, bottleneck=args.bottleneck) # for ResNet
    model = PYRM.PyramidNet(args.depth, args.alpha, numberofclass, bottleneck=args.bottleneck) # for PyramidNet

    # get the number of model parameters
    
    print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
    
    # for training on multiple GPUs. 
    # Use CUDA_VISIBLE_DEVICES=0,1 to specify which GPUs to use
    #model = torch.nn.DataParallel(model).cuda()
    model = model.cuda()

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_err1 = checkpoint['best_err1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    print(model)

    cudnn.benchmark = True

    # define loss function (criterion); the loop below uses it in validate()
    criterion = nn.CrossEntropyLoss().cuda()

    for epoch in range(args.start_epoch, args.epochs):

        # evaluate on validation set
        err1 = validate(val_loader, model, criterion, epoch)
        # this line only built a tuple; a confusion-matrix helper call was
        # presumably intended (see the sketch after this example)
        # confusion_matrix = (val_loader, model, criterion, epoch)

        # remember best err1 and save checkpoint
        is_best = err1 <= best_err1
        best_err1 = min(err1, best_err1)
        print('Current best accuracy (error):', best_err1)
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_err1': best_err1,
        }, is_best)
    print('Best accuracy (error):', best_err1)
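The commented-out line in the loop above evidently meant to collect a confusion matrix. A minimal sketch of such a helper, assuming the model returns raw logits and the loader yields (input, target) batches; the name compute_confusion_matrix is an assumption:

import torch

def compute_confusion_matrix(loader, model, num_classes):
    # accumulate counts of (true class, predicted class) pairs
    cm = torch.zeros(num_classes, num_classes, dtype=torch.long)
    model.eval()
    with torch.no_grad():
        for inputs, targets in loader:
            inputs, targets = inputs.cuda(), targets.cuda()
            preds = model(inputs).argmax(dim=1)
            for t, p in zip(targets.cpu(), preds.cpu()):
                cm[t, p] += 1
    return cm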