def main():
    global args, best_prec1, globaliter_train, globaliter_val
    globaliter_train = 0
    globaliter_val = 0
    args = parser.parse_args()
    print(args)
    # create model
    num_classes = args.num_classes
    if args.transfer != "":
        num_classes = 365  # the transfer checkpoint was trained on Places365
    print("=> creating model '{}'".format(args.arch))
    if args.arch.lower().startswith('wideresnet'):
        # a customized resnet model with last feature map size as 14x14 for better class activation mapping
        if args.arch.lower() == 'wideresnet50':
            model = wideresnet.resnet50(num_classes=num_classes)
        elif args.arch.lower() == 'wideresnet18':
            model = wideresnet.resnet18(num_classes=num_classes)
    else:
        model = models.__dict__[args.arch](num_classes=num_classes)
    
    print(model)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))


    if args.transfer != "":
        if os.path.isfile(args.transfer):
            print("=> loading checkpoint '{}'".format(args.transfer))
            checkpoint = torch.load(args.transfer, map_location=lambda storage, loc: storage)
            state_dict = {str.replace(k, 'module.', ''): v for k, v in checkpoint['state_dict'].items()}
            model.load_state_dict(state_dict)
            # print("=> loaded checkpoint '{}' (epoch {})"
            #       .format(args.transfer, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.transfer))
        for param in model.parameters():
            param.requires_grad = False
        if args.freeze == 2:  # assumed CLI flag; the original referenced an undefined `freze`
            for param in model.layer4.parameters():
                param.requires_grad = True
        if args.arch.lower() == 'wideresnet50':
            model.fc = nn.Linear(2048, 2)  # resnet50 feature dim (the original's 2040 looks like a typo)
        else:
            model.fc = nn.Linear(512, 2)  # resnet18 feature dim
        model.fc.reset_parameters()
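        # Sanity check (illustrative addition, not in the original): after the
        # freeze/replace above, only the new head (plus layer4 when
        # args.freeze == 2) should remain trainable.
        trainable = [name for name, p in model.named_parameters() if p.requires_grad]
        print("trainable parameters:", trainable)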
def load_model(model_file):
    if args.model == 'resnet18':
        model = wideresnet.resnet18(num_classes=365)
    if args.model == 'resnet50':
        model = wideresnet.resnet50(num_classes=365)
    checkpoint = torch.load(model_file,
                            map_location=lambda storage, loc: storage)
    state_dict = {
        str.replace(k, 'module.', ''): v
        for k, v in checkpoint['state_dict'].items()
    }
    model.load_state_dict(state_dict)
    model.eval()
    return model
def load_model(path, model):
    model_file = path
    if model == 'resnet18':
        model = wideresnet.resnet18(num_classes=365)
    if model == 'resnet50':
        model = wideresnet.resnet50(num_classes=365)
    checkpoint = torch.load(model_file,
                            map_location=lambda storage, loc: storage)
    state_dict = {
        str.replace(k, 'module.', ''): v
        for k, v in checkpoint['state_dict'].items()
    }
    #model.load_state_dict(checkpoint['state_dict'])
    model.load_state_dict(state_dict)
    # convert legacy BatchNorm modules in place (helper sketched below); the
    # original looped over the top-level children, which was redundant
    recursion_change_bn(model)
    model.eval()
    return model
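
# recursion_change_bn is called above but not defined in this listing. A
# minimal sketch following the places365 demo code: it walks the module tree
# and adds the track_running_stats attribute that newer torchvision BatchNorm
# layers expect, mutating the model in place.
def recursion_change_bn(module):
    if isinstance(module, torch.nn.BatchNorm2d):
        module.track_running_stats = 1
    else:
        for name, child in module._modules.items():
            module._modules[name] = recursion_change_bn(child)
    return module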
def main():
    global args, best_prec1
    args = parser.parse_args()
    print(args)
    torch.cuda.set_device(ini_device)
    # create model
    print("=> creating model '{}'".format(args.arch))
    if args.arch.lower().startswith('wideresnet'):
        # a customized resnet model with last feature map size as 14x14 for better class activation mapping
        model = wideresnet.resnet50(num_classes=args.num_classes)
    elif args.arch.lower().startswith('se'):
        model = SENet.se_resnet(num_classes=args.num_classes)
    else:
        model = models.__dict__[args.arch](num_classes=args.num_classes)

    print('#parameters:', sum(param.numel() for param in model.parameters()))

    if args.arch.lower().startswith('alexnet') or args.arch.lower().startswith(
            'vgg'):
        model.features = torch.nn.DataParallel(model.features, device_ids)
        model.to(ini_device)
    else:
        model = torch.nn.DataParallel(model, device_ids).to(ini_device)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
            del checkpoint
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
            return
    else:
        print(model)

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().to(ini_device)

    if args.evaluate:
        #validate(val_loader, model, criterion)

        trainMidOutputs = getMidOutputs(train_loader, model, 15)
        valMidOutputs = getMidOutputs(val_loader, model, 15)
        fcModel = SENet.simpleFcNet(365)
        fcModel = torch.nn.DataParallel(fcModel, device_ids).cuda()
        optimizer = torch.optim.SGD(fcModel.parameters(),
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
        num_epochs = 180
        print('init train acc')
        validate(trainMidOutputs, fcModel, criterion)
        print('init val acc')
        validate(valMidOutputs, fcModel, criterion)

        trainFc(trainMidOutputs, 1e-5, num_epochs, criterion, optimizer,
                fcModel, valMidOutputs)

        validate(trainMidOutputs, fcModel, criterion)
        validate(valMidOutputs, fcModel, criterion)

        return
    else:

        optimizer = torch.optim.SGD(model.parameters(),
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
        for epoch in range(args.start_epoch, args.epochs):
            adjust_learning_rate(optimizer, epoch)

            # train for one epoch
            train(train_loader, model, criterion, optimizer, epoch)
            # evaluate on validation set
            prec1 = validate(val_loader, model, criterion)

            # remember best prec@1 and save checkpoint
            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec1,
                }, is_best, args.arch.lower())
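
# save_checkpoint is called throughout but not shown. A minimal sketch
# following the standard PyTorch ImageNet example; the filename scheme is an
# assumption, and some call sites above pass extra keyword arguments
# (epoch=..., tencrop=...) that would extend this signature.
import shutil

def save_checkpoint(state, is_best, prefix):
    filename = prefix + '_latest.pth.tar'
    torch.save(state, filename)
    if is_best:
        # keep a separate copy of the best-performing weights
        shutil.copyfile(filename, prefix + '_best.pth.tar')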
def main():
    global args, best_prec1
    args = parser.parse_args()
    print(args)
    # create model
    if args.arch.lower().startswith('wideresnet'):
        # a customized resnet model with last feature map size as 14x14 for better class activation mapping
        print("=> creating model '{}'".format(args.arch))
        model = wideresnet.resnet50(num_classes=args.num_classes)
    elif args.evaluate:
        print("resuming model for validation")
        model = models.__dict__[args.arch](num_classes=args.num_classes)
        model = torch.nn.DataParallel(model).cuda()

        checkpoint = torch.load(args.validation_model)

        args.start_epoch = checkpoint['epoch']
        best_prec1 = checkpoint['best_prec1']
        model.load_state_dict(checkpoint['state_dict'])

        print("=> loaded checkpoint of validation'{}' (epoch {})".format(
            args.resume, checkpoint['epoch']))
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch](num_classes=args.num_classes)

    if args.arch.lower().startswith('alexnet') or args.arch.lower().startswith(
            'vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    elif args.evaluate:
        pass  # model was already wrapped in DataParallel above
    else:
        model = torch.nn.DataParallel(model).cuda()

    print(model)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)

            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])

            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
            num_ftrs = model.module.fc.in_features
            model.module.fc = torch.nn.Linear(num_ftrs, 4)
            model.cuda()
            print("final layer replaced with 4 neurons")
            print(model)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # NOTE: these GetDataset objects reference an undefined `transform` and are
    # not used by the loaders below, which read from ImageFolder instead.
    train_dataset = GetDataset(
        csv_file='/scratch/shubodh/places365/rapyuta4_classes/csv_data_labels/train_all.csv',
        root_dir='/scratch/shubodh/places365/rapyuta4_classes/train_all',
        transform=transform)
    test_dataset = GetDataset(
        csv_file='/scratch/satyajittourani/data_raputa/raputa_test/raputa_test.csv',
        root_dir='/scratch/satyajittourani/data_raputa/raputa_test',
        transform=transform)

    train_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    # NOTE: the per-group "lr" of 1.0 overrides the args.lr default passed
    # below (until adjust_learning_rate rewrites it each epoch)
    optimizer = torch.optim.SGD([
        {
            "params": model.module.fc.parameters(),
            "lr": 1.0
        },
    ],
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
            }, is_best, args.arch.lower())
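
# validate above reports prec@1 computed with a top-k helper that is not shown
# in this listing. A minimal sketch following the standard ImageNet example:
def accuracy(output, target, topk=(1,)):
    """Compute precision@k for the given values of k."""
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)  # top-k class indices per sample
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res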
def main():
    global args, best_prec1
    args = parser.parse_args()
    print(args)
    # create model
    print("=> creating model '{}'".format(args.arch))
    if args.arch.lower().startswith('wideresnet'):
        # a customized resnet model with last feature map size as 14x14 for better class activation mapping
        model  = wideresnet.resnet50(num_classes=args.num_classes)
    else:
        model = models.__dict__[args.arch](num_classes=args.num_classes)

    if args.arch.lower().startswith('alexnet') or args.arch.lower().startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
    print(model)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(traindir, transforms.Compose([
            transforms.RandomResizedCrop(224),  # RandomSizedCrop in older torchvision
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),  # Scale in older torchvision
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
        }, is_best, args.arch.lower())
Exemple #7
0
def main():
    global args, best_prec1
    args = parser.parse_args()
    print(args)

    ## which model will be used (WideResNet/AlexNet need special handling;
    ## otherwise we just look the architecture up by name)
    # create model
    print("=> creating model '{}'".format(args.arch))
    if args.arch.lower().startswith('wideresnet'):
        # a customized resnet model with last feature map size as 14x14 for better class activation mapping
        model = wideresnet.resnet50(num_classes=args.num_classes)
    else:
        model = models.__dict__[args.arch](num_classes=args.num_classes)
        # model = resnet.resnet18(num_classes=args.num_classes)

    if args.arch.lower().startswith('alexnet') or args.arch.lower().startswith(
            'vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
    print(model)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    ## cuDNN autotuning makes training faster, but it only helps when all
    ## inputs have the same size
    cudnn.benchmark = True

    # Data loading code
    # data_dir = places_dir + '/places365_standard_home'
    data_dir = places_dir  # check if that works
    ## inside the data dir I have 2 folders - train & val
    traindir = os.path.join(data_dir, 'train')
    valdir = os.path.join(data_dir, 'val')

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    ## DataLoader provides an iterable over the given dataset
    train_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),  # RandomSizedCrop in older torchvision
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),  # Scale in older torchvision
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    ## define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # # check results with ADAM (plain, no changes)
    # optimizer = torch.optim.Adam(model.parameters())

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    accuracies_list = []
    for epoch in range(args.start_epoch, args.epochs):
        ## every 30 epochs the learning rate is divided by 10
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        accuracies_list.append("%.2f" % prec1.tolist())

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
            }, is_best, args.arch.lower())
    print("The best accuracy obtained during training is = {}".format(
        best_prec1))
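
# adjust_learning_rate is referenced in every training loop above. Per the
# comment in the loop ("every 30 epochs the learning rate is divided by 10"),
# a minimal step-decay sketch following the standard ImageNet example:
def adjust_learning_rate(optimizer, epoch):
    lr = args.lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr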
def main():
    global args
    global best_prec1, best_epoch1
    global best_prec5, best_epoch5
    global best_prec1_10crop, best_epoch1_10crop
    global best_prec5_10crop, best_epoch5_10crop
    args = parser.parse_args()
    print(args)

    assert args.batch_size >= 2, 'Error: batch size must be at least 2'
    assert args.sub_division >= 1, 'Error: sub-division must be at least 1'
    assert (args.batch_size % args.sub_division
            ) == 0, 'Error: batch size must be divisible by sub-division!'
    assert (
        args.batch_size / args.sub_division
    ) >= 2, 'Error: batch size divided by sub-division must be at least 2!'

    # create model
    print("=> creating model '{}'".format(args.arch))
    if args.use_custom_model:
        if args.arch.lower().startswith('wideresnet'):
            # a customized resnet model with last feature map size as 14x14 for better class activation mapping
            model = wideresnet.resnet50(num_classes=args.num_classes)
        else:
            model = models.__dict__[args.arch](num_classes=args.num_classes)
    else:
        model = custom_models.__dict__[args.arch](num_classes=args.num_classes,
                                                  pretrained=None)

    if args.arch.lower().startswith('alexnet') or args.arch.lower().startswith(
            'vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
    # print model

    if not os.path.isdir(args.save_path):
        os.mkdir(args.save_path)
    assert os.path.isdir(
        args.save_path), 'Error: no save directory! %s' % (args.save_path)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
    #                              weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            best_prec5 = checkpoint['best_prec5']
            best_epoch1 = checkpoint['best_epoch1']
            best_epoch5 = checkpoint['best_epoch5']
            best_prec1_10crop = checkpoint['best_prec1_10crop']
            best_prec5_10crop = checkpoint['best_prec5_10crop']
            best_epoch1_10crop = checkpoint['best_epoch1_10crop']
            best_epoch5_10crop = checkpoint['best_epoch5_10crop']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_data_transforms = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    val_data_transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])

    val_data_transforms_10crop = transforms.Compose([
        transforms.Resize(256),
        transforms.TenCrop(224),
        transforms.Lambda(lambda crops: torch.stack(
            [transforms.ToTensor()(crop) for crop in crops])),
        transforms.Lambda(
            lambda crops: torch.stack([normalize(crop) for crop in crops])),
    ])

    if args.dataset_format == 'places365':
        dataloader = places_dataset
        traindir = os.path.join(args.data, 'data_large')
        valdir = os.path.join(args.data, 'val_large')
        filelist = os.path.join(args.data, 'filelist')

        train_data_loader = places_dataset(traindir,
                                           filelist,
                                           train_data_transforms,
                                           train=True)
        val_data_loader = places_dataset(valdir,
                                         filelist,
                                         val_data_transforms,
                                         train=False)

        if not args.ten_crop_validation:
            val_data_loader_10crop = places_dataset(valdir,
                                                    filelist,
                                                    val_data_transforms_10crop,
                                                    train=False)

    elif args.dataset_format == 'imagenet':
        traindir = os.path.join(args.data, 'train')
        valdir = os.path.join(args.data, 'val')

        train_data_loader = datasets.ImageFolder(traindir,
                                                 train_data_transforms)
        val_data_loader = datasets.ImageFolder(valdir, val_data_transforms)

        if not args.ten_crop_validation:
            val_data_loader_10crop = datasets.ImageFolder(
                valdir, val_data_transforms_10crop)

    if not args.class_balanced_sampling:
        sampler_weights = make_weights_for_balanced_classes(train_data_loader)
        train_sampler = torch.utils.data.sampler.WeightedRandomSampler(
            sampler_weights, len(sampler_weights))
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(train_data_loader,
                                               batch_size=args.batch_size //
                                               args.sub_division,
                                               shuffle=(train_sampler is None),
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=train_sampler)

    val_loader = torch.utils.data.DataLoader(val_data_loader,
                                             batch_size=args.batch_size //
                                             (args.sub_division * 2),
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    if not args.ten_crop_validation:
        val_loader_10crop = torch.utils.data.DataLoader(
            val_data_loader_10crop,
            batch_size=args.batch_size // (args.sub_division * 2 * 8),
            shuffle=False,
            num_workers=args.workers,
            pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1, prec5 = validate(val_loader, model, criterion, tencrop=False)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        # best_prec1 = max(prec1, best_prec1)
        if prec1 > best_prec1:
            best_prec1 = prec1
            best_epoch1 = epoch
        if prec5 > best_prec5:
            best_prec5 = prec5
            best_epoch5 = epoch

        if not args.class_balanced_sampling:
            prec1_10crop, prec5_10crop = validate(val_loader_10crop,
                                                  model,
                                                  criterion,
                                                  tencrop=True)

            # remember best prec@1 and save checkpoint
            is_best_10crop = prec1_10crop > best_prec1_10crop
            # best_prec1 = max(prec1, best_prec1)
            if prec1_10crop > best_prec1_10crop:
                best_prec1_10crop = prec1_10crop
                best_epoch1_10crop = epoch
            if prec5_10crop > best_prec5_10crop:
                best_prec5_10crop = prec5_10crop
                best_epoch5_10crop = epoch
            state = {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'best_prec5': best_prec5,
                'best_epoch1': best_epoch1,
                'best_epoch5': best_epoch5,
                'best_prec1_10crop': best_prec1_10crop,
                'best_prec5_10crop': best_prec5_10crop,
                'best_epoch1_10crop': best_epoch1_10crop,
                'best_epoch5_10crop': best_epoch5_10crop,
                'optimizer_dict': optimizer.state_dict(),
            }

            save_checkpoint(state,
                            is_best_10crop,
                            args.arch.lower(),
                            tencrop=True)

        if not args.class_balanced_sampling:
            save_checkpoint(state, is_best, args.arch.lower(), tencrop=False)
        else:
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec1,
                    'best_prec5': best_prec5,
                    'best_epoch1': best_epoch1,
                    'best_epoch5': best_epoch5,
                    'optimizer_dict': optimizer.state_dict(),
                },
                is_best,
                args.arch.lower(),
                tencrop=False)

        print('The best test accuracy-top1: %f  epoch: %d' %
              (best_prec1, best_epoch1))
        print('The best test accuracy-top5: %f  epoch: %d' %
              (best_prec5, best_epoch5))

        print('The best test 10crop accuracy-top1: %f  epoch: %d' %
              (best_prec1_10crop, best_epoch1_10crop))
        print('The best test 10crop accuracy-top5: %f  epoch: %d' %
              (best_prec5_10crop, best_epoch5_10crop))
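
# make_weights_for_balanced_classes is used with the WeightedRandomSampler
# above but not defined here. A minimal sketch, assuming the dataset exposes
# (path, label) pairs via .imgs the way ImageFolder does:
def make_weights_for_balanced_classes(dataset):
    labels = [label for _, label in dataset.imgs]
    counts = {}
    for label in labels:
        counts[label] = counts.get(label, 0) + 1
    # weight each sample inversely to its class frequency so that every class
    # is drawn with roughly equal probability
    return [1.0 / counts[label] for label in labels]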
def main():
    global args, best_prec1
    args = parser.parse_args()
    print(args)
    # create model
    print("=> creating model '{}'".format(args.arch))
    if args.arch.lower().startswith('wideresnet'):
        # a customized resnet model with last feature map size as 14x14 for better class activation mapping
        model = wideresnet.resnet50(num_classes=args.num_classes)
    else:
        # model = models.__dict__[args.arch](num_classes=args.num_classes)
        model = resnet.resnet18(num_classes=args.num_classes)

    if args.arch.lower().startswith('alexnet') or args.arch.lower().startswith(
            'vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
    print(model)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    data_dir = places_dir + '/places365_standard_office'
    traindir = os.path.join(data_dir, 'train')
    valdir = os.path.join(data_dir, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    class ImageFolderWithPaths(datasets.ImageFolder):
        """Custom dataset that includes image file paths. Extends
        torchvision.datasets.ImageFolder
        """

        # override __getitem__, the method the dataloader calls
        def __getitem__(self, index):
            # this is what ImageFolder normally returns
            original_tuple = super(ImageFolderWithPaths,
                                   self).__getitem__(index)
            # the image file path
            path = self.imgs[index][0]
            # make a new tuple that includes original and the path
            tuple_with_path = (original_tuple + (path, ))
            return tuple_with_path

    train_dataset = ImageFolderWithPaths(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),  # RandomSizedCrop in older torchvision
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    val_dataset = ImageFolderWithPaths(
        valdir,
        transforms.Compose([
            transforms.Resize(256),  # Scale in older torchvision
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
            }, is_best, args.arch.lower())
    print("The best accuracy obtained during training is = {}".format(
        best_prec1))
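
# train(...) runs one epoch and is not shown in this listing. A minimal sketch
# following the standard ImageNet example loop (progress meters omitted; the
# ImageFolderWithPaths variant above would additionally yield a path per item):
def train(train_loader, model, criterion, optimizer, epoch):
    model.train()
    for i, (input, target) in enumerate(train_loader):
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        output = model(input)  # forward pass
        loss = criterion(output, target)
        optimizer.zero_grad()  # backward pass and parameter update
        loss.backward()
        optimizer.step()
        if i % 100 == 0:
            print('Epoch {} [{}/{}]  loss {:.4f}'.format(
                epoch, i, len(train_loader), loss.item()))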
def main():
    global args, best_prec1
    args = parser.parse_args()
    # print (args)
    # create model
    print("=> creating model '{}'".format(args.arch))
    if args.arch.lower().startswith('wideresnet_freeze'):
        
        model_file = 'wideresnet18_places365_1.pth.tar'
        model = wideresnet.resnet18(num_classes=365)
        checkpoint = torch.load(model_file, map_location=lambda storage, loc: storage)
        state_dict = {str.replace(k, 'module.', ''): v for k, v in checkpoint['state_dict'].items()}
        model.load_state_dict(state_dict)
        model.eval()
        for name, param in model.named_parameters():
            # freeze everything except the final classifier
            if "fc" not in name:
                param.requires_grad = False
        model = torch.nn.DataParallel(model).cuda()  # move to GPU, matching the other branch
    else:
        if args.arch.lower().startswith('wideresnet'):
            # a customized resnet model with last feature map size as 14x14 for better class activation mapping
            
            model  = wideresnet.resnet50(num_classes=args.num_classes)
        else:
            model = models.__dict__[args.arch](num_classes=args.num_classes)

        if args.arch.lower().startswith('alexnet') or args.arch.lower().startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    # print (model)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    print("DATA",args.data)
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(traindir, transforms.Compose([
            transforms.RandomResizedCrop(224),  # RandomSizedCrop in older torchvision
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),  # Scale in older torchvision
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
        }, is_best, args.arch.lower())
def main():
    global args, best_prec1
    args = parser.parse_args()
    print(args)
    # create model
    print(("=> creating model '{}'".format(args.arch)))
    if args.arch.lower().startswith("wideresnet"):
        # a customized resnet model with last feature map size as 14x14 for better class activation mapping
        model = wideresnet.resnet50(num_classes=args.num_classes)
    else:
        model = models.__dict__[args.arch](num_classes=args.num_classes)

    os.makedirs(os.path.join('models', args.arch.lower()), exist_ok=True)

    if args.arch.lower().startswith("alexnet") or args.arch.lower().startswith(
            "vgg"):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
    print(model)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint["epoch"]
            best_prec1 = checkpoint["best_prec1"]
            model.load_state_dict(checkpoint["state_dict"])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint["epoch"]))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, "train")
    valdir = os.path.join(args.data, "val")
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(
            traindir,
            transforms.Compose([
                transforms.RandomResizedCrop(224),  # RandomSizedCrop in older torchvision
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]),
        ),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True,
    )

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(
            valdir,
            transforms.Compose([
                transforms.Resize(256),  # Scale in older torchvision
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                normalize,
            ]),
        ),
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True,
    )

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(
        model.parameters(),
        args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay,
    )

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                "epoch": epoch + 1,
                "arch": args.arch,
                "state_dict": model.state_dict(),
                "best_prec1": best_prec1,
            },
            is_best,
            os.path.join('models', args.arch.lower(), args.arch.lower()),
            epoch=epoch)
def main():
    global args, best_prec1
    args = parser.parse_args()
    print(args)
    # create model
    print("=> creating model '{}'".format(args.arch))
    if args.arch.lower().startswith('wideresnet'):
        # a customized resnet model with last feature map size as 14x14 for better class activation mapping
        model = wideresnet.resnet50(num_classes=args.num_classes)
    else:
        model = models.__dict__[args.arch](num_classes=args.num_classes)

    if args.arch.lower().startswith('alexnet') or args.arch.lower().startswith(
            'vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
    print(model)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    dataset = datasets.ImageFolder(traindir)
    class_to_idx = dataset.class_to_idx
    with open('categories_places{}.txt'.format(len(class_to_idx)), 'w') as f:
        for clazz, idx in class_to_idx.items():
            f.write('/{}/{} {}\r\n'.format(clazz[0], clazz.replace('-', '/'),
                                           idx))

    train_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),  # RandomSizedCrop in older torchvision
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),  # Scale in older torchvision
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
            }, is_best, args.arch.lower())
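
# validate(...) is likewise not shown. A minimal sketch returning the average
# prec@1, built on the accuracy helper sketched earlier (criterion is kept for
# signature compatibility; loss reporting is omitted):
def validate(val_loader, model, criterion):
    model.eval()
    total, prec1_sum = 0, 0.0
    with torch.no_grad():
        for input, target in val_loader:
            input = input.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
            output = model(input)
            prec1, = accuracy(output, target, topk=(1,))
            prec1_sum += prec1.item() * input.size(0)
            total += input.size(0)
    print(' * Prec@1 {:.3f}'.format(prec1_sum / total))
    return prec1_sum / total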
def main():
    global args, best_prec1
    args = parser.parse_args()
    print(args)
    # create model
    print("=> creating model '{}'".format(args.arch))
    if args.arch.lower().startswith('wideresnet'):
        # a customized resnet model with last feature map size as 14x14 for better class activation mapping
        model = wideresnet.resnet50(pretrained=False,
                                    num_channels=args.num_channels,
                                    num_classes=args.num_classes)
        '''
        arch = 'resnet50'
        model_file = 'whole_%s_places365.pth.tar' % arch
        from functools import partial
        import pickle
        pickle.load = partial(pickle.load, encoding="latin1")
        pickle.Unpickler = partial(pickle.Unpickler, encoding="latin1")
        model = torch.load(model_file, map_location=lambda storage, loc: storage, pickle_module=pickle)
        '''
        print("this is resnet50")
    else:
        model = models.__dict__[args.arch](num_classes=args.num_classes)

    if args.arch.lower().startswith('alexnet') or args.arch.lower().startswith(
            'vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
        print("this is resnet50 with cuda")

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(
        args.data, 'place_data_train/RGB/')  #you may need to modify this path
    valdir = os.path.join(
        args.data, 'place_data_val/RGB/')  #you may need to modify this path
    if args.num_channels == 4:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406, 0.29],
                                         std=[0.229, 0.224, 0.225, 0.23])
    #normalize = transforms.Normalize(mean=[0.485],
    #                                 std=[0.229])
    if args.num_channels == 3:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    train_loader = torch.utils.data.DataLoader(
        ImageFolder(
            traindir,
            transform=transforms.Compose([
                transforms.RandomResizedCrop(224),  # RandomSizedCrop in older torchvision
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]),
            classifier_type=args.classifier_name,
            num_channels=args.num_channels),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=False)

    val_loader = torch.utils.data.DataLoader(
        ImageFolder(
            valdir,
            transform=transforms.Compose([
                transforms.Resize(256),  # Scale in older torchvision
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                normalize,
            ]),
            classifier_type=args.classifier_name,
            num_channels=args.num_channels),
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=False)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)
        torch.save(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1
            },
            args.arch.lower() + '_latest.pth.tar')
        print("===Done with training===")
        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
            }, is_best,
            args.classifier_name.lower() + '_' + args.arch.lower())
def main():
    global args, best_prec1
    args = parser.parse_args()
    print(args)
    # create model
    print("=> creating model '{}'".format(args.arch))
    if args.arch.lower().startswith('wideresnet'):
        # a customized resnet model with last feature map size as 14x14 for better class activation mapping
        model = wideresnet.resnet50(pretrained=True, num_classes=args.num_classes)
    else:
        model = models.__dict__[args.arch](num_classes=args.num_classes)

    # the checkpoint stores the weights under 'state_dict' with a 'module.'
    # prefix (see the other snippets above), so unwrap before loading; passing
    # the raw dict with strict=False would silently match nothing
    checkpoint = torch.load('resnet50_places365.pth.tar',
                            map_location=lambda storage, loc: storage)
    state_dict = {str.replace(k, 'module.', ''): v
                  for k, v in checkpoint['state_dict'].items()}
    model.load_state_dict(state_dict, strict=False)
    model.fc = nn.Linear(in_features=model.fc.in_features, out_features=205)
    # for para in list(model.parameters())[:-2]:
    #     para.requires_grad = False

    if args.arch.lower().startswith('alexnet') or args.arch.lower().startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()

    print(model)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    trainlist = args.train
    vallist = args.val
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_loader = torch.utils.data.DataLoader(
        VideoNet(trainlist, transform = transforms.Compose([
            transforms.RandomResizedCrop(224),  # RandomSizedCrop in older torchvision
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        VideoNet(vallist, transform = transforms.Compose([
            transforms.Resize(256),  # Scale in older torchvision
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
        }, is_best, args.arch.lower())
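# adjust_learning_rate is referenced but not defined in this excerpt. A common
# sketch (an assumption, matching the reference ImageNet recipe): decay the
# initial learning rate by 10x every 30 epochs.
def adjust_learning_rate(optimizer, epoch, base_lr=0.1, decay_every=30):
    lr = base_lr * (0.1 ** (epoch // decay_every))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr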
Exemple #15
def main():
    global args, best_prec1

    print('parsing args...')
    args = parser.parse_args()

    # create model
    print("=> creating model '{}'".format(args.arch))
    if args.arch.lower().startswith('wideresnet'):
        # a customized resnet model with last feature map size as 14x14 for better class activation mapping
        model = wideresnet.resnet50(num_classes=args.num_classes)
    else:
        model = models.__dict__[args.arch](num_classes=args.num_classes)

    if args.arch.lower().startswith('alexnet') or args.arch.lower().startswith(
            'vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
    print(model)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True
    # Data loading code
    #train_loader = Provider(phase = 'train', batch_size=args.batch_size, workers=args.workers)
    #val_loader = Provider(phase = 'test', batch_size=args.batch_size)
    train_loader = torch.utils.data.DataLoader(Mydataset(phase='train'),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(Mydataset(phase='test'),
                                             batch_size=args.batch_size,
                                             shuffle=True,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,  # resume from the next epoch, as in the other examples
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
            }, is_best, './snapshot/' + args.arch.lower() + '_' + str(epoch))
Exemple #16
def main():
    global args, best_prec1
    args = parser.parse_args()
    print(args)

    path = os.path.join(args.save_path, args.arch, args.pretrained_dataset)
    if not os.path.isdir(path):
        os.makedirs(path)  # create intermediate directories as needed
    assert os.path.isdir(path), 'Error: no save directory! %s' % (path)

    # create model
    print("=> creating model '{}'".format(args.arch))
    if args.pretrained_dataset == 'places365':
        if args.arch.lower().startswith('wideresnet'):
            # a customized resnet model with last feature map size as 14x14 for better class activation mapping
            model = wideresnet.resnet50(num_classes=args.num_classes)
        else:
            model = models.__dict__[args.arch](num_classes=args.num_classes)

    elif args.pretrained_dataset == 'imagenet':
        model = models.__dict__[args.arch](pretrained=True)
        # model = models.__dict__[args.arch](num_classes=1000, pretrained='imagenet')

    if args.arch.lower().startswith('alexnet') or args.arch.lower().startswith(
            'vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()

    print(model)

    # optionally resume from a checkpoint
    if args.resume and args.pretrained_dataset == 'places365':
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    #####################################################################################
    # Extract Feature
    if args.arch.lower().startswith('alexnet') or args.arch.lower().startswith(
            'vgg'):
        model.classifier = model.classifier[:-1]
    else:
        new_classifier = nn.Sequential(*list(model.module.fc.children())[:-1])
        model.module.fc = new_classifier
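        # nn.Linear has no children, so the fc head becomes an empty (identity)
        # Sequential and forward() now returns the pooled penultimate features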
    #####################################################################################

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    # optimizer = torch.optim.SGD(model.parameters(), args.lr,
    #                             momentum=args.momentum,
    #                             weight_decay=args.weight_decay)
    optimizer = None
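    # (no optimizer: this run presumably only performs forward passes to
    # extract features, so no weights are updated)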

    train(train_loader, model, criterion, optimizer, args.start_epoch)
    prec1 = validate(val_loader, model, criterion)
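# The prec@1 values above come from an accuracy helper that is not shown. The
# standard top-k sketch from the PyTorch ImageNet example:
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k."""
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res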
Exemple #17
def main():
    global best_prec1
    global start_epoch

    # create model
    print("=> creating model '{}'".format(arch))
    if arch.lower().startswith('resnet'):
        # a customized resnet model with last feature map size as 14x14 for better class activation mapping
        model = wideresnet.resnet50(pretrained=True, num_classes=1000)
        # print (model)
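        # adaptive pooling makes the backbone input-size agnostic (allowing the
        # larger 336x336 crops below); fc + classifier form a 256-d bottleneck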
        model.avgpool = nn.AdaptiveAvgPool2d(1)
        model.fc = nn.Linear(2048, 256)
        model.classifier = nn.Linear(256, num_classes)
        # print (model.fc)
    elif arch.lower().startswith('bcnn'):
        model = BilinearCNN.BilinearCNN(num_classes)
    elif arch.lower().startswith('inception'):
        model = inceptionv3.inception_v3(pretrained=True)
        model.fc = nn.Linear(2048, num_classes)
    elif arch.lower().startswith('zrmodel'):
        model = LeNet32(1)
    else:
        # the 'whole_*' Places365 releases are whole pickled models rather
        # than bare state_dicts, so load the module directly
        model = torch.load('whole_alexnet_places365_python36.pth')
        model.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    model = model.cuda()
    # model = nn.DataParallel(model, device_ids=[9])
    # model = torch.nn.DataParallel(model).cuda()
    print(model)

    # optionally resume from a checkpoint
    if resume:
        if os.path.isfile(resume):
            print("=> loading checkpoint '{}'".format(resume))
            checkpoint = torch.load(resume)
            start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(resume))
    #
    # cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(data_path, 'train')
    valdir = os.path.join(data_path, 'val')
    # normalize = transforms.Normalize(mean=[0.1680733,0.1680733,0.1680733],
    #                                  std=[0.15840427,0.15840427,0.15840427])
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    # train_loader = torch.utils.data.DataLoader(
    #     datasets.ImageFolder(traindir, transforms.Compose([
    #         # transforms.Lambda(lambda img:_cloud_crop(img)),
    #         # transforms.RandomResizedCrop(336, scale=(0.8, 1.0)),
    #         transforms.RandomResizedCrop(336),
    #         # transforms.CenterCrop(336),
    #         transforms.RandomHorizontalFlip(),
    #         # transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
    #         transforms.ToTensor(),
    #         normalize,
    #     ])),
    #     batch_size=batch_size, shuffle=True,
    #     num_workers=data_loader_workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(384),
            transforms.CenterCrop(336),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=batch_size,
                                             shuffle=True,
                                             num_workers=data_loader_workers,
                                             pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion_cel = nn.CrossEntropyLoss().cuda()
    criterion_tml = nn.TripletMarginLoss(margin=1.0, p=2).cuda()
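    # two criteria: triplet margin loss on embeddings plus cross-entropy on
    # logits; both are passed to train() below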
    # optimizer = torch.optim.SGD(model.parameters(), lr,
    #                             momentum=momentum,
    #                             weight_decay=weight_decay)

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=lr,
                                 betas=(0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=0,
                                 amsgrad=False)
    # optimizer = torch.optim.SGD([
    #             {'params': model.features.parameters()},
    #             {'params': model.classifier.parameters(), 'lr': lr}
    #         ], lr=lr, momentum=momentum,weight_decay =weight_decay)
    if evaluate:
        validate(val_loader, model, criterion_cel)
        return

    for epoch in range(start_epoch, epochs):
        adjust_learning_rate(optimizer, epoch)
        train_loader = DataLoader(
            root_path=traindir,
            batch_size=batch_size,
            num_workers=data_loader_workers,
            transforms=transforms.Compose([
                # transforms.Lambda(lambda img:_cloud_crop(img)),
                # transforms.RandomResizedCrop(336, scale=(0.8, 1.0)),
                transforms.RandomResizedCrop(336),
                # transforms.CenterCrop(336),
                transforms.RandomHorizontalFlip(),
                # transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
                transforms.ToTensor(),
                normalize,
            ]),
            shuffle=True)
        # train for one epoch
        train(train_loader, model, criterion_tml, criterion_cel, optimizer,
              epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion_cel)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
            }, is_best, epoch, arch.lower())
def main():
    global args, best_prec1
    args = parser.parse_args()
    print(args)
    # create model
    print("=> creating model '{}'".format(args.arch))
    if args.arch.lower().startswith('wideresnet'):
        # a customized resnet model with last feature map size as 14x14 for better class activation mapping
        model = wideresnet.resnet50(num_classes=args.num_classes)
    else:
        model = models.__dict__[args.arch](num_classes=args.num_classes)

    if args.arch.lower().startswith('alexnet') or args.arch.lower().startswith(
            'vgg'):
        model.features = torch.nn.DataParallel(model.features)
    else:
        model = torch.nn.DataParallel(model)

    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')
    model.to(device)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    #cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().to(device)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.evaluate:
        validate(val_loader, model, criterion, device)
        return

    for epoch in range(args.start_epoch, args.epochs):
        #adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        thisLoss = train(train_loader, model, criterion, optimizer, epoch,
                         device)

        # evaluate on validation set
        prec1, losstest, actual, predicted = validate(val_loader, model,
                                                      criterion, device)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'losses': losstest,
            }, is_best, args.arch.lower())
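# validate is assumed to return (prec1, loss, actual, predicted) in the
# example above; a minimal evaluation-loop sketch under that assumption:
def validate(val_loader, model, criterion, device):
    model.eval()
    actual, predicted = [], []
    loss_sum, correct, total = 0.0, 0, 0
    with torch.no_grad():
        for images, target in val_loader:
            images, target = images.to(device), target.to(device)
            output = model(images)
            loss_sum += criterion(output, target).item() * target.size(0)
            preds = output.argmax(dim=1)
            correct += (preds == target).sum().item()
            total += target.size(0)
            actual.extend(target.tolist())
            predicted.extend(preds.tolist())
    prec1 = 100.0 * correct / total
    return prec1, loss_sum / total, actual, predicted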
def main():
    global args, best_prec1
    args = parser.parse_args()
    #print (args)
    # create model
    print("=> creating model '{}'".format(args.arch))
    if args.arch.lower().startswith('wideresnet'):
        # a customized resnet model with last feature map size as 14x14 for better class activation mapping
        model = wideresnet.resnet50(num_classes=args.num_classes)
    else:
        model = models.__dict__[args.arch](num_classes=args.num_classes)

    if args.arch.lower().startswith('alexnet') or args.arch.lower().startswith(
            'vgg'):
        model.features = torch.nn.DataParallel(model.features)
    else:
        model = torch.nn.DataParallel(model)

    model = model.cuda()
    device = torch.device('cuda')

    # update the fully connected layer
    nInF, nOutF = model.module.fc.in_features, args.num_outClasses  # keep the original input width; resize output to num_outClasses
    model.module.fc = torch.nn.Linear(nInF, nOutF, bias=True)
    print(model)

    #print (model)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location=device)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

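    # freeze the whole backbone and fine-tune only the replaced fc layer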
    for param in model.module.parameters():
        param.requires_grad = False
    for param in model.module.fc.parameters():
        param.requires_grad = True

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    #scenicDF = pd.read_csv(args.data+'/newImagesWithJpg.tsv',sep='\t')
    #scenicDF = scenicDF[['Images','Average']]
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().to(device)

    # pass only the trainable (fc) parameters to the optimizer
    optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    actualFull, predicFull = [], []
    if args.evaluate:
        prec1, losses, actual, predicted = validate(val_loader, model,
                                                    criterion, device)
        actualFull.extend(actual)
        predicFull.extend(predicted)

    else:
        lossesT, lossesV = [], []
        for epoch in range(args.start_epoch, args.epochs):
            adjust_learning_rate(optimizer, epoch)

            # train for one epoch
            losses, actual, predicted = train(train_loader, model, criterion,
                                              optimizer, epoch, device)
            lossesT.append(np.sqrt(float(losses.val)))
            # extend, not append: append would store a generator object, not the values
            actualFull.extend(actual)
            predicFull.extend(predicted)

            # evaluate on validation set
            prec1, losses, actual, predicted = validate(
                val_loader, model, criterion, device)
            lossesV.append(np.sqrt(float(losses.val)))
            actualFull.extend(actual)
            predicFull.extend(predicted)

            # remember best prec@1 and save checkpoint
            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec1,
                }, is_best, args.arch.lower())


        # plot stuff
        # loss vs epoch (only for training, not for validation-only processing)
        epochs = range(args.start_epoch, args.epochs)
        plt.plot(epochs, lossesT, 'b', label='Train')
        plt.plot(epochs, lossesV, 'g', label='Test')

        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.legend(loc='best')
        plt.savefig('errorVsEpoch.pdf')

    plt.clf()  # clear
    # predicted vs actual scenery score
    plt.scatter(actualFull, predicFull)  # unordered points: scatter, not a line plot
    plt.xlabel('Actual Scenery Score')
    plt.ylabel('Predicted Scenery Score')
    plt.savefig('predictedVsActual.pdf')

    plt.clf()
    # (predicted - actual) vs actual
    plt.scatter(actualFull, np.array(predicFull) - np.array(actualFull))
    plt.xlabel('Actual Scenery Score')
    plt.ylabel('(Predicted-Actual) Scenery Score')
    plt.savefig('diffVsActual.pdf')

    myOutPerfDF = pd.DataFrame(list(zip(actualFull, predicFull)),
                               columns=['Actual', 'Predicted'])
    myOutPerfDF.to_csv('output_actualPredicted.csv')

    if not args.evaluate:
        # epochs/lossesT/lossesV exist only after a training run
        myOutLossDF = pd.DataFrame(list(zip(epochs, lossesT, lossesV)),
                                   columns=['Epoch', 'LossTrain', 'LossVal'])
        myOutLossDF.to_csv('output_lossEpoch.csv')