Example #1
def cifar10(data_root,
            batch_size,
            num_workers,
            distributed,
            cutout=False,
            eval_only=True):
    """Build CIFAR-10 loaders with optional Cutout and DistributedSampler support.

    Returns only the test loader when eval_only is True (the default).
    """
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])
    if cutout:
        transform_train.transforms.append(utils.Cutout(n_holes=1, length=16))
    transform_test = transforms.Compose([
        transforms.ToTensor(),
    ])

    trainset = torchvision.datasets.CIFAR10(root=data_root,
                                            train=True,
                                            download=True,
                                            transform=transform_train)
    testset = torchvision.datasets.CIFAR10(root=data_root,
                                           train=False,
                                           download=True,
                                           transform=transform_test)

    train_sampler = None
    test_sampler = None
    if distributed:
        # NB: call train_sampler.set_epoch(epoch) at the start of every epoch
        # so that DistributedSampler reshuffles the data across epochs.
        rank, world_size = get_dist_info()
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            trainset, num_replicas=world_size, rank=rank)
        test_sampler = torch.utils.data.distributed.DistributedSampler(
            testset, num_replicas=world_size, rank=rank)

    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=batch_size,
                                              sampler=train_sampler,
                                              num_workers=num_workers,
                                              shuffle=(train_sampler is None))
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=batch_size,
                                             sampler=test_sampler,
                                             num_workers=num_workers)
    if eval_only:
        return testloader
    return trainloader, testloader, train_sampler, test_sampler
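
Every example on this page appends a utils.Cutout transform, but none of them defines it: each project ships its own implementation of the Cutout augmentation (DeVries & Taylor, 2017), which zeroes out random square patches of the image tensor. Below is a minimal sketch compatible with the call sites on this page; the argument order is an assumption chosen so that Cutout(20), Cutout(args.cutout_length) and Cutout(n_holes=1, length=16) all resolve sensibly, and the real utils modules may differ.

import numpy as np
import torch

class Cutout:
    """Zero out n_holes square patches of side `length` in a (C, H, W) tensor."""

    def __init__(self, length=16, n_holes=1):
        self.length = length
        self.n_holes = n_holes

    def __call__(self, img):
        # Expects a tensor, i.e. it must be placed after transforms.ToTensor().
        h, w = img.size(1), img.size(2)
        mask = np.ones((h, w), np.float32)
        for _ in range(self.n_holes):
            y, x = np.random.randint(h), np.random.randint(w)
            y1, y2 = np.clip([y - self.length // 2, y + self.length // 2], 0, h)
            x1, x2 = np.clip([x - self.length // 2, x + self.length // 2], 0, w)
            mask[y1:y2, x1:x2] = 0.0
        # Broadcast the 2-D mask over all channels and apply it.
        return img * torch.from_numpy(mask).expand_as(img)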
Example #2
def _data_transforms_tinyimagenet(args):
    """Get data transforms for tinyimagenet."""
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]

    train_transform = transforms.Compose([
        transforms.RandomCrop(64, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])

    if args.cutout:
        train_transform.transforms.append(utils.Cutout(args.cutout_length))

    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])

    return train_transform, valid_transform
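
The factory above only builds the transform pipelines; hooking them up to loaders is left to the caller. A sketch of typical usage, assuming the standard tiny-imagenet-200 ImageFolder layout (the paths and the args object are illustrative, not from the example):

import torchvision
from torch.utils.data import DataLoader

train_tf, valid_tf = _data_transforms_tinyimagenet(args)
train_set = torchvision.datasets.ImageFolder('./data/tiny-imagenet-200/train',
                                             transform=train_tf)
valid_set = torchvision.datasets.ImageFolder('./data/tiny-imagenet-200/val',
                                             transform=valid_tf)
train_loader = DataLoader(train_set, batch_size=128, shuffle=True, num_workers=4)
valid_loader = DataLoader(valid_set, batch_size=128, num_workers=4)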
Example #3
def _data_transforms_cifar10(args):
    """Get data transforms for cifar10."""
    mean = [0.49139968, 0.48215827, 0.44653124]
    std = [0.24703233, 0.24348505, 0.26158768]

    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])

    if hasattr(args, 'cutout') and args.cutout:
        train_transform.transforms.append(utils.Cutout(args.cutout_length))

    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])

    return train_transform, valid_transform
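
The hard-coded numbers above are the per-channel mean and the pooled per-pixel standard deviation of the CIFAR-10 training set after scaling to [0, 1]. If you ever need to recompute them (e.g. for another dataset), a small sketch; the './data' root is a placeholder:

import torch
import torchvision
import torchvision.transforms as transforms

dataset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True,
                                       transform=transforms.ToTensor())
loader = torch.utils.data.DataLoader(dataset, batch_size=1024)
n, mean, sq = 0, torch.zeros(3), torch.zeros(3)
for imgs, _ in loader:
    b = imgs.size(0)
    flat = imgs.view(b, 3, -1)
    mean += flat.mean(dim=2).sum(dim=0)      # accumulate per-image channel means
    sq += flat.pow(2).mean(dim=2).sum(dim=0)
    n += b
mean /= n
std = (sq / n - mean.pow(2)).sqrt()          # pooled std over all training pixels
print(mean.tolist(), std.tolist())  # approx. [0.4914, 0.4822, 0.4465], [0.2470, 0.2435, 0.2616]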
Example #4
def main():

    # Initialize the folder in which all training results will be saved
    model_save_dir = './models/imagenet-{}'.format(
        datetime.now().strftime('%Y%m%d-%H%M%S'))
    if not os.path.exists(model_save_dir):
        os.makedirs(model_save_dir)

    logger = create_logger(filename=os.path.join(model_save_dir, 'log.log'),
                           logger_prefix=__file__)

    model = MobileNetV3Large(n_classes=1000)
    if torch.cuda.device_count() > 1:
        logger.info('Parallelizing over {} available GPUs'.format(
            torch.cuda.device_count()))
        model = nn.DataParallel(model)

    criterion = nn.CrossEntropyLoss()

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=0.1,
                                momentum=0.9,
                                weight_decay=1e-5)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    cudnn.benchmark = True

    normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])

    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomRotation(3),
        transforms.RandomVerticalFlip(),
        transforms.ColorJitter(brightness=0.3,
                               contrast=0.3,
                               saturation=0.3,
                               hue=0.1),
        transforms.ToTensor(),
        utils.Cutout(20),  # Cutout runs on the raw tensor, before normalization
        normalizer,
    ])

    transform_test = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalizer,
    ])

    train_dataset = torchvision.datasets.ImageFolder('./data/imagenet/train',
                                                     transform=transform_train)

    valid_dataset = torchvision.datasets.ImageFolder('./data/imagenet/valid',
                                                     transform=transform_test)

    test_dataset = torchvision.datasets.ImageFolder('./data/imagenet/test',
                                                    transform=transform_test)

    scheduler = utils.OneCycleLR(
        optimizer,
        num_steps=int((len(train_dataset) / BATCH_SIZE) * EPOCHS),
        lr_range=(0.2, 0.8),
    )

    train_loader = DataLoader(train_dataset,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              num_workers=NUM_WORKERS,
                              pin_memory=True)

    valid_loader = DataLoader(valid_dataset,
                              batch_size=BATCH_SIZE,
                              num_workers=NUM_WORKERS,
                              pin_memory=True)

    test_loader = DataLoader(test_dataset,
                             batch_size=BATCH_SIZE,
                             num_workers=NUM_WORKERS,
                             pin_memory=True)

    epochs = EPOCHS

    trainer = Trainer(model=model,
                      criterion=criterion,
                      optimizer=optimizer,
                      scheduler=scheduler,
                      device=device,
                      train_loader=train_loader,
                      valid_loader=valid_loader,
                      test_loader=test_loader,
                      epochs=epochs,
                      logger=logger,
                      model_save_dir=model_save_dir)

    trainer.train()
    trainer.validate()
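
utils.OneCycleLR above is a project-local scheduler configured with num_steps and lr_range and stepped once per batch. If that helper is not available, PyTorch's built-in torch.optim.lr_scheduler.OneCycleLR expresses the same one-cycle policy; the mapping below is an approximation, not a drop-in replacement:

from torch.optim.lr_scheduler import OneCycleLR

scheduler = OneCycleLR(
    optimizer,
    max_lr=0.8,          # upper end of the lr_range above
    div_factor=4.0,      # initial lr = max_lr / div_factor = 0.2
    total_steps=int((len(train_dataset) / BATCH_SIZE) * EPOCHS),
)
# The built-in scheduler likewise expects scheduler.step() after every optimizer step.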
Example #5
cut_size = 44
total_epoch = 500

total_prediction_fps = 0 
total_prediction_n = 0

path = opt.dataset + '_' + opt.model
writer = SummaryWriter(log_dir=path)

# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
    transforms.RandomCrop(44),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    utils.Cutout(n_holes=1, length=13),
    # transforms.Normalize((0.589499, 0.45687565, 0.40699387),
    #                      (0.25357702, 0.23312956, 0.23275192)),
    transforms.Normalize((0.56010324, 0.43693307, 0.39122295),
                         (0.23726934, 0.21260591, 0.20737909)),
])

transform_test = transforms.Compose([
    transforms.TenCrop(44),
    # Normalize each of the ten crops and stack them into a (10, C, H, W) tensor.
    # NB: these statistics differ from the Normalize used in transform_train above.
    transforms.Lambda(lambda crops: torch.stack([
        transforms.Normalize(mean=[0.589499, 0.45687565, 0.40699387],
                             std=[0.25357702, 0.23312956, 0.23275192])(
            transforms.ToTensor()(crop)) for crop in crops])),
])

trainset = RAF(split = 'Training', transform=transform_train)
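
Because transform_test uses TenCrop, each test sample arrives as a (10, C, 44, 44) stack, so the evaluation loop has to fold the crop dimension into the batch and average the model outputs over the ten crops. A sketch of that pattern (net and testloader are placeholder names, not from the snippet):

with torch.no_grad():
    for inputs, targets in testloader:
        bs, ncrops, c, h, w = inputs.size()       # (B, 10, C, 44, 44)
        outputs = net(inputs.view(-1, c, h, w))   # fold crops into the batch dim
        outputs = outputs.view(bs, ncrops, -1).mean(dim=1)  # average over crops
        _, predicted = outputs.max(dim=1)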
Example #6
def main():

    # Initialize the folder in which all training results will be saved
    model_save_dir = './models/imagenet-{}'.format(
        datetime.now().strftime('%Y%m%d-%H%M%S'))
    if not os.path.exists(model_save_dir):
        os.makedirs(model_save_dir)

    logger = create_logger(filename=os.path.join(model_save_dir, 'log.log'),
                           logger_prefix=__file__)

    model = MobileNetV3Large(n_classes=1000)
    if torch.cuda.device_count() > 1:
        logger.info('Parallelizing over {} available GPUs'.format(
            torch.cuda.device_count()))
        model = nn.DataParallel(model)

    criterion = nn.CrossEntropyLoss()

    optimizer = torch.optim.RMSprop(model.parameters(),
                                    lr=0.1,
                                    alpha=0.9999,
                                    momentum=0.9,
                                    weight_decay=1e-5)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=3,
                                                gamma=0.01)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])

    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(224, scale=(0.8, 1.0)),
        transforms.RandomRotation(3),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=0.3,
                               contrast=0.3,
                               saturation=0.3,
                               hue=0.1),
        transforms.ToTensor(),
        utils.Cutout(20),
        normalizer,
    ])

    transform_valid = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalizer,
    ])

    train_val_dataset = torchvision.datasets.ImageFolder(
        './data/imagenet/train', transform=transform_train)

    test_dataset = torchvision.datasets.ImageFolder('./data/imagenet/valid',
                                                    transform=transform_valid)

    # Hold out 50,000 shuffled training images as a validation split
    dataset_size = len(train_val_dataset)
    indices = list(range(dataset_size))
    random.shuffle(indices)

    train_indices = indices[50000:]
    valid_indices = indices[:50000]

    train_sampler = SubsetRandomSampler(train_indices)
    valid_sampler = SubsetRandomSampler(valid_indices)

    train_loader = DataLoader(train_val_dataset,
                              batch_size=BATCH_SIZE,
                              sampler=train_sampler,
                              num_workers=NUM_WORKERS)

    # NB: valid_loader draws from train_val_dataset, so validation images also
    # receive the training augmentations defined in transform_train.
    valid_loader = DataLoader(train_val_dataset,
                              batch_size=BATCH_SIZE,
                              sampler=valid_sampler,
                              num_workers=NUM_WORKERS)

    test_loader = DataLoader(test_dataset,
                             batch_size=BATCH_SIZE,
                             num_workers=NUM_WORKERS)

    epochs = EPOCHS

    trainer = Trainer(model=model,
                      criterion=criterion,
                      optimizer=optimizer,
                      scheduler=scheduler,
                      device=device,
                      train_loader=train_loader,
                      valid_loader=valid_loader,
                      test_loader=test_loader,
                      epochs=epochs,
                      logger=logger,
                      model_save_dir=model_save_dir)

    trainer.train()
    trainer.validate()
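
Trainer, create_logger, MobileNetV3Large and the BATCH_SIZE / NUM_WORKERS / EPOCHS constants come from the surrounding project and are not shown on this page. For orientation only, here is a minimal, hypothetical sketch of the loop a Trainer with this constructor signature typically wraps; the project's actual class is surely richer (checkpoint selection, metrics, a test() method):

import os
import torch

class Trainer:
    """Hypothetical minimal train/validate loop matching the calls above."""

    def __init__(self, model, criterion, optimizer, scheduler, device,
                 train_loader, valid_loader, test_loader, epochs,
                 logger, model_save_dir):
        self.model, self.criterion = model, criterion
        self.optimizer, self.scheduler = optimizer, scheduler
        self.device, self.epochs, self.logger = device, epochs, logger
        self.train_loader, self.valid_loader = train_loader, valid_loader
        self.test_loader, self.model_save_dir = test_loader, model_save_dir

    def train(self):
        self.model.to(self.device)
        for epoch in range(self.epochs):
            self.model.train()
            for inputs, targets in self.train_loader:
                inputs = inputs.to(self.device)
                targets = targets.to(self.device)
                self.optimizer.zero_grad()
                loss = self.criterion(self.model(inputs), targets)
                loss.backward()
                self.optimizer.step()
                self.scheduler.step()  # per batch for one-cycle; StepLR steps per epoch
            self.logger.info('epoch {}: last batch loss {:.4f}'.format(epoch, loss.item()))
            torch.save(self.model.state_dict(),
                       os.path.join(self.model_save_dir, 'checkpoint.pth'))

    def validate(self):
        self.model.eval()
        correct = total = 0
        with torch.no_grad():
            for inputs, targets in self.valid_loader:
                inputs = inputs.to(self.device)
                targets = targets.to(self.device)
                preds = self.model(inputs).argmax(dim=1)
                correct += (preds == targets).sum().item()
                total += targets.size(0)
        self.logger.info('validation accuracy: {:.2%}'.format(correct / total))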