Example 1
def mnist_aug_loader(train_size, test_size, args):
    transform_train = transforms.Compose([
        transforms.RandomCrop(28, padding=4),
        # https://github.com/hwalsuklee/tensorflow-mnist-cnn/blob/master/mnist_data.py
        #transforms.RandomAffine(translate=0.12),
        transforms.RandomRotation((-15, 15)),
        transforms.ToTensor(),
        transforms.Normalize((0.1307, ), (0.3081, )),
        #transforms.RandomErasing(probability=args.p, sh=args.sh, r1=args.r1, mean=[0.4914]),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307, ), (0.3081, )),
    ])
    trainset = datasets.MNIST('./data/MNIST',
                              train=True,
                              download=True,
                              transform=transform_train)
    train_loader = data.DataLoader(trainset,
                                   batch_size=train_size,
                                   shuffle=True)

    testset = datasets.MNIST(root='./data/MNIST',
                             train=False,
                             download=False,
                             transform=transform_test)
    test_loader = data.DataLoader(testset, batch_size=test_size, shuffle=False)
    return train_loader, test_loader
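
# Note (added, not part of the original example): the commented-out RandomAffine
# call above would not run as written; in torchvision, `degrees` is a required
# argument and `translate` expects a tuple of maximum shift fractions, e.g.:
#     transforms.RandomAffine(degrees=0, translate=(0.12, 0.12)),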
Example 2
def get_transform(train):
    base_size = 520
    crop_size = 480

    min_size = int((0.5 if train else 1.0) * base_size)
    max_size = int((2.0 if train else 1.0) * base_size)
    transforms = []
    transforms.append(T.RandomResize(min_size, max_size))
    if train:
        transforms.append(
            T.RandomColorJitter(brightness=0.25,
                                contrast=0.25,
                                saturation=0.25,
                                hue=0.25))
        transforms.append(T.RandomGaussianSmoothing(radius=[0, 5]))
        transforms.append(T.RandomRotation(degrees=30, fill=0))
        transforms.append(T.RandomHorizontalFlip(0.5))
        transforms.append(T.RandomPerspective(fill=0))
        transforms.append(T.RandomCrop(crop_size, fill=0))
        transforms.append(T.RandomGrayscale(p=0.1))
    transforms.append(T.ToTensor())
    transforms.append(
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))

    return T.Compose(transforms)
    def create_transforms(self):

        transforms_list = []

        if self.mode == 'pretrain_tnet':
            transforms_list.extend([
                transforms.RandomCrop(400),
                transforms.RandomRotation(180),
                transforms.RandomHorizontalFlip()
            ])
        if self.mode == 'pretrain_mnet':
            transforms_list.extend([
                transforms.RandomCrop(320),
            ])
        if self.mode == 'end_to_end':
            transforms_list.extend([
                transforms.RandomCrop(800),
            ])

        transforms_list.extend([
            transforms.Resize((self.patch_size, self.patch_size)),
            transforms.ToTensor()
        ])

        self.transforms = transforms.Compose(transforms_list)
Example 4
def get_iterator(mode):
    normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]], std=[x / 255.0 for x in [63.0, 62.1, 66.7]])
    kwargs = {'num_workers': 4, 'pin_memory': True}
    transform_augment = transforms.Compose([
        # transforms.RandomResizedCrop(args.size, scale=(0.8, 1.2)),  # random scale 0.8-1 of original image area, crop to args.size
        transforms.RandomResizedCrop(size),
        transforms.RandomRotation(15),  # random rotation -15 to +15 degrees
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    transform = transforms.Compose([
        transforms.Resize((size, size)),
        transforms.ToTensor(),
        normalize,
    ])
    if mode:
        dataset = Dataset.MURA(split="train", transform=(transform_augment if augment else transform), type=type)
        loader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=True,
                                             **kwargs)
    else:
        dataset = Dataset.MURA(split="test", transform=transform, type=type)
        loader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             **kwargs)
    return loader
Example 5
def loader(train_size, test_size, args):
    if args.data.startswith('cifar'):
        if args.data == 'cifar10':
            dataloader = datasets.CIFAR10
        else:
            dataloader = datasets.CIFAR100
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
            transforms.RandomErasing(probability = 0.5, sh = 0.4, r1 = 0.3, ),
        ])

        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
    elif args.data == 'mnist':
        dataloader = datasets.MNIST
        transform_train = transforms.Compose([
            # https://github.com/hwalsuklee/tensorflow-mnist-cnn/blob/master/mnist_data.py
            #transforms.RandomAffine(translate=0.12),
            transforms.RandomCrop(28, padding=4),
            transforms.RandomRotation((-15, 15)),
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ])
    elif args.data == 'fmnist':
        dataloader = datasets.FashionMNIST
        transform_train = transforms.Compose([
            transforms.RandomCrop(28, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
            transforms.RandomErasing(probability=0.5, sh=0.4, r1=0.3, mean=[0.4914]),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ])
    else:
        exit('Unknown dataset')

    if args.aug == 0:
        transform_train = transforms.ToTensor()
        transform_test = transforms.ToTensor()
        
    trainset = dataloader('./data/' + args.data.upper(), train=True, download=True, transform=transform_train)
    train_loader = data.DataLoader(trainset, batch_size=train_size, shuffle=True, num_workers=0) # num_workers=0 is crucial for seed

    testset = dataloader(root='./data/' + args.data.upper(), train=False, download=False, transform=transform_test)
    test_loader = data.DataLoader(testset, batch_size=test_size, shuffle=False, num_workers=0)
    return train_loader, test_loader, dataloader
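
# Side note (added, not part of the original loader): the "num_workers=0 is
# crucial for seed" comment above is about reproducibility. With worker
# processes, the documented PyTorch recipe is to seed every worker and hand the
# DataLoader an explicit generator; a minimal sketch:
import random
import numpy as np
import torch

def seed_worker(worker_id):
    # each worker derives its seed from the base seed set via torch.manual_seed()
    worker_seed = torch.initial_seed() % 2**32
    np.random.seed(worker_seed)
    random.seed(worker_seed)

g = torch.Generator()
g.manual_seed(0)
# e.g.: data.DataLoader(trainset, batch_size=train_size, shuffle=True,
#                       num_workers=4, worker_init_fn=seed_worker, generator=g)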
Example 6
def get_transform(is_train):
    transforms = []
    if is_train:
        transforms.append(T.RandomResizedCrop())
    transforms.append(T.ToTensor())
    if is_train:
        transforms.append(T.RandomHorizontalFlip(0.5))
        transforms.append(T.RandomVerticalFlip(0.5))
        transforms.append(T.RandomRotation())
    return T.Compose(transforms)
def get_transform(train):
    transforms = []
    if train:
        # transforms.append(T.random_affine(degrees=1.98, translate=0.05, scale=0.05, shear=0.641))
        transforms.append(T.ColorJitter(brightness=0.5, saturation=0.5))
        transforms.append(T.RandomRotation())
        transforms.append(T.ToTensor())
        transforms.append(T.RandomHorizontalFlip(0.5))
    else:
        transforms.append(T.ToTensor())
    return T.Compose(transforms)
Example 8
def load_data_transformers(resize_reso=512, crop_reso=448, swap_num=[7, 7]):
    center_resize = 600
    Normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
    data_transforms = {
        'swap':
        transforms.Compose([
            transforms.Randomswap((swap_num[0], swap_num[1])),
        ]),
        'common_aug':
        transforms.Compose([
            transforms.Resize((resize_reso, resize_reso)),
            transforms.RandomRotation(degrees=15),
            transforms.RandomCrop((crop_reso, crop_reso)),
            transforms.RandomHorizontalFlip(),
        ]),
        'train_totensor':
        transforms.Compose([
            transforms.Resize((crop_reso, crop_reso)),
            # ImageNetPolicy(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]),
        'val_totensor':
        transforms.Compose([
            transforms.Resize((crop_reso, crop_reso)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]),
        'test_totensor':
        transforms.Compose([
            transforms.Resize((resize_reso, resize_reso)),
            transforms.CenterCrop((crop_reso, crop_reso)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]),
        'None':
        None,
    }
    return data_transforms
Example 9
def build_transforms(is_train, size, crop_size, mode="baseline"):
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)
    fill = tuple([int(v * 255) for v in mean])
    ignore_value = 255
    transforms = []
    min_scale = 1
    max_scale = 1
    if is_train:
        min_scale = 0.5
        max_scale = 2
    transforms.append(T.RandomResize(int(min_scale * size), int(max_scale * size)))
    if is_train:
        if mode == "baseline":
            pass
        elif mode == "randaug":
            transforms.append(T.RandAugment(2, 1 / 3, prob=1.0, fill=fill, ignore_value=ignore_value))
        elif mode == "custom1":
            transforms.append(T.ColorJitter(0.5, 0.5, (0.5, 2), 0.05))
            transforms.append(T.AddNoise(10))
            transforms.append(T.RandomRotation((-10, 10), mean=fill, ignore_value=0))
        else:
            raise NotImplementedError()
        transforms.append(
            T.RandomCrop(
                crop_size, crop_size,
                fill,
                ignore_value,
                random_pad=is_train
            ))
        transforms.append(T.RandomHorizontalFlip(0.5))
    transforms.append(T.ToTensor())
    transforms.append(T.Normalize(mean, std))
    return T.Compose(transforms)
Example 10
def main(args):
    if not os.path.exists(args.OUT_FOLDER):
        os.makedirs(args.OUT_FOLDER)
    since = time.time()

    transform = tt.Compose([
        transforms.RandomShift((10, 50, 50)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(90),
        transforms.ToTensor()
    ])
    data = util.LabeledKaggleDataset(args.INPUT_FOLDER,
                                     args.LABELS_FILE,
                                     input_transform=transform)

    #    for i,(img,t) in enumerate(data):
    img, t = data.__getitem__(0)
    img = img.numpy()
    print(img.shape)
    explore.plot(img)
    #img_new = horizontal_shift(img)
    #explore.plot(img_new)
    time_elapsed = time.time() - since
    print("Done, time_elapsed =", time_elapsed)
Example 11
def train(epochs):
    device = torch.device('cuda')
    param = {}
    param['lr'] = 0.001
    param['max_epoch'] = 60
    param['total_epoch'] = epochs
    param['lr_pow'] = 0.95
    param['running_lr'] = param['lr']

    train_file = 'Dataset05/train_file.txt'
    gt_root = 'Dataset05/training_aug/groundtruth'
    left_high_root = 'Dataset05/training_aug/left_high'
    right_low_root = 'Dataset05/training_aug/right_low'
    list_file = open(train_file)
    image_names = [line.strip() for line in list_file]

    crit = nn.L1Loss()
    #crit = nn.BCELoss()

    # model = SRNet().to(device)
    model = DINetwok().to(device)
    # model.load_state_dict(torch.load('model/2018-10-26 22:11:34/50000/snap_model.pth'))
    # model = load_part_of_model_PSP_LSTM(model, param['pretrained_model'])
    # model.load_state_dict(torch.load(param['pretrained_model']))
    # optimizers = create_optimizers(nets, param)
    optimizer = torch.optim.Adam(model.parameters(), lr=param['lr'])
    model.train()
    # model = load_part_of_model(model, 'checkpoint/model_epoch_5.pth')

    dataset = EnhanceDataset(left_high_root,
                             right_low_root,
                             gt_root,
                             image_names,
                             transform=transforms.Compose([
                                 transforms.RandomCrop(120),
                                 transforms.RandomHorizontalFlip(),
                                 transforms.RandomVerticalFlip(),
                                 transforms.RandomRotation(),
                                 transforms.ToTensor()
                             ]))

    training_data_loader = torch.utils.data.DataLoader(dataset,
                                                       batch_size=16,
                                                       shuffle=True,
                                                       num_workers=int(2))
    time_str = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())

    for epoch in range(1, epochs + 1):
        for iteration, (low, high, target) in enumerate(training_data_loader):
            low = low.type(torch.cuda.FloatTensor)
            high = high.type(torch.cuda.FloatTensor)
            target = target.type(torch.cuda.FloatTensor)

            final, lstm_branck = model(low, high)

            loss = crit(final, target)
            #loss_lstm = crit(lstm_branck, target)

            #loss = 0.9 * loss + 0.1 * loss_lstm

            optimizer.zero_grad()

            loss.backward()

            optimizer.step()

            if iteration % 2 == 0:
                print(
                    "===> Epoch[{}]({}/{}): Loss: {:.10f}; lr:{:.10f}".format(
                        epoch, iteration, len(training_data_loader),
                        loss.item(), param['running_lr']))
            adjust_learning_rate(optimizer, epoch, param)

        print("Epochs={}, lr={}".format(epoch,
                                        optimizer.param_groups[0]["lr"]))

        if epoch % 50 == 0:
            save_checkpoint(model, epoch, time_str)
Example 12
def main():
    global args, best_prec1
    args = parser.parse_args()
    print(args)

    global fig, ax1, ax2, ax3, ax4
    if args.visualize:
        plt.ion()
        plt.show()
        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2)

    # create model
    model = fcn(pretrained=args.pretrained, nparts=args.nparts)

    # define loss function (criterion) and optimizer
    criterion = nn.MSELoss().cuda()     
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    print(model)
    model = model.cuda()
    
    train_sampler = None
    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    
    train_dataset = Heatmap(
        traindir,
        transforms.Compose([
            transforms.Resize((unisize, unisize)),
            #transforms.RandomCrop(unisize),
            transforms.RandomRotation(30),
            transforms.ResizeTarget((outsize, outsize)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))


    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)

    val_dataset = Heatmap(
            valdir, transforms.Compose([
            transforms.Resize((unisize,unisize)),
            transforms.ResizeTarget((outsize, outsize)),
            transforms.ToTensor(),
            normalize,
        ]))

    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)


    if args.evaluate:
        validate(train_loader, model, criterion, args.epochs-1)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, epoch)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer' : optimizer.state_dict(),
        }, is_best)
Example 13
    tfs.ApplyAffine(so=2),
    tfs.ReturnImageData(),
    tfs.SwapAxes(0, 1),
    tfs.ToTensor(),
    tfs.UnitInterval(),
])

# adding rotation
title_dict[2] = 'With random shift + rotation'
t2 = tfs.ComposeMRI([
    tfs.LoadNifti(),
    tfs.TranslateToCom(),
    tfs.SetResolution(new_dim=new_dim, new_res=new_res),
    tfs.CropShift(np.array([0, -1, -30])),
    tfs.RandomShift([10, 0, 0]),
    tfs.RandomRotation(angle_interval=[-10, 10],
                       rotation_axis=[0, 1, 0]),  # random rotation
    tfs.ApplyAffine(so=2),
    tfs.ReturnImageData(),
    tfs.SwapAxes(0, 1),
    tfs.ToTensor(),
    tfs.UnitInterval(),
])

# adding gamma transform + gaussian noise
title_dict[3] = 'Rand shift, rotation, gamma, noise'
t3 = tfs.ComposeMRI([
    tfs.LoadNifti(),
    tfs.TranslateToCom(),
    tfs.SetResolution(new_dim=new_dim, new_res=new_res),
    tfs.CropShift(np.array([0, -1, -30])),
    tfs.RandomShift([10, 0, 0]),
Example 14
def main(epochs, batch_size, learn_angle, angle_lr):

    transform = torchvision.transforms.Compose([
        transforms.RandomRotation(30),
        transforms.Identity() if learn_angle else transforms.Free(),
        transforms.ToTensor(),
        transforms.Normalize()
    ])

    set_train = dataset.MNIST(root='./data',
                              train=True,
                              download=True,
                              transform=transform)
    set_test = dataset.MNIST(root='./data',
                             train=False,
                             download=True,
                             transform=transform)
    loader_train = torch.utils.data.DataLoader(set_train,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=2)
    loader_test = torch.utils.data.DataLoader(set_test,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              num_workers=2)

    nn = model.Net(learn_angle)
    if torch.cuda.is_available():
        nn.cuda()
    optimizer = optim.SGD(nn.parameters(), lr=0.001, momentum=0.9)

    VIEW_INTERVAL = 100
    for epoch in range(epochs):
        acc_loss = 0.0
        running_loss = 0.0
        for i, sample in enumerate(loader_train):

            x, y = pack(sample, learn_angle)

            optimizer.zero_grad()
            y_pred = nn(x)
            loss = model.loss(y_pred, y, angle_lr)
            loss.backward()
            optimizer.step()

            # report loss
            acc_loss += loss.item()
            if i % VIEW_INTERVAL == VIEW_INTERVAL - 1:
                running_loss = acc_loss / VIEW_INTERVAL
                click.secho(
                    f"\rEpoch {epoch+1}, iteration {i+1}; "
                    f"loss: {(running_loss):.3f}; ",
                    err=True,
                    nl=False)
                acc_loss = 0.0

        # testing
        count_correct = 0
        count_total = 0
        for sample in loader_test:
            x, labels = pack(sample, learn_angle)
            y_pred = nn(Variable(x))
            if learn_angle:
                labels = labels[0]
                y_pred = y_pred[0]
            _, labels_pred = torch.max(y_pred.data, 1)
            c = (labels_pred == labels).squeeze()
            count_correct += c.sum().item()
            count_total += len(c)

        click.secho(
            f"\rEpoch {epoch+1}; loss: {(running_loss):.3f}; "
            f"Test Acc: {100.0 * count_correct / count_total :.2f}%",
            err=True,
            nl=False)
        running_loss = 0

        click.secho('', err=True)
Example 15
def main(args):
    path_to_config = pathlib.Path(args.path)
    with open(path_to_config) as f:
        config = yaml.safe_load(f)

    # read config:
    path_to_data = pathlib.Path(config['path_to_data'])
    path_to_pkl = pathlib.Path(config['path_to_pkl'])
    path_to_save_dir = pathlib.Path(config['path_to_save_dir'])

    train_batch_size = int(config['train_batch_size'])
    val_batch_size = int(config['val_batch_size'])
    num_workers = int(config['num_workers'])
    lr = float(config['lr'])
    n_epochs = int(config['n_epochs'])
    n_cls = int(config['n_cls'])
    in_channels = int(config['in_channels'])
    n_filters = int(config['n_filters'])
    reduction = int(config['reduction'])
    T_0 = int(config['T_0'])
    eta_min = float(config['eta_min'])
    baseline = config['baseline']

    # train and val data paths:
    all_paths = utils.get_paths_to_patient_files(path_to_imgs=path_to_data,
                                                 append_mask=True)
    train_paths, val_paths = utils.get_train_val_paths(
        all_paths=all_paths, path_to_train_val_pkl=path_to_pkl)
    train_paths = train_paths[:2]
    val_paths = val_paths[:2]

    # train and val data transforms:
    train_transforms = transforms.Compose([
        transforms.RandomRotation(p=0.5, angle_range=[0, 45]),
        transforms.Mirroring(p=0.5),
        transforms.NormalizeIntensity(),
        transforms.ToTensor()
    ])

    val_transforms = transforms.Compose(
        [transforms.NormalizeIntensity(),
         transforms.ToTensor()])

    # datasets:
    train_set = dataset.HecktorDataset(train_paths,
                                       transforms=train_transforms)
    val_set = dataset.HecktorDataset(val_paths, transforms=val_transforms)

    # dataloaders:
    train_loader = DataLoader(train_set,
                              batch_size=train_batch_size,
                              shuffle=True,
                              num_workers=num_workers)
    val_loader = DataLoader(val_set,
                            batch_size=val_batch_size,
                            shuffle=False,
                            num_workers=num_workers)

    dataloaders = {'train': train_loader, 'val': val_loader}

    if baseline:
        model = models.BaselineUNet(in_channels, n_cls, n_filters)
    else:
        model = models.FastSmoothSENormDeepUNet_supervision_skip_no_drop(
            in_channels, n_cls, n_filters, reduction)

    criterion = losses.Dice_and_FocalLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.99))
    metric = metrics.dice
    scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
        optimizer, T_0=T_0, eta_min=eta_min)

    trainer_ = trainer.ModelTrainer(model=model,
                                    dataloaders=dataloaders,
                                    criterion=criterion,
                                    optimizer=optimizer,
                                    metric=metric,
                                    scheduler=scheduler,
                                    num_epochs=n_epochs,
                                    parallel=True)

    trainer_.train_model()
    trainer_.save_results(path_to_dir=path_to_save_dir)
Example 16
from models.resnet_model_3a import TestModel
from data_loader import loader
from data_loader_2 import load_datasets
import transforms as tfs
from helpers import deactivate_layer

data_path = '../Data'
labels_path = '{}/train_labels/train_labels.csv'.format(data_path)
#
# train_labels = pd.read_csv(labels_path)
#
# print(len(train_labels))

trans = transforms.Compose([
    tfs.RandomRotation(range=(0, 360)),
    tfs.Downsize(),
    tfs.Normalise(),
    # tfs.ChannelShift(),
    transforms.ToTensor()
])

batch_size = 128
validation_split = .2
shuffle_dataset = True
p = 1

# train_loader, validation_loader = loader('{}/train'.format(data_path), labels_path, batch_size, validation_split, p=p, transform=trans)
train_loader, validation_loader = load_datasets(batch_size=128,
                                                transforms=trans)
# We'll be using the default shape for our network
image_shape = (224, 224)

transformer = None

# Based on the selected transformer, prepare one
# This is awful and I wish Python just had a switch
# statement

if args.transformer == "rescale":
    transformer = transforms.Rescale(image_shape)
elif args.transformer == "crop":
    transformer = transforms.RandomCrop((50, 50))
elif args.transformer == "rotation":
    transformer = transforms.RandomRotation(90)
elif args.transformer == "blur":
    transformer = transforms.RandomBlur()
elif args.transformer == "brightness":
    transformer = transforms.RandomBrightness()
elif args.transformer == "noise":
    transformer = transforms.RandomNoise()
elif args.transformer == "flip":
    transformer = transforms.RandomFlip()
elif args.transformer == "normalize":
    transformer = transforms.Normalize()
else:
    print("Transformer is not recognized - quitting")
    sys.exit()
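
# Alternative sketch (added, not in the original script): the if/elif chain
# above can be collapsed into a dict lookup, the usual Python stand-in for a
# switch statement; names and constructors are taken from the chain above.
TRANSFORMER_FACTORIES = {
    "rescale": lambda: transforms.Rescale(image_shape),
    "crop": lambda: transforms.RandomCrop((50, 50)),
    "rotation": lambda: transforms.RandomRotation(90),
    "blur": lambda: transforms.RandomBlur(),
    "brightness": lambda: transforms.RandomBrightness(),
    "noise": lambda: transforms.RandomNoise(),
    "flip": lambda: transforms.RandomFlip(),
    "normalize": lambda: transforms.Normalize(),
}
factory = TRANSFORMER_FACTORIES.get(args.transformer)
if factory is None:
    print("Transformer is not recognized - quitting")
    sys.exit()
transformer = factory()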

# Two datasets - one for the untransformed image, another for the
Example 18

if __name__ == '__main__':
    train_file = 'Dataset05/train_file.txt'
    gt_root = 'Dataset05/training_aug/groundtruth'
    left_high_root = 'Dataset05/training_aug/left_high'
    right_low_root = 'Dataset05/training_aug/right_low'

    list_file = open(train_file)
    image_names = [line.strip() for line in list_file]

    dataset = EnhanceDataset(left_high_root,
                             right_low_root,
                             gt_root,
                             image_names,
                             transform=transforms.Compose([
                                 transforms.RandomCrop(280),
                                 transforms.RandomHorizontalFlip(),
                                 transforms.RandomVerticalFlip(),
                                 transforms.RandomRotation(),
                                 transforms.ToTensor()
                             ]))

    dataLoader = torch.utils.data.DataLoader(dataset,
                                             batch_size=32,
                                             shuffle=True,
                                             num_workers=int(1))

    for i, (low, higt, gt) in enumerate(dataLoader):
        print(i)
        # print(low)
def main():
    global args, best_prec1
    args = parser.parse_args()
    # torch.cuda.set_device(args.gpu)
    if args.tensorboard:
        print("Using tensorboard")
        configure("exp/%s" % (args.name))

    # Data loading code

    if args.augment:
        print(
            "Doing image augmentation with\n"
            "Zoom: prob: {zoom_prob}  range: {zoom_range}\n"
            "Stretch: prob: {stretch_prob} range: {stretch_range}\n"
            "Rotation: prob: {rotation_prob} range: {rotation_degree}".format(
                zoom_prob=args.zoom_prob,
                zoom_range=args.zoom_range,
                stretch_prob=args.stretch_prob,
                stretch_range=args.stretch_range,
                rotation_prob=args.rotation_prob,
                rotation_degree=args.rotation_degree))
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Lambda(lambda x: F.pad(
                Variable(x.unsqueeze(0), requires_grad=False, volatile=True),
                (4, 4, 4, 4),
                mode='replicate').data.squeeze()),
            transforms.ToPILImage(),
            transforms.RandomCrop(32),
            transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(prob=args.rotation_prob,
                                      degree=args.rotation_degree),
            transforms.RandomZoom(prob=args.zoom_prob,
                                  zoom_range=args.zoom_range),
            transforms.RandomStretch(prob=args.stretch_prob,
                                     stretch_range=args.stretch_range),
            transforms.ToTensor(),
        ])

    else:
        transform_train = transforms.Compose([
            transforms.ToTensor(),
        ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
    ])

    kwargs = {'num_workers': 1, 'pin_memory': True}
    assert (args.dataset == 'cifar10' or args.dataset == 'cifar100')
    train_loader = torch.utils.data.DataLoader(
        datasets.__dict__[args.dataset.upper()]('../data',
                                                train=True,
                                                download=True,
                                                transform=transform_train),
        batch_size=args.batch_size,
        shuffle=True,
        **kwargs)
    val_loader = torch.utils.data.DataLoader(
        datasets.__dict__[args.dataset.upper()]('../data',
                                                train=False,
                                                transform=transform_test),
        batch_size=args.batch_size,
        shuffle=True,
        **kwargs)

    # create model
    model = WideResNet(args.layers,
                       args.dataset == 'cifar10' and 10 or 100,
                       args.widen_factor,
                       dropRate=args.droprate)

    # get the number of model parameters
    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    # for training on multiple GPUs.
    # Use CUDA_VISIBLE_DEVICES=0,1 to specify which GPUs to use
    # model = torch.nn.DataParallel(model).cuda()
    model = model.cuda()

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                nesterov=args.nesterov,
                                weight_decay=args.weight_decay)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch + 1)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, epoch)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
            }, is_best)
    print('Best accuracy: ', best_prec1)
Example 20
 def train(self):
     use_cuda = torch.cuda.is_available()
     path = os.path.join('./out_models/' + self.model_name + '_' +
                         self.task_name + '_' + self.job_id)
     ## get logger
     logger = self.get_logger(self.model_name, self.task_name, self.job_id,
                              path)
     logger.info("Job_id : {}".format(self.job_id))
     logger.info("gpus_device_ids : {}".format(self.device_ids))
     logger.info("Task Name : {}".format(self.task_name))
     logger.info("Backbone_name : {}".format(self.model_name))
     logger.info("input_shape : ({},{}.{})".format(self.input_shape[0],
                                                   self.input_shape[1],
                                                   self.input_shape[2]))
     logger.info("batch_size : {}".format(self.batch_size))
     logger.info("num_epochs : {}".format(self.num_epochs))
     logger.info("warmup_steps : {}".format(self.warmup_steps))
     logger.info("resume_from : {}".format(self.resume_from))
     logger.info("pretrained : {}".format(self.pretrained))
     logger.info("mixup : {}".format(self.mixup))
     logger.info("cutmix : {}".format(self.cutmix))
     ## tensorboard writer
     log_dir = os.path.join(path, "{}".format("tensorboard_log"))
     if not os.path.isdir(log_dir):
         os.mkdir(log_dir)
     writer = SummaryWriter(log_dir)
     ## get model of train
     net = get_model(self.model_name)
     net = torch.nn.DataParallel(net, device_ids=self.device_ids)
     net = net.cuda(device=self.device_ids[0])
     ## loss
     criterion = nn.CrossEntropyLoss()
     ## optimizer
     if self.optimizers == 'SGD':
         optimizer = optim.SGD(net.parameters(),
                               lr=self.init_lr,
                               momentum=0.9,
                               weight_decay=self.weight_decay)
     elif self.optimizers == 'Adam':
         optimizer = optim.Adam(net.parameters(),
                                lr=self.init_lr,
                                weight_decay=self.weight_decay)
     milestones = [80, 150, 200, 300]
     scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                milestones=milestones,
                                                gamma=0.1)
     logger.info(("===========opti=========="))
     logger.info("Optimizer:{}".format(self.optimizers))
     logger.info("lr:{}".format(self.init_lr))
     logger.info("weight_decay:{}".format(self.weight_decay))
     logger.info("lr_scheduler: MultiStepLR")
     logger.info("milestones:{}".format(milestones))
     ## augmentation
     normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                      std=[0.5, 0.5, 0.5])
     ## train aug
     transform_train = transforms.Compose([
         transforms.RandomCrop(int(self.input_shape[-1])),
         transforms.RandomHorizontalFlip(),
         transforms.RandomBrightness(brightness = self.brightness, brightness_ratio=self.brightness_ratio),
         transforms.RandomBlur(blur_ratio = self.blur_ratio),
         transforms.RandomRotation(degrees = self.degrees, rotation_ratio = 0.1),
         transforms.ColorJitter(brightness = self.color_brightnesss, contrast = self.color_contrast,\
                                saturation = self.color_saturation, hue=0),
         transforms.ToTensor(),
         #normalize,
     ])
     ## test aug
     transform_test = transforms.Compose([
         transforms.CenterCrop(int(self.input_shape[-1])),
         transforms.ToTensor(),
         #normalize,
     ])
     logger.info(("============aug==========="))
     logger.info("crop: RandomCrop")
     logger.info("RandomHorizontalFlip: True")
     logger.info("brightness:{}".format(self.brightness))
     logger.info("brightness_ratio:{}".format(self.brightness_ratio))
     logger.info("blur_ratio:{}".format(self.blur_ratio))
     logger.info("degrees:{}".format(self.degrees))
     logger.info("color_brightnesss:{}".format(self.color_brightnesss))
     logger.info("color_contrast:{}".format(self.color_contrast))
     logger.info("color_saturation:{}".format(self.color_saturation))
     ## prepare data
     print('==> Preparing data..')
     logger.info(("==========Datasets========="))
     logger.info("train_datasets:{}".format(self.train_datasets))
     logger.info("val_datasets:{}".format(self.val_datasets))
     logger.info("test_datasets:{}".format(self.test_datasets))
     #trainset = DataLoader(split = 'Training', transform=transform_train)
     trainset = DataLoader(self.train_datasets,
                           self.val_datasets,
                           self.test_datasets,
                           split='Training',
                           transform=transform_train)
     trainloader = torch.utils.data.DataLoader(trainset,
                                               batch_size=self.batch_size *
                                               len(self.device_ids),
                                               shuffle=True)
     Valset = DataLoader(self.train_datasets,
                         self.val_datasets,
                         self.test_datasets,
                         split='Valing',
                         transform=transform_test)
     Valloader = torch.utils.data.DataLoader(Valset,
                                             batch_size=64 *
                                             len(self.device_ids),
                                             shuffle=False)
     Testset = DataLoader(self.train_datasets,
                          self.val_datasets,
                          self.test_datasets,
                          split='Testing',
                          transform=transform_test)
     Testloader = torch.utils.data.DataLoader(Testset,
                                              batch_size=64 *
                                              len(self.device_ids),
                                              shuffle=False)
     ## train
     logger.info(("======Begain Training======"))
     #self.train_model(net, criterion, optimizer, scheduler, trainloader, Valloader, Testloader, logger, writer, path)
     self.train_model(net, criterion, optimizer, scheduler, trainloader,
                      Valloader, Testloader, logger, writer, path)
     logger.info(("======Finsh Training !!!======"))
     logger.info(("best_val_acc_epoch: %d, best_val_acc: %0.3f" %
                  (self.best_Val_acc_epoch, self.best_Val_acc)))
     logger.info(("best_test_acc_epoch: %d, best_test_acc: %0.3f" %
                  (self.best_Test_acc_epoch, self.best_Test_acc)))
Example 21
    'requires_grad': True,
    'use_gpu': torch.cuda.is_available(),

    # train params
    'epoch': 100,
    'start_lr': 0.01,
    'momentum': 0.9,
    'gamma': 0.1,
    'lr_decay_step_size': 30,
    'loss_fun': nn.CrossEntropyLoss(),
}

data_trans = {
    'train':
    transforms.Compose([
        transforms.RandomRotation(degrees=10),
        transforms.RandomResizedCrop(args['input_size']),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val':
    transforms.Compose([
        transforms.Resize(args['input_size']),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}


def data_load(data_dir=args['data_dir']):
Example 22

    train_dataset = CamVid(
        'data',
        image_set='train',
        download=True
    )
    valid_dataset = CamVid(
        'data',
        image_set='val',
        download=True
    )

    train_transforms = transforms.Compose([
            transforms.Resize(settings.IMAGE_SIZE),
            transforms.RandomRotation(15, fill=train_dataset.ignore_index),
            transforms.RandomGaussianBlur(),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(0.4, 0.4),
            transforms.ToTensor(),
            transforms.Normalize(settings.MEAN, settings.STD),
    ])

    valid_transforms = transforms.Compose([
        transforms.Resize(settings.IMAGE_SIZE),
        transforms.ToTensor(),
        transforms.Normalize(settings.MEAN, settings.STD),
    ])

    train_dataset.transforms = train_transforms
    valid_dataset.transforms = valid_transforms
Example 23
device = torch.device("cuda:0" if (torch.cuda.is_available() and args.cuda) else "cpu")
if (not torch.cuda.is_available() and args.cuda):
    print("cuda is not available. ")

print("Working on {}.".format(device))
if torch.cuda.is_available():
    print("using GPU number {}".format(gpu_id))


## CREATE DATASETS ##

# defining transformations
randomVFlip = transforms.RandomVerticalFlip()
randomResizedCrop = transforms.RandomResizedCrop(parameters["input"]["matrix_size"], scale=parameters["transforms"]["scale_range"], ratio=parameters["transforms"]["ratio_range"], dtype=parameters['input']['data_type'])
randomRotation = transforms.RandomRotation(parameters["transforms"]["max_angle"])
elasticTransform = transforms.ElasticTransform(alpha_range=parameters["transforms"]["alpha_range"], sigma_range=parameters["transforms"]["sigma_range"], p=parameters["transforms"]["elastic_rate"], dtype=parameters['input']['data_type'])
channelShift = transforms.ChannelShift(parameters["transforms"]["channel_shift_range"], dtype=parameters['input']['data_type'])
centerCrop = transforms.CenterCrop2D(parameters["input"]["matrix_size"])

# creating composed transformation
composed = torch_transforms.Compose([randomVFlip,randomRotation,randomResizedCrop, elasticTransform])

# creating datasets
training_dataset = MRI2DSegDataset(paths.training_data, matrix_size=parameters["input"]["matrix_size"], orientation=parameters["input"]["orientation"], resolution=parameters["input"]["resolution"], transform = composed)
validation_dataset = MRI2DSegDataset(paths.validation_data, matrix_size=parameters["input"]["matrix_size"], orientation=parameters["input"]["orientation"], resolution=parameters["input"]["resolution"])

# creating data loaders
training_dataloader = DataLoader(training_dataset, batch_size=parameters["training"]["batch_size"], shuffle=True, drop_last=True, num_workers=1)
validation_dataloader = DataLoader(validation_dataset, batch_size=parameters["training"]["batch_size"], shuffle=True, drop_last=False, num_workers=1)
Example 24
    log_dir = os.path.join(root_path, settings.LOG_FOLDER, settings.TIME_NOW)

    if not os.path.exists(checkpoint_path):
        os.makedirs(checkpoint_path)
    checkpoint_path = os.path.join(checkpoint_path, '{epoch}-{type}.pth')

    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    writer = SummaryWriter(log_dir=log_dir)

    train_dataset = CamVid(settings.DATA_PATH, 'train')
    valid_dataset = CamVid(settings.DATA_PATH, 'val')

    train_transforms = transforms.Compose([
        transforms.RandomRotation(value=train_dataset.ignore_index),
        transforms.RandomScale(value=train_dataset.ignore_index),
        transforms.RandomGaussianBlur(),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(),
        transforms.Resize(settings.IMAGE_SIZE),
        transforms.ToTensor(),
        transforms.Normalize(settings.MEAN, settings.STD),
    ])

    valid_transforms = transforms.Compose([
        transforms.Resize(settings.IMAGE_SIZE),
        transforms.ToTensor(),
        transforms.Normalize(settings.MEAN, settings.STD),
    ])
Example 25
# use_gpu = torch.cuda.is_available()
cudnn.benchmark = True
use_gpu = True


## Data loading code
# normalization to be applied to the training and validation tensors.
normalize = augment.Normalize(mean = [ 0.485, 0.456, 0.406 ],
                              std  = [ 0.229, 0.224, 0.225 ])

# Dataset setup
# Choose what sort of data transformations and augmentations you wish to apply to
# the training.

# A set of random transformations which can be applied.
random_Transform_List = [
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(30),
    transforms.ColorJitter(brightness=0.5),
    transforms.Grayscale(num_output_channels=3),
]

data_transforms = {
   'train': augment.Compose([
       #augment.ScalePad((1024,1024)),
       #augment.RandomRotate(60),
       #augment.RandomHorizontalFlip(),
       augment.AnisotropicScale((224,224)),
       transforms.RandomApply(random_Transform_List,p=0.5),
       augment.ToTensor(),
       normalize
   ]),
   'val': augment.Compose([
       #augment.ScalePad((1024,1024)),
       augment.AnisotropicScale((224,224)),