Example #1
0
def retrieve_image_tensor(image_path):
    """Load the image at *image_path* and return it as a normalized 256x256 tensor."""
    # Convert to a tensor first; resize and normalization then operate on the tensor.
    pipeline = Compose([Resize((256, 256)), Normalize(mean=[0.5], std=[0.5])])
    loaded = Image.open(image_path)
    as_tensor = torchvision.transforms.ToTensor()(loaded)
    return pipeline(as_tensor)
Example #2
0
    def __getitem__(self, index):
        """Load the image at *index* and build its target vector from the file name."""
        name = self.listsets[index]
        img = Image.open(os.path.join(self.root, name))
        img = np.array(img)
        # HWC uint8 image -> CHW float tensor scaled to [0, 1]
        img = torch.Tensor(img).permute(2, 0, 1) / 255
        # Build the label (parsed from the dot-separated file name)
        names = name.split(".")
        # Fields 1-4 are position values, scaled by 224
        # (presumably the input image side length — TODO confirm)
        position = np.array(names[1:5], dtype=np.float32) / 224
        # Field 5 is a binary class flag.
        flag = np.array(names[5:6], dtype=np.float32)
        target = np.concatenate((position, flag))
        # Normalization statistics are chosen per class flag.
        if flag == 0:
            img = Normalize(self.sample_mean_0, self.sample_std_0)(img)
        else:
            img = Normalize(self.sample_mean_1, self.sample_std_1)(img)

        return img, target
Example #3
0
    def __init__(self, mean, std, **kwargs):
        """Normalizes images.

        Args:
            mean (list): List of means of each channel.
            std (list): List of standard deviations of each channel
        """
        # NOTE(review): **kwargs is accepted but silently ignored here —
        # confirm whether it should be forwarded (e.g. to a base class).
        self.normalize = Normalize(mean, std)
Example #4
0
def retrieve_inference_dataloader(dataframe, batch_size=4):
    """Build a non-shuffling DataLoader over *dataframe* for inference."""
    # Resize to 256x256, then normalize to roughly [-1, 1].
    transform = Compose([Resize((256, 256)), Normalize(mean=[0.5], std=[0.5])])
    dataset = MoleculesDatasetInference(dataframe, transform)
    return DataLoader(dataset,
                      batch_size=batch_size,
                      num_workers=0,
                      pin_memory=True,
                      shuffle=False)
Example #5
0
    def __init__(self, image_dir):
        """Index paired images under image_dir/A (photos) and image_dir/B (sketches)."""
        super(DataSetFromFolderForPix2Pix, self).__init__()
        self.photo_path = join(image_dir, "A")
        self.sketch_path = join(image_dir, "B")
        # Keep only files that look like images.
        self.image_filenames = [
            fname for fname in listdir(self.photo_path) if is_image_file(fname)
        ]

        # ToTensor maps to [0, 1]; Normalize(0.5, 0.5) then maps to [-1, 1].
        self.transform = Compose([
            ToTensor(),
            Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
    def __init__(self, dataset_root="./dataset",
                 train_batch_size=8, val_batch_size=8, num_workers=0):
        """Store loader configuration and build the shared image transform."""
        super().__init__()

        self.dataset_root = dataset_root
        self.train_batch_size = train_batch_size
        self.val_batch_size = val_batch_size
        self.num_workers = num_workers

        # ImageNet channel statistics for normalization.
        imagenet_mean = (0.485, 0.456, 0.406)
        imagenet_std = (0.229, 0.224, 0.225)
        self.transform = Compose([
            ToTensor(),
            Normalize(mean=imagenet_mean, std=imagenet_std),
        ])
Example #7
0
def evaluate_on_imagenet(model: NFNet,
                         dataset_dir: Path,
                         batch_size=50,
                         device='cuda:0'):
    """Evaluate top-1 accuracy of *model* on the dataset under *dataset_dir*.

    Moves the model to *device*, sets it to eval mode, and streams the
    validation set through it, printing a running accuracy.

    Args:
        model (NFNet): Model to evaluate.
        dataset_dir (Path): Root directory handed to get_dataset.
        batch_size (int): Evaluation batch size.
        device (str): torch device string (e.g. 'cuda:0' or 'cpu').
    """
    transforms = Compose([
        #Pad32CenterCrop(model.test_imsize),
        ToTensor(),
        Resize((model.test_imsize, model.test_imsize), PIL.Image.BICUBIC),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    print(f"Starting evaluation from {dataset_dir}")
    dataset = get_dataset(dataset_dir, transforms=transforms)

    dataloader = DataLoader(
        dataset=dataset,
        batch_size=batch_size,  # F0: 120, F1: 100, F2: 80
        shuffle=False,
        pin_memory=False,
        num_workers=8)

    print(f"Validation set contains {len(dataset)} images.")

    # Guard: an empty dataset would crash below (math.log10(0) raises and
    # the accuracy computation divides by processed_imgs).
    if len(dataset) == 0:
        print("No images found; skipping evaluation.")
        return

    model.to(device)
    model.eval()

    # Loop-invariant: field width for the progress counter.
    batch_padding = int(math.log10(len(dataloader.dataset)) + 1)

    processed_imgs = 0
    correct_labels = 0
    for step, data in enumerate(dataloader):
        with torch.no_grad():
            inputs = data[0].to(device)
            targets = data[1].to(device)

            output = model(inputs).type(torch.float32)

            processed_imgs += targets.size(0)
            _, predicted = torch.max(output, 1)
            correct_labels += (predicted == targets).sum().item()

            print(
                f"\rProcessing {processed_imgs:{batch_padding}d}/{len(dataloader.dataset)}. Accuracy: {100.0*correct_labels/processed_imgs:6.4f}",
                sep=' ',
                end='',
                flush=True)

    print(
        f"\nFinished eval. Accuracy: {100.0*correct_labels/processed_imgs:6.4f}"
    )
def retrieve_evaluate_dataloader(dataframe, vocab: Vocabulary, batch_size=8, shuffle=False, sequence_length=None):
    """Build an evaluation DataLoader over molecule images and captions."""
    pad_idx = vocab.stoi['<PAD>']
    # No augmentation for evaluation: resize + normalize only.
    transform = Compose([Resize((256,256)), Normalize(mean=[0.5], std=[0.5])])

    collator = CapsCollate(pad_idx=pad_idx, batch_first=True, sequence_length=sequence_length)
    dataset = MoleculesDataset(dataframe, vocab, transform)
    return DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=0,
        pin_memory=True,
        collate_fn=collator,
    )
def retrieve_train_dataloader(dataframe, vocab: Vocabulary, batch_size=8, shuffle=True, sequence_length=None):
    """Build a (by default shuffling) training DataLoader over molecule images and captions."""
    pad_idx = vocab.stoi['<PAD>']
    # Geometric augmentations are currently disabled.
    transform = Compose([
        # RandomVerticalFlip(),
        # RandomHorizontalFlip(),
        # RandomRotation(180),
        Resize((256,256)),
        Normalize(mean=[0.5], std=[0.5]),
    ])

    collator = CapsCollate(pad_idx=pad_idx, batch_first=True, sequence_length=sequence_length)
    dataset = MoleculesDataset(dataframe, vocab, transform)
    return DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=0,
        pin_memory=True,
        collate_fn=collator,
    )
import PIL
from PIL import Image

import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
from torchvision.transforms.transforms import Compose, Normalize, Resize, ToTensor, RandomHorizontalFlip, RandomCrop


# IO
# Shared preprocessing pipeline: bicubic resize to 256x256, tensor
# conversion, then normalization with ImageNet channel statistics.
transform = Compose([
    #RandomHorizontalFlip(),
    Resize((256,256), PIL.Image.BICUBIC),
    ToTensor(),
    Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

def get_train_file_path(image_ids):
    """Map image ids to their training-image file paths.

    Images are sharded by the first three characters of the id:
    ./input/train/<c0>/<c1>/<c2>/<id>.png

    Args:
        image_ids: Iterable of image-id strings (each at least 3 chars).

    Returns:
        list[str]: One relative path per id, in input order.
    """
    return [
        f"./input/train/{image_id[0]}/{image_id[1]}/{image_id[2]}/{image_id}.png"
        for image_id in image_ids
    ]

def get_test_file_path(image_ids):
    return [
        "./input/test/{}/{}/{}/{}.png".format(
Example #11
0
def train(config: dict) -> None:
    """Train an NFNet according to *config*.

    Builds (or loads) the model, constructs the augmentation pipeline and
    dataloader, trains with SGD_AGC (SGD with adaptive gradient clipping)
    under optional fp16/AMP, logs loss and accuracy to TensorBoard under
    runs/run<N>, and saves a checkpoint every 10 epochs.

    Args:
        config (dict): Training configuration; keys used include 'device',
            'pretrained', 'num_classes', 'variant', 'stochdepth_rate',
            'alpha', 'se_ratio', 'activation', 'dataset', 'overfit',
            'batch_size', 'num_workers', 'pin_memory', 'scale_lr',
            'learning_rate', 'do_clip', 'clipping', 'use_fp16', 'momentum',
            'weight_decay', 'nesterov', 'amp', 'epochs'.
    """
    if config['device'].startswith('cuda'):
        if torch.cuda.is_available():
            print(
                f"Using CUDA{torch.version.cuda} with cuDNN{torch.backends.cudnn.version()}"
            )
        else:
            raise ValueError(
                "You specified to use cuda device, but cuda is not available.")

    # Either resume from pretrained weights or build a fresh model.
    if config['pretrained'] is not None:
        model = pretrained_nfnet(path=config['pretrained'],
                                 stochdepth_rate=config['stochdepth_rate'],
                                 alpha=config['alpha'],
                                 activation=config['activation'])
    else:
        model = NFNet(num_classes=config['num_classes'],
                      variant=config['variant'],
                      stochdepth_rate=config['stochdepth_rate'],
                      alpha=config['alpha'],
                      se_ratio=config['se_ratio'],
                      activation=config['activation'])

    # Training-time augmentation + ImageNet normalization.
    transforms = Compose([
        RandomHorizontalFlip(),
        Resize((model.train_imsize, model.train_imsize), PIL.Image.BICUBIC),
        ToTensor(),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    device = config['device']
    dataset = get_dataset(path=config['dataset'], transforms=transforms)

    # Overfit mode: train on a small, fixed subset (every 50th sample).
    if config['overfit']:
        dataset = Subset(dataset, [i * 50 for i in range(0, 1000)])

    dataloader = DataLoader(dataset=dataset,
                            batch_size=config['batch_size'],
                            shuffle=True,
                            num_workers=config['num_workers'],
                            pin_memory=config['pin_memory'])

    # Linear LR scaling rule relative to a reference batch size of 256.
    if config['scale_lr']:
        learning_rate = config['learning_rate'] * config['batch_size'] / 256
    else:
        learning_rate = config['learning_rate']

    if not config['do_clip']:
        config['clipping'] = None

    if config['use_fp16']:
        model.half()

    model.to(device)  # "memory_format=torch.channels_last" TBD

    optimizer = SGD_AGC(
        # The optimizer needs all parameter names
        # to filter them by hand later
        named_params=model.named_parameters(),
        lr=learning_rate,
        momentum=config['momentum'],
        clipping=config['clipping'],
        weight_decay=config['weight_decay'],
        nesterov=config['nesterov'])

    # Find desired parameters and exclude them
    # from weight decay and clipping
    for group in optimizer.param_groups:
        name = group['name']

        if model.exclude_from_weight_decay(name):
            group['weight_decay'] = 0

        if model.exclude_from_clipping(name):
            group['clipping'] = None

    criterion = nn.CrossEntropyLoss()

    # Pick the first unused runs/run<N> directory for logs and checkpoints.
    runs_dir = Path('runs')
    run_index = 0
    while (runs_dir / ('run' + str(run_index))).exists():
        run_index += 1
    runs_dir = runs_dir / ('run' + str(run_index))
    runs_dir.mkdir(exist_ok=False, parents=True)
    checkpoints_dir = runs_dir / 'checkpoints'
    checkpoints_dir.mkdir()

    writer = SummaryWriter(str(runs_dir))
    scaler = amp.GradScaler()

    for epoch in range(config['epochs']):
        model.train()
        running_loss = 0.0
        processed_imgs = 0
        correct_labels = 0
        epoch_time = time.time()

        for step, data in enumerate(dataloader):
            inputs = data[0].half().to(
                device) if config['use_fp16'] else data[0].to(device)
            targets = data[1].to(device)

            optimizer.zero_grad()

            # Forward pass under autocast; loss itself is computed outside.
            with amp.autocast(enabled=config['amp']):
                output = model(inputs)
            loss = criterion(output, targets)

            # Gradient scaling
            # https://www.youtube.com/watch?v=OqCrNkjN_PM
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()

            running_loss += loss.item()
            processed_imgs += targets.size(0)
            _, predicted = torch.max(output, 1)
            correct_labels += (predicted == targets).sum().item()

            epoch_padding = int(math.log10(config['epochs']) + 1)
            batch_padding = int(math.log10(len(dataloader.dataset)) + 1)
            print(
                f"\rEpoch {epoch+1:0{epoch_padding}d}/{config['epochs']}"
                f"\tImg {processed_imgs:{batch_padding}d}/{len(dataloader.dataset)}"
                f"\tLoss {running_loss / (step+1):6.4f}"
                f"\tAcc {100.0*correct_labels/processed_imgs:5.3f}%\t",
                sep=' ',
                end='',
                flush=True)

        elapsed = time.time() - epoch_time
        print(
            f"({elapsed:.3f}s, {elapsed/len(dataloader):.3}s/step, {elapsed/len(dataset):.3}s/img)"
        )

        # NOTE: `step` intentionally carries over from the inner loop here
        # (the last step index of the epoch).
        global_step = epoch * len(dataloader) + step
        writer.add_scalar('training/loss', running_loss / (step + 1),
                          global_step)
        writer.add_scalar('training/accuracy',
                          100.0 * correct_labels / processed_imgs, global_step)

        #if not config['overfit']:
        # Checkpoint every 10 epochs (skipping epoch 0).
        if epoch % 10 == 0 and epoch != 0:
            cp_path = checkpoints_dir / ("checkpoint_epoch" + str(epoch + 1) +
                                         ".pth")

            # NOTE(review): 'loss' stores the last batch's loss tensor, not
            # a plain float — confirm that is intended.
            torch.save(
                {
                    'epoch': epoch,
                    'model': model.state_dict(),
                    'optim': optimizer.state_dict(),
                    'loss': loss
                }, str(cp_path))

            print(f"Saved checkpoint to {str(cp_path)}")
Example #12
0
    # Read csv (video_name, label)
    df = pd.read_json(args.json_file, orient='records')

    total_videos = int(df['video_name'].count())

    # Add video index column (to be utilized by json)
    df['video_idx'] = range(total_videos)

    # Compute the sequence length (no. of frames) for each video (row)
    df['video_length'] = df['video_name'].apply(lambda x: _count_frames(x, args.frames_dir))

    # Image Mean & Std-Dev for Normalization
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])

    dataset = VideoFramesDataset(args.frames_dir, df, Compose([Resize((224, 224)), ToTensor(), Normalize(mean, std)]))
    # dataset = VideoFramesDataset(args.frames_dir, df, Compose([Resize((224, 224)), ToTensor()]))    # for sanity check

    # Compute the max sequence length, needed for embedding array - [N, F, D]
    max_video_len = compute_max_frames_len(args.frames_dir)
    total_frames = dataset.__len__()

    print('Total Videos: {}  |  Total Frames: {}  |  Max Video length: {}'.
          format(total_videos, total_frames, max_video_len))

    dataloader = DataLoader(dataset, args.batch_size, num_workers=args.num_workers)

    # Load model
    model, emb_dim = load_cnn(args.model)
    model.to(device)
        caption_vec += self.vocab.numericalize(row["InChI"])
        # e = time()
        # print("caption_vec InChI: ", (e-s))
        # s = time()
        caption_vec += [self.vocab.stoi["<EOS>"]]
        # e = time()
        # print("caption_vec EOS: ", (e-s))

        return (self.transform(tensorImage), torch.as_tensor(caption_vec))


# Preprocessing pipeline: resize then normalize.
# NOTE(review): ToTensor() is commented out — this assumes the dataset
# already yields tensors; confirm against MoleculesDataset.
transform = Compose([
    #RandomHorizontalFlip(),
    Resize((256, 256)),
    #ToTensor(),
    Normalize(mean=[0.5], std=[0.5]),
])

dataset = MoleculesDataset("data.csv", transform)

# Padding token id used when collating variable-length captions.
pad_idx = dataset.vocab.stoi["<PAD>"]


class CapsCollate:
    """
    Collate to apply the padding to the captions with dataloader
    """
    def __init__(self, pad_idx, batch_first=False, sequence_length=None):
        """Store padding configuration.

        Args:
            pad_idx (int): Token id used to pad captions.
            batch_first (bool): If True, padded batches are (batch, seq);
                otherwise (seq, batch).
            sequence_length (int | None): Optional fixed target length;
                None keeps the previous behavior (pad to the longest
                caption in the batch). Added for consistency with callers
                that pass sequence_length=....
        """
        self.pad_idx = pad_idx
        self.batch_first = batch_first
        self.sequence_length = sequence_length
Example #14
0
def main(args):
    """Run a class-incremental continual-learning experiment on Core50.

    Builds ClassIncremental scenarios (10 initial classes, +5 per task)
    over the Core50 train/val splits, trains a ResNet classifier task by
    task (optionally with replay and/or EWC regularization), validates on
    all tasks seen so far, and plots average accuracy per task.

    Args:
        args: Parsed CLI namespace; fields used include outfile, download,
            data_path, classifier, n_classes, use_parallel, epochs,
            convergence_criterion, lr, weight_decay, momentum, replay,
            importance.
    """
    def print2(parms, *aargs, **kwargs):
        # Print to stdout and mirror into args.outfile via redirect().
        redirect(parms, path=args.outfile, *aargs, **kwargs)

    start_time = time.time()

    # print args recap
    print2(args, end='\n\n')

    # Load the core50 data
    # TODO: check the symbolic links as for me no '../' prefix needed.

    if args.download:
        print2('cli switch download set to True so download will occur...')
        print2('  alternatively the batch script fetch_data_and_setup.sh can be used')


    print2('using directory for data_path path {}'.format(args.data_path))


    core50 = Core50(args.data_path, train=True, download=args.download)
    core50_val = Core50(args.data_path, train=False, download=args.download)

    # A new classes scenario, using continuum
    scenario = ClassIncremental(
        core50,
        increment=5,
        initial_increment=10,
        # following values come from the the mean and std of ImageNet - the basis of resnet.
        transformations=[ ToTensor(), Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])]
    )
    scenario_val = ClassIncremental(
        core50_val,
        increment=5,
        initial_increment=10,
        # following values come from the the mean and std of ImageNet - the basis of resnet.
        transformations=[ ToTensor(), Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])]
    )

    print2(f"Number of classes: {scenario.nb_classes}.")
    print2(f"Number of tasks: {scenario.nb_tasks}.")

    # Define a model
    # model
    if args.classifier == 'resnet18':
        classifier = models.resnet18(pretrained=True)
        classifier.fc = torch.nn.Linear(512, args.n_classes)

    elif args.classifier == 'resnet101':
        classifier = models.resnet101(pretrained=True)
        classifier.fc = nn.Linear(2048, args.n_classes)

    elif args.classifier == 'resnet34':
        classifier = models.resnet34(pretrained=True)
        classifier.fc = nn.Linear(512, args.n_classes)

    else:
        raise Exception('no classifier picked')

    # Fix for RuntimeError: Input type (torch.cuda.FloatTensor) and weight type (torch.FloatTensor) should be the same
    if torch.cuda.is_available():
        classifier.cuda()

    # TODO: fix device specific cuda usage to we can parallel
    # TODO: right now probably due to marshalling parallel taking slightly longer
    # TODO: this parm is now default to false.
    if args.use_parallel and torch.cuda.device_count() > 1:
        print2(f"Let's use {torch.cuda.device_count()} GPUs!")
        classifier = nn.DataParallel(classifier)

    # Tune the model hyperparameters
    max_epochs = args.epochs # 8
    convergence_criterion = args.convergence_criterion # 0.004  # End early if loss is less than this
    lr = args.lr  # 0.00001
    weight_decay = args.weight_decay # 0.000001
    momentum = args.momentum # 0.9

    # Define a loss function and criterion
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(
        classifier.parameters(), 
        lr=lr, 
        weight_decay=weight_decay, 
        momentum=momentum
        )
    print2("Criterion: " + str(criterion))
    print2("Optimizer: " + str(optimizer))

    # Validation accuracies
    accuracies = []

    # Iterate through our NC scenario
    for task_id, train_taskset in enumerate(scenario):

        print2(f"<-------------- Task {task_id + 1} ---------------->")

        # Use replay if it's specified
        if args.replay:

            # Add replay examples to current taskset
            replay_examples = taskset_with_replay(scenario, task_id, args.replay)
            train_taskset._x = np.append(train_taskset._x, replay_examples['x'])
            train_taskset._y = np.append(train_taskset._y, replay_examples['y'])
            train_taskset._t = np.append(train_taskset._t, replay_examples['t'])

        train_loader = DataLoader(train_taskset, batch_size=32, shuffle=True)
        unq_cls_train = np.unique(train_taskset._y)

        print2(f"This task contains {len(unq_cls_train)} unique classes")
        print2(f"Training classes: {unq_cls_train}")

        # Train the model
        classifier.train()
        if args.importance:
            # EWC
            if task_id == 0:
                # First task has no previous parameters to regularize against.
                train(classifier, task_id, train_loader, criterion, optimizer, max_epochs, convergence_criterion)
            else:
                # Collect samples of all previous tasks for the EWC penalty.
                old_tasks = []
                for prev_id, prev_taskset in enumerate(scenario):
                    if prev_id == task_id:
                        break
                    else:
                        old_tasks = old_tasks + list(prev_taskset._x)
                train_ewc(classifier, task_id, train_loader, criterion, EWC(classifier, train_taskset, scenario, task_id), args.importance, optimizer, max_epochs, convergence_criterion)
        else:
            train(classifier, task_id, train_loader, criterion, optimizer, max_epochs, convergence_criterion)

        print2("=== Finished Training ===")
        classifier.eval()

        # Validate against separate validation data
        cum_accuracy = 0.0
        for val_task_id, val_taskset in enumerate(scenario_val):

            # Validate on all previously trained tasks (but not future tasks)
            if val_task_id > task_id:
                break

            val_loader = DataLoader(val_taskset, batch_size=32, shuffle=True)

            # Make sure we're validating the correct classes
            unq_cls_validate = np.unique(val_taskset._y)
            print2(f"Validating classes: {unq_cls_validate} -- val_task_id:{val_task_id}  task_id:{task_id}")

            total = 0.0
            correct = 0.0
            pred_classes = np.array([])
            with torch.no_grad():
                for x, y, t in val_loader:
                    x, y = x.cuda(), y.cuda()
                    outputs = classifier(x)
                    _, predicted = torch.max(outputs.data, 1)
                    pred_classes = np.unique(np.append(pred_classes, predicted.cpu()))
                    total += y.size(0)
                    correct += (predicted == y).sum().item()

            print2(f"Classes predicted: {pred_classes}")
            print2(f"=== Validation Accuracy: {100.0 * correct / total}%\n")
            cum_accuracy += (correct / total)

        # NOTE(review): the hard-coded 9 presumably equals the total task
        # count (10 initial + 5*8 = 50 classes -> 9 tasks); averaging over 9
        # even on early tasks under-reports their accuracy — confirm intent.
        avg_accuracy = cum_accuracy / 9
        print2(f"Average Accuracy: {100.0 * avg_accuracy:.5f}%  [{avg_accuracy:.5f}]")
        accuracies.append((cum_accuracy / 9))
        # print2(f"Average Accuracy: {100.0 * cum_accuracy / 9.0}%")



    # Running Time
    print2("--- %s seconds ---" % (time.time() - start_time))

    # TO DO Add EWC Training

    # Some plots over time
    from pathlib import Path
    Path('continuum/output').mkdir(parents=True, exist_ok=True)

    plt.plot([1, 2, 3, 4, 5, 6, 7, 8, 9], accuracies, '-o', label="Naive")
    #plt.plot([1, 2, 3, 4, 5, 6, 7, 8, 9], rehe_accs, '-o', label="Rehearsal")
    #plt.plot([1, 2, 3, 4, 5, 6, 7, 8, 9], ewc_accs, '-o', label="EWC")
    plt.xlabel('Tasks Encountered', fontsize=14)
    plt.ylabel('Average Accuracy', fontsize=14)
    plt.title('Rehersal Strategy on Core50 w/ResNet18', fontsize=14)
    plt.xticks([1, 2, 3, 4, 5, 6, 7, 8, 9])
    plt.legend(prop={'size': 16})
    plt.show()
    # NOTE: savefig after show() may write an empty figure in some
    # matplotlib backends — TODO confirm ordering.
    filenames = dt.datetime.now().strftime("%Y%m%d-%H%M%S")
    plt.savefig('continuum/output/run_'+filenames+'.png')
Example #15
0
import torchvision
from torchvision.transforms.transforms import RandomAffine, RandomCrop, RandomHorizontalFlip, ToTensor, Normalize, Compose
from torch.utils.data import DataLoader
from datasets import SprayDataset



def standard_dataloader(dataset, batch_size=16, num_workers=1):
    """Wrap *dataset* in a non-shuffling, memory-pinned DataLoader."""
    return DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=True,
    )

# Inverse of the ImageNet normalization Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225]): Normalize applies (x - m) / s, so using
# mean=-m/s and std=1/s recovers the original pixel values.
unnormalize = Normalize(mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225],
                        std=[1/0.229, 1/0.224, 1/0.225])

if __name__ == '__main__':

    dataset = prepare_data_from_list('/home/markpp/datasets/teejet/iphone_data/val.txt')
    dataloader = standard_dataloader(dataset, batch_size=8)

    # cleanup output dir
    import os, shutil
    if os.path.exists("output"):
        shutil.rmtree("output")
    os.makedirs("output")

    import numpy as np
    import cv2
    data = iter(dataloader)
    labels = []
    for batch in range(2):
        images,targets = next(data)
        for i, img_tar in enumerate(zip(images,targets)):
def get_data(dataset="MITIndoor67", root=None, train_folder='train',
             val_folder='val', batch_size=64, ten_crops=False,
             with_attribute=False):
    """Build train/val dataloaders for one of three scene datasets.

    Args:
        dataset (str): One of "ADE20K", "MITIndoor67", "SUN397".
        root (str): Dataset root directory (required).
        train_folder (str): Sub-folder with training images.
        val_folder (str): Sub-folder with validation images.
        batch_size (int): Batch size for both loaders.
        ten_crops (bool): If True, validation uses TenCrop (loader yields
            a 4D crops tensor per sample).
        with_attribute (bool): Forwarded to the dataset constructors.

    Returns:
        (train_loader, val_loader, classes, attributes) — attributes is []
        unless with_attribute is True.
    """
    assert root is not None
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        PowerPIL(),
        # transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0),
        # transforms.Resize((256, 256), interpolation=Image.BICUBIC),
        # transforms.CenterCrop((224, 224)),
        # transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        # RandomErasing(probability=0.5, mean=[0.0, 0.0, 0.0]),
    ])

    if ten_crops:
        val_transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.TenCrop((224, 224)),
            Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])),  # returns a 4D tensor
            Lambda(lambda crops: torch.stack(
                [Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops])),
        ])
    else:
        val_transform = transforms.Compose([
            # transforms.Resize((256, 256), interpolation=Image.BICUBIC),
            # transforms.CenterCrop((224, 224)),
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

    # Dataset-specific construction; the asserts pin the expected sizes.
    if dataset == "ADE20K":
        train_set = ADE20KDataset(root, folder=train_folder, transform=train_transform, with_attribute=with_attribute)
        val_set = ADE20KDataset(root, folder=val_folder, transform=val_transform, with_attribute=with_attribute)
        assert len(train_set) == 20210
        assert len(val_set) == 2000
        assert len(train_set.classes) == 1055

    elif dataset == "MITIndoor67":
        train_set = MITIndoor67Dataset(osp.join(root, train_folder), train_transform, with_attribute=with_attribute)
        val_set = MITIndoor67Dataset(osp.join(root, val_folder), val_transform, with_attribute=with_attribute)
        assert len(val_set) == 20 * 67
        assert len(train_set) == 80 * 67
        assert len(train_set.classes) == 67
    elif dataset == "SUN397":
        train_set = SUN397Dataset(osp.join(root, train_folder),
                                  train_transform,
                                  with_attribute=with_attribute)

        val_set = SUN397Dataset(osp.join(root, val_folder),
                                val_transform,
                                with_attribute=with_attribute)
        # NOTE(review): train and val both assert 50 * 397 — confirm the
        # train split really has the same size as the val split.
        assert (len(train_set) == 50 * 397)
        assert (len(val_set) == 50 * 397)
        assert len(train_set.classes) == 397

    #weigths = get_attr_weight(train_set)
    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=4)
    # NOTE(review): shuffle=True on the validation loader — harmless for
    # accuracy but unusual; confirm it is intended.
    val_loader = torch.utils.data.DataLoader(val_set,
                                             batch_size=batch_size,
                                             shuffle=True,
                                             num_workers=4)

    assert len(train_set.classes) == len(val_set.classes)

    print('Dataset loaded!')
    print(f'Train set. Size {len(train_set.imgs)}')
    print(f'Validation set. Size {len(val_set.imgs)}')
    print('Train set number of scenes: {}'.format(len(train_set.classes)))
    print('Validation set number of scenes: {}'.format(len(val_set.classes)))
    return train_loader, val_loader, train_set.classes, train_set.attributes if with_attribute else []
Example #17
0
    def randaugment(self, n, m):
        """Sample *n* augmentation op names, each paired with magnitude *m*."""
        chosen = np.random.choice(RAND_AUGMENT_NAMES, n)
        return [(op_name, m) for op_name in chosen]

    def step_magnitude_controller(self, value):
        """Set the magnitude controller to *value* (presumably scales
        augmentation strength elsewhere — TODO confirm)."""
        self.magnitude_controller = value


if __name__ == '__main__':
    # Demo: build a fixed augmentation policy of (op, probability, magnitude)
    # sub-policies and apply it to a random uint8 image.
    pc = AugmentPolicyTransform([[('Solarize', 0.66, 0.34),
                                  ('Equalize', 0.56, 0.61)],
                                 [('Equalize', 0.43, 0.06),
                                  ('AutoContrast', 0.66, 0.08)],
                                 [('Color', 0.72, 0.47),
                                  ('Contrast', 0.88, 0.86)],
                                 [('Brightness', 0.84, 0.71),
                                  ('Color', 0.31, 0.74)],
                                 [('Rotate', 0.68, 0.26),
                                  ('TranslateX', 0.38, 0.88)]])

    cp = Compose([
        pc,
        # NOTE(review): std of (0, 0, 0) divides by zero inside Normalize —
        # presumably placeholder values; confirm before real use.
        Normalize((0, 0, 0), (0, 0, 0)),
    ])

    inp = (np.random.rand(4, 4, 3) * 255).astype(np.uint8)
    for i in range(10):
        print(np.asarray(pc(inp)))
        print(cp(inp))