Example 1
def load_unlabeled_data(tar, data_dir='dataset/'):
    """Build weakly- and strongly-augmented ImageFolder views of a target domain.

    Parameters
    ----------
    tar : str
        Target-domain folder name under ``data_dir``; images are expected in
        ``<data_dir>/<tar>/images``.
    data_dir : str
        Root directory containing the domain folders (trailing slash optional).

    Returns
    -------
    tuple
        ``(unlabeled_weak_data, unlabeled_strong_data)`` — two ImageFolder
        datasets over the same images, differing only in augmentation.
    """
    import os

    from torchvision import transforms

    # os.path.join keeps this consistent with load_data() and, unlike plain
    # string concatenation, also works when data_dir has no trailing slash.
    folder_tar = os.path.join(data_dir, tar, 'images')

    # ImageNet normalization, shared by both pipelines (Normalize is stateless).
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    transform = {
        # Weak view: deterministic resize only.
        'weak':
        transforms.Compose([
            transforms.Resize([224, 224]),
            transforms.ToTensor(),
            normalize,
        ]),
        # Strong view: random crop/flip plus RandAugment.
        # Paper: RandAugment: Practical data augmentation with no separate search
        'strong':
        transforms.Compose([
            transforms.Resize([256, 256]),
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            RandAugment(),
            transforms.ToTensor(),
            # Cutout(size=16), # (https://github.com/uoguelph-mlrg/Cutout/blob/master/util/cutout.py)
            normalize,
        ]),
    }

    unlabeled_weak_data = datasets.ImageFolder(root=folder_tar,
                                               transform=transform['weak'])
    unlabeled_strong_data = datasets.ImageFolder(root=folder_tar,
                                                 transform=transform['strong'])

    return unlabeled_weak_data, unlabeled_strong_data
Example 2
    def __init__(self,
                 size,
                 padding=None,
                 pad_if_needed=False,
                 fill=0,
                 padding_mode='constant'):
        """Random-crop wrapper that holds both backend implementations.

        The arguments mirror ``RandomCrop`` from torchvision; a scalar
        ``size`` is normalized to a square ``(size, size)`` tuple, and the
        raw arguments are forwarded unchanged to both the torchvision
        (``tv_t``) and OpenCV (``cv_t``) crop transforms.
        """
        if isinstance(size, numbers.Number):
            edge = int(size)
            self.size = (edge, edge)
        else:
            self.size = size
        self.padding = padding
        self.pad_if_needed = pad_if_needed
        self.fill = fill
        self.padding_mode = padding_mode

        # Both backends receive the *original* size argument, exactly as the
        # caller supplied it.
        crop_args = (size, padding, pad_if_needed, fill, padding_mode)
        self.tv_F = tv_t.RandomCrop(*crop_args)
        self.cv_F = cv_t.RandomCrop(*crop_args)
Example 3
def get_cpu_transforms(augs: DictConfig) -> dict:
    """Makes CPU augmentations from the aug section of a configuration.

    Parameters
    ----------
    augs : DictConfig
        augmentation parameters

    Returns
    -------
    xform : dict
        keys: ['train', 'val', 'test']. Values: a composed OpenCV augmentation
        pipeline callable. Example: auged_images = xform['train'](images)
    """
    train_ops = []
    val_ops = []
    # NOTE: the append order below is significant — crop, then resize, then pad.
    if augs.crop_size is not None:
        # Train crops randomly; val/test crop deterministically from the center.
        train_ops.append(transforms.RandomCrop(augs.crop_size))
        val_ops.append(transforms.CenterCrop(augs.crop_size))
    if augs.resize is not None:
        train_ops.append(transforms.Resize(augs.resize))
        val_ops.append(transforms.Resize(augs.resize))
    if augs.pad is not None:
        pad_amount = tuple(augs.pad)
        train_ops.append(transforms.Pad(pad_amount))
        val_ops.append(transforms.Pad(pad_amount))

    # Always finish by moving channels into the layout the models expect.
    train_ops.append(Transpose())
    val_ops.append(Transpose())

    composed_train = transforms.Compose(train_ops)
    composed_val = transforms.Compose(val_ops)

    # Test shares the deterministic validation pipeline.
    xform = {'train': composed_train,
             'val': composed_val,
             'test': composed_val}
    log.debug('CPU transforms: {}'.format(xform))
    return xform
Example 4
def load_data(src, tar, data_dir='dataset', use_cv2=False):
    """Build source/target ImageFolder datasets for domain adaptation.

    Parameters
    ----------
    src, tar : str
        Source and target domain folder names; images are expected in
        ``<data_dir>/<domain>/images``.
    data_dir : str
        Root directory containing the domain folders.
    use_cv2 : bool
        When True, decode images with OpenCV and use ``opencv_transforms``;
        otherwise use PIL via torchvision.

    Returns
    -------
    tuple
        ``(source_data, target_train_data, target_test_data)`` — source and
        target-train share the augmented 'train' pipeline; target-test uses
        the deterministic 'test' pipeline.
    """
    # Multi-argument join replaces the original nested os.path.join calls.
    folder_src = os.path.join(data_dir, src, 'images')
    folder_tar = os.path.join(data_dir, tar, 'images')

    # Only the image decoder and the Resize ops differ between the two
    # backends; everything downstream of them is identical, so build the
    # backend-specific pieces first and share the rest.
    loader_kwargs = {}
    if use_cv2:
        import cv2
        from opencv_transforms import transforms

        def loader_opencv(path: str) -> np.ndarray:
            # cv2 returns an ndarray, which opencv_transforms operates on.
            return cv2.imread(path)

        loader_kwargs['loader'] = loader_opencv
        resize_train = transforms.Resize((256, 256),
                                         interpolation=cv2.INTER_LINEAR)
        resize_test = transforms.Resize((224, 224),
                                        interpolation=cv2.INTER_LINEAR)
    else:
        from torchvision import transforms
        resize_train = transforms.Resize((256, 256))
        resize_test = transforms.Resize((224, 224))

    # ImageNet normalization (Normalize is stateless, safe to share).
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = {
        'train':
        transforms.Compose([
            resize_train,
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]),
        'test':
        transforms.Compose([
            resize_test,
            transforms.ToTensor(),
            normalize,
        ]),
    }

    source_data = datasets.ImageFolder(root=folder_src,
                                       transform=transform['train'],
                                       **loader_kwargs)
    # DataLoaders (batching/shuffling/drop_last) are left to the caller.
    target_train_data = datasets.ImageFolder(root=folder_tar,
                                             transform=transform['train'],
                                             **loader_kwargs)
    target_test_data = datasets.ImageFolder(root=folder_tar,
                                            transform=transform['test'],
                                            **loader_kwargs)

    return source_data, target_train_data, target_test_data
Example 5
def main(cfg) -> None:
    """Train an IQA model on memory-mapped AVA data per the given config.

    Sets up logging and checkpointing under ``cfg.workdir``, builds the model,
    transforms, datasets, optimizer and one-cycle LR schedule, then alternates
    train/eval phases — checkpointing after every evaluation and keeping the
    best model by validation loss.
    """
    workdir = Path(cfg.workdir)
    workdir.mkdir(parents=True, exist_ok=True)
    device = torch.device("cuda" if torch.cuda.is_available() else 'cpu')
    set_logger(workdir / 'log.txt')
    # Persist the effective config next to the checkpoints for reproducibility.
    cfg.dump_to_file(workdir / 'config.yml')
    saver = Saver(workdir, keep_num=10)  # rolling window of 10 checkpoints
    logging.info(f'config: \n{cfg}')
    logging.info(f'use device: {device}')

    # Model class is looked up by name in the iqa module namespace.
    model = iqa.__dict__[cfg.model.name](**cfg.model.kwargs)
    model = model.to(device)

    # Keep `model` pointing at the raw module so state_dict() is saved
    # without the DataParallel 'module.' prefix; train/eval through model_dp.
    if torch.cuda.device_count() > 1:
        model_dp = nn.DataParallel(model)
    else:
        model_dp = model

    train_transform = Transform(
        transforms.Compose([
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ]))

    # Validation also random-crops (no flip) rather than center-cropping.
    val_transform = Transform(
        transforms.Compose([transforms.RandomCrop(224),
                            transforms.ToTensor()]))

    # Build the memmap caches on first run only.
    if not Path(cfg.ava.train_cache).exists():
        create_memmap(cfg.ava.train_labels, cfg.ava.images,
                      cfg.ava.train_cache, cfg.num_workers)
    if not Path(cfg.ava.val_cache).exists():
        # NOTE(review): the val cache is built from cfg.ava.train_labels —
        # looks like a copy-paste of the block above; confirm whether
        # cfg.ava.val_labels was intended.
        create_memmap(cfg.ava.train_labels, cfg.ava.images, cfg.ava.val_cache,
                      cfg.num_workers)

    trainset = MemMap(cfg.ava.train_cache, train_transform)
    valset = MemMap(cfg.ava.val_cache, val_transform)

    # One "epoch" (eval_interval steps) per evaluation; partial final epoch
    # is handled inside the loop below.
    total_steps = len(trainset) // cfg.batch_size * cfg.num_epochs
    eval_interval = len(trainset) // cfg.batch_size
    logging.info(f'total steps: {total_steps}, eval interval: {eval_interval}')
    model_dp.train()
    # Parameters are grouped (presumably for per-group weight decay) before
    # being handed to SGD.
    parameters = group_parameters(model)
    optimizer = SGD(parameters,
                    cfg.lr,
                    cfg.momentum,
                    weight_decay=cfg.weight_decay)

    # One-cycle schedule: warms up from cfg.warmup_lr to cfg.lr in the first
    # 1% of steps, then anneals down toward cfg.final_lr.
    lr_scheduler = OneCycleLR(optimizer,
                              max_lr=cfg.lr,
                              div_factor=cfg.lr / cfg.warmup_lr,
                              total_steps=total_steps,
                              pct_start=0.01,
                              final_div_factor=cfg.warmup_lr / cfg.final_lr)

    train_loader = torch.utils.data.DataLoader(trainset,
                                               batch_size=cfg.batch_size,
                                               shuffle=True,
                                               num_workers=cfg.num_workers,
                                               drop_last=True,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(valset,
                                             batch_size=cfg.batch_size,
                                             shuffle=False,
                                             num_workers=cfg.num_workers,
                                             pin_memory=True)

    # Best-so-far validation loss; seeded very high so the first eval wins.
    curr_loss = 1e9
    state = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'lr_scheduler': lr_scheduler.state_dict(),
        'step': 0,  # init step,
        'cfg': cfg,
        'loss': curr_loss
    }

    # Save the untrained state as checkpoint 0.
    saver.save(0, state)

    # repeat_loader cycles the DataLoader indefinitely so train_steps can
    # draw an arbitrary number of batches.
    trainloader = repeat_loader(train_loader)
    batch_processor = BatchProcessor(device)
    start = time.time()
    for step in range(0, total_steps, eval_interval):
        # Clamp the final chunk so we never run past total_steps.
        num_steps = min(step + eval_interval, total_steps) - step
        step += num_steps
        trainmeter = train_steps(model_dp, trainloader, optimizer,
                                 lr_scheduler, emd_loss, batch_processor,
                                 num_steps)
        valmeter = evaluate(model_dp, val_loader, emd_loss, batch_processor)
        finish = time.time()
        # Throughput in images/sec over this train+eval chunk.
        img_s = cfg.batch_size * eval_interval / (finish - start)
        loss = valmeter.meters['loss'].global_avg

        state = {
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'lr_scheduler': lr_scheduler.state_dict(),
            'step': step,  # step reached after this chunk
            'cfg': cfg,
            'loss': loss
        }
        saver.save(step, state)

        # Track the best model by validation loss.
        if loss < curr_loss:
            curr_loss = loss
            saver.save_best(state)

        logging.info(
            f'step: [{step}/{total_steps}] img_s: {img_s:.2f} train: [{trainmeter}] eval:[{valmeter}]'
        )
        start = time.time()