Example #1
def create_train_val_dataloader(opt, logger):
    # create train and val dataloaders
    train_loader, val_loaders = None, []
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            dataset_enlarge_ratio = dataset_opt.get('dataset_enlarge_ratio', 1)
            train_set = create_dataset(dataset_opt)
            train_sampler = EnlargedSampler(train_set, opt['world_size'],
                                            opt['rank'], dataset_enlarge_ratio)
            train_loader = create_dataloader(train_set,
                                             dataset_opt,
                                             num_gpu=opt['num_gpu'],
                                             dist=opt['dist'],
                                             sampler=train_sampler,
                                             seed=opt['manual_seed'])

            num_iter_per_epoch = math.ceil(
                len(train_set) * dataset_enlarge_ratio /
                (dataset_opt['batch_size_per_gpu'] * opt['world_size']))
            total_iters = int(opt['train']['total_iter'])
            total_epochs = math.ceil(total_iters / num_iter_per_epoch)
            logger.info(
                'Training statistics:'
                f'\n\tNumber of train images: {len(train_set)}'
                f'\n\tDataset enlarge ratio: {dataset_enlarge_ratio}'
                f'\n\tBatch size per gpu: {dataset_opt["batch_size_per_gpu"]}'
                f'\n\tWorld size (gpu number): {opt["world_size"]}'
                f'\n\tRequire iter number per epoch: {num_iter_per_epoch}'
                f'\n\tTotal epochs: {total_epochs}; iters: {total_iters}.')

        elif phase == 'val':
            import os  # local import kept from the snippet; normally at module top

            names = ['Test100', 'Rain100H', 'Rain100L', 'Test2800', 'Test1200']

            val_loaders = []

            for name in names:
                dataset_opt['dataroot_gt'] = os.path.join(
                    './datasets/Rain13k/test', name, 'target')
                dataset_opt['dataroot_lq'] = os.path.join(
                    './datasets/Rain13k/test', name, 'input')
                dataset_opt['io_backend'] = {'type': 'disk'}

                val_set = create_dataset(dataset_opt)
                val_loader = create_dataloader(val_set,
                                               dataset_opt,
                                               num_gpu=opt['num_gpu'],
                                               dist=opt['dist'],
                                               sampler=None,
                                               seed=opt['manual_seed'])
                logger.info(
                    f'Number of val images/folders in {dataset_opt["name"]}: '
                    f'{len(val_set)}')

                val_loaders.append(val_loader)
        else:
            raise ValueError(f'Dataset phase {phase} is not recognized.')

    return train_loader, train_sampler, val_loaders, total_epochs, total_iters
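The epoch arithmetic in this example is easy to sanity-check by hand. A minimal sketch with illustrative numbers (the image count, batch size, GPU count, and iteration budget below are assumptions, not values from the snippet):

import math

num_images = 13712            # assumed training-set size
dataset_enlarge_ratio = 1
batch_size_per_gpu = 8
world_size = 8                # assumed number of GPUs
total_iters = 400000

num_iter_per_epoch = math.ceil(
    num_images * dataset_enlarge_ratio / (batch_size_per_gpu * world_size))
total_epochs = math.ceil(total_iters / num_iter_per_epoch)
print(num_iter_per_epoch, total_epochs)  # 215 1861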
Example #2
def create_train_val_dataloader(opt, logger):
    # create train and val dataloaders
    train_loader, val_loader = None, None
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            dataset_enlarge_ratio = dataset_opt.get('dataset_enlarge_ratio', 1)
            train_set = create_dataset(dataset_opt)
            train_sampler = EnlargedSampler(train_set, opt['world_size'],
                                            opt['rank'], dataset_enlarge_ratio)
            train_loader = create_dataloader(
                train_set,
                dataset_opt,
                num_gpu=opt['num_gpu'],
                dist=opt['dist'],
                sampler=train_sampler,
                seed=opt['manual_seed'])

            num_iter_per_epoch = math.ceil(
                len(train_set) * dataset_enlarge_ratio /
                (dataset_opt['batch_size_per_gpu'] * opt['world_size']))
            total_iters = int(opt['train']['total_iter'])
            total_epochs = math.ceil(total_iters / num_iter_per_epoch)
            logger.info(
                'Training statistics:'
                f'\n\tNumber of train images: {len(train_set)}'
                f'\n\tDataset enlarge ratio: {dataset_enlarge_ratio}'
                f'\n\tBatch size per gpu: {dataset_opt["batch_size_per_gpu"]}'
                f'\n\tWorld size (gpu number): {opt["world_size"]}'
                f'\n\tRequire iter number per epoch: {num_iter_per_epoch}'
                f'\n\tTotal epochs: {total_epochs}; iters: {total_iters}.')

        elif phase == 'val':
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(
                val_set,
                dataset_opt,
                num_gpu=opt['num_gpu'],
                dist=opt['dist'],
                sampler=None,
                seed=opt['manual_seed'])
            logger.info(
                f'Number of val images/folders in {dataset_opt["name"]}: '
                f'{len(val_set)}')
        else:
            raise ValueError(f'Dataset phase {phase} is not recognized.')

    return train_loader, train_sampler, val_loader, total_epochs, total_iters
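dataset_enlarge_ratio takes effect through EnlargedSampler: each rank iterates over its shard of an index list whose length is ratio times the dataset, reshuffled deterministically per epoch. A minimal sampler in that spirit (an illustrative sketch, not the actual BasicSR class):

import math

import torch
from torch.utils.data import Sampler

class SketchEnlargedSampler(Sampler):
    """Repeat the dataset `ratio` times and shard indices across ranks."""

    def __init__(self, dataset, num_replicas, rank, ratio=1):
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.num_samples = math.ceil(len(dataset) * ratio / num_replicas)
        self.total_size = self.num_samples * num_replicas

    def __iter__(self):
        g = torch.Generator()
        g.manual_seed(self.epoch)  # deterministic shuffle per epoch
        indices = torch.randperm(self.total_size, generator=g).tolist()
        indices = [idx % len(self.dataset) for idx in indices]
        return iter(indices[self.rank:self.total_size:self.num_replicas])

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch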
Example #3
def main():
    # options
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt',
                        type=str,
                        required=True,
                        help='Path to option YAML file.')
    parser.add_argument('--launcher',
                        choices=['none', 'pytorch', 'slurm'],
                        default='none',
                        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    opt = parse(args.opt, is_train=False)

    # distributed testing settings
    if args.launcher == 'none':  # disabled distributed training
        opt['dist'] = False
        print('Disabled distributed testing.', flush=True)
    else:
        opt['dist'] = True
        if args.launcher == 'slurm' and 'dist_params' in opt:
            init_dist(args.launcher, **opt['dist_params'])
        else:
            init_dist(args.launcher)

    opt = dict_to_nonedict(opt)

    make_exp_dirs(opt)
    log_file = osp.join(opt['path']['log'],
                        f"test_{opt['name']}_{get_time_str()}.log")
    logger = get_root_logger(logger_name='basicsr',
                             log_level=logging.INFO,
                             log_file=log_file)
    logger.info(get_env_info())
    logger.info(dict2str(opt))

    # create test dataset and dataloader
    test_loaders = []
    for phase, dataset_opt in sorted(opt['datasets'].items()):
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(test_set, dataset_opt)
        logger.info(
            f"Number of test images in {dataset_opt['name']}: {len(test_set)}")
        test_loaders.append(test_loader)

    # create model
    model = create_model(opt)

    for test_loader in test_loaders:
        test_set_name = test_loader.dataset.opt['name']
        logger.info(f'Testing {test_set_name}...')
        model.validation(test_loader,
                         current_iter=opt['name'],
                         tb_logger=None,
                         save_img=opt['val']['save_img'])
Example #4
def calculate_stats_from_dataset():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    parser = argparse.ArgumentParser()
    parser.add_argument('--num_sample', type=int, default=50000)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--size', type=int, default=512)
    parser.add_argument('--dataroot', type=str, default='datasets/ffhq')
    args = parser.parse_args()

    # inception model
    inception = load_patched_inception_v3(device)

    # create dataset
    opt = {}
    opt['name'] = 'FFHQ'
    opt['type'] = 'FFHQDataset'
    opt['dataroot_gt'] = f'datasets/ffhq/ffhq_{args.size}.lmdb'
    opt['io_backend'] = dict(type='lmdb')
    opt['use_hflip'] = False
    opt['mean'] = [0.5, 0.5, 0.5]
    opt['std'] = [0.5, 0.5, 0.5]
    dataset = create_dataset(opt)

    # create dataloader
    data_loader = DataLoader(dataset=dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=4,
                             sampler=None,
                             drop_last=False)
    total_batch = math.ceil(args.num_sample / args.batch_size)

    def data_generator(data_loader, total_batch):
        for idx, data in enumerate(data_loader):
            if idx >= total_batch:
                break
            else:
                yield data['gt']

    features = extract_inception_features(
        data_generator(data_loader, total_batch), inception, total_batch,
        device)
    features = features.numpy()
    total_len = features.shape[0]
    features = features[:args.num_sample]
    print(f'Extracted {total_len} features, '
          f'use the first {features.shape[0]} features to calculate stats.')
    mean = np.mean(features, 0)
    cov = np.cov(features, rowvar=False)

    save_path = f'inception_{opt["name"]}_{args.size}.pth'
    torch.save(dict(name=opt['name'], size=args.size, mean=mean, cov=cov),
               save_path,
               _use_new_zipfile_serialization=False)
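The saved statistics file can be inspected (or consumed by an FID computation) directly. A quick check of what it should contain, assuming the default name produced above and the usual 2048-dim Inception-v3 pooling features:

import torch

stats = torch.load('inception_FFHQ_512.pth')  # name assumes the defaults above
print(stats['name'], stats['size'])
print(stats['mean'].shape, stats['cov'].shape)  # expected: (2048,) and (2048, 2048)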
Example #5
def main():
    """Test FFHQ dataset."""
    opt = {}
    opt['dist'] = False
    opt['gpu_ids'] = [0]
    opt['phase'] = 'train'

    opt['name'] = 'FFHQ'
    opt['type'] = 'FFHQDataset'

    opt['dataroot_gt'] = 'datasets/ffhq/ffhq_256.lmdb'
    opt['io_backend'] = dict(type='lmdb')

    opt['use_hflip'] = True
    opt['mean'] = [0.5, 0.5, 0.5]
    opt['std'] = [0.5, 0.5, 0.5]

    opt['use_shuffle'] = True
    opt['num_worker_per_gpu'] = 1
    opt['batch_size_per_gpu'] = 4

    opt['dataset_enlarge_ratio'] = 1

    mmcv.mkdir_or_exist('tmp')

    dataset = create_dataset(opt)
    data_loader = create_dataloader(dataset,
                                    opt,
                                    num_gpu=0,
                                    dist=opt['dist'],
                                    sampler=None)

    nrow = int(math.sqrt(opt['batch_size_per_gpu']))
    padding = 2 if opt['phase'] == 'train' else 0

    print('start...')
    for i, data in enumerate(data_loader):
        if i > 5:
            break
        print(i)

        gt = data['gt']
        print(torch.min(gt), torch.max(gt))
        gt_path = data['gt_path']
        print(gt_path)
        torchvision.utils.save_image(gt,
                                     f'tmp/gt_{i:03d}.png',
                                     nrow=nrow,
                                     padding=padding,
                                     normalize=True,
                                     range=(-1, 1))  # renamed to 'value_range' in newer torchvision
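Because the dataset normalizes with mean 0.5 and std 0.5, gt lives in [-1, 1]; normalize=True with range=(-1, 1) makes save_image map that interval back to [0, 1] before writing. The equivalent manual denormalization, for reference:

import torch

gt = torch.empty(4, 3, 256, 256).uniform_(-1, 1)  # stand-in batch in [-1, 1]
gt_01 = (gt.clamp(-1, 1) + 1) / 2                 # what range=(-1, 1) effectively does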
Example #6
def main():
    # parse options, set distributed setting, set random seed
    opt = parse_options(is_train=False)

    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True

    # mkdir and initialize loggers
    make_exp_dirs(opt)
    log_file = osp.join(opt['path']['log'],
                        f"test_{opt['name']}_{get_time_str()}.log")
    logger = get_root_logger(
        logger_name='basicsr', log_level=logging.INFO, log_file=log_file)
    logger.info(get_env_info())
    logger.info(dict2str(opt))

    # create test dataset and dataloader
    test_loaders = []
    for phase, dataset_opt in sorted(opt['datasets'].items()):
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(
            test_set,
            dataset_opt,
            num_gpu=opt['num_gpu'],
            dist=opt['dist'],
            sampler=None,
            seed=opt['manual_seed'])
        logger.info(
            f"Number of test images in {dataset_opt['name']}: {len(test_set)}")
        test_loaders.append(test_loader)

    # create model
    model = create_model(opt)

    for test_loader in test_loaders:
        test_set_name = test_loader.dataset.opt['name']
        logger.info(f'Testing {test_set_name}...')
        rgb2bgr = opt['val'].get('rgb2bgr', True)
        # whether to use uint8 images to compute metrics
        use_image = opt['val'].get('use_image', True)
        model.validation(
            test_loader,
            current_iter=opt['name'],
            tb_logger=None,
            save_img=opt['val']['save_img'],
            rgb2bgr=rgb2bgr, use_image=use_image)
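The rgb2bgr flag exists because image I/O and metrics in this codebase typically go through OpenCV, which stores channels in BGR order. The conversion is only a channel flip; a minimal illustration for an HWC array (the layout is an assumption here):

import numpy as np

img_rgb = np.zeros((64, 64, 3), dtype=np.uint8)  # dummy HWC image
img_bgr = img_rgb[..., ::-1]                     # RGB -> BGR by reversing channels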
Example #7
def main(mode='folder'):
    """Test paired image dataset.

    Args:
        mode: There are three modes: 'lmdb', 'folder', 'meta_info_file'.
    """
    opt = {}
    opt['dist'] = False
    opt['phase'] = 'train'

    opt['name'] = 'DIV2K'
    opt['type'] = 'PairedImageDataset'
    if mode == 'folder':
        opt['dataroot_gt'] = 'datasets/DIV2K/DIV2K_train_HR_sub'
        opt['dataroot_lq'] = 'datasets/DIV2K/DIV2K_train_LR_bicubic/X4_sub'
        opt['filename_tmpl'] = '{}'
        opt['io_backend'] = dict(type='disk')
    elif mode == 'meta_info_file':
        opt['dataroot_gt'] = 'datasets/DIV2K/DIV2K_train_HR_sub'
        opt['dataroot_lq'] = 'datasets/DIV2K/DIV2K_train_LR_bicubic/X4_sub'
        opt['meta_info_file'] = 'basicsr/data/meta_info/meta_info_DIV2K800sub_GT.txt'  # noqa:E501
        opt['filename_tmpl'] = '{}'
        opt['io_backend'] = dict(type='disk')
    elif mode == 'lmdb':
        opt['dataroot_gt'] = 'datasets/DIV2K/DIV2K_train_HR_sub.lmdb'
        opt['dataroot_lq'] = 'datasets/DIV2K/DIV2K_train_LR_bicubic_X4_sub.lmdb'  # noqa:E501
        opt['io_backend'] = dict(type='lmdb')

    opt['gt_size'] = 128
    opt['use_flip'] = True
    opt['use_rot'] = True

    opt['use_shuffle'] = True
    opt['num_worker_per_gpu'] = 2
    opt['batch_size_per_gpu'] = 16
    opt['scale'] = 4

    opt['dataset_enlarge_ratio'] = 1

    mmcv.mkdir_or_exist('tmp')

    dataset = create_dataset(opt)
    data_loader = create_dataloader(dataset,
                                    opt,
                                    num_gpu=0,
                                    dist=opt['dist'],
                                    sampler=None)

    nrow = int(math.sqrt(opt['batch_size_per_gpu']))
    padding = 2 if opt['phase'] == 'train' else 0

    print('start...')
    for i, data in enumerate(data_loader):
        if i > 5:
            break
        print(i)

        lq = data['lq']
        gt = data['gt']
        lq_path = data['lq_path']
        gt_path = data['gt_path']
        print(lq_path, gt_path)
        torchvision.utils.save_image(lq,
                                     f'tmp/lq_{i:03d}.png',
                                     nrow=nrow,
                                     padding=padding,
                                     normalize=False)
        torchvision.utils.save_image(gt,
                                     f'tmp/gt_{i:03d}.png',
                                     nrow=nrow,
                                     padding=padding,
                                     normalize=False)
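In 'meta_info_file' mode the dataset takes its file list from a text file rather than scanning dataroot_gt. In BasicSR's convention each line is a filename plus its shape, so a parse like the following recovers the GT names (the exact line format is an assumption from that convention):

# illustrative parse of a BasicSR-style meta_info file,
# where lines look like: 0001_s001.png (480,480,3)
with open('basicsr/data/meta_info/meta_info_DIV2K800sub_GT.txt') as fin:
    gt_names = [line.split(' ')[0] for line in fin]
print(gt_names[:3])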
Example #8
def main():
    # options
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt',
                        type=str,
                        required=True,
                        help='Path to option YAML file.')
    parser.add_argument('--launcher',
                        choices=['none', 'pytorch', 'slurm'],
                        default='none',
                        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    opt = parse(args.opt, is_train=True)

    # distributed training settings
    if args.launcher == 'none':  # non-distributed training
        opt['dist'] = False
        print('Disable distributed training.', flush=True)
    else:
        opt['dist'] = True
        if args.launcher == 'slurm' and 'dist_params' in opt:
            init_dist(args.launcher, **opt['dist_params'])
        else:
            init_dist(args.launcher)

    rank, world_size = get_dist_info()
    opt['rank'] = rank
    opt['world_size'] = world_size

    # load resume states if exists
    if opt['path'].get('resume_state'):
        device_id = torch.cuda.current_device()
        resume_state = torch.load(
            opt['path']['resume_state'],
            map_location=lambda storage, loc: storage.cuda(device_id))
    else:
        resume_state = None

    # mkdir and loggers
    if resume_state is None:
        make_exp_dirs(opt)
    log_file = osp.join(opt['path']['log'],
                        f"train_{opt['name']}_{get_time_str()}.log")
    logger = get_root_logger(logger_name='basicsr',
                             log_level=logging.INFO,
                             log_file=log_file)
    logger.info(get_env_info())
    logger.info(dict2str(opt))
    # initialize tensorboard logger and wandb logger
    tb_logger = None
    if opt['logger'].get('use_tb_logger') and 'debug' not in opt['name']:
        log_dir = './tb_logger/' + opt['name']
        if resume_state is None and opt['rank'] == 0:
            mkdir_and_rename(log_dir)
        tb_logger = init_tb_logger(log_dir=log_dir)
    if (opt['logger'].get('wandb')
            is not None) and (opt['logger']['wandb'].get('project')
                              is not None) and ('debug' not in opt['name']):
        assert opt['logger'].get('use_tb_logger') is True, (
            'should turn on tensorboard when using wandb')
        init_wandb_logger(opt)

    # random seed
    seed = opt['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
        opt['manual_seed'] = seed
    logger.info(f'Random seed: {seed}')
    set_random_seed(seed + rank)

    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True

    # create train and val dataloaders
    train_loader, val_loader = None, None
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            dataset_enlarge_ratio = dataset_opt.get('dataset_enlarge_ratio', 1)
            train_set = create_dataset(dataset_opt)
            train_sampler = EnlargedSampler(train_set, world_size, rank,
                                            dataset_enlarge_ratio)
            train_loader = create_dataloader(train_set,
                                             dataset_opt,
                                             num_gpu=opt['num_gpu'],
                                             dist=opt['dist'],
                                             sampler=train_sampler,
                                             seed=seed)

            num_iter_per_epoch = math.ceil(
                len(train_set) * dataset_enlarge_ratio /
                (dataset_opt['batch_size_per_gpu'] * opt['world_size']))
            total_iters = int(opt['train']['total_iter'])
            total_epochs = math.ceil(total_iters / num_iter_per_epoch)
            logger.info(
                'Training statistics:'
                f'\n\tNumber of train images: {len(train_set)}'
                f'\n\tDataset enlarge ratio: {dataset_enlarge_ratio}'
                f'\n\tBatch size per gpu: {dataset_opt["batch_size_per_gpu"]}'
                f'\n\tWorld size (gpu number): {opt["world_size"]}'
                f'\n\tRequire iter number per epoch: {num_iter_per_epoch}'
                f'\n\tTotal epochs: {total_epochs}; iters: {total_iters}.')
        elif phase == 'val':
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set,
                                           dataset_opt,
                                           num_gpu=opt['num_gpu'],
                                           dist=opt['dist'],
                                           sampler=None,
                                           seed=seed)
            logger.info(
                f'Number of val images/folders in {dataset_opt["name"]}: '
                f'{len(val_set)}')
        else:
            raise ValueError(f'Dataset phase {phase} is not recognized.')
    assert train_loader is not None

    # create model
    if resume_state:
        check_resume(opt, resume_state['iter'])  # modify pretrain_model paths
    model = create_model(opt)

    # resume training
    if resume_state:
        logger.info(f"Resuming training from epoch: {resume_state['epoch']}, "
                    f"iter: {resume_state['iter']}.")
        start_epoch = resume_state['epoch']
        current_iter = resume_state['iter']
        model.resume_training(resume_state)  # handle optimizers and schedulers
    else:
        start_epoch = 0
        current_iter = 0

    # create message logger (formatted outputs)
    msg_logger = MessageLogger(opt, current_iter, tb_logger)

    # dataloader prefetcher
    prefetch_mode = opt['datasets']['train'].get('prefetch_mode')
    if prefetch_mode is None or prefetch_mode == 'cpu':
        prefetcher = CPUPrefetcher(train_loader)
    elif prefetch_mode == 'cuda':
        prefetcher = CUDAPrefetcher(train_loader, opt)
        logger.info(f'Use {prefetch_mode} prefetch dataloader')
        if opt['datasets']['train'].get('pin_memory') is not True:
            raise ValueError('Please set pin_memory=True for CUDAPrefetcher.')
    else:
        raise ValueError(f'Wrong prefetch_mode {prefetch_mode}. '
                         "Supported ones are: None, 'cuda', 'cpu'.")

    # training
    logger.info(
        f'Start training from epoch: {start_epoch}, iter: {current_iter}')
    data_time, iter_time = time.time(), time.time()
    start_time = time.time()

    for epoch in range(start_epoch, total_epochs + 1):
        train_sampler.set_epoch(epoch)
        prefetcher.reset()
        train_data = prefetcher.next()

        while train_data is not None:
            data_time = time.time() - data_time

            current_iter += 1
            if current_iter > total_iters:
                break
            # update learning rate
            model.update_learning_rate(current_iter,
                                       warmup_iter=opt['train'].get(
                                           'warmup_iter', -1))
            # training
            model.feed_data(train_data)
            model.optimize_parameters(current_iter)
            iter_time = time.time() - iter_time
            # log
            if current_iter % opt['logger']['print_freq'] == 0:
                log_vars = {'epoch': epoch, 'iter': current_iter}
                log_vars.update({'lrs': model.get_current_learning_rate()})
                log_vars.update({'time': iter_time, 'data_time': data_time})
                log_vars.update(model.get_current_log())
                msg_logger(log_vars)

            # save models and training states
            if current_iter % opt['logger']['save_checkpoint_freq'] == 0:
                logger.info('Saving models and training states.')
                model.save(epoch, current_iter)

            # validation
            if opt.get('val') is not None and (current_iter %
                                               opt['val']['val_freq'] == 0):
                model.validation(val_loader, current_iter, tb_logger,
                                 opt['val']['save_img'])

            data_time = time.time()
            iter_time = time.time()
            train_data = prefetcher.next()
        # end of iter

    # end of epoch

    consumed_time = str(
        datetime.timedelta(seconds=int(time.time() - start_time)))
    logger.info(f'End of training. Time consumed: {consumed_time}')
    logger.info('Save the latest model.')
    model.save(epoch=-1, current_iter=-1)  # -1 stands for the latest
    if opt.get('val') is not None:
        model.validation(val_loader, current_iter, tb_logger,
                         opt['val']['save_img'])
    if tb_logger:
        tb_logger.close()
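The prefetch_mode branch only swaps how batches are pulled; the training loop itself relies on a small reset()/next() protocol in which next() returns None at the end of an epoch. A minimal CPU-side sketch of that interface (illustrative, not the exact BasicSR class):

class SketchCPUPrefetcher:
    """Iterator wrapper exposing the reset()/next() protocol used above."""

    def __init__(self, loader):
        self.ori_loader = loader
        self.loader = iter(loader)

    def next(self):
        try:
            return next(self.loader)
        except StopIteration:
            return None  # signals the end of the epoch to the while-loop

    def reset(self):
        self.loader = iter(self.ori_loader)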
Example #9
def main(mode='folder'):
    """Test vimeo90k dataset.

    Args:
        mode: There are two modes: 'lmdb', 'folder'.
    """
    opt = {}
    opt['dist'] = False
    opt['phase'] = 'train'

    opt['name'] = 'Vimeo90K'
    opt['type'] = 'Vimeo90KDataset'
    if mode == 'folder':
        opt['dataroot_gt'] = 'datasets/vimeo90k/vimeo_septuplet/sequences'
        opt['dataroot_lq'] = 'datasets/vimeo90k/vimeo_septuplet_matlabLRx4/sequences'  # noqa E501
        opt['meta_info_file'] = 'basicsr/data/meta_info/meta_info_Vimeo90K_train_GT.txt'  # noqa E501
        opt['io_backend'] = dict(type='disk')
    elif mode == 'lmdb':
        opt['dataroot_gt'] = 'datasets/vimeo90k/vimeo90k_train_GT_only4th.lmdb'
        opt['dataroot_lq'] = 'datasets/vimeo90k/vimeo90k_train_LR7frames.lmdb'
        opt['meta_info_file'] = 'basicsr/data/meta_info/meta_info_Vimeo90K_train_GT.txt'  # noqa E501
        opt['io_backend'] = dict(type='lmdb')

    opt['num_frame'] = 7
    opt['gt_size'] = 256
    opt['random_reverse'] = True
    opt['use_flip'] = True
    opt['use_rot'] = True

    opt['use_shuffle'] = True
    opt['num_worker'] = 1
    opt['batch_size'] = 16
    opt['scale'] = 4

    opt['dataset_enlarge_ratio'] = 1

    mmcv.mkdir_or_exist('tmp')

    dataset = create_dataset(opt)
    data_loader = create_dataloader(dataset, opt, opt, None)

    nrow = int(math.sqrt(opt['batch_size']))
    padding = 2 if opt['phase'] == 'train' else 0

    print('start...')
    for i, data in enumerate(data_loader):
        if i > 5:
            break
        print(i)

        lq = data['lq']
        gt = data['gt']
        key = data['key']
        print(key)
        for j in range(opt['num_frame']):
            torchvision.utils.save_image(lq[:, j, :, :, :],
                                         f'tmp/lq_{i:03d}_frame{j}.png',
                                         nrow=nrow,
                                         padding=padding,
                                         normalize=False)
        torchvision.utils.save_image(gt,
                                     f'tmp/gt_{i:03d}.png',
                                     nrow=nrow,
                                     padding=padding,
                                     normalize=False)
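The indexing lq[:, j, :, :, :] assumes video batches come out as 5-D tensors shaped (batch, num_frame, channels, height, width), while gt is a single 4-D frame. A quick shape check with dummy data (sizes chosen to match the options above at scale x4):

import torch

lq = torch.rand(16, 7, 3, 64, 64)  # (batch, num_frame, C, H, W) LR clip
gt = torch.rand(16, 3, 256, 256)   # one GT frame per clip
print(lq[:, 3].shape)              # torch.Size([16, 3, 64, 64]), a single frame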
Example #10
def main():
    # options
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, help='Path to option YAML file.')
    parser.add_argument('--launcher',
                        choices=['none', 'pytorch', 'slurm'],
                        default='none',
                        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    opt = parse(args.opt, is_train=True)

    # distributed training settings
    if args.launcher == 'none':  # disabled distributed training
        opt['dist'] = False
        rank = -1
        print('Disabled distributed training.', flush=True)
    else:
        opt['dist'] = True
        if args.launcher == 'slurm' and 'dist_params' in opt:
            init_dist(args.launcher, **opt['dist_params'])
        else:
            init_dist(args.launcher)
        world_size = torch.distributed.get_world_size()
        rank = torch.distributed.get_rank()

    # load resume states if exists
    if opt['path'].get('resume_state', None):
        device_id = torch.cuda.current_device()
        resume_state = torch.load(
            opt['path']['resume_state'],
            map_location=lambda storage, loc: storage.cuda(device_id))
    else:
        resume_state = None

    # mkdir and loggers
    if resume_state is None:
        make_exp_dirs(opt)
    log_file = osp.join(opt['path']['log'],
                        f"train_{opt['name']}_{get_time_str()}.log")
    logger = get_root_logger(logger_name='basicsr',
                             log_level=logging.INFO,
                             log_file=log_file)
    logger.info(get_env_info())
    logger.info(dict2str(opt))
    # initialize tensorboard logger and wandb logger
    tb_logger = None
    if opt['logger'].get('use_tb_logger') and 'debug' not in opt['name']:
        tb_logger = init_tb_logger(log_dir='./tb_logger/' + opt['name'])
    if (opt['logger'].get('wandb')
            is not None) and (opt['logger']['wandb'].get('project')
                              is not None) and ('debug' not in opt['name']):
        assert opt['logger'].get('use_tb_logger') is True, (
            'should turn on tensorboard when using wandb')
        init_wandb_logger(opt)

    # random seed
    seed = opt['train']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    logger.info(f'Random seed: {seed}')
    set_random_seed(seed)

    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True

    # create train and val dataloaders
    train_loader, val_loader = None, None
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            # dataset_enlarge_ratio: enlarge the dataset size for each epoch
            dataset_enlarge_ratio = dataset_opt.get('dataset_enlarge_ratio', 1)
            train_set = create_dataset(dataset_opt)
            train_size = int(
                math.ceil(len(train_set) / dataset_opt['batch_size']))
            total_iters = int(opt['train']['niter'])
            total_epochs = int(math.ceil(total_iters / train_size))
            if opt['dist']:
                train_sampler = DistIterSampler(train_set, world_size, rank,
                                                dataset_enlarge_ratio)
                total_epochs = total_iters / (train_size *
                                              dataset_enlarge_ratio)
                total_epochs = int(math.ceil(total_epochs))
            else:
                train_sampler = None
            train_loader = create_dataloader(train_set, dataset_opt, opt,
                                             train_sampler)
            logger.info(f'Number of train images: {len(train_set)}, '
                        f'iters: {train_size}')
            logger.info(
                f'Total epochs needed: {total_epochs} for iters {total_iters}')
        elif phase == 'val':
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt, opt, None)
            logger.info(
                f"Number of val images/folders in {dataset_opt['name']}: "
                f'{len(val_set)}')
        else:
            raise NotImplementedError(f'Phase {phase} is not recognized.')
    assert train_loader is not None

    # create model
    if resume_state:
        check_resume(opt, resume_state['iter'])  # modify pretrain_model paths
    model = create_model(opt)

    # resume training
    if resume_state:
        logger.info(f"Resuming training from epoch: {resume_state['epoch']}, "
                    f"iter: {resume_state['iter']}.")
        start_epoch = resume_state['epoch']
        current_iter = resume_state['iter']
        model.resume_training(resume_state)  # handle optimizers and schedulers
    else:
        current_iter = 0
        start_epoch = 0

    # create message logger (formatted outputs)
    msg_logger = MessageLogger(opt, current_iter, tb_logger)

    # training
    logger.info(
        f'Start training from epoch: {start_epoch}, iter: {current_iter}')
    data_time, iter_time = time.time(), time.time()

    for epoch in range(start_epoch, total_epochs + 1):
        if opt['dist']:
            train_sampler.set_epoch(epoch)
        for _, train_data in enumerate(train_loader):
            data_time = time.time() - data_time

            current_iter += 1
            if current_iter > total_iters:
                break
            # update learning rate
            model.update_learning_rate(current_iter,
                                       warmup_iter=opt['train']['warmup_iter'])
            # training
            model.feed_data(train_data)
            model.optimize_parameters(current_iter)
            iter_time = time.time() - iter_time
            # log
            if current_iter % opt['logger']['print_freq'] == 0:
                log_vars = {'epoch': epoch, 'iter': current_iter}
                log_vars.update({'lrs': model.get_current_learning_rate()})
                log_vars.update({'time': iter_time, 'data_time': data_time})
                log_vars.update(model.get_current_log())
                msg_logger(log_vars)

            # save models and training states
            if current_iter % opt['logger']['save_checkpoint_freq'] == 0:
                logger.info('Saving models and training states.')
                model.save(epoch, current_iter)

            # validation
            if opt['val']['val_freq'] is not None and current_iter % opt[
                    'val']['val_freq'] == 0:
                model.validation(val_loader, current_iter, tb_logger,
                                 opt['val']['save_img'])

            data_time = time.time()
            iter_time = time.time()
        # end of iter
    # end of epoch

    logger.info('End of training.')
    logger.info('Saving the latest model.')
    model.save(epoch=-1, current_iter=-1)  # -1 for the latest
    # last validation
    if opt['val']['val_freq'] is not None:
        model.validation(val_loader, current_iter, tb_logger,
                         opt['val']['save_img'])

    if tb_logger:
        tb_logger.close()
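resume_state is the training-state checkpoint written alongside model weights; from the usage above it must hold at least the epoch and iter counters plus whatever model.resume_training needs for optimizers and schedulers. A hedged sketch of writing and reloading such a state (key names inferred from the code, not verified against the repo):

import torch

# what model.save(...) conceptually stores as the training state
state = {'epoch': 12, 'iter': 250000,
         'optimizers': [], 'schedulers': []}  # lists of state_dicts in practice
torch.save(state, 'example.state')

# the lambda map_location above moves tensors onto the current GPU;
# a CPU-only equivalent for inspection:
resume_state = torch.load('example.state', map_location='cpu')
print(resume_state['epoch'], resume_state['iter'])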
Example #11
def main():
    # options
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt',
                        type=str,
                        required=True,
                        help='Path to option YAML file.')
    parser.add_argument('--launcher',
                        choices=['none', 'pytorch', 'slurm'],
                        default='none',
                        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    opt = parse(args.opt, is_train=False)

    # distributed testing settings
    if args.launcher == 'none':  # non-distributed testing
        opt['dist'] = False
        print('Disable distributed testing.', flush=True)
    else:
        opt['dist'] = True
        if args.launcher == 'slurm' and 'dist_params' in opt:
            init_dist(args.launcher, **opt['dist_params'])
        else:
            init_dist(args.launcher)

    rank, world_size = get_dist_info()
    opt['rank'] = rank
    opt['world_size'] = world_size

    make_exp_dirs(opt)
    log_file = osp.join(opt['path']['log'],
                        f"test_{opt['name']}_{get_time_str()}.log")
    logger = get_root_logger(logger_name='basicsr',
                             log_level=logging.INFO,
                             log_file=log_file)
    logger.info(get_env_info())
    logger.info(dict2str(opt))

    # random seed
    seed = opt['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
        opt['manual_seed'] = seed
    logger.info(f'Random seed: {seed}')
    set_random_seed(seed + rank)

    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True

    # create test dataset and dataloader
    test_loaders = []
    for phase, dataset_opt in sorted(opt['datasets'].items()):
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(test_set,
                                        dataset_opt,
                                        num_gpu=opt['num_gpu'],
                                        dist=opt['dist'],
                                        sampler=None,
                                        seed=seed)
        logger.info(
            f"Number of test images in {dataset_opt['name']}: {len(test_set)}")
        test_loaders.append(test_loader)

    # create model
    model = create_model(opt)

    dummy_input = torch.zeros((1, 3, 800, 800)).to(model.device)
    dummy_psf = torch.zeros((1, 5, 1, 1)).to(model.device)
    summary(model.net_g, dummy_input, dummy_psf)

    for test_loader in test_loaders:
        test_set_name = test_loader.dataset.opt['name']
        mkdir_or_exist(osp.join(opt['path']['visualization'], test_set_name))

        logger.info(f'Testing {test_set_name}...')
        model.validation(test_loader,
                         current_iter=opt['name'],
                         tb_logger=None,
                         save_img=opt['val']['save_img'])
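summary() here presumably comes from a torchsummary-style helper that prints layer shapes and parameter counts for net_g given the dummy image and PSF. If no such helper is available, the headline number can be computed directly; a self-contained sketch with a stand-in module:

import torch.nn as nn

net = nn.Conv2d(3, 64, 3)  # stand-in for model.net_g
num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print(f'trainable parameters: {num_params / 1e6:.4f}M')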
Example #12
def main(mode='folder'):
    """Test reds dataset.

    Args:
        mode: There are two modes: 'lmdb', 'folder'.
    """
    opt = {}
    opt['dist'] = False
    opt['phase'] = 'train'

    opt['name'] = 'REDS'
    opt['type'] = 'REDSDataset'
    if mode == 'folder':
        opt['dataroot_gt'] = 'datasets/REDS/train_sharp'
        opt['dataroot_lq'] = 'datasets/REDS/train_sharp_bicubic'
        opt['dataroot_flow'] = None
        opt['meta_info_file'] = 'basicsr/data/meta_info/meta_info_REDS_GT.txt'
        opt['io_backend'] = dict(type='disk')
    elif mode == 'lmdb':
        opt['dataroot_gt'] = 'datasets/REDS/train_sharp_with_val.lmdb'
        opt['dataroot_lq'] = 'datasets/REDS/train_sharp_bicubic_with_val.lmdb'
        opt['dataroot_flow'] = None
        opt['meta_info_file'] = 'basicsr/data/meta_info/meta_info_REDS_GT.txt'
        opt['io_backend'] = dict(type='lmdb')

    opt['val_partition'] = 'REDS4'
    opt['num_frame'] = 5
    opt['gt_size'] = 256
    opt['interval_list'] = [1]
    opt['random_reverse'] = True
    opt['use_flip'] = True
    opt['use_rot'] = True

    opt['use_shuffle'] = True
    opt['num_worker'] = 1
    opt['batch_size'] = 16
    opt['scale'] = 4

    opt['dataset_enlarge_ratio'] = 1

    mmcv.mkdir_or_exist('tmp')

    dataset = create_dataset(opt)
    data_loader = create_dataloader(dataset, opt, opt, None)

    nrow = int(math.sqrt(opt['batch_size']))
    padding = 2 if opt['phase'] == 'train' else 0

    print('start...')
    for i, data in enumerate(data_loader):
        if i > 5:
            break
        print(i)

        lq = data['lq']
        gt = data['gt']
        key = data['key']
        print(key)
        for j in range(opt['num_frame']):
            torchvision.utils.save_image(lq[:, j, :, :, :],
                                         f'tmp/lq_{i:03d}_frame{j}.png',
                                         nrow=nrow,
                                         padding=padding,
                                         normalize=False)
        torchvision.utils.save_image(gt,
                                     f'tmp/gt_{i:03d}.png',
                                     nrow=nrow,
                                     padding=padding,
                                     normalize=False)
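val_partition='REDS4' conventionally names the four REDS clips reserved for validation, and the dataset skips training keys whose clip id falls in that set. A sketch of the filtering (the clip ids follow the common REDS4 convention and are an assumption here):

val_partition = ['000', '011', '015', '020']  # the usual REDS4 validation clips
keys = ['000/00000001', '001/00000001', '011/00000042']
train_keys = [k for k in keys if k.split('/')[0] not in val_partition]
print(train_keys)  # ['001/00000001']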
Example #13
def main():
    opt = {}
    opt['dist'] = False
    opt['phase'] = 'train'

    opt['name'] = 'DIV2K'
    opt['type'] = 'PairedImageDataset'
    opt['dataroot_gt'] = '../../../data/DIV2K/Train/HR_sub.lmdb'
    opt['dataroot_lq'] = '../../../data/DIV2K/Train/LR_x4_sub.lmdb'
    opt['io_backend'] = dict(type='lmdb')

    opt['gt_size'] = 128
    opt['use_flip'] = True
    opt['use_rot'] = True
    opt['use_bright_contrast'] = True

    opt['use_shuffle'] = True
    opt['num_worker_per_gpu'] = 1
    opt['batch_size_per_gpu'] = 4
    opt['scale'] = 4

    opt['dataset_enlarge_ratio'] = 1

    opt['use_cutblur'] = False
    opt['use_blend'] = True
    opt['use_rgb_perm'] = True
    opt['use_mixup'] = True

    os.makedirs('tmp', exist_ok=True)

    dataset = create_dataset(opt)
    data_loader = create_dataloader(dataset,
                                    opt,
                                    num_gpu=0,
                                    dist=opt['dist'],
                                    sampler=None)

    nrow = int(math.sqrt(opt['batch_size_per_gpu']))
    padding = 2 if opt['phase'] == 'train' else 0

    print('start...')
    for i, data in enumerate(data_loader):
        if i > 7:
            break
        print(i)

        lq = data['lq']
        gt = data['gt']
        lq_path = data['lq_path']
        gt_path = data['gt_path']
        print(lq_path, gt_path)

        if opt['use_cutblur']:
            gt_cutblur, lq_cutblur = augments.cutblur(gt.clone(),
                                                      lq.clone(),
                                                      prob=1,
                                                      alpha=0.7)
            torchvision.utils.save_image(lq_cutblur,
                                         f'tmp/{i:03d}_lq_cutblur.png',
                                         nrow=nrow,
                                         padding=padding,
                                         normalize=False)
            torchvision.utils.save_image(gt_cutblur,
                                         f'tmp/{i:03d}_gt_cutblur.png',
                                         nrow=nrow,
                                         padding=padding,
                                         normalize=False)
        if opt['use_rgb_perm']:
            gt_rgb, lq_rgb = augments.rgb(gt.clone(), lq.clone(), prob=1)
            torchvision.utils.save_image(lq_rgb,
                                         f'tmp/{i:03d}_lq_rgb.png',
                                         nrow=nrow,
                                         padding=padding,
                                         normalize=False)
            torchvision.utils.save_image(gt_rgb,
                                         f'tmp/{i:03d}_gt_rgb.png',
                                         nrow=nrow,
                                         padding=padding,
                                         normalize=False)
        if opt['use_blend']:
            gt_rgb, lq_rgb = augments.blend(gt.clone(),
                                            lq.clone(),
                                            prob=1,
                                            alpha=0.6)
            torchvision.utils.save_image(lq_rgb,
                                         f'tmp/{i:03d}_lq_blend.png',
                                         nrow=nrow,
                                         padding=padding,
                                         normalize=False)
            torchvision.utils.save_image(gt_rgb,
                                         f'tmp/{i:03d}_gt_blend.png',
                                         nrow=nrow,
                                         padding=padding,
                                         normalize=False)
        if opt['use_mixup']:
            gt_rgb, lq_rgb = augments.mixup(gt.clone(),
                                            lq.clone(),
                                            prob=1,
                                            alpha=1.2)
            torchvision.utils.save_image(lq_rgb,
                                         f'tmp/{i:03d}_lq_mixup.png',
                                         nrow=nrow,
                                         padding=padding,
                                         normalize=False)
            torchvision.utils.save_image(gt_rgb,
                                         f'tmp/{i:03d}_gt_mixup.png',
                                         nrow=nrow,
                                         padding=padding,
                                         normalize=False)

        torchvision.utils.save_image(lq,
                                     f'tmp/{i:03d}_lq.png',
                                     nrow=nrow,
                                     padding=padding,
                                     normalize=False)
        torchvision.utils.save_image(gt,
                                     f'tmp/{i:03d}_gt.png',
                                     nrow=nrow,
                                     padding=padding,
                                     normalize=False)
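Of the augmentations exercised above, mixup is the easiest to state: GT and LQ batches are blended with one shared weight drawn from Beta(alpha, alpha), here against a shuffled copy of the same batch. An illustrative stand-in for augments.mixup (signature taken from the call above; the real implementation may differ):

import numpy as np
import torch

def mixup_sketch(gt, lq, prob=1.0, alpha=1.2):
    """Blend each batch with a shuffled copy using one shared Beta weight."""
    if torch.rand(1).item() > prob:
        return gt, lq
    lam = float(np.random.beta(alpha, alpha))
    perm = torch.randperm(gt.size(0))
    return lam * gt + (1 - lam) * gt[perm], lam * lq + (1 - lam) * lq[perm]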
Example #14
def calculate_fid_folder():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    parser = argparse.ArgumentParser()
    parser.add_argument('folder', type=str, help='Path to the folder.')
    parser.add_argument(
        '--fid_stats', type=str, help='Path to the dataset fid statistics.')
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--num_sample', type=int, default=50000)
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument(
        '--backend',
        type=str,
        default='disk',
        help='io backend for dataset. Option: disk, lmdb')
    args = parser.parse_args()

    # inception model
    inception = load_patched_inception_v3(device)

    # create dataset
    opt = {}
    opt['name'] = 'SingleImageDataset'
    opt['type'] = 'SingleImageDataset'
    opt['dataroot_lq'] = args.folder
    opt['io_backend'] = dict(type=args.backend)
    opt['mean'] = [0.5, 0.5, 0.5]
    opt['std'] = [0.5, 0.5, 0.5]
    dataset = create_dataset(opt)

    # create dataloader
    data_loader = DataLoader(
        dataset=dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.num_workers,
        sampler=None,
        drop_last=False)
    args.num_sample = min(args.num_sample, len(dataset))
    total_batch = math.ceil(args.num_sample / args.batch_size)

    def data_generator(data_loader, total_batch):
        for idx, data in enumerate(data_loader):
            if idx >= total_batch:
                break
            else:
                yield data['lq']

    features = extract_inception_features(
        data_generator(data_loader, total_batch), inception, total_batch,
        device)
    features = features.numpy()
    total_len = features.shape[0]
    features = features[:args.num_sample]
    print(f'Extracted {total_len} features, '
          f'use the first {features.shape[0]} features to calculate stats.')

    sample_mean = np.mean(features, 0)
    sample_cov = np.cov(features, rowvar=False)

    # load the dataset stats
    stats = torch.load(args.fid_stats)
    real_mean = stats['mean']
    real_cov = stats['cov']

    # calculate FID metric
    fid = calculate_fid(sample_mean, sample_cov, real_mean, real_cov)
    print('fid:', fid)
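calculate_fid presumably computes the standard Frechet distance between the two Gaussians summarized by (mean, cov): ||mu1 - mu2||^2 + Tr(C1 + C2 - 2*(C1*C2)^(1/2)). A minimal reference implementation of that formula (a sketch; the project function may handle numerical edge cases differently):

import numpy as np
from scipy import linalg

def fid_sketch(mu1, cov1, mu2, cov2, eps=1e-6):
    """Frechet distance between N(mu1, cov1) and N(mu2, cov2)."""
    diff = mu1 - mu2
    covmean, _ = linalg.sqrtm(cov1.dot(cov2), disp=False)
    if not np.isfinite(covmean).all():  # retry with a small diagonal jitter
        offset = np.eye(cov1.shape[0]) * eps
        covmean = linalg.sqrtm((cov1 + offset).dot(cov2 + offset))
    if np.iscomplexobj(covmean):
        covmean = covmean.real
    return diff.dot(diff) + np.trace(cov1 + cov2 - 2 * covmean)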