Ejemplo n.º 1
0
def test_dataset_desc_file():
    """Load the fake dataset description file and verify parsed subset sizes.

    Bug fix: the original compared integers with ``is`` (identity), which only
    passes by accident through CPython's small-int cache and emits a
    SyntaxWarning on Python 3.8+.  Equality must use ``==``.
    """
    ddf = 'data/fake_datasets.yml'
    datasets = load_datasets(ddf)
    assert len(datasets) == 9
    assert len(datasets.NONE.train.hr.compile()) == 0
    assert len(datasets.NORMAL.train.hr.compile()) == 7
    assert len(datasets.NORMAL.val.hr.compile()) == 5
    assert len(datasets.NORMAL.test.hr.compile()) == 1
    assert len(datasets.PAIR.train.hr.compile()) == 2
    assert len(datasets.PAIR.train.lr.compile()) == 2
    assert len(datasets.VIDEOPAIR.train.hr.compile()) == 1
    assert len(datasets.VIDEOPAIR.train.lr.compile()) == 1
    assert len(datasets.VIDEOPAIR.val.hr.compile()) == 1
    assert len(datasets.VIDEOPAIR.val.lr.compile()) == 1
    assert len(datasets.VIDEOPAIR.test.hr.compile()) == 1
    assert len(datasets.VIDEOPAIR.test.lr.compile()) == 1
    assert len(datasets.FOO.test.hr.compile()) == 2
    assert len(datasets.BAR.test.hr.compile()) == 5
    # video-typed sources must be flagged as such
    assert datasets.VIDEOPAIR.train.hr.as_video
    assert datasets.XIUXIAN.test.hr.as_video

    # loading with an explicit key returns that single dataset
    raw = load_datasets(ddf, 'RAW')
    assert len(raw.train.hr.compile()) == 1
    assert len(raw.val.hr.compile()) == 1
    assert len(raw.test.hr.compile()) == 1
    assert raw.train.hr.as_video
Ejemplo n.º 2
0
    def test_dataset_desc_file(self):
        """Parse the fake dataset description and check every subset size."""
        ddf = 'data/fake_datasets.yml'
        datasets = load_datasets(ddf)
        self.assertEqual(len(datasets), 9)
        # (container, expected number of compiled entries), checked in order
        expected = [
            (datasets.NONE.train.hr, 0),
            (datasets.NORMAL.train.hr, 7),
            (datasets.NORMAL.val.hr, 5),
            (datasets.NORMAL.test.hr, 1),
            (datasets.PAIR.train.hr, 2),
            (datasets.PAIR.train.lr, 2),
            (datasets.VIDEOPAIR.train.hr, 1),
            (datasets.VIDEOPAIR.train.lr, 1),
            (datasets.VIDEOPAIR.val.hr, 1),
            (datasets.VIDEOPAIR.val.lr, 1),
            (datasets.VIDEOPAIR.test.hr, 1),
            (datasets.VIDEOPAIR.test.lr, 1),
            (datasets.FOO.test.hr, 2),
            (datasets.BAR.test.hr, 5),
        ]
        for container, size in expected:
            self.assertEqual(len(container.compile()), size)
        self.assertTrue(datasets.VIDEOPAIR.train.hr.as_video)
        self.assertTrue(datasets.XIUXIAN.test.hr.as_video)

        # loading with an explicit key returns that single dataset
        raw = load_datasets(ddf, 'RAW')
        for subset in (raw.train, raw.val, raw.test):
            self.assertEqual(len(subset.hr.compile()), 1)
        self.assertTrue(raw.train.hr.as_video)
Ejemplo n.º 3
0
def fetch_datasets(data_config_file, opt):
    """Resolve the (train, test, infer) datasets selected by *opt*.

    ``opt.infer`` may be either a filesystem path (file or directory of
    images) or a dataset name from the config file.
    """
    all_datasets = load_datasets(data_config_file)
    dataset = all_datasets[opt.dataset.upper()]
    # fall back to the training dataset when no explicit test set is given
    test_data = all_datasets[opt.test.upper()] if opt.test else dataset
    if not opt.infer:
        infer_data = test_data
    else:
        infer_dir = Path(opt.infer)
        if not infer_dir.exists():
            # not a path on disk: treat the value as a dataset name
            infer_data = all_datasets[opt.infer.upper()]
        elif infer_dir.is_file():
            infer_data = Dataset(infer=[str(infer_dir)],
                                 mode='pil-image1',
                                 modcrop=False)
        else:
            # every entry inside the directory
            images = list(infer_dir.glob('*')) or infer_dir.iterdir()
            infer_data = Dataset(infer=images,
                                 mode='pil-image1',
                                 modcrop=False)
    # TODO temp use, delete if not need
    if opt.cifar:
        cifar_data, cifar_test = tf.keras.datasets.cifar10.load_data()
        dataset = Dataset(**dataset)
        dataset.mode = 'numpy'
        dataset.train = [cifar_data[0]]
        dataset.val = [cifar_test[0]]
    return dataset, test_data, infer_data
Ejemplo n.º 4
0
def fetch_datasets(data_config_file, opt):
    """Resolve the (train, test, infer) datasets selected by *opt*.

    ``opt.infer`` may be either a filesystem path (file or directory of
    images) or a dataset name from the config file.
    """
    all_datasets = load_datasets(data_config_file)
    dataset = all_datasets[opt.dataset.upper()]
    # fall back to the training dataset when no explicit test set is given
    test_data = all_datasets[opt.test.upper()] if opt.test else dataset
    if not opt.infer:
        infer_data = test_data
    else:
        infer_dir = Path(opt.infer)
        if not infer_dir.exists():
            # not a path on disk: treat the value as a dataset name
            infer_data = all_datasets[opt.infer.upper()]
        elif infer_dir.is_file():
            infer_data = Dataset(infer=[str(infer_dir)],
                                 mode='pil-image1',
                                 modcrop=False)
        else:
            # every entry inside the directory
            images = list(infer_dir.glob('*')) or infer_dir.iterdir()
            infer_data = Dataset(infer=images,
                                 mode='pil-image1',
                                 modcrop=False)
    if opt.mode:
        # propagate the requested loader mode to all three datasets
        for each in (dataset, test_data, infer_data):
            each.mode = opt.mode
    return dataset, test_data, infer_data
Ejemplo n.º 5
0
def main():
    """Train an IDN model on 91-IMAGE, then dump test images."""
    model = InformationDistillationNetwork(3, rgb_input=False).compile()
    data = load_datasets('../Data/datasets.json')['91-IMAGE']
    # random 48x48 patches, 64 * 300 patches per epoch
    data.setattr(patch_size=48, depth=1, random=True, max_patches=64 * 300)
    root = model.name
    env = Environment(model, f'{root}/save', f'{root}/log')
    env.fit(64, 100, data, restart=False, learning_rate=1e-5)
    env.output_callbacks = [save_image(f'{root}/test')]
    env.test(data)
def main():
    """Smoke-train an RDN model on BSD for a single epoch, then test."""
    model = ResidualDenseNetwork(3, rdb_blocks=10, rdb_conv=6,
                                 rgb_input=False).compile()
    data = load_datasets('../Data/datasets.json')['BSD']
    # one tiny batch of random 96x96 patches per epoch
    data.setattr(patch_size=96, depth=1, random=True, max_patches=64 * 1)
    root = model.name
    env = Environment(model, f'{root}/save', f'{root}/log')
    env.fit(64, 1, data, restart=True)
    env.output_callbacks = [save_image(f'{root}/test')]
    env.test(data)
Ejemplo n.º 7
0
def main():
    """Training entry point: merge CLI flags with layered config files,
    build the model and run training with train/val loaders."""
    flags, args = parser.parse_known_args()
    opt = Config()
    # seed the config with every parsed CLI flag (defaults included)
    for pair in flags._get_kwargs():
        opt.setdefault(*pair)

    data_config_file = Path(flags.data_config)
    if not data_config_file.exists():
        raise RuntimeError("dataset config file doesn't exist!")
    for _ext in ('json', 'yaml', 'yml'):  # for compat
        # apply a 2-stage (or master-slave) configuration, master can be
        # override by slave
        model_config_root = Path('Parameters/root.{}'.format(_ext))
        if opt.p:
            # explicit parameter file wins over the per-model default
            model_config_file = Path(opt.p)
        else:
            model_config_file = Path('Parameters/{}.{}'.format(
                opt.model, _ext))
        if model_config_root.exists():
            opt.update(Config(str(model_config_root)))
        if model_config_file.exists():
            opt.update(Config(str(model_config_file)))

    # model-specific sub-section of the config, if present
    model_params = opt.get(opt.model, {})
    # NOTE(review): here opt.update runs *before* suppress_opt_by_args,
    # while the sibling evaluation entry suppresses first and updates
    # after — confirm which ordering is intended.
    opt.update(model_params)
    suppress_opt_by_args(model_params, *args)
    model = get_model(flags.model)(**model_params)
    if flags.cuda:
        model.cuda()
    root = f'{flags.save_dir}/{flags.model}'
    if flags.comment:
        root += '_' + flags.comment
    verbosity = logging.DEBUG if flags.verbose else logging.INFO
    trainer = model.trainer

    datasets = load_datasets(data_config_file)
    dataset = datasets[flags.dataset.upper()]

    train_config = Config(crop=opt.train_data_crop,
                          feature_callbacks=[],
                          label_callbacks=[],
                          convert_to='rgb',
                          **opt)
    if opt.channel == 1:
        # single-channel models train on grayscale inputs
        train_config.convert_to = 'gray'
    if opt.lr_decay:
        train_config.lr_schedule = lr_decay(lr=opt.lr, **opt.lr_decay)
    train_config.random_val = not opt.traced_val
    train_config.cuda = flags.cuda

    if opt.verbose:
        dump(opt)
    with trainer(model, root, verbosity, opt.pth) as t:
        if opt.seed is not None:
            t.set_seed(opt.seed)
        # shuffled training loader; deterministic (batch=1) validation loader
        tloader = QuickLoader(dataset, 'train', train_config, True,
                              flags.thread)
        vloader = QuickLoader(dataset,
                              'val',
                              train_config,
                              False,
                              flags.thread,
                              batch=1,
                              crop=opt.val_data_crop,
                              steps_per_epoch=opt.val_num)
        t.fit([tloader, vloader], train_config)
        if opt.export:
            t.export(opt.export)
Ejemplo n.º 8
0
def main(*args, **kwargs):
    """CLI entry: parse options, build the model, train, test and optionally
    predict / export to pb.

    Bug fix: the grayscale branch previously called
    ``fit_fn(convery_to='GRAY')`` — a typo for ``convert_to`` — so the gray
    conversion was never applied during training.
    """
    args = argparse.ArgumentParser()
    args.add_argument('name', type=str, choices=list_supported_models(),
                      help='the model name can be found in model_alias.py')
    args.add_argument('--scale', type=int, default=3,
                      help='scale factor, default 3')
    args.add_argument('--channel', type=int, default=1,
                      help='image channels, default 1')
    args.add_argument('--dataconfig', type=str,
                      default='../Data/datasets.json',
                      help='the path to dataset config json file')
    args.add_argument(
        '--dataset', type=str, default='91-IMAGE',
        help=
        'specified dataset name(as described in config file, default 91-image')
    args.add_argument('--batch', type=int, default=64,
                      help='training batch size, default 64')
    args.add_argument('--epochs', type=int, default=200,
                      help='training epochs, default 200')
    args.add_argument(
        '--patch_size', type=int, default=48,
        help=
        'patch size of cropped training and validating sub-images, default 48')
    args.add_argument(
        '--strides', type=int, default=48,
        help='crop stride if random_patches is set 0, default 48')
    args.add_argument('--depth', type=int, default=1,
                      help='image1 depth used for video sources, default 1')
    args.add_argument(
        '--random_patches', type=int, default=0,
        help=
        'if set more than 0, use random crop to generate `random_patches` sub-image1 batches'
    )
    args.add_argument('--retrain', type=int, default=0,
                      help='retrain the model from scratch, default 0')
    args.add_argument('--lr', type=float, default=1e-4,
                      help='initial learning rate, default 1e-4')
    args.add_argument('--lr_decay', type=float, default=1,
                      help='learning rate decay, default 1')
    args.add_argument('--lr_decay_step', type=int, default=1000,
                      help='learning rate decay step')
    args.add_argument(
        '--add_noise', type=float, default=None,
        help='if not None, add noise with given stddev to input features')
    args.add_argument(
        '--add_random_noise', type=list, default=None,
        help=
        'if not None, add random noise with given stddev bound [low, high, step=1]'
    )
    args.add_argument(
        '--test', type=str, default=None,
        help='specify a dataset used to test, or use --dataset values if None')
    args.add_argument('--predict', type=str, default=None,
                      help='evaluate model on given files')
    args.add_argument(
        '--savedir', type=str, default='../Results',
        help='directory to save model checkpoints, default ../Results')
    args.add_argument('--output_color', type=str, default='RGB',
                      choices=('RGB', 'L', 'GRAY', 'Y'),
                      help='output color mode, default RGB')
    args.add_argument('--output_index', type=int, default=-1,
                      help='access index of model outputs to save, default -1')
    args.add_argument(
        '--export_pb', type=str, default=None,
        help=
        'if not None, specify the path that export trained model into pb format'
    )
    args.add_argument('--comment', type=str, default=None)
    args.add_argument('--custom_feature_cb', type=str, default=None)

    args = args.parse_args()
    # model hyper-parameters live in a json file named after the model
    model_args = json.load(open(f'parameters/{args.name}.json', mode='r'))

    model = get_model(args.name)(scale=args.scale,
                                 channel=args.channel,
                                 **model_args)
    dataset = load_datasets(args.dataconfig)[args.dataset.upper()]
    dataset.setattr(patch_size=args.patch_size,
                    strides=args.strides,
                    depth=args.depth)
    if args.random_patches:
        # random crop mode: `random_patches` batches worth of patches
        dataset.setattr(random=True,
                        max_patches=args.batch * args.random_patches)
    save_root = f'{args.savedir}/{model.name}_sc{args.scale}_c{args.channel}'
    if args.comment:
        save_root += '_' + args.comment
    with Environment(model,
                     f'{save_root}/save',
                     f'{save_root}/log',
                     feature_index=model.feature_index,
                     label_index=model.label_index) as env:
        if args.add_noise:
            env.feature_callbacks = [add_noise(args.add_noise)]
        if args.add_random_noise:
            env.feature_callbacks = [add_random_noise(*args.add_random_noise)]
        if args.custom_feature_cb:
            # space-separated callback names, resolved from module globals
            func = args.custom_feature_cb.split(' ')
            for f_name in func:
                env.feature_callbacks += [globals()[f_name]]
        fit_fn = partial(env.fit,
                         args.batch,
                         args.epochs,
                         dataset,
                         restart=args.retrain,
                         learning_rate=args.lr,
                         learning_rate_schedule=lr_decay(
                             'stair',
                             args.lr,
                             decay_step=args.lr_decay_step,
                             decay_rate=args.lr_decay))
        if model.channel > 1:
            fit_fn(convert_to='RGB')
            test_format = 'RGB'
        else:
            # FIX: was `convery_to` (typo) so GRAY conversion never applied
            fit_fn(convert_to='GRAY')
            # use callback to generate colored images from grayscale ones
            # all models inputs is gray image1 however
            test_format = 'YUV'
            env.feature_callbacks += [to_gray()]
            env.label_callbacks = [to_gray()]
            if args.output_color == 'RGB':
                env.output_callbacks += [to_rgb()]
        if args.test:
            test_set = load_datasets(args.dataconfig)[args.test.upper()]
            test_set.setattr(patch_size=args.patch_size,
                             strides=args.strides,
                             depth=args.depth)
        else:
            test_set = dataset
        env.output_callbacks += [
            save_image(f'{save_root}/test', args.output_index)
        ]
        env.test(test_set,
                 convert_to=test_format)  # load image1 with 3 channels
        if args.predict:
            pth = Path(args.predict)
            if not pth.exists():
                raise ValueError('[Error] File path does not exist')
            images = pth.rglob('*')
            env.fi, fi_old = 0, env.fi  # upscale directly
            env.output_callbacks[-1] = save_image(f'{save_root}/output',
                                                  args.output_index)
            env.predict(images, convert_to=test_format)
            env.fi = fi_old
    if args.export_pb:
        # re-build with RGB input for the exported inference graph
        model = get_model(args.name)(scale=args.scale, rgb_input=True)
        with Environment(model,
                         f'{save_root}/save',
                         f'{save_root}/log',
                         feature_index=model.feature_index,
                         label_index=model.label_index) as env:
            env.export(args.export_pb)
Ejemplo n.º 9
0
from VSR.Framework.Callbacks import *
from VSR.Models.DnCnn import DnCNN
from pathlib import Path


def add_noise(low=0, high=55):
    """Build a feature callback that adds zero-mean Gaussian noise.

    A stddev is drawn uniformly from ``[low, high)`` on every call, then
    i.i.d. Gaussian noise of that stddev is added to the feature.
    """
    import numpy as np

    def feature_callback(feature):
        # draw the noise level first, then sample noise matching the
        # feature's shape — call order matters for RNG reproducibility
        stddev = np.random.randint(low, high)
        return feature + np.random.normal(0, stddev, size=feature.shape)

    return feature_callback


if __name__ == '__main__':
    # Denoising experiment: train DnCNN on BSD with random-sigma noise
    # injected into the features via the callback above.
    model = DnCNN(layers=20, rgb_input=False).compile()
    dataset = load_datasets('../Data/datasets.json')['BSD']
    dataset.setattr(patch_size=40, strides=40, depth=1)
    env = Environment(model, f'{model.name}/save', f'{model.name}/log', feature_index=0, label_index=0)
    env.feature_callbacks = [add_noise(0, 55)]
    env.fit(128, 100, dataset, restart=False)
    # clear the noise callback before prediction so inputs are used as-is
    env.feature_callbacks = []
    # take the first output, then reconstruct from the predicted residual
    env.output_callbacks = [lambda output, **kwargs: output[0], reduce_residual()]
    env.output_callbacks += [save_image(model.name)]
    env.predict(list(Path('srcnn').glob('*.png')))
    # env.test(dataset)
    env.export(f'{model.name}')
Ejemplo n.º 10
0
pre-process images in dataset
- calculate mean and covariance
- ~random crop~
"""

import numpy as np
import tensorflow as tf

from VSR.DataLoader.Dataset import load_datasets
from VSR.DataLoader.Loader import QuickLoader, BasicLoader
from VSR.Util import FID
from VSR.Util.ImageProcess import imresize, array_to_img

# Load the dataset registry; the path differs depending on whether the
# script is run from the repo root or from a sub-directory, so fall back
# to the parent location on a miss.
try:
    DATASETS = load_datasets('./Data/datasets.json')
except FileNotFoundError:
    DATASETS = load_datasets('../Data/datasets.json')

FLAGS = tf.flags.FLAGS
# accumulator for per-dataset results, filled by main()
SAVE = {}


def main(*args):
    """Pre-process each dataset named in FLAGS.dataset.

    NOTE(review): this snippet appears truncated — the loop body likely
    continues beyond what is visible here.
    """
    for name in FLAGS.dataset:
        # dataset names are registered upper-case
        name = name.upper()
        d = DATASETS.get(name)
        if not d:
            tf.logging.error('Could not find ' + name)
            return
Ejemplo n.º 11
0
from VSR.Framework.Envrionment import Environment
from VSR.DataLoader.Dataset import load_datasets
from VSR.Models.Vespcn import VESPCN

if __name__ == '__main__':
    # Train VESPCN (x3 upscale, 3-frame clips) on MCL-V, then evaluate on
    # the same dataset.
    model = VESPCN(scale=3, depth=3)
    model.compile()
    datasets = load_datasets('../Data/datasets.json')
    env = Environment(model, './vespcn/save', './vespcn/log')
    env.fit(64, 100, datasets['MCL-V'], patch_size=48, strides=48, depth=3)
    env.test(datasets['MCL-V'], depth=3)
from VSR.Models.SrGan import SRGAN
from VSR.DataLoader.Dataset import load_datasets
from VSR.Framework.Envrionment import Environment
from VSR.Framework.Callbacks import *

if __name__ == '__main__':
    # Two-stage SRGAN training on DIV2K: first an initialization pass
    # (init=True), then the adversarial pass (init=False) resuming from the
    # same save directory.
    dataset = load_datasets('../Data/datasets.json')['DIV2K']
    dataset.setattr(patch_size=96,
                    strides=96,
                    random=True,
                    max_patches=16 * 100)
    model = SRGAN(scale=4, glayers=16, dlayers=8, vgg_layer=[2, 2], init=True)
    with Environment(model, f'../Results/{model.name}/save',
                     f'../Results/{model.name}/log') as env:
        env.fit(16, 100, dataset, learning_rate=1e-4)
        env.output_callbacks = [
            save_image(f'../Results/{model.name}/test_init')
        ]
        env.test(dataset)

    # stage 2: adversarial training with a stair-step learning-rate decay
    model = SRGAN(scale=4, glayers=16, dlayers=8, vgg_layer=[2, 2], init=False)
    with Environment(model, f'../Results/{model.name}/save',
                     f'../Results/{model.name}/log') as env:
        env.fit(32,
                200,
                dataset,
                learning_rate_schedule=lr_decay('stair',
                                                0.001,
                                                decay_step=1000,
                                                decay_rate=0.96))
        env.feature_callbacks = [to_gray()]
Ejemplo n.º 13
0
def main(*args, **kwargs):
    """CLI entry: parse options, build the model, train, test and optionally
    predict / export to pb.

    Bug fix: the grayscale branch previously called
    ``fit_fn(convery_to='GRAY')`` — a typo for ``convert_to`` — so the gray
    conversion was never applied during training.
    """
    args = argparse.ArgumentParser()
    args.add_argument('name', type=str, choices=list_supported_models(),
                      help='the model name can be found in model_alias.py')
    # basic options
    args.add_argument('--scale', type=int, default=4,
                      help='scale factor, default 4')
    args.add_argument('--channel', type=int, default=3,
                      help='image channels, default 3')
    args.add_argument('--batch', type=int, default=16,
                      help='training batch size, default 16')
    args.add_argument('--epochs', type=int, default=200,
                      help='training epochs, default 200')
    args.add_argument('--steps_per_epoch', type=int, default=200,
                      help='training steps each epoch, default 200')
    args.add_argument('--retrain', type=bool, default=False,
                      help='retrain the model from scratch')
    # dataset options
    args.add_argument('--dataconfig', type=str,
                      default='../Data/datasets.json',
                      help='the path to dataset config json file')
    args.add_argument(
        '--dataset', type=str, default='91-IMAGE',
        help=
        'specified dataset name(as described in config file, default 91-image')
    args.add_argument(
        '--patch_size', type=int, default=48,
        help=
        'patch size of cropped training and validating sub-images, default 48')
    args.add_argument('--depth', type=int, default=1,
                      help='image depth used for video sources, default 1')
    args.add_argument(
        '--parallel', type=int, default=1,
        help='number of cores used to load training sets in parallel')
    args.add_argument('--memory', type=str, default=None,
                      help='limit the memory usage. i.e. 4GB, 100MB')
    # learning options
    args.add_argument('--lr', type=float, default=1e-4,
                      help='initial learning rate, default 1e-4')
    args.add_argument('--lr_decay', type=float, default=1,
                      help='learning rate decay, default 1')
    args.add_argument('--lr_decay_step', type=int, default=1000,
                      help='learning rate decay step')
    # output options
    args.add_argument(
        '--test', type=str, default=None,
        help='specify a dataset used to test, or use --dataset values if None')
    args.add_argument('--predict', type=str, default=None,
                      help='evaluate model on given files')
    args.add_argument(
        '--savedir', type=str, default='../Results',
        help='directory to save model checkpoints, default ../Results')
    args.add_argument('--output_color', type=str, default='RGB',
                      choices=('RGB', 'L', 'GRAY', 'Y'),
                      help='output color mode, default RGB')
    args.add_argument('--output_index', type=int, default=-1,
                      help='access index of model outputs to save, default -1')
    args.add_argument(
        '--export_pb', type=str, default=None,
        help=
        'if not None, specify the path that export trained model into pb format'
    )
    args.add_argument(
        '--comment', type=str, default=None,
        help='add a suffix to output dir to distinguish each experiments')
    # callbacks
    args.add_argument(
        '--add_noise', type=float, default=None,
        help='if not None, add noise with given stddev to input features')
    args.add_argument(
        '--add_random_noise', type=list, default=None,
        help=
        'if not None, add random noise with given stddev bound [low, high, step=1]'
    )
    args.add_argument('--custom_feature_cb', type=str, default=None,
                      help='customized callbacks, defined in `custom_api.py`')

    args = args.parse_args()
    # model hyper-parameters live in an optional json file named after the model
    if Path(f'parameters/{args.name}.json').exists():
        model_args = json.load(open(f'parameters/{args.name}.json', mode='r'))
    else:
        print(
            f'[warning] model parameter file not found, use default parameters'
        )
        model_args = dict()
    model = get_model(args.name)(scale=args.scale,
                                 channel=args.channel,
                                 **model_args)

    dataset = load_datasets(args.dataconfig)[args.dataset.upper()]
    dataset.setattr(patch_size=args.patch_size, depth=args.depth)
    save_root = f'{args.savedir}/{model.name}_sc{args.scale}_c{args.channel}'
    if args.comment:
        save_root += '_' + args.comment
    with Environment(model,
                     f'{save_root}/save',
                     f'{save_root}/log',
                     feature_index=model.feature_index,
                     label_index=model.label_index) as env:
        if args.add_noise:
            env.feature_callbacks = [add_noise(args.add_noise)]
        if args.add_random_noise:
            env.feature_callbacks = [add_random_noise(*args.add_random_noise)]
        if args.custom_feature_cb:
            # space-separated callback names, resolved from module globals
            func = args.custom_feature_cb.split(' ')
            for f_name in func:
                env.feature_callbacks += [globals()[f_name]]
        fit_fn = partial(env.fit,
                         args.batch,
                         args.epochs,
                         args.steps_per_epoch,
                         dataset,
                         augmentation=True,
                         restart=args.retrain,
                         learning_rate=args.lr,
                         learning_rate_schedule=lr_decay(
                             'stair',
                             args.lr,
                             decay_step=args.lr_decay_step,
                             decay_rate=args.lr_decay),
                         parallel=args.parallel,
                         memory_usage=args.memory)
        if model.channel > 1:
            fit_fn(convert_to='RGB')
            test_format = 'RGB'
        else:
            # FIX: was `convery_to` (typo) so GRAY conversion never applied
            fit_fn(convert_to='GRAY')
            # use callback to generate colored images from grayscale ones
            # all models inputs is gray image however
            test_format = 'YUV'
            env.feature_callbacks += [to_gray()]
            env.label_callbacks = [to_gray()]
            if args.output_color == 'RGB':
                env.output_callbacks += [to_rgb()]
        if args.test:
            test_set = load_datasets(args.dataconfig)[args.test.upper()]
            test_set.setattr(patch_size=args.patch_size, depth=args.depth)
        else:
            test_set = dataset
        env.output_callbacks += [
            save_image(f'{save_root}/test', args.output_index)
        ]
        env.test(test_set,
                 convert_to=test_format)  # load image with 3 channels
        if args.predict:
            pth = Path(args.predict)
            if not pth.exists():
                raise ValueError('[Error] File path does not exist')
            if pth.is_dir():
                images = list(pth.glob('*'))
                if not images:
                    images = pth.iterdir()
            elif pth.is_file():
                images = pth
            env.fi, fi_old = 0, env.fi  # upscale directly
            env.output_callbacks[-1] = save_image(f'{save_root}/output',
                                                  args.output_index)
            env.predict(images, convert_to=test_format, depth=args.depth)
            env.fi = fi_old
        elif test_set.pred:
            # dataset ships its own prediction sources
            env.fi, fi_old = 0, env.fi  # upscale directly
            env.output_callbacks[-1] = save_image(f'{save_root}/output',
                                                  args.output_index)
            env.predict(test_set.pred,
                        convert_to=test_format,
                        depth=args.depth)
            env.fi = fi_old
        if args.export_pb:
            env.export(args.export_pb)
Ejemplo n.º 14
0
Calculate metrics for outputs and labels:
- PSNR
- SSIM
"""

from pathlib import Path
import numpy as np
import tensorflow as tf

from VSR.DataLoader.Dataset import load_datasets, Dataset
from VSR.DataLoader.Loader import BasicLoader
from VSR.Util.ImageProcess import rgb_to_yuv
from VSR.Util.Config import Config

# Load the dataset registry; the path differs depending on whether the
# script is run from the repo root or from a sub-directory, so fall back
# to the parent location on a miss.
try:
    DATASETS = load_datasets('./Data/datasets.yaml')
except FileNotFoundError:
    DATASETS = load_datasets('../Data/datasets.yaml')

# command-line flags for the metric calculation script
tf.flags.DEFINE_string("input_dir", None, "images to test")
tf.flags.DEFINE_string("dataset", None, "dataset to compare")
tf.flags.DEFINE_bool("no_ssim", False, "disable ssim metric")
tf.flags.DEFINE_bool("no_psnr", False, "disable psnr metric")
tf.flags.DEFINE_bool("l_only", False, "compute luminance only")
tf.flags.DEFINE_string(
    "l_standard", "matlab",
    "yuv convertion standard, either 'bt601', 'bt709' or 'matlab'")
tf.flags.DEFINE_integer("shave", 0, "shave border pixels")
tf.flags.DEFINE_integer("clip", -1, "depth of a clip, -1 includes all frames")
tf.flags.DEFINE_integer("offset", 0, "skip n files in the dataset")
Ejemplo n.º 15
0
def main():
  """Evaluation entry: load a trained model, then benchmark or infer.

  Each entry of ``flags.test`` is either a dataset name from the config file
  (benchmark mode) or a glob pattern of image files (inference mode).

  Bug fix: inside the per-pattern loop the directory walk started from
  ``Path(flags.test)`` — but ``flags.test`` is the whole collection being
  iterated, not a path (``Path(list)`` raises TypeError).  It now starts
  from the current ``pattern``.
  """
  flags, args = parser.parse_known_args()
  opt = Config()
  # seed the config with every parsed CLI flag (defaults included)
  for pair in flags._get_kwargs():
    opt.setdefault(*pair)
  data_config_file = Path(flags.data_config)
  if not data_config_file.exists():
    raise RuntimeError("dataset config file doesn't exist!")
  for _ext in ('json', 'yaml', 'yml'):  # for compat
    # apply a 2-stage (or master-slave) configuration, master can be
    # override by slave
    model_config_root = Path('Parameters/root.{}'.format(_ext))
    if opt.p:
      model_config_file = Path(opt.p)
    else:
      model_config_file = Path('Parameters/{}.{}'.format(opt.model, _ext))
    if model_config_root.exists():
      opt.update(Config(str(model_config_root)))
    if model_config_file.exists():
      opt.update(Config(str(model_config_file)))

  model_params = opt.get(opt.model, {})
  suppress_opt_by_args(model_params, *args)
  opt.update(model_params)
  model = get_model(flags.model)(**model_params)
  if flags.cuda:
    model.cuda()
  root = f'{flags.save_dir}/{flags.model}'
  if flags.comment:
    root += '_' + flags.comment
  verbosity = logging.DEBUG if flags.verbose else logging.INFO
  trainer = model.trainer

  datasets = load_datasets(data_config_file)
  try:
    # all entries name registered datasets -> benchmark mode
    test_datas = [datasets[t.upper()] for t in flags.test]
    run_benchmark = True
  except KeyError:
    # otherwise treat each entry as a file glob -> inference mode
    test_datas = []
    for pattern in flags.test:
      test_data = Dataset(test=_glob_absolute_pattern(pattern),
                          mode='pil-image1', modcrop=False)
      # FIX: walk up from the current pattern (was `flags.test`, the whole
      # list, which is not a valid path argument)
      father = Path(pattern)
      while not father.is_dir():
        if father.parent == father:
          break
        father = father.parent
      # name the ad-hoc dataset after its containing directory
      test_data.name = father.stem
      test_datas.append(test_data)
    run_benchmark = False

  if opt.verbose:
    dump(opt)
  for test_data in test_datas:
    loader_config = Config(convert_to='rgb',
                           feature_callbacks=[], label_callbacks=[],
                           output_callbacks=[], **opt)
    loader_config.batch = 1
    loader_config.subdir = test_data.name
    loader_config.output_callbacks += [
      save_image(root, flags.output_index, flags.auto_rename)]
    if opt.channel == 1:
      loader_config.convert_to = 'gray'

    with trainer(model, root, verbosity, flags.pth) as t:
      if flags.seed is not None:
        t.set_seed(flags.seed)
      loader = QuickLoader(test_data, 'test', loader_config,
                           n_threads=flags.thread)
      loader_config.epoch = flags.epoch
      if run_benchmark:
        t.benchmark(loader, loader_config)
      else:
        t.infer(loader, loader_config)
Ejemplo n.º 16
0
from VSR.Util.Utility import Vgg
from VSR.DataLoader.Dataset import load_datasets
from VSR.DataLoader.Loader import BatchLoader

import tensorflow as tf
import numpy as np

# Demo script: builds the VGG-19 feature extractor twice against the TF1
# default graph to illustrate the variable-reuse pitfall (see note at bottom).
# Load the 91-IMAGE dataset entry from the shared dataset config file.
data = load_datasets('../Data/datasets.json')['91-IMAGE']
# Batch size 1 over the test split: grayscale, whole images (no crop), scale 1.
loader = BatchLoader(1,
                     data,
                     'test',
                     convert_to_gray=True,
                     crop=False,
                     scale=1)

# First graph: construct VGG once, run a single batch through it.
m = Vgg(input_shape=[None, None, 3], type='vgg19')
with tf.Session() as sess:
    # m = Vgg(input_shape=[None, None, 3], type='vgg19')
    for hr, lr in loader:
        # NOTE(review): the two [2, 3] lists presumably select VGG layer
        # outputs for features/losses — confirm against Vgg.__call__.
        y = m(hr, [2, 3], [2, 3])
        break

# Reset the default graph before building a second VGG; without this reset
# (or explicit variable reuse) the second construction would collide with
# the variables created above.
tf.reset_default_graph()
m = Vgg(input_shape=[None, None, 3], type='vgg19')
with tf.Session() as sess:
    # m = Vgg(input_shape=[None, None, 3], type='vgg19')
    for hr, lr in loader:
        y = m(hr, [2, 3], [2, 3])
        break

# Error for not reusing VGG variables: constructing Vgg a second time inside
# the session (the commented-out lines) fails because the graph already
# holds variables with the same names.
Ejemplo n.º 17
0
import os

# The fixture paths below are relative to the UTest folder, so move there
# when the tests are launched from the repository root.
if not os.getcwd().endswith('UTest'):
    os.chdir('UTest')
from VSR.DataLoader.Dataset import _glob_absolute_pattern, load_datasets

# Shared fixture: dataset descriptions parsed from the fake YAML config.
DATASETS = load_datasets('./data/fake_datasets.yml')


def test_glob_absolute_pattern():
    """Expand known fixture URLs and verify the resolved file lists."""
    # A plain directory URL expands to the 5 Set5 LR images, in order.
    node = _glob_absolute_pattern('./data/set5_x2')
    assert len(node) == 5
    for index, entry in enumerate(node, start=1):
        assert entry.match('img_%03d_SRF_2_LR.png' % index)

    # A recursive glob with an extension filter picks up the single .flo file.
    node = _glob_absolute_pattern('./data/flying_chair/**/*.flo')
    assert len(node) == 1
    assert node[0].match('0000.flo')

    # A recursive glob across the whole data tree finds every PNG fixture.
    node = _glob_absolute_pattern('./data/**/*.png')
    assert len(node) == 10


def test_existence():
    _K = DATASETS.keys()
Ejemplo n.º 18
0
def main(*args, **kwargs):
    """Parse CLI flags, train the selected SR model, then optionally test and
    export it.

    The positional ``name`` flag selects a model alias; extra per-model
    hyper-parameters are read from ``parameters/<name>.json``.  ``*args`` and
    ``**kwargs`` are kept for interface compatibility but are not used —
    ``parse_args()`` reads ``sys.argv`` directly.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('name', type=str, choices=list_supported_models(),
                        help='the model name can be found in model_alias.py')
    parser.add_argument('--scale', type=int, default=3, help='scale factor')
    parser.add_argument('--dataconfig', type=str, default='../Data/datasets.json',
                        help='the path to dataset config json file')
    parser.add_argument('--dataset', type=str, default='91-IMAGE',
                        help='specified dataset name (as described in config file)')
    parser.add_argument('--batch', type=int, default=64, help='training batch size')
    parser.add_argument('--epochs', type=int, default=200, help='training epochs')
    parser.add_argument('--patch_size', type=int, default=48,
                        help='patch size of cropped training and validating sub-images')
    parser.add_argument('--strides', type=int, default=48, help='crop stride if random_patches is set 0')
    parser.add_argument('--depth', type=int, default=1, help='image1 depth used for video sources')
    parser.add_argument('--random_patches', type=int, default=0,
                        help='if set more than 0, use random crop to generate `random_patches` sub-image1 batches')
    parser.add_argument('--retrain', type=int, default=0, help='retrain the model from scratch')
    parser.add_argument('--lr', type=float, default=1e-4, help='initial learning rate')
    parser.add_argument('--lr_decay', type=float, default=1, help='learning rate decay')
    parser.add_argument('--lr_decay_step', type=int, default=1000, help='learning rate decay step')
    parser.add_argument('--add_noise', type=float, default=None,
                        help='if not None, add noise with given stddev to input features')
    # BUG FIX: `type=list` split the raw argument string into single
    # characters (e.g. "1,5" -> ['1', ',', '5']), so the later
    # `add_random_noise(*args.add_random_noise)` call could never receive
    # the intended [low, high, step] numbers.  Accept numeric values instead.
    parser.add_argument('--add_random_noise', type=float, nargs='+', default=None,
                        help='if not None, add random noise with given stddev bound [low, high, step=1]')
    # BUG FIX: `type=bool` treats every non-empty string (even "False") as
    # True; parse the common truthy spellings explicitly.
    parser.add_argument('--test', type=lambda v: str(v).lower() in ('1', 'true', 'yes'),
                        default=True, help='test model and save tested images')
    parser.add_argument('--savedir', type=str, default='../Results', help='directory to save model checkpoints')
    parser.add_argument('--output_color', type=str, default='RGB', choices=('RGB', 'L', 'GRAY', 'Y'),
                        help='output color mode')
    parser.add_argument('--export_pb', type=str, default=None,
                        help='if not None, specify the path that export trained model into pb format')

    args = parser.parse_args()
    # BUG FIX: close the hyper-parameter file instead of leaking the handle.
    with open(f'parameters/{args.name}.json', mode='r') as fp:
        model_args = json.load(fp)

    model = get_model(args.name)(scale=args.scale, **model_args)
    dataset = load_datasets(args.dataconfig)[args.dataset.upper()]
    dataset.setattr(patch_size=args.patch_size, strides=args.strides, depth=args.depth)
    if args.random_patches:
        # Randomly crop `batch * random_patches` sub-images per epoch instead
        # of sliding-window crops with fixed strides.
        dataset.setattr(random=True, max_patches=args.batch * args.random_patches)
    with Environment(model, f'{args.savedir}/{model.name}/save', f'{args.savedir}/{model.name}/log',
                     feature_index=model.feature_index, label_index=model.label_index) as env:
        if args.add_noise:
            env.feature_callbacks = [add_noise(args.add_noise)]
        if args.add_random_noise:
            env.feature_callbacks = [add_random_noise(*args.add_random_noise)]

        env.fit(args.batch, args.epochs, dataset, restart=args.retrain,
                learning_rate=args.lr,
                learning_rate_schedule=lr_decay('stair', args.lr,
                                                decay_step=args.lr_decay_step,
                                                decay_rate=args.lr_decay))
        if args.test:
            # use callback to generate colored images from grayscale ones
            # all models inputs is gray image1 however
            env.feature_callbacks += [to_gray()]
            env.label_callbacks = [to_gray()]
            if args.output_color == 'RGB':
                env.output_callbacks += [to_rgb()]
            # BUG FIX: honor --savedir instead of the hard-coded '../Results'.
            env.output_callbacks += [save_image(f'{args.savedir}/{model.name}/test')]
            env.test(dataset, convert_to_gray=False)  # load image1 with 3 channels
    if args.export_pb:
        # Re-build the model with RGB inputs and export the frozen graph.
        # BUG FIX: load the checkpoint from --savedir (where training above
        # just saved it) rather than the hard-coded '../Results'.
        model = get_model(args.name)(scale=args.scale, rgb_input=True)
        with Environment(model, f'{args.savedir}/{model.name}/save', f'{args.savedir}/{model.name}/log') as env:
            env.export(args.export_pb)