Example #1
def init_loader_config(opt):
    """Derive the training, benchmark and inference loader configurations
    from the global options in `opt`.
    """
    train_config = Config(**opt, crop='random', feature_callbacks=[], label_callbacks=[])
    benchmark_config = Config(**opt, crop=None, feature_callbacks=[], label_callbacks=[],
                              output_callbacks=[])
    infer_config = Config(**opt, feature_callbacks=[], label_callbacks=[], output_callbacks=[])
    # benchmark: batch size defaults to 1 and there is no per-epoch step limit
    benchmark_config.batch = opt.test_batch or 1
    benchmark_config.steps_per_epoch = -1
    # single-channel mode: convert inputs to grayscale (luminance)
    if opt.channel == 1:
        train_config.convert_to = 'gray'
        benchmark_config.convert_to = 'gray'
        if opt.output_color == 'RGB':
            # benchmark in YUV: super-resolve the Y channel, then convert outputs back to RGB
            benchmark_config.convert_to = 'yuv'
            benchmark_config.feature_callbacks = train_config.feature_callbacks + [to_gray()]
            benchmark_config.label_callbacks = train_config.label_callbacks + [to_gray()]
            benchmark_config.output_callbacks = [to_rgb()]
        benchmark_config.output_callbacks += [save_image(opt.root, opt.output_index)]
        infer_config.update(benchmark_config)
    else:
        # multi-channel mode: train and evaluate directly in RGB
        train_config.convert_to = 'rgb'
        benchmark_config.convert_to = 'rgb'
        benchmark_config.output_callbacks += [save_image(opt.root, opt.output_index)]
        infer_config.update(benchmark_config)
    # resolve extra user-named callbacks from this module's global namespace
    if opt.add_custom_callbacks is not None:
        for fn in opt.add_custom_callbacks:
            train_config.feature_callbacks += [globals()[fn]]
            benchmark_config.feature_callbacks += [globals()[fn]]
            infer_config.feature_callbacks += [globals()[fn]]
    if opt.lr_decay:
        train_config.lr_schedule = lr_decay(lr=opt.lr, **opt.lr_decay)
    # modcrop: a boolean specifying whether to crop image edges so that the size is
    #          divisible by `scale`. Disabled here so inference batches keep the
    #          images' original shapes.
    infer_config.modcrop = False
    return train_config, benchmark_config, infer_config
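
A minimal usage sketch for init_loader_config, not from the original source: it assumes the same (unshown) imports as the other examples on this page, and the option values, dataset keys and the 'train' phase name passed to QuickLoader are illustrative guesses rather than canonical defaults.

# Hedged usage sketch -- option values below are illustrative only.
opt = Config(scale=4, channel=1, output_color='RGB', test_batch=1,
             root='./results', output_index=0, lr=1e-4, lr_decay=None,
             add_custom_callbacks=None)
train_config, benchmark_config, infer_config = init_loader_config(opt)

# Feed the configs to the QuickLoader shown in Example #4; the 'train' phase
# name is an assumption (only 'test' appears in these examples).
datasets = load_datasets('../Data/datasets.json')
train_loader = QuickLoader(datasets['91-IMAGE'], 'train', train_config, n_threads=4)
bench_loader = QuickLoader(datasets['BSD'], 'test', benchmark_config, n_threads=1)
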
Example #2
def main():
    model = InformationDistillationNetwork(3, rgb_input=False).compile()
    # load the 91-IMAGE training set and sample random 48x48 patches
    dataset = load_datasets('../Data/datasets.json')['91-IMAGE']
    dataset.setattr(patch_size=48, depth=1, random=True, max_patches=64 * 300)
    env = Environment(model, f'{model.name}/save', f'{model.name}/log')
    env.fit(64, 100, dataset, restart=False, learning_rate=1e-5)
    # save the test outputs as images under <model.name>/test
    env.output_callbacks = [save_image(f'{model.name}/test')]
    env.test(dataset)
Example #3
def main():
    model = ResidualDenseNetwork(3, rdb_blocks=10, rdb_conv=6,
                                 rgb_input=False).compile()
    dataset = load_datasets('../Data/datasets.json')['BSD']
    dataset.setattr(patch_size=96, depth=1, random=True, max_patches=64 * 1)
    env = Environment(model, f'{model.name}/save', f'{model.name}/log')
    env.fit(64, 1, dataset, restart=True)
    env.output_callbacks = [save_image(f'{model.name}/test')]
    env.test(dataset)
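
The two training scripts above follow the same Environment workflow and differ only in the model, the dataset key and the patch settings. Below is a hedged refactoring sketch, not part of the original examples; it assumes env.fit takes the batch size and epoch count as its first two positional arguments, which is how the calls above read.

def train_and_test(model, dataset_key, patch_size, max_patches,
                   batch=64, epochs=1, **fit_kwargs):
    # shared workflow extracted from the two scripts above (assumed semantics)
    dataset = load_datasets('../Data/datasets.json')[dataset_key]
    dataset.setattr(patch_size=patch_size, depth=1, random=True,
                    max_patches=max_patches)
    env = Environment(model, f'{model.name}/save', f'{model.name}/log')
    env.fit(batch, epochs, dataset, **fit_kwargs)
    env.output_callbacks = [save_image(f'{model.name}/test')]
    env.test(dataset)

# The first script above would then become:
train_and_test(InformationDistillationNetwork(3, rgb_input=False).compile(),
               '91-IMAGE', 48, 64 * 300, epochs=100,
               restart=False, learning_rate=1e-5)
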
Example #4
def main():
  flags, args = parser.parse_known_args()
  opt = Config()
  for pair in flags._get_kwargs():
    opt.setdefault(*pair)
  data_config_file = Path(flags.data_config)
  if not data_config_file.exists():
    raise RuntimeError("dataset config file doesn't exist!")
  for _ext in ('json', 'yaml', 'yml'):  # for compat
    # apply a 2-stage (master-slave) configuration: values from the master
    # root file can be overridden by the per-model (slave) file
    model_config_root = Path('Parameters/root.{}'.format(_ext))
    if opt.p:
      model_config_file = Path(opt.p)
    else:
      model_config_file = Path('Parameters/{}.{}'.format(opt.model, _ext))
    if model_config_root.exists():
      opt.update(Config(str(model_config_root)))
    if model_config_file.exists():
      opt.update(Config(str(model_config_file)))

  model_params = opt.get(opt.model, {})
  suppress_opt_by_args(model_params, *args)
  opt.update(model_params)
  model = get_model(flags.model)(**model_params)
  if flags.cuda:
    model.cuda()
  root = f'{flags.save_dir}/{flags.model}'
  if flags.comment:
    root += '_' + flags.comment
  verbosity = logging.DEBUG if flags.verbose else logging.INFO
  trainer = model.trainer

  datasets = load_datasets(data_config_file)
  # test names that match a configured dataset run the benchmark;
  # anything else is treated as a file pattern and sent to inference
  try:
    test_datas = [datasets[t.upper()] for t in flags.test]
    run_benchmark = True
  except KeyError:
    test_datas = []
    for pattern in flags.test:
      test_data = Dataset(test=_glob_absolute_pattern(pattern),
                          mode='pil-image1', modcrop=False)
      # name the ad-hoc dataset after its nearest existing parent directory
      father = Path(pattern)
      while not father.is_dir():
        if father.parent == father:
          break
        father = father.parent
      test_data.name = father.stem
      test_datas.append(test_data)
    run_benchmark = False

  if opt.verbose:
    dump(opt)
  for test_data in test_datas:
    loader_config = Config(convert_to='rgb',
                           feature_callbacks=[], label_callbacks=[],
                           output_callbacks=[], **opt)
    loader_config.batch = 1
    loader_config.subdir = test_data.name
    loader_config.output_callbacks += [
      save_image(root, flags.output_index, flags.auto_rename)]
    if opt.channel == 1:
      loader_config.convert_to = 'gray'

    with trainer(model, root, verbosity, flags.pth) as t:
      if flags.seed is not None:
        t.set_seed(flags.seed)
      loader = QuickLoader(test_data, 'test', loader_config,
                           n_threads=flags.thread)
      loader_config.epoch = flags.epoch
      if run_benchmark:
        t.benchmark(loader, loader_config)
      else:
        t.infer(loader, loader_config)
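
For reference, a minimal sketch of the 2-stage configuration merge performed near the top of main(). Assumptions: Config accepts a file path and behaves like a dict with update(), as its use above implies, and the per-model file name is hypothetical.

opt = Config()
# master: global defaults shared by every model (if the file exists)
opt.update(Config(str(Path('Parameters/root.yaml'))))
# slave: per-model parameters override the master's values ('idn' is a hypothetical model name)
opt.update(Config(str(Path('Parameters/idn.yaml'))))
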