        # Log the output path and the value range of the first channel as a quick sanity check.
        print(out_fname)
        print(np.min(out_file[..., 0]), np.max(out_file[..., 0]))

        # Write the prediction to disk as an 8-bit image.
        Image.fromarray(out_file.astype(np.uint8)).save(out_fname)

if __name__ == '__main__':
  args = get_arguments()
  params = Params()
  params = load_json_to_params(params, args.json_path)
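  # Predict for the same number of steps as used for evaluation,
  # and forward the output options given on the command line.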
  params.num_steps_predict = params.num_steps_eval
  params.save_predictions = args.save_predictions
  params.save_dir = args.save_dir

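  # Inference settings: disable training behaviour (including batch norm updates),
  # fix the network input and original image resolutions, and use a batch size (Nb) of 1.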
  params.is_training = False
  params.batch_norm_istraining = False
  params.height_input = 512
  params.width_input = 1024
  params.height_orig = 604
  params.width_orig = 960
  params.Nb = 1

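  # Collect all PNG and JPEG images from the input directory.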
  filenames_list = []
  for file in os.listdir(args.image_dir):
    if file.endswith((".png", ".jpg")):
      filenames_list.append(os.path.join(args.image_dir, file))

  predict(params, filenames_list)


      # Create the Timeline object, and write it to a json file
      fetched_timeline = timeline.Timeline(run_metadata.step_stats)
      chrome_trace = fetched_timeline.generate_chrome_trace_format()
      with open(os.path.join(params.log_dir, 'timeline_01.json'), 'w') as f:
        f.write(chrome_trace)
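      # Also dump the raw run metadata for later inspection of memory and op statistics.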
      with open(os.path.join(params.log_dir, 'mem_info.json'), 'w') as f:
        f.write(str(run_metadata))

    else:
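      # Regular training step, without collecting profiling metadata.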
      loss_value, _ = sess.run([loss, train_op], feed_dict=feed_dict)

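    # Periodically save a model checkpoint.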
    if step % params.ckpt_save_steps == 0:
      model.save(saver, sess, params.log_dir, step)

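    # Report the loss and wall-clock time for this training step.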
    duration = time.time() - start_time
    print('step {:d} \t loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, duration))

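  # Stop and join the input queue-runner threads before exiting.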
  coord.request_stop()
  coord.join(threads)

if __name__ == '__main__':
  args = get_arguments()
  params = Params()
  params = load_json_to_params(params, args.json_path)
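  # NOTE: hardcoded local Cityscapes paths; adjust these to your own dataset location.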
  params.dataset_directory = '/home/ddegeus/datasets/Cityscapes/training/'
  params.filelist_filepath = '/home/ddegeus/datasets/Cityscapes/training/panoptic/filenames.lst'

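  # Enable training-mode behaviour (including batch norm updates) and start training.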
  params.is_training = True
  params.batch_norm_istraining = True
  print(params)
  train(params)