Example #1
def maybe_pick_models_to_evaluate(checkpoint_dir):
    """Pick a checkpoint to evaluate that has not been evaluated already."""
    logging.info("Picking checkpoint to evaluate from %s.", checkpoint_dir)

    filenames = gfile.listdir(checkpoint_dir)
    filenames = [f[:-5] for f in filenames if f.endswith(".meta")]
    logging.info("Found existing checkpoints: %s", filenames)

    evaluated_filenames = []
    if gfile.exists(os.path.join(checkpoint_dir, EVAL_FILENAME)):
        with gfile.GFile(os.path.join(checkpoint_dir, EVAL_FILENAME),
                         "r") as f:
            evaluated_filenames = [
                l.strip().split(",")[0] for l in f.readlines()
            ]
        logging.info("Found already evaluated checkpoints: %s",
                     evaluated_filenames)

    checkpoints_to_evaluate = [
        f for f in filenames if f not in evaluated_filenames
    ]
    logging.info("Remaining potential checkpoints: %s",
                 checkpoints_to_evaluate)

    if checkpoints_to_evaluate:
        return os.path.join(checkpoint_dir, checkpoints_to_evaluate[0])
    else:
        return None
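A minimal usage sketch (hypothetical paths; evaluate_checkpoint is a placeholder for the caller's own routine, not part of the example above):

# Poll the directory and evaluate whatever checkpoint has not been evaluated yet.
checkpoint_path = maybe_pick_models_to_evaluate("/tmp/checkpoints")
if checkpoint_path is not None:
    evaluate_checkpoint(checkpoint_path)  # hypothetical evaluation routine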
Example #2
  def begin(self):
    self._global_step_tensor = tf.train.get_global_step()
    if self._global_step_tensor is None:
      raise RuntimeError('Global step should be created to use PlottingHook.')

    if not gfile.exists(self._logdir):
      gfile.makedirs(self._logdir)
Example #3
def write_eval_results(checkpoint_dir,
                       all_gen_sentences,
                       checkpoint_name,
                       mean_train_prob,
                       mean_valid_prob,
                       mean_gen_prob,
                       fid,
                       eval_filename=None):
    """Write evaluation results to disk."""
    if eval_filename is None:
        eval_filename = EVAL_FILENAME
    to_write = ",".join(
        map(str, [
            checkpoint_name, mean_train_prob, mean_valid_prob, mean_gen_prob,
            fid
        ]))
    eval_filepath = os.path.join(checkpoint_dir, eval_filename)
    previous_eval_content = ""
    if gfile.exists(eval_filepath):
        with gfile.GFile(eval_filepath, "r") as f:
            previous_eval_content = f.read()
    with gfile.GFile(eval_filepath, "w") as f:
        f.write(previous_eval_content + to_write + "\n")

    if all_gen_sentences is not None:
        with gfile.GFile(
                os.path.join(checkpoint_dir,
                             checkpoint_name + "_sentences.txt"), "w") as f:
            f.write("\n".join(all_gen_sentences))
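A hedged call sketch for the writer above; paths and metric values are illustrative, and EVAL_FILENAME is assumed to be defined as in Example #1:

# Append one row of illustrative metrics for a checkpoint.
write_eval_results(
    checkpoint_dir="/tmp/checkpoints",
    all_gen_sentences=["a generated sentence", "another generated sentence"],
    checkpoint_name="model.ckpt-1000",
    mean_train_prob=0.42,
    mean_valid_prob=0.40,
    mean_gen_prob=0.38,
    fid=12.3)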
Example #4
def select_ppl_results(checkpoint_dir,
                       valid_ppl,
                       valid_tokens,
                       test_ppl,
                       test_tokens,
                       eval_filename=None):
    """Write evaluation results to disk."""
    if eval_filename is None:
        eval_filename = 'ppl.csv'
    eval_filepath = os.path.join(checkpoint_dir, eval_filename)
    previous_eval_content = ""
    if gfile.exists(eval_filepath):
        with gfile.GFile(eval_filepath, "r") as f:
            previous_eval_content = f.read()

    best_ppl = None
    best_checkpoint = None
    for line in previous_eval_content.strip().split('\n'):
        fields = line.strip().split(',')
        if len(fields) >= 2:  # skip empty or malformed lines
            ppl = float(fields[1])
            if best_ppl is None or ppl < best_ppl:
                best_ppl = ppl
                best_checkpoint = fields[0]
    return best_checkpoint, best_ppl
Example #5
def load_and_drop_stream(data_file,
                         kb_file,
                         drop_incorrect=True,
                         verbose=False):
  """ this function filter incorrect samples without standardization."""
  if verbose:
    print('loading stream')
  fin_data = gfile.GFile(data_file)
  if gfile.exists(kb_file):
    fin_kb = gfile.GFile(kb_file)
  else:
    fin_kb = None
  if verbose:
    print('gfile loaded: ', fin_data)
  for line1 in fin_data:
    if verbose:
      print(line1)
    if len(line1.strip()) < 10:
      continue
    line1 = delete_non_ascii(line1)
    data_obj = json.loads(line1)

    if fin_kb:
      line2 = fin_kb.readline()
      if len(line2.strip()) < 10:
        continue
      line2 = delete_non_ascii(line2)
      kb_obj = json.loads(line2)
    else:
      kb_obj = None
    if (not drop_incorrect) or (
        'correct_sample' not in data_obj) or data_obj['correct_sample']:
      yield data_obj, kb_obj
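A usage sketch with illustrative file paths; process() is a hypothetical downstream handler, and delete_non_ascii comes from the surrounding codebase:

# Stream (data, kb) pairs, dropping samples flagged as incorrect.
for data_obj, kb_obj in load_and_drop_stream('data.jsonl', 'kb.jsonl',
                                             drop_incorrect=True):
  process(data_obj, kb_obj)  # hypothetical downstream handler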
Example #6
def inference_data_loader(
    input_lr_dir,
    input_hr_dir=None,
    input_dir_len=-1,
):
    """Inference pipeline data loader."""
    filedir = input_lr_dir
    down_sp = False
    if (input_lr_dir is None) or (not gfile.exists(input_lr_dir)):
        if (input_hr_dir is None) or (not gfile.exists(input_hr_dir)):
            raise ValueError('Input directory not found')
        filedir = input_hr_dir
        down_sp = True

    image_list_lr_temp = gfile.listdir(filedir)
    image_list_lr_temp = [_ for _ in image_list_lr_temp if _.endswith('.png')]
    image_list_lr_temp = sorted(
        image_list_lr_temp)  # sort alphabetically first, then numerically below
    image_list_lr_temp.sort(
        key=lambda f: int(''.join(filter(str.isdigit, f)) or -1))
    if input_dir_len > 0:
        image_list_lr_temp = image_list_lr_temp[:input_dir_len]

    image_list_lr = [os.path.join(filedir, _) for _ in image_list_lr_temp]

    # Read in and preprocess the images
    def preprocess_test(name):

        with tf.gfile.Open(name, 'rb') as fid:
            raw_im = np.asarray(bytearray(fid.read()), dtype=np.uint8)
            im = cv2.imdecode(raw_im,
                              cv2.IMREAD_COLOR).astype(np.float32)[:, :, ::-1]

        if down_sp:
            icol_blur = cv2.GaussianBlur(im, (0, 0), sigmaX=1.5)
            im = icol_blur[::4, ::4, ::]
        im = im / 255.0
        return im

    image_lr = [preprocess_test(_) for _ in image_list_lr]
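    # Prepend frames 5..1 in reverse as warm-up frames; the first five outputs
    # are discarded at inference time (see the "Warming up" branch below).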
    image_list_lr = image_list_lr[5:0:-1] + image_list_lr
    image_lr = image_lr[5:0:-1] + image_lr

    Data = collections.namedtuple('Data', 'paths_LR, inputs')
    return Data(paths_LR=image_list_lr, inputs=image_lr)
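A hedged usage sketch with an illustrative directory of .png frames:

# Load low-resolution frames for inference; the path is illustrative.
data = inference_data_loader('/tmp/lr_frames', input_hr_dir=None, input_dir_len=-1)
print('%d frames (incl. warm-up copies), first: %s'
      % (len(data.inputs), data.paths_LR[0]))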
Example #7
def write_ppl_results(checkpoint_dir,
                      checkpoint_name,
                      valid_ppl,
                      valid_tokens,
                      test_ppl,
                      test_tokens,
                      eval_filename=None):
    """Write evaluation results to disk."""
    if eval_filename is None:
        eval_filename = 'ppl.csv'
    to_write = ",".join(
        map(str,
            [checkpoint_name, valid_ppl, valid_tokens, test_ppl, test_tokens]))
    eval_filepath = os.path.join(checkpoint_dir, eval_filename)
    previous_eval_content = ""
    if gfile.exists(eval_filepath):
        with gfile.GFile(eval_filepath, "r") as f:
            previous_eval_content = f.read()
    with gfile.GFile(eval_filepath, "w") as f:
        f.write(previous_eval_content + to_write + "\n")
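A sketch of how the two perplexity helpers above might be combined; all metric values are illustrative, and the unused arguments of select_ppl_results are passed as None only to satisfy its signature:

# Append this run's results, then pick the checkpoint with the lowest valid ppl.
write_ppl_results('/tmp/checkpoints', 'model.ckpt-2000',
                  valid_ppl=35.2, valid_tokens=100000,
                  test_ppl=36.8, test_tokens=90000)
best_checkpoint, best_ppl = select_ppl_results(
    '/tmp/checkpoints', valid_ppl=None, valid_tokens=None,
    test_ppl=None, test_tokens=None)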
Example #8
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')
    if not FLAGS.input_ply:
        raise IOError('--input_ply must be specified.')
    if not FLAGS.output_ply:
        FLAGS.output_ply = FLAGS.input_ply.replace('.ply', '.reconstruct.ply')

    # load point cloud from ply file
    v, n = pu.read_point_ply(FLAGS.input_ply)

    # check if part size is too large
    min_bb = np.min(np.max(v, axis=0) - np.min(v, axis=0))
    if FLAGS.part_size > 0.25 * min_bb:
        warnings.warn(
            'WARNING: part_size seems too large. Recommend using a part_size < '
            '{:.2f} for this shape.'.format(0.25 * min_bb), UserWarning)

    surface_points = np.concatenate([v, n], axis=1)
    near_surface_samples = rec.get_in_out_from_ray(surface_points,
                                                   sample_factor=10,
                                                   std=0.01)

    xmin = np.min(surface_points[:, :3], 0)
    xmax = np.max(surface_points[:, :3], 0)

    # add some extra slack to xmin and xmax
    xmin -= FLAGS.part_size
    xmax += FLAGS.part_size

    if FLAGS.res_per_part == 0:
        res_per_part = int(64 * FLAGS.part_size)
    else:
        res_per_part = FLAGS.res_per_part
    npts = min(near_surface_samples.shape[0], FLAGS.npoints) - 1

    print('Performing latent grid optimization...')
    v, f, _, _ = rec.encode_decoder_one_scene(near_surface_samples,
                                              FLAGS.ckpt_dir,
                                              FLAGS.part_size,
                                              overlap=True,
                                              indep_pt_loss=True,
                                              init_std=FLAGS.init_std,
                                              xmin=xmin,
                                              xmax=xmax,
                                              res_per_part=res_per_part,
                                              npts=npts,
                                              steps=FLAGS.steps)

    out_dir = os.path.dirname(FLAGS.output_ply)
    if out_dir and not gfile.exists(out_dir):
        gfile.makedirs(out_dir)
    mesh = trimesh.Trimesh(v, f)

    if FLAGS.postprocess:
        print('Postprocessing generated mesh...')
        mesh = postprocess.remove_backface(mesh, surface_points)

    print('Writing reconstructed mesh to {}'.format(FLAGS.output_ply))
    with gfile.GFile(FLAGS.output_ply, 'wb') as fh:
        mesh.export(fh, 'ply')
Example #9
  def done_file_exists():
    return gfile.exists(os.path.join(output_dir, '__done__'))
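A complementary sketch (an assumption, not part of the example): the writer side that would create the marker file this helper checks for.

def mark_done(output_dir):
  """Hypothetical counterpart: create the __done__ marker once outputs exist."""
  with gfile.GFile(os.path.join(output_dir, '__done__'), 'w') as f:
    f.write('done')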
Example #10
  def export(self, path, session, overwrite=False):
    """Build the TF-Hub spec, module and sync ops."""

    method_specs = {}

    def module_fn():
      """A module_fn for use with hub.create_module_spec()."""
      # We will use a copy of the original object to build the graph.
      wrapped_object = self._object_factory()

      for method_name, method_info in self._captured_calls.items():
        captured_inputs, captured_specs = method_info
        tensor_inputs = nest.map_structure(_to_placeholder, captured_inputs)
        method_to_call = getattr(wrapped_object, method_name)
        tensor_outputs = method_to_call(**tensor_inputs)

        flat_tensor_inputs = nest.flatten(tensor_inputs)
        flat_tensor_inputs = {
            str(k): v for k, v in zip(
                range(len(flat_tensor_inputs)), flat_tensor_inputs)
        }
        flat_tensor_outputs = nest.flatten(tensor_outputs)
        flat_tensor_outputs = {
            str(k): v for k, v in zip(
                range(len(flat_tensor_outputs)), flat_tensor_outputs)
        }

        method_specs[method_name] = dict(
            specs=captured_specs,
            inputs=nest.map_structure(lambda _: None, tensor_inputs),
            outputs=nest.map_structure(lambda _: None, tensor_outputs))

        signature_name = ("default"
                          if method_name == "__call__" else method_name)
        hub.add_signature(signature_name, flat_tensor_inputs,
                          flat_tensor_outputs)

      hub.attach_message(
          "methods", tf.train.BytesList(value=[pickle.dumps(method_specs)]))
      hub.attach_message(
          "properties",
          tf.train.BytesList(value=[pickle.dumps(self._captured_attrs)]))

    # Create the spec that will be later used in export.
    hub_spec = hub.create_module_spec(module_fn, drop_collections=["sonnet"])

    # Get variables values
    module_weights = [
        session.run(v) for v in self._wrapped_object.get_all_variables()
    ]

    # create the sync ops
    with tf.Graph().as_default():
      hub_module = hub.Module(hub_spec, trainable=True, name="hub")

      assign_ops = []
      assign_phs = []
      for _, v in sorted(hub_module.variable_map.items()):
        ph = tf.placeholder(shape=v.shape, dtype=v.dtype)
        assign_phs.append(ph)
        assign_ops.append(tf.assign(v, ph))

      with tf.Session() as module_session:
        module_session.run(tf.local_variables_initializer())
        module_session.run(tf.global_variables_initializer())
        module_session.run(
            assign_ops, feed_dict=dict(zip(assign_phs, module_weights)))

        if overwrite and gfile.exists(path):
          gfile.rmtree(path)
        gfile.makedirs(path)
        hub_module.export(path, module_session)
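A hedged usage sketch; `exporter` stands for an instance of the (unshown) wrapper class that defines this method, built in the default graph, and the export path is illustrative.

with tf.Session() as session:
  session.run(tf.global_variables_initializer())
  exporter.export('/tmp/hub_module', session, overwrite=True)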
Example #11
def inference(
    input_lr_dir,
    input_hr_dir,
    input_dir_len,
    num_resblock,
    vsr_scale,
    checkpoint_path,
    output_dir,
    output_pre,
    output_name,
    output_ext,
):
  """Main inference function."""
  if checkpoint_path is None:
    raise ValueError('The checkpoint file is needed to perform the test.')

  # Declare the test data reader
  inference_data = inference_data_loader(input_lr_dir, input_hr_dir,
                                         input_dir_len)
  input_shape = [
      1,
  ] + list(inference_data.inputs[0].shape)
  output_shape = [1, input_shape[1] * vsr_scale, input_shape[2] * vsr_scale, 3]
  oh = input_shape[1] - input_shape[1] // 8 * 8
  ow = input_shape[2] - input_shape[2] // 8 * 8
  paddings = tf.constant([[0, 0], [0, oh], [0, ow], [0, 0]])
  print('input shape:', input_shape)
  print('output shape:', output_shape)

  # build the graph
  inputs_raw = tf.placeholder(tf.float32, shape=input_shape, name='inputs_raw')

  pre_inputs = tf.Variable(
      tf.zeros(input_shape), trainable=False, name='pre_inputs')
  pre_gen = tf.Variable(tf.zeros(output_shape), trainable=False, name='pre_gen')
  pre_warp = tf.Variable(
      tf.zeros(output_shape), trainable=False, name='pre_warp')

  transpose_pre = tf.space_to_depth(pre_warp, vsr_scale)
  inputs_all = tf.concat((inputs_raw, transpose_pre), axis=-1)
  with tf.variable_scope('generator'):
    gen_output = generator_f(
        inputs_all, 3, num_resblock, vsr_scale, reuse=False)
    # Deprocess the images output by the model and assign state for the next
    # frame.
    with tf.control_dependencies([tf.assign(pre_inputs, inputs_raw)]):
      outputs = tf.assign(pre_gen, ops.deprocess(gen_output))

  inputs_frames = tf.concat((pre_inputs, inputs_raw), axis=-1)
  with tf.variable_scope('fnet'):
    gen_flow_lr = fnet(inputs_frames, reuse=False)
    gen_flow_lr = tf.pad(gen_flow_lr, paddings, 'SYMMETRIC')

    deconv_flow = gen_flow_lr
    deconv_flow = ops.conv2_tran(
        deconv_flow, 3, 64, 2, scope='deconv_flow_tran1')
    deconv_flow = tf.nn.relu(deconv_flow)
    deconv_flow = ops.conv2_tran(
        deconv_flow, 3, 64, 2, scope='deconv_flow_tran2')
    deconv_flow = tf.nn.relu(deconv_flow)
    deconv_flow = ops.conv2(deconv_flow, 3, 2, 1, scope='deconv_flow_conv')
    gen_flow = ops.upscale_x(gen_flow_lr * 4.0, scale=vsr_scale)
    gen_flow = deconv_flow + gen_flow

    gen_flow.set_shape(output_shape[:-1] + [2])
  pre_warp_hi = tfa.image.dense_image_warp(pre_gen, gen_flow)
  pre_warp_hi = pre_warp_hi + extract_detail_ops(pre_warp_hi)
  before_ops = tf.assign(pre_warp, pre_warp_hi)

  print('Finish building the network')

  if FLAGS.use_ema:
    moving_average_decay = 0.99
    global_step = tf.train.get_or_create_global_step()
    ema = tf.train.ExponentialMovingAverage(moving_average_decay, global_step)
    ema_vars = _get_ema_vars()

  # In inference time, we only need to restore the weight of the generator
  var_list = tf.trainable_variables()

  restore_vars_dict = {}
  if FLAGS.use_ema:
    for v in var_list:
      if re.match('.*global_step.*', v.name):
        restore_vars_dict[v.name[:-2]] = v
      else:
        restore_vars_dict[v.name[:-2] + '/ExponentialMovingAverage'] = v
  else:
    restore_vars_dict = var_list

  weight_initializer = tf.train.Saver(restore_vars_dict)

  # Define the initialization operation
  init_op = tf.global_variables_initializer()
  local_init_op = tf.local_variables_initializer()

  config = tf.ConfigProto()
  config.gpu_options.allow_growth = True
  if not gfile.exists(output_dir):
    gfile.mkdir(output_dir)
  if not output_pre:
    image_dir = output_dir
  else:
    image_dir = os.path.join(output_dir, output_pre)
  if not gfile.exists(image_dir):
    gfile.mkdir(image_dir)

  with tf.Session(config=config) as sess:
    # Load the pretrained model
    sess.run(init_op)
    sess.run(local_init_op)

    print('Loading weights from ckpt model')
    weight_initializer.restore(sess, checkpoint_path)
    max_iter = len(inference_data.inputs)

    srtime = 0
    print('Frame evaluation starts!!')
    for i in range(max_iter):
      input_im = np.array([inference_data.inputs[i]]).astype(np.float32)
      feed_dict = {inputs_raw: input_im}
      t0 = time.time()
      if i != 0:
        sess.run(before_ops, feed_dict=feed_dict)
      output_frame = sess.run(outputs, feed_dict=feed_dict)
      srtime += time.time() - t0

      if i >= 5:
        name, _ = os.path.splitext(
            os.path.basename(str(inference_data.paths_LR[i])))
        filename = output_name + '_' + name
        out_path = os.path.join(image_dir, '%s.%s' % (filename, output_ext))
        print('saving image %s' % out_path)
        with tf.gfile.Open(out_path, 'wb') as image_file:
          img = np.clip(output_frame[0] * 255.0, 0, 255).astype(np.uint8)
          _, buff = cv2.imencode('.png', img[:, :, ::-1])
          image_file.write(buff.tobytes())

      else:
        print('Warming up %d' % (5 - i))
  tf.reset_default_graph()
  print('total time ' + str(srtime) + ', frame number ' + str(max_iter))
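A hedged call sketch for the inference entry point above; every argument value is illustrative.

inference(
    input_lr_dir='/tmp/lr_frames',
    input_hr_dir=None,
    input_dir_len=-1,
    num_resblock=16,
    vsr_scale=4,
    checkpoint_path='/tmp/checkpoints/model.ckpt-500000',
    output_dir='/tmp/output',
    output_pre='',
    output_name='output',
    output_ext='png')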