Example #1
def main(argv):
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  root_out = FLAGS.occnet_dir + '/extracted'
  if not file_util.exists(root_out):
    file_util.mkdir(root_out)

  if FLAGS.write_metrics:
    # TODO(ldif-user): Set up your own pipeline runner
    # TODO(ldif-user) Replace lambda x: None with a proto reader.
    with beam.Pipeline() as p:
      protos = p | 'ReadResults' >> (lambda x: None)

      with_metrics = protos | 'ExtractMetrics' >> beam.FlatMap(
          make_metrics)
      result_pcoll = with_metrics | 'MakeMetricList' >> (
          beam.combiners.ToList())
      result_str = result_pcoll | 'MakeMetricStr' >> beam.Map(
          save_metrics)
      out_path = FLAGS.occnet_dir + '/extracted/metrics_ub-v2.csv'
      _ = result_str | 'WriteMetrics' >> beam.io.WriteToText(
          out_path, num_shards=1, shard_name_template='')
  if FLAGS.write_metric_summaries:
    log.info('Aggregating results locally.')
    result_path = FLAGS.occnet_dir + '/extracted/metrics_ub-v2.csv'
    final_results = metrics.aggregate_extracted(result_path)
    summary_out_path = result_path.replace('/metrics_ub-v2.csv',
                                           '/metric_summary_ub-v2.csv')
    file_util.writetxt(summary_out_path, final_results.to_csv())
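Note: the two TODOs above are user-specific placeholders. A minimal sketch of the 'ReadResults' stage, assuming the results are stored as TFRecord shards of serialized results_pb2.Results protos that carry their own mesh key (both assumptions; the actual storage layout is up to the user):

def read_results(p, pattern):
  """Hypothetical reader for the 'ReadResults' TODO; the layout is assumed."""
  def to_keyed_pair(serialized):
    # Assumes the proto records its own key, e.g. 'test/<synset>/<hash>'.
    result = results_pb2.Results.FromString(serialized)
    return (result.key, serialized)
  return (p
          | 'ReadFiles' >> beam.io.ReadFromTFRecord(pattern)
          | 'KeyProtos' >> beam.Map(to_keyed_pair))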
Example #2
 def bts_depth_224(self):
     if not hasattr(self, '_bts_depth_224'):
         log.info('BTS depth 480 shape: %s' %
                  repr(self.bts_depth_480.shape))
         self._bts_depth_224 = self._batch_resize(self.bts_depth_480.copy(),
                                                  (224, 224), 'nearest')
     return self._bts_depth_224
Example #3
def make_metrics(proto):
  """Returns a single-element list containing a dictionary of metrics."""
  key, s = proto
  p = results_pb2.Results.FromString(s)
  mesh_path = f"{FLAGS.occnet_dir}/{key.replace('test/', '')}.ply"
  log.warning('Mesh path: %s' % mesh_path)
  try:
    mesh = file_util.read_mesh(mesh_path)
    _, synset, mesh_hash = key.split('/')
    if FLAGS.transform:
      ex = example.InferenceExample('test', synset, mesh_hash)
      tx = ex.gaps_to_occnet
      mesh.apply_transform(tx)
    log.info('Succeeded on %s' % mesh_path)
  # pylint:disable=broad-except
  except Exception as e:
    # pylint:enable=broad-except
    log.error(f"Couldn't load {mesh_path}, skipping due to {repr(e)}.")
    return []

  gt_mesh = mesh_util.deserialize(p.gt_mesh)
  dir_out = FLAGS.occnet_dir + '/metrics-out-gt/%s' % key
  if not file_util.exists(dir_out):
    file_util.makedirs(dir_out)
  file_util.write_mesh(f'{dir_out}/gt_mesh.ply', gt_mesh)
  file_util.write_mesh(f'{dir_out}/occnet_pred.ply', mesh)

  nc, fst, fs2t, chamfer = metrics.all_mesh_metrics(mesh, gt_mesh)
  return [{
      'key': key,
      'Normal Consistency': nc,
      'F-Score (tau)': fst,
      'F-Score (2*tau)': fs2t,
      'Chamfer': chamfer,
  }]
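Returning a single-element list on success and an empty list on failure means the beam.FlatMap stage in Example #1 silently drops meshes that could not be loaded instead of crashing the pipeline.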
Example #4
def transform_normals(normals, tx):
  """Transforms normals to a new coordinate frame (applies inverse-transpose).

  Args:
    normals: Numpy array with shape [batch_size, ..., 3].
    tx: Numpy array with shape [batch_size, 4, 4] or [4, 4]. Somewhat
      inefficient for [4,4] inputs (tiles across the batch dimension).

  Returns:
    Numpy array with shape [batch_size, ..., 3]. The transformed normals.
  """
  batch_size = normals.shape[0]
  assert normals.shape[-1] == 3
  normal_shape = list(normals.shape[1:-1])
  flat_normal_len = int(np.prod(normal_shape))  # 1 if []
  normals = np.reshape(normals, [batch_size, flat_normal_len, 3])
  assert len(tx.shape) in [2, 3]
  assert tx.shape[-1] == 4
  assert tx.shape[-2] == 4
  if len(tx.shape) == 2:
    tx = np.tile(tx[np.newaxis, ...], [batch_size, 1, 1])
  assert tx.shape[0] == batch_size

  normals_invalid = np.all(np.equal(normals, 0.0), axis=-1)
  tx_invt = np.linalg.inv(np.transpose(tx, axes=[0, 2, 1]))
  transformed = batch_apply_4x4(normals, tx_invt)
  transformed[normals_invalid, :] = 0.0
  norm = np.linalg.norm(transformed, axis=-1, keepdims=True)
  log.info('Norm shape, transformed shape: %s %s' %
           (repr(norm.shape), repr(transformed.shape)))
  transformed /= norm + 1e-8
  return np.reshape(transformed, [batch_size] + normal_shape + [3])
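A quick usage sketch (values are illustrative): under a non-uniform scale, the inverse-transpose matters because normals do not transform like points.

n = np.array([[[1.0, 1.0, 0.0]]], dtype=np.float32) / np.sqrt(2.0)  # [1, 1, 3]
tx = np.diag([2.0, 1.0, 1.0, 1.0]).astype(np.float32)  # [4, 4]
out = transform_normals(n, tx)
# The inverse-transpose of diag(2, 1, 1) halves the x-component, so the result
# is proportional to [0.5, 1, 0] after renormalization, not to [1, 1, 0].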
Example #5
    def _testInterpolateFromGrid34(self):
        """Tests trilinear interpolation."""
        grid = np.zeros([2, 2, 2], dtype=np.float32)
        grid[0, 0, 0] = 0
        grid[0, 0, 1] = 0
        grid[0, 1, 0] = 0
        grid[0, 1, 1] = 0
        grid[1, 0, 0] = 1
        grid[1, 0, 1] = 1
        grid[1, 1, 0] = 1
        grid[1, 1, 1] = 1
        grid = np.reshape(grid, [1, 2, 2, 2, 1])
        grid_tf = tf.constant(grid, dtype=tf.float32)
        samples = tf.constant([[[0.5, 0.5, 0.375]]], dtype=tf.float32)
        samples = tf.reshape(samples, [1, 1, 3])
        result_tf = geom_util.interpolate_from_grid(samples, grid_tf)
        expected = np.array([0.25])

        with self.test_session() as sess:
            returned, validity = sess.run(result_tf)
        log.info(f'result {returned}')
        log.info(f'validity: {validity}')
        distance = float(np.reshape(np.abs(expected - returned), [1]))
        self.assertLess(
            distance, DISTANCE_EPS, 'Expected \n%s\n but got \n%s' %
            (np.array_str(expected), np.array_str(returned)))
Example #6
def make_dataset(directory, batch_size, mode, split):
  """Generates a one-shot style tf.Dataset."""
  assert split in ['train', 'val', 'test']
  dataset = tf.data.Dataset.list_files(f'{directory}/{split}/*/*/mesh_orig.ply')
  log.info('Mapping...')
  if mode == 'train':
    dataset = dataset.shuffle(buffer_size=2 * batch_size)
    dataset = dataset.repeat()
  # pylint: disable=g-long-lambda
  dataset = dataset.map(
      lambda filename: tf.py_func(_example_dict_tf_func_wrapper, [filename], [
          tf.float32, tf.float32, tf.string, tf.float32, tf.float32, tf.float32,
          tf.float32
      ]),
      num_parallel_calls=os.cpu_count())
  # pylint: enable=g-long-lambda

  bs = batch_size
  dataset = dataset.batch(bs, drop_remainder=True).prefetch(1)

  dataset_items = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
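  # A zero-argument lambda serves as a cheap mutable object to hang attributes on.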
  dataset_obj = lambda: 0
  dataset_obj.bounding_box_samples = tf.ensure_shape(dataset_items[0],
                                                     [bs, 100000, 4])
  dataset_obj.depth_renders = tf.ensure_shape(dataset_items[1],
                                              [bs, 20, 224, 224, 1])
  dataset_obj.mesh_name = dataset_items[2]
  dataset_obj.near_surface_samples = tf.ensure_shape(dataset_items[3],
                                                     [bs, 100000, 4])
  dataset_obj.grid = tf.ensure_shape(dataset_items[4], [bs, 32, 32, 32])
  dataset_obj.world2grid = tf.ensure_shape(dataset_items[5], [bs, 4, 4])
  dataset_obj.surface_point_samples = tf.ensure_shape(dataset_items[6],
                                                      [bs, 10000, 6])

  return dataset_obj
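A usage sketch (the directory path is hypothetical): because the tensors come from a one-shot iterator, a TF1-style session evaluates the attributes directly.

d = make_dataset('/path/to/dataset', batch_size=8, mode='train', split='train')
with tf.compat.v1.Session() as sess:
  depth, names = sess.run([d.depth_renders, d.mesh_name])
  # depth: [8, 20, 224, 224, 1] float32; names: [8] byte strings.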
Example #7
def get_result_path(xid):
    """Generates the result path associated with the requested XID."""
    # TODO(ldif-user) Set up the result path:
    base = FLAGS.input_dir + '/ROOT%i-*00000-*' % xid
    matches = file_util.glob(base)
    assert len(matches) >= 1
    ckpts = []
    for match in matches:
        # TODO(ldif-user) Set the file extension
        extension = None
        ckpt = int(match.split(extension)[0].split('-')[-1])
        ckpts.append(ckpt)
    if len(ckpts) > 1 and not FLAGS.use_newest:
        log.info(
            'Found multiple checkpoint matches for %s and --nouse_newest: %s' %
            (base, repr(ckpts)))
    if len(ckpts) == 1:
        ckpt = ckpts[0]
    elif len(ckpts) > 1:
        ckpts.sort()
        ckpt = ckpts[-1]
        log.info('Found multiple checkpoint matches %s, using %s' %
                 (repr(ckpts), repr(ckpt)))
    # TODO(ldif-user) Set up the result path:
    path = FLAGS.input_dir + '/ROOT%i-%i.*'
    path = path % (xid, ckpt)
    return path
Example #8
def extract_local_frame_images(world_xyz_im, world_normal_im, embedding,
                               decoder):
  """Computes local frame XYZ images for each of the world2local frames."""
  world2local = np.squeeze(decoder.world2local(embedding))
  log.info(world2local.shape)
  world2local = tile_world2local_frames(world2local, 15)
  log.info(world2local.shape)
  xyz_ims = []
  nrm_ims = []
  is_invalid = np.all(world_xyz_im == 0.0, axis=-1)
  # is_valid = np.logical_not(is_invalid)
  # plot(is_invalid)
  for i in range(world2local.shape[0]):
    m = world2local[i, :, :]
    local_xyz_im = apply_4x4(world_xyz_im, m, are_points=True)
    local_xyz_im[is_invalid, :] = 0.0
    local_nrm_im = apply_4x4(
        world_normal_im, np.linalg.inv(m).T, are_points=False)
    local_nrm_im /= np.linalg.norm(local_nrm_im, axis=-1, keepdims=True) + 1e-8
    local_nrm_im[is_invalid, :] = 0.0
    xyz_ims.append(local_xyz_im)
    nrm_ims.append(local_nrm_im)
  # log.info(xyz_ims[1][is_valid, :])
  # log.info(xyz_ims[31][is_valid, :])
  return np.stack(xyz_ims), np.stack(nrm_ims)
Example #9
def _gapsview(d,
              msh=None,
              pts=None,
              grd=None,
              world2grid=None,
              grid_threshold=0.0,
              camera='default'):
    """Interactively views a mesh, pointcloud, and/or grid at the same time."""
    assert msh is not None or pts is not None or grd is not None
    mpath = ''
    ppath = ''
    gpath = ''
    init_camera = _setup_cam(camera)
    if msh is not None:
        mpath = d + '/m.ply'
        file_util.write_mesh(mpath, msh)
        mpath = ' ' + mpath
        log.info('Mpath: %s' % mpath)
    ppath = _make_pts_input_str(d, pts)
    if grd is not None:
        gpath = d + '/g.grd'
        file_util.write_grd(gpath, grd, world2grid=world2grid)
        gpath = ' ' + gpath + ' -grid_threshold %0.6f' % grid_threshold
    cmd = '%s/gapsview%s%s%s%s' % (path_util.gaps_path(), mpath, ppath, gpath,
                                   init_camera)
    log.info(cmd)
    sp.check_output(cmd, shell=True)
Example #10
  def __init__(self, split, synset_or_cat, mesh_hash=None, dynamic=False,
               verbose=True):
    self.split = split
    self.synset, self.cat = _parse_synset_or_cat(synset_or_cat)
    self.mesh_hash = mesh_hash
    self._rgb_path = None
    self._rgb_image = None
    self.__archive = None
    self._uniform_samples = None
    self._near_surface_samples = None
    self._grid = None
    self._world2grid = None
    self._gt_path = None
    self._tx = None
    self._gaps_to_occnet = None
    self._gt_mesh = None
    self._tx_path = None
    self._surface_samples = None
    self._normalized_gt_mesh = None
    self._r2n2_images = None
    self.depth_native_res = 224

    self.is_from_directory = False

    if dynamic:
      if verbose:
        log.verbose(
            'Using dynamic files, not checking ahead for file existence.')
    elif not file_util.exists(self.npz_path):
      raise ValueError('Expected a .npz at %s.' % self.npz_path)
    else:
      log.info(self.npz_path)
Example #11
def maxpool2x2_layer(inputs):
  """A maxpool layer that pools 2x2 -> 1."""
  assert len(inputs.get_shape().as_list()) == 4
  batch_size_in, height_in, width_in, channel_count_in = inputs.get_shape(
  ).as_list()
  outputs = tf.layers.max_pooling2d(
      inputs,
      pool_size=2,
      strides=2,
      padding='same',
      data_format='channels_last',
      name=None)

  # outputs = tf.nn.max_pool(
  #     inputs, ksize=[0, 2, 2, 0], strides=[1, 2, 2, 1], padding='SAME')
  batch_size_out, height_out, width_out, channel_count_out = outputs.get_shape(
  ).as_list()
  assert batch_size_in == batch_size_out
  assert height_out * 2 == height_in
  assert width_out * 2 == width_in
  assert channel_count_in == channel_count_out
  log.info(
      'Applying maxpool2x2 layer. Input: [%i, %i, %i, %i]. Output: [%i, %i, %i, %i].'
      % (batch_size_in, height_in, width_in, channel_count_in, batch_size_out,
         height_out, width_out, channel_count_out))
  return outputs
Example #12
def print_pivot_table(class_mean_df, metric_name, metric_pretty_print):
  cmean = pd.pivot_table(
      class_mean_df, values=metric_name, index=['class'], columns=['xid'])
  log.info('%s:\n%s' %
           (metric_pretty_print,
            tabulate.tabulate(
                cmean, headers='keys', tablefmt='fancy_grid', floatfmt='.3f')))
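For reference, a minimal input that exercises the function (the column names follow the pivot arguments above; the metric values are made up):

df = pd.DataFrame({
    'class': ['chair', 'chair', 'table', 'table'],
    'xid': [1, 2, 1, 2],
    'iou': [0.61, 0.64, 0.55, 0.58],
})
print_pivot_table(df, 'iou', 'Mean IoU per class')
# Logs a class-by-xid grid rendered with tabulate's fancy_grid format.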
Example #13
def load_example_dict(example_directory):
  """Loads an example from disk and makes a str:numpy dictionary out of it."""
  entry_t = time.time()
  start_t = entry_t  # Keep around the function entry time for a cumulative print.
  e = example.InferenceExample.from_directory(example_directory, verbose=False)
  end_t = time.time()
  log.verbose(f'Make example: {end_t - start_t}')
  start_t = end_t

  # The from_directory method should probably optionally take in a synset.
  bounding_box_samples = e.uniform_samples
  end_t = time.time()
  log.verbose(f'Bounding box: {end_t - start_t}')
  start_t = end_t
  # TODO(kgenova) There is a pitfall here: the depth gets divided by 1000
  # after this point. So if other depth images are provided, they would either
  # need to also be stored in the GAPS format or be artificially multiplied
  # by 1000.
  depth_renders = e.depth_images  # [20, 224, 224, 1]. 1 or 1000? trailing 1?
  assert depth_renders.shape[0] == 1
  depth_renders = depth_renders[0, ...]
  end_t = time.time()
  log.verbose(f'Depth renders: {end_t - start_t}')
  start_t = end_t

  mesh_name = e.mesh_name
  end_t = time.time()
  log.verbose(f'Mesh name: {end_t - start_t}')
  start_t = end_t

  log.info(f'Loading {mesh_name} from split {e.split}')
  near_surface_samples = e.near_surface_samples
  end_t = time.time()
  log.verbose(f'NSS: {end_t - start_t}')

  start_t = end_t
  grid = e.grid
  end_t = time.time()
  log.verbose(f'Grid: {end_t - start_t}')
  start_t = end_t

  world2grid = e.world2grid
  end_t = time.time()
  log.verbose(f'world2grid: {end_t - start_t}')
  start_t = end_t

  surface_point_samples = e.precomputed_surface_samples_from_dodeca
  end_t = time.time()
  log.verbose(f'surface points: {end_t - start_t}')
  log.verbose(f'load_example_dict total time: {end_t - entry_t}')
  return {
      'bounding_box_samples': bounding_box_samples,
      'depth_renders': depth_renders,
      'mesh_name': mesh_name,
      'near_surface_samples': near_surface_samples,
      'grid': grid,
      'world2grid': world2grid,
      'surface_point_samples': surface_point_samples,
  }
Example #14
def transform_r2n2_normal_cam_image_to_world_frame(normal_im, idx, e):
  is_invalid = np.all(normal_im == 0.0, axis=-1)
  log.info(is_invalid.shape)
  is_invalid = is_invalid.reshape([224, 224])
  world_im = apply_4x4(
      normal_im, np.linalg.inv(e.r2n2_cam2world[idx, ...]).T, are_points=False)
  world_im /= (np.linalg.norm(world_im, axis=-1, keepdims=True) + 1e-8)
  # world_im = np_util.zero_by_mask(is_invalid, world_im).astype(np.float32)
  return world_im
Example #15
def random_scales(batch_size, minval, maxval):
    scales = tf.random.uniform(shape=[batch_size, 3],
                               minval=minval,
                               maxval=maxval)
    hom_coord = tf.ones([batch_size, 1], dtype=tf.float32)
    scales = tf.concat([scales, hom_coord], axis=1)
    s = tf.linalg.diag(scales)
    log.info(s.get_shape().as_list())
    return s
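Each batch element ends up as a homogeneous scaling transform, since tf.linalg.diag turns every length-4 row into a 4x4 diagonal matrix:

s = random_scales(batch_size=2, minval=0.5, maxval=1.5)
# s has shape [2, 4, 4]; s[i] == diag(sx, sy, sz, 1.0).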
Example #16
def print_all(sif_vector, decoder, e, resolution=256, sample_count=100000):
  results = compute_all(sif_vector, decoder, e, resolution, sample_count)
  metrics = ''
  metrics += 'IoU             : %0.2f\n' % results['iou']
  metrics += 'F-Score (tau)   : %0.2f\n' % results['f_score_tau']
  metrics += 'F-Score (2*tau) : %0.2f\n' % results['f_score_2tau']
  metrics += 'Normal Const.   : %0.2f\n' % results['normal_c']
  metrics += 'Chamfer Distance: %0.5f\n' % results['chamfer']
  log.info(metrics)
  print(metrics)
Example #17
    def eval_implicit_parameters(self, implicit_parameters, samples):
        """Decodes each implicit parameter vector at each of its sample points.

    Args:
      implicit_parameters: Tensor with shape [batch_size, element_count,
        element_embedding_length]. The embedding associated with each element.
      samples: Tensor with shape [batch_size, element_count, sample_count, 3].
        The sample locations. Each embedding vector will be decoded at each of
        its sample locations.

    Returns:
      Tensor with shape [batch_size, element_count, sample_count, 1]. The
        decoded value of each element's embedding at each of the samples for
        that embedding.
    """
        # Each element has its own network:
        if self.single_element_implicit_eval_fun is None:
            raise ValueError('The implicit decoder function is None.')
        implicit_param_shape_in = implicit_parameters.get_shape().as_list()
        tf.logging.info('BID0: Input implicit param shape: %s',
                        repr(implicit_param_shape_in))
        tf.logging.info('BID0: Input samples shape: %s',
                        repr(samples.get_shape().as_list()))
        reuse = self._eval_implicit_parameters_call_count > 0
        # TODO(kgenova) Now that batching OccNet is supported, batch this call.
        with tf.variable_scope(self._name + '/eval_implicit_parameters',
                               reuse=reuse):
            if self._enable_deprecated:
                log.info('Deprecated eval.')
                vals = self._deprecated_multielement_eval(
                    implicit_parameters, samples)
            else:
                batch_size, element_count, element_embedding_length = (
                    implicit_parameters.get_shape().as_list())
                sample_count = samples.get_shape().as_list()[-2]
                batched_parameters = tf.reshape(
                    implicit_parameters,
                    [batch_size * element_count, element_embedding_length])
                batched_samples = tf.reshape(
                    samples, [batch_size * element_count, sample_count, 3])
                if self._model_config.hparams.npe == 't':
                    raise ValueError(
                        'Incompatible hparams. Must use _deprecated_multielement_eval'
                        ' if requesting separate network weights per shape element.'
                    )
                with tf.variable_scope('all_elements', reuse=False):
                    batched_vals = self.single_element_implicit_eval_fun(
                        batched_parameters,
                        batched_samples,
                        apply_sigmoid=False,
                        model_config=self._model_config)
                vals = tf.reshape(batched_vals,
                                  [batch_size, element_count, sample_count, 1])
        self._eval_implicit_parameters_call_count += 1
        return vals
Example #18
def mesh_metrics(element):
    """Computes the chamfer distance and normal consistency metrics."""
    log.info('Metric step input: %s' % repr(element))
    example_np = element_to_example(element)
    if not element['mesh_str']:
        raise ValueError(
            'Empty mesh string encountered for %s but mesh metrics required.' %
            repr(element))
    mesh = mesh_util.deserialize(element['mesh_str'])
    if mesh.is_empty:
        raise ValueError(
            'Empty mesh encountered for %s but mesh metrics required.' %
            repr(element))

    sample_count = 100000
    points_pred, normals_pred = sample_points_and_face_normals(
        mesh, sample_count)
    points_gt, normals_gt = sample_points_and_face_normals(
        example_np.gt_mesh, sample_count)

    pred_to_gt_dist, pred_to_gt_indices = pointcloud_neighbor_distances_indices(
        points_pred, points_gt)
    gt_to_pred_dist, gt_to_pred_indices = pointcloud_neighbor_distances_indices(
        points_gt, points_pred)

    pred_to_gt_normals = normals_gt[pred_to_gt_indices]
    gt_to_pred_normals = normals_pred[gt_to_pred_indices]

    # We take abs because the OccNet code takes abs
    pred_to_gt_normal_consistency = np.abs(
        dot_product(normals_pred, pred_to_gt_normals))
    gt_to_pred_normal_consistency = np.abs(
        dot_product(normals_gt, gt_to_pred_normals))

    # The 100 factor is because papers multiply by 100 for display purposes.
    chamfer = 100.0 * (np.mean(pred_to_gt_dist**2) +
                       np.mean(gt_to_pred_dist**2))

    nc = 0.5 * np.mean(pred_to_gt_normal_consistency) + 0.5 * np.mean(
        gt_to_pred_normal_consistency)

    tau = 1e-04
    f_score_tau = f_score(pred_to_gt_dist, gt_to_pred_dist, tau)
    f_score_2tau = f_score(pred_to_gt_dist, gt_to_pred_dist, 2.0 * tau)

    element['chamfer'] = chamfer
    element['normal_consistency'] = nc
    element['f_score_tau'] = f_score_tau
    element['f_score_2tau'] = f_score_2tau
    element['split'] = example_np.split
    element['synset'] = example_np.synset
    element['name'] = example_np.mesh_hash
    element['class'] = example_np.cat
    return element
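The f_score helper is not part of this listing. A sketch consistent with how it is called above, using the common definition (precision is the fraction of predicted samples within tau of the ground truth, recall is the converse, and F is their harmonic mean); this is an assumption about the actual implementation:

def f_score(pred_to_gt_dist, gt_to_pred_dist, tau):
    """Hypothetical F-score at distance threshold tau (common definition)."""
    precision = np.mean((pred_to_gt_dist <= tau).astype(np.float32))
    recall = np.mean((gt_to_pred_dist <= tau).astype(np.float32))
    if precision + recall == 0.0:
        return 0.0
    return 2.0 * precision * recall / (precision + recall)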
Example #19
def assert_shape(tensor, shape, name):
    """Fails an assert if the tensor fails the shape compatibility check."""
    real_shape = tensor.get_shape().as_list()
    same_rank = len(real_shape) == len(shape)
    values_different = [
        1 for i in range(min(len(shape), len(real_shape)))
        if shape[i] != real_shape[i] and shape[i] != -1
    ]
    all_equal = not values_different
    if not same_rank or not all_equal:
        log.info(
            'Error: Expected tensor %s to have shape %s, but it had shape %s.'
            % (name, str(shape), str(real_shape)))
        assert False
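A -1 in the expected shape acts as a wildcard for that dimension:

points = tf.zeros([4, 1000, 3])
assert_shape(points, [4, -1, 3], 'points')  # Passes: -1 matches any size.
# assert_shape(points, [4, 3], 'points')    # Would log the error and assert.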
Example #20
def ptsview(pts, mesh=None, camera='fixed'):
    """Interactively visualizes a pointcloud alongside an optional mesh."""
    with py_util.py2_temporary_directory() as d:
        ptspath = _make_pts_input_str(d, pts, allow_none=False)
        init_camera = _setup_cam(camera)
        mshpath = ''
        if mesh is not None:
            mshpath = d + '/m.ply'
            file_util.write_mesh(mshpath, mesh)
            mshpath = ' ' + mshpath
        cmd = '%s/ptsview %s%s%s' % (path_util.gaps_path(), ptspath, mshpath,
                                     init_camera)
        log.info(cmd)
        sp.check_output(cmd, shell=True)
Example #21
def world_xyzn_im_to_pts(world_xyz, world_n):
  """Makes a 10K long XYZN pointcloud from an XYZ image and a normal image."""
  # World image + world normals -> world points + normals.
  is_valid = np.logical_not(np.all(world_xyz == 0.0, axis=-1))
  world_xyzn = np.concatenate([world_xyz, world_n], axis=-1)
  world_xyzn = world_xyzn[is_valid, :]
  world_xyzn = np.reshape(world_xyzn, [-1, 6])
  np.random.shuffle(world_xyzn)
  point_count = world_xyzn.shape[0]
  assert point_count > 0
  log.info('The number of valid samples is: %i' % point_count)
  while point_count < 10000:
    world_xyzn = np.tile(world_xyzn, [2, 1])
    point_count = world_xyzn.shape[0]
  return world_xyzn[:10000, :]
Example #22
def mshview(mesh, camera='fixed'):
    """Interactively views a mesh."""
    with py_util.py2_temporary_directory() as d:
        init_camera = _setup_cam(camera)
        if not isinstance(mesh, list):
            mesh = [mesh]
        assert len(mesh) <= 4
        mshpath = ''
        for m, c in zip(mesh, ['m', 'n', 'o', 'p']):
            lpath = f'{d}/{c}.ply'
            mshpath += f' {lpath}'
            file_util.write_mesh(lpath, m)
        cmd = '%s/mshview %s%s' % (path_util.gaps_path(), mshpath, init_camera)
        log.info(cmd)
        sp.check_output(cmd, shell=True)
Example #23
def get_free_gpu_memory(cuda_device_index):
    """Returns the current # of free megabytes for the specified device."""
    if sys.platform == "darwin":
        # No GPUs on darwin...
        return 0
    result = sp.check_output(
        'nvidia-smi --query-gpu=memory.free '
        '--format=csv,nounits,noheader',
        shell=True)
    result = result.decode('utf-8').split('\n')[:-1]
    log.verbose(f'The system has {len(result)} gpu(s).')
    if cuda_device_index >= len(result):
        raise ValueError(f"Couldn't parse result for GPU #{cuda_device_index}")
    free_mem = int(result[cuda_device_index])
    log.info(f'The {cuda_device_index}-th GPU has {free_mem} MB free.')
    return free_mem
Example #24
 def hparams(self):
     """Load a tf.HParams() object based on the serialized hparams file."""
     if self._hparams is None:
         hparam_path = '%s/train/hparam_pickle.txt' % self.root_dir
         if file_util.exists(hparam_path):
             log.info('Found serialized hparams. Loading from %s' %
                      hparam_path)
             # hparams = hparams_util.read_hparams(hparam_path)
             self._hparams = (
                 hparams_util.
                 read_hparams_with_new_backwards_compatible_additions(
                     hparam_path))
         else:
             raise ValueError('No serialized hparam file found at %s' %
                              hparam_path)
     return self._hparams
Example #25
    def sample_on_grid(self, sample_center, sample_size, resolution):
        """Evaluates the function on a grid."""
        self.ensure_initialized()
        # ensure_np_shape(sample_grid, ['res', 'res', 'res'], 'sample_grid')
        # mcubes_res = sample_grid.shape[0]
        if resolution % self._block_res:
            raise ValueError(
                'Input resolution is %i, but must be a multiple of block size, %i.'
                % (resolution, self._block_res))
        block_count = resolution // self._block_res
        block_size = sample_size / block_count

        base_grid = np_util.make_coordinate_grid_3d(
            length=self._block_res,
            height=self._block_res,
            width=self._block_res,
            is_screen_space=False,
            is_homogeneous=False).astype(np.float32)
        lower_corner = sample_center - sample_size / 2.0
        start_time = time.time()
        l_block = []
        i = 0
        for li in range(block_count):
            l_min = lower_corner[2] + li * block_size
            h_block = []
            for hi in range(block_count):
                h_min = lower_corner[1] + hi * block_size
                w_block = []
                for wi in range(block_count):
                    w_min = lower_corner[0] + wi * block_size
                    offset = np.reshape(
                        np.array([w_min, l_min, h_min], dtype=np.float32),
                        [1, 1, 1, 3])
                    sample_locations = block_size * base_grid + offset
                    feed_dict = self._feed_dict()
                    feed_dict[self._sample_locations_ph] = sample_locations
                    grid_out_np = self._session.run(self._predicted_class_grid,
                                                    feed_dict=feed_dict)
                    i += 1
                    w_block.append(grid_out_np)
                h_block.append(np.concatenate(w_block, axis=2))
            l_block.append(np.concatenate(h_block, axis=0))
        grid_out = np.concatenate(l_block, axis=1)
        compute_time = time.time() - start_time
        log.info('Post Initialization Time: %s' % compute_time)
        return grid_out, compute_time
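To make the blocking concrete: with resolution=256 and a block size of 32, block_count is 8, so the loops run 8^3 = 512 forward passes, each filling a 32x32x32 chunk of the final 256x256x256 output grid.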
Example #26
def read_cam_file(path, verbose=False):
    """Reads a GAPS .cam file to 4x4 matrices.

  Args:
    path: filepath to a gaps .cam file with K cameras.
    verbose: Boolean. Whether to print detailed file info.

  Returns:
    cam2world: Numpy array with shape [K, 4, 4].
    xfov: Numpy array with shape [K]. The x field-of-view in GAPS' format (which
      is the half-angle in radians).
  """
    lines = file_util.readlines(path)
    if verbose:
        log.info('There are %i cameras in %s.' % (len(lines), path))
    cam2worlds, xfovs = [], []
    for i, l in enumerate(lines):
        vals = [float(x) for x in l.split(' ') if x]
        if len(vals) != 12:
            raise ValueError(
                'Failed reading %s: Expected 12 items on line %i, but read %i.'
                % (path, i, len(vals)))
        viewpoint = np.array(vals[0:3]).astype(np.float32)
        towards = np.array(vals[3:6]).astype(np.float32)
        up = np.array(vals[6:9]).astype(np.float32)
        right = np.cross(towards, up)
        right = right / np.linalg.norm(right)
        xfov = vals[9]
        # 11th is yfov but GAPS ignores it and recomputes.
        # 12th is a 'score' that is irrelevant.
        towards = towards / np.linalg.norm(towards)
        up = np.cross(right, towards)
        up = up / np.linalg.norm(up)
        # aspect = float(height) / float(width)
        # yf = math.atan(aspect * math.tan(xfov))
        rotation = np.stack([right, up, -towards], axis=1)
        rotation_4x4 = np.eye(4)
        rotation_4x4[:3, :3] = rotation
        cam2world = rotation_4x4.copy()
        cam2world[:3, 3] = viewpoint
        cam2worlds.append(cam2world)
        xfovs.append(xfov)
    cam2worlds = np.stack(cam2worlds, axis=0).astype(np.float32)
    xfovs = np.array(xfovs, dtype=np.float32)
    return cam2worlds, xfovs
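Since GAPS stores the half-angle, converting xfov to a pinhole focal length in pixels for a width-pixel image is a one-liner (a sketch, assuming square pixels):

def xfov_to_focal_px(xfov, width):
    # The full horizontal FOV is 2 * xfov, so focal_px = (width / 2) / tan(xfov).
    return 0.5 * width / np.tan(xfov)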
Example #27
def conv_layer(inputs, depth, model_config):
  """A single 3x3 convolutional layer with stride 1."""
  normalizer, normalizer_params = get_normalizer_and_mode(model_config)
  ibs, ih, iw, ic = inputs.get_shape().as_list()
  outputs = contrib_layers.convolution(
      inputs=inputs,
      num_outputs=depth,
      kernel_size=3,
      stride=1,
      padding='SAME',
      normalizer_fn=normalizer,
      normalizer_params=normalizer_params,
      activation_fn=tf.nn.leaky_relu)
  obs, oh, ow, oc = outputs.get_shape().as_list()
  log.info(
      'Applying conv+relu layer. Input: [%i, %i, %i, %i]. Output: [%i, %i, %i, %i].'
      % (ibs, ih, iw, ic, obs, oh, ow, oc))
  return outputs
Example #28
 def _batch_resize(self, ims, res, strategy='nearest'):
   """Resizes a batch of images to a new resolution."""
   order = {'nearest': 0, 'bilinear': 1, 'bicubic': 3}[strategy]
   bs = ims.shape[0]
   out = []
   log.info('Input ims shape: %s' % repr(ims.shape))
   has_extra_dim = len(ims.shape) == 4
   if not has_extra_dim:
     ims = ims[..., np.newaxis]
   h, w = ims.shape[1:3]
   for i in range(bs):
     o = interpolation.zoom(
         ims[i, ...], [res[0] / h, res[1] / w, 1.0], np.float32, order=order)
     out.append(o)
   out = np.stack(out)
   if not has_extra_dim:
     out = np.reshape(out, out.shape[:-1])
   return out
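Called from inside the class, usage matches Example #2 above (the shapes are illustrative): an input without a channel axis gains one temporarily and comes back without it.

depth_480 = np.zeros([20, 480, 480], dtype=np.float32)
depth_224 = self._batch_resize(depth_480, (224, 224), 'nearest')
assert depth_224.shape == (20, 224, 224)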
Example #29
 def initialize(self, session, feed_dict):
     """Initializes the data by evaluating the tensors in the session."""
     np_list = session.run(self._structured_implicit_tf.tensor_list,
                           feed_dict=feed_dict)
     (self.element_constants, self.element_centers,
      self.element_radii) = np_list[:3]
     # pylint:disable=protected-access
     if self._structured_implicit_tf._model_config.hparams.ipe != 'f':
         # pylint:enable=protected-access
         # if len(np_list) == 4:
         self.element_iparams = np_list[3]
     else:
         if len(np_list) == 4:
             log.info(
                 'Warning: implicit parameters present but not enabled.'
                 ' Eliding them to avoid using untrained values.')
         self.element_iparams = None
     self._session = session
     self._initialized = True
Example #30
 def get_surface_samples_from_single_r2n2_depth_image(self, idx):
   """Computes surface samples from the idx-th depth_image."""
   depth_im = self.r2n2_depth_images[idx, ...]
   is_valid = depth_im != 0.0
   is_valid = np.reshape(is_valid, [224, 224])
   world_xyz = self.r2n2_xyz_images.copy()[idx, ...]
   world_n = self.r2n2_normal_world_images.copy()[idx, ...]
   world_xyzn = np.concatenate([world_xyz, world_n], axis=-1)
   world_xyzn = world_xyzn[is_valid, :]
   world_xyzn = np.reshape(world_xyzn, [-1, 6])
   np.random.shuffle(world_xyzn)
   point_count = world_xyzn.shape[0]
   assert point_count > 0
   log.info('The number of valid samples for idx %i is: %i' %
            (idx, point_count))
   while point_count < 10000:
     world_xyzn = np.tile(world_xyzn, [2, 1])
     point_count = world_xyzn.shape[0]
   return world_xyzn[:10000, :]