Code example #1
  def __init__(self, split, synset_or_cat, mesh_hash=None, dynamic=False,
               verbose=True):
    self.split = split
    self.synset, self.cat = _parse_synset_or_cat(synset_or_cat)
    self.mesh_hash = mesh_hash
    self._rgb_path = None
    self._rgb_image = None
    self.__archive = None
    self._uniform_samples = None
    self._near_surface_samples = None
    self._grid = None
    self._world2grid = None
    self._gt_path = None
    self._tx = None
    self._gaps_to_occnet = None
    self._gt_mesh = None
    self._tx_path = None
    self._surface_samples = None
    self._normalized_gt_mesh = None
    self._r2n2_images = None
    self.depth_native_res = 224

    self.is_from_directory = False

    if dynamic:
      if verbose:
        log.verbose(
            'Using dynamic files, not checking ahead for file existence.')
    elif not file_util.exists(self.npz_path):
      raise ValueError('Expected a .npz at %s.' % self.npz_path)
    else:
      log.info(self.npz_path)
Code example #2
def mesh_to_example(codebase_root_dir, mesh_path, dirpath, skip_existing,
                    log_level):
    # Logging level must be specified because mesh_to_example is an entry point
    # for a subprocess call.
    log.set_level(log_level)
    ldif_path = path_util.get_path_to_ldif_root()
    if not skip_existing or not os.path.isfile(
            f'{dirpath}/depth_and_normals.npz'):
        sp.check_output(
            f'{codebase_root_dir}/scripts/process_mesh_local.sh {mesh_path} {dirpath} {ldif_path}',
            shell=True)
        write_depth_and_normals_npz(dirpath,
                                    f'{dirpath}/depth_and_normals.npz')
    else:
        log.verbose(f'Skipping shell script processing for {dirpath},'
                    ' the output already exists.')
    # Precompute the dodeca samples for later:
    e = example.InferenceExample.from_directory(dirpath)
    sample_path = e.precomputed_surface_samples_from_dodeca_path
    if not skip_existing or not os.path.isfile(sample_path):
        e.surface_sample_count = 100000
        precomputed_samples = e.surface_samples_from_dodeca
        assert precomputed_samples.shape[0] == 100000
        assert precomputed_samples.shape[1] == 6
        file_util.write_points(sample_path, precomputed_samples)
    else:
        log.verbose(
            f'Skipping surface sample precomputation for {dirpath}, it\'s already done.'
        )
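
The pattern in example #2 (run an expensive step only when its output file is missing, unless skip_existing is disabled) can be isolated into a small helper. A minimal sketch under assumed names; run_if_missing, the command string, and the paths are hypothetical and not part of the LDIF codebase:

import os
import subprocess as sp

def run_if_missing(command, output_path, skip_existing=True):
    """Runs a shell command unless its expected output file already exists."""
    if skip_existing and os.path.isfile(output_path):
        print(f'Skipping, {output_path} already exists.')
        return
    # check_output raises CalledProcessError if the command fails.
    sp.check_output(command, shell=True)

# Hypothetical usage mirroring mesh_to_example:
# run_if_missing('./scripts/process_mesh_local.sh mesh.ply out_dir ldif_root',
#                'out_dir/depth_and_normals.npz')
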
Code example #3
File: experiment.py Project: yang-zhifei/ldif
    def __init__(self, model_dir, model_name, experiment_name):
        self.root = f'{model_dir}/{model_name}-{experiment_name}'
        self.model_name = model_name
        self.experiment_name = experiment_name

        if not file_util.exists(self.root):
            log.verbose('Regex expanding root to find experiment ID')
            options = file_util.glob(self.root[:-1] + '*')
            if len(options) != 1:
                log.verbose(
                    "Tried to glob for the directory but didn't find exactly one path. Found:"
                )
                log.verbose(options)
                raise ValueError('Directory not found: %s' % self.root)
            else:
                self.root = options[0] + '/'
                self.experiment_name = os.path.basename(self.root.strip('/'))
                self.experiment_name = self.experiment_name.replace(
                    self.model_name + '-', '')
                log.verbose('Expanded experiment name with regex to root: %s' %
                            self.root)

        job_strs = [
            os.path.basename(n) for n in file_util.glob(f'{self.root}/*')
        ]

        banned = ['log', 'mldash_config.txt', 'snapshot', 'mldash_config']
        job_strs = [p for p in job_strs if p not in banned]
        job_strs = sorted(job_strs, key=to_xid)
        log.verbose('Job strings: %s' % repr(job_strs))
        self.all_jobs = [Job(self, job_str) for job_str in job_strs]
        self._visible_jobs = self.all_jobs[:]
Code example #4
def _parse_synset_or_cat(synset_or_cat):
  """Attempts to turn wordnet synsets into readable class names."""
  if synset_or_cat in synset_to_cat:
    synset = synset_or_cat
  else:
    if synset_or_cat not in cat_to_synset:
      log.verbose(f'{synset_or_cat} is not a recognized class or synset. If you'
                  ' are not testing on shapenet-13 or a subset, this can be '
                  'safely ignored.')
      return synset_or_cat, synset_or_cat
    synset = cat_to_synset[synset_or_cat]
  cat = synset_to_cat[synset]
  return synset, cat
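
To see the three branches of _parse_synset_or_cat in isolation, here is a self-contained sketch with toy lookup tables; the real ShapeNet tables in the LDIF code are larger, and the two-entry dicts below are illustrative only:

# Toy stand-ins for the module-level synset_to_cat / cat_to_synset tables.
synset_to_cat = {'03001627': 'chair', '04379243': 'table'}
cat_to_synset = {v: k for k, v in synset_to_cat.items()}

def parse_synset_or_cat(synset_or_cat):
  """Returns (synset, cat) given either a synset ID or a readable name."""
  if synset_or_cat in synset_to_cat:
    synset = synset_or_cat
  elif synset_or_cat in cat_to_synset:
    synset = cat_to_synset[synset_or_cat]
  else:
    # Unrecognized classes pass through unchanged, as in the original.
    return synset_or_cat, synset_or_cat
  return synset, synset_to_cat[synset]

assert parse_synset_or_cat('chair') == ('03001627', 'chair')
assert parse_synset_or_cat('03001627') == ('03001627', 'chair')
assert parse_synset_or_cat('mug') == ('mug', 'mug')
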
Code example #5
File: experiment.py Project: yang-zhifei/ldif
 def copy_meshes(self, mesh_names, split, overwrite_if_present=True):
     """Copies meshes from the remote store to the local cache."""
     for xid in self.experiment.visible_xids:
         log.verbose('Copying filelist for xid #%i...' % xid)
         for mesh_name in mesh_names:
             local_path = self.local_path_to_mesh(mesh_name, xid, split)
             if os.path.isfile(local_path) and not overwrite_if_present:
                 continue
             local_dir = os.path.dirname(local_path)
             if not os.path.isdir(local_dir):
                 os.makedirs(local_dir)
             remote_path = self.remote_path_to_mesh(mesh_name, xid, split)
             if file_util.exists(local_path):
                 file_util.rm(local_path)
             file_util.cp(remote_path, local_path)
Code example #6
def mesh_to_example(codebase_root_dir, mesh_path, dirpath, skip_existing,
                    log_level):
    # Logging level must be specified because mesh_to_example is an entry point
    # for a subprocess call.
    log.set_level(log_level)
    ldif_path = path_util.get_path_to_ldif_root()
    if not skip_existing or not os.path.isfile(
            f'{dirpath}/depth_and_normals.npz'):
        sp.check_output(
            f'{codebase_root_dir}/scripts/process_mesh_local.sh {mesh_path} {dirpath} {ldif_path}',
            shell=True)
        # write_depth_and_normals_npz(dirpath, f'{dirpath}/depth_and_normals.npz')
    else:
        log.verbose(f'Skipping shell script processing for {dirpath},'
                    ' the output already exists.')
Code example #7
File: gpu_util.py Project: yang-zhifei/ldif
def get_free_gpu_memory(cuda_device_index):
    """Returns the current # of free megabytes for the specified device."""
    if sys.platform == "darwin":
        # No GPUs on darwin...
        return 0
    result = sp.check_output(
        'nvidia-smi --query-gpu=memory.free '
        '--format=csv,nounits,noheader',
        shell=True)
    result = result.decode('utf-8').split('\n')[:-1]
    log.verbose(f'The system has {len(result)} gpu(s).')
    # Validate the index before parsing so an out-of-range device fails clearly.
    if cuda_device_index >= len(result):
        raise ValueError(f"Couldn't parse result for GPU #{cuda_device_index}")
    free_mem = int(result[cuda_device_index])
    log.info(f'The {cuda_device_index}-th GPU has {free_mem} MB free.')
    return free_mem
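
The nvidia-smi query above returns one free-memory value per line, so the parsing can be exercised without a GPU by feeding it a canned string. A small sketch; parse_free_memory and the sample numbers are hypothetical:

def parse_free_memory(nvidia_smi_output, cuda_device_index):
    """Parses 'nvidia-smi --query-gpu=memory.free --format=csv,nounits,noheader' output."""
    lines = nvidia_smi_output.strip().split('\n')
    if cuda_device_index >= len(lines):
        raise ValueError(f"Couldn't parse result for GPU #{cuda_device_index}")
    return int(lines[cuda_device_index])

sample = '11019\n10240\n'  # Two GPUs; free megabytes per line (made-up values).
assert parse_free_memory(sample, 1) == 10240
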
Code example #8
def inputs_to_feature_vector(inputs, feature_length, model_config):
  """Encodes an input observation tensor to a fixed lengthh feature vector."""
  batch_size, image_count, height, width, channel_count = (
      inputs.get_shape().as_list())
  log.verbose('Input shape to early-fusion cnn: %s' %
              str(inputs.get_shape().as_list()))
  if image_count == 1:
    im = tf.reshape(inputs, [batch_size, height, width, channel_count])
  else:
    im = tf.reshape(
        tf.transpose(inputs, perm=[0, 2, 3, 1, 4]),
        [batch_size, height, width, image_count * channel_count])
  embedding = encode(
      im,
      model_config,
      conv_layer_depths=[16, 32, 64, 128, 128],
      fc_layer_depths=[feature_length])
  return embedding
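
The key step in inputs_to_feature_vector is the early-fusion reshape: a multi-view batch [batch, images, height, width, channels] is transposed and flattened to [batch, height, width, images * channels] so every view enters a single CNN stack. A NumPy sketch of just that shape manipulation; the dimensions are arbitrary examples:

import numpy as np

batch_size, image_count, height, width, channels = 2, 20, 224, 224, 1
inputs = np.zeros([batch_size, image_count, height, width, channels])

# Move the image axis next to the channel axis, then merge the two.
fused = np.transpose(inputs, [0, 2, 3, 1, 4]).reshape(
    [batch_size, height, width, image_count * channels])
assert fused.shape == (2, 224, 224, 20)
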
Code example #9
File: local_inputs.py Project: yang-zhifei/ldif
def _make_optimized_dataset(directory, batch_size, mode, split):
    filenames = glob.glob(f'{directory}/optimized/{split}/*.tfrecords')
    log.verbose(f'Making dataset from the following files: {filenames}')
    dataset = tf.data.TFRecordDataset(filenames=filenames,
                                      compression_type='GZIP',
                                      buffer_size=None,
                                      num_parallel_reads=8)
    log.info('Mapping...')
    if mode == 'train':
        dataset = dataset.shuffle(buffer_size=2 * batch_size)
        dataset = dataset.repeat()

    dataset = dataset.map(process_element.parse_tf_example,
                          num_parallel_calls=os.cpu_count())

    dataset = dataset.batch(batch_size, drop_remainder=True).prefetch(1)

    dataset_items = tf.compat.v1.data.make_one_shot_iterator(
        dataset).get_next()
    return build_dataset_obj(dataset_items, batch_size)
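
The pipeline shape used above (shuffle, repeat, parallel map, batch with drop_remainder, prefetch) can be reproduced on an in-memory dataset, which makes it runnable without the LDIF TFRecords. A sketch written against the TF2 tf.data API rather than the TF1 one-shot iterator used in local_inputs.py:

import os
import tensorflow as tf

batch_size = 4
dataset = tf.data.Dataset.from_tensor_slices(tf.range(100))
dataset = dataset.shuffle(buffer_size=2 * batch_size).repeat()
dataset = dataset.map(lambda x: x * 2, num_parallel_calls=os.cpu_count())
dataset = dataset.batch(batch_size, drop_remainder=True).prefetch(1)

for batch in dataset.take(2):
    print(batch.numpy())  # Two batches of four elements each.
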
Code example #10
 def surface_samples_from_dodeca(self):
   """10K surface point samples with normals computed from the dodecahedron."""
   if not hasattr(self, '_surface_samples_from_dodeca'):
     depth_ims = self.depth_images.copy() / 1000.0
     is_valid = depth_ims != 0.0
     is_valid = np.reshape(is_valid, [20, 224, 224])
     world_xyz = self.world_xyz_images_from_dodeca.copy()
     world_n = self.world_normal_images.copy()
     world_xyzn = np.concatenate([world_xyz, world_n], axis=-1)
     world_xyzn = world_xyzn[is_valid, :]
     world_xyzn = np.reshape(world_xyzn, [-1, 6])
     np.random.shuffle(world_xyzn)
     assert world_xyzn.shape[0] > 1
     while world_xyzn.shape[0] < self.surface_sample_count:
       log.verbose(f'Tiling samples from {world_xyzn.shape[0]} to'
                   f' {2*world_xyzn.shape[0]}')
       world_xyzn = np.tile(world_xyzn, [2, 1])
     self._surface_samples_from_dodeca = world_xyzn[:self
                                                    .surface_sample_count, :]
   return self._surface_samples_from_dodeca
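
The tiling loop above guarantees at least surface_sample_count rows by repeatedly doubling a shuffled point set and then slicing. A standalone NumPy sketch of that pattern; take_n_samples and the random input are illustrative:

import numpy as np

def take_n_samples(points, n):
    """Shuffles, doubles until at least n rows exist, then slices off n rows."""
    points = points.copy()
    np.random.shuffle(points)
    assert points.shape[0] > 1
    while points.shape[0] < n:
        points = np.tile(points, [2, 1])
    return points[:n, :]

samples = take_n_samples(np.random.rand(1000, 6), 10000)  # xyz + normal rows.
assert samples.shape == (10000, 6)
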
Code example #11
def process_one(f, mesh_directory, dataset_directory, skip_existing,
                log_level):
    """Processes a single mesh, adding it to the dataset."""

    print('[HERE: In meshes2dataset.process_one] Entering a process_one...')

    relpath = f.replace(mesh_directory, '')
    assert relpath[0] == '/'
    relpath = relpath[1:]
    split, synset = relpath.split('/')[:2]

    log.verbose(f'The split is {split} and the synset is {synset}')

    name = os.path.basename(f)
    name, extension = os.path.splitext(name)
    valid_extensions = ['.ply']

    if extension not in valid_extensions:
        raise ValueError(
            f'File with unsupported extension {extension} found: {f}.'
            f' Only {valid_extensions} are supported.')

    output_dir = f'{dataset_directory}/{split}/{synset}/{name}/'
    # This is the last file the processing writes, if it already exists the
    # example has already been processed.
    final_file_written = f'{output_dir}/depth_and_normals.npz'

    print(
        '[HERE: In meshes2dataset.process_one] f = %s\n' % f +
        '[HERE: In meshes2dataset.process_one] | relpath = %s, split = %s, synset = %s\n'
        % (relpath, split, synset) +
        '[HERE: In meshes2dataset.process_one] | mesh_to_example arg[0] = %s\n'
        % os.path.join(path_util.get_path_to_ldif_parent(), 'ldif') +
        '[HERE: In meshes2dataset.process_one] | output_dir = %s' % output_dir)

    make_example.mesh_to_example(
        os.path.join(path_util.get_path_to_ldif_parent(), 'ldif'), f,
        f'{dataset_directory}/{split}/{synset}/{name}/', skip_existing,
        log_level)

    return output_dir
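
process_one derives the split, synset, and mesh name purely from the mesh's position under mesh_directory. A small sketch of that path parsing; parse_mesh_relpath and the example paths are hypothetical:

import os

def parse_mesh_relpath(mesh_path, mesh_directory):
    """Derives (split, synset, name) from <mesh_directory>/<split>/<synset>/<name>.ply."""
    relpath = mesh_path.replace(mesh_directory, '').lstrip('/')
    split, synset = relpath.split('/')[:2]
    name, extension = os.path.splitext(os.path.basename(mesh_path))
    if extension != '.ply':
        raise ValueError(f'File with unsupported extension {extension} found: {mesh_path}.')
    return split, synset, name

assert parse_mesh_relpath('/data/train/03001627/abc123.ply', '/data') == (
    'train', '03001627', 'abc123')
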
Code example #12
 def precomputed_surface_samples_from_dodeca(self):
     if not hasattr(self, '_precomputed_surface_samples_from_dodeca'):
         if not self.is_from_directory:
             raise ValueError('Precomputed surface samples are only'
                              ' available with a from_directory example.')
         if not os.path.isfile(
                 self.precomputed_surface_samples_from_dodeca_path):
             raise ValueError(
                 'Dodeca surface samples have not been precomputed at '
                 f'{self.precomputed_surface_samples_from_dodeca_path}')
         full_samples = np.load(
             self.precomputed_surface_samples_from_dodeca_path)
         np.random.shuffle(full_samples)
         assert full_samples.shape[0] > 1
         while full_samples.shape[0] < self.surface_sample_count:
             log.verbose(f'Doubling samples from {full_samples.shape[0]} to'
                         f' {2*full_samples.shape[0]}')
             full_samples = np.tile(full_samples, [2, 1])
         self._precomputed_surface_samples_from_dodeca = (
             full_samples[:self.surface_sample_count, :])
     return self._precomputed_surface_samples_from_dodeca
Code example #13
File: hparams.py Project: yang-zhifei/ldif
def read_hparams_with_new_backwards_compatible_additions(path):
  """Reads hparams from a file and adds in any new backwards compatible ones."""
  kvs = pickle.loads(file_util.readbin(path))
  log.verbose('Loaded %s' % repr(kvs))
  if 'ident' not in kvs:
    ident = 'sif'  #  A default identifier for old checkpoints.
    log.info('Default ident!')
  else:
    ident = kvs['ident']
  new_additions = backwards_compatible_hparam_defaults(ident)
  for k, v in new_additions.items():
    if k not in kvs:
      log.verbose('Adding hparam %s:%s since it postdates the checkpoint.' %
                  (k, str(v)))
      kvs[k] = v
  # r50 is no longer supported, replace it:
  if 'cnna' in kvs and kvs['cnna'] == 'r50':
    kvs['cnna'] = 's50'
  hparams = tf.contrib.training.HParams(**kvs)

  return hparams
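
Stripped of the pickle and HParams machinery, the backwards-compatibility step is a one-way dict merge: defaults that postdate the checkpoint are added, existing values are never overridden. A minimal sketch with illustrative keys:

def add_backwards_compatible_defaults(kvs, new_defaults):
    """Adds hparams that postdate a checkpoint without overriding stored ones."""
    merged = dict(kvs)
    for k, v in new_defaults.items():
        if k not in merged:
            merged[k] = v
    return merged

stored = {'ident': 'sif', 'lr': 5e-5}
defaults = {'lr': 1e-4, 'cnna': 'cnn'}
assert add_backwards_compatible_defaults(stored, defaults) == {
    'ident': 'sif', 'lr': 5e-5, 'cnna': 'cnn'}
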
Code example #14
 def precomputed_surface_samples_from_dodeca(self):
   if not hasattr(self, '_precomputed_surface_samples_from_dodeca'):
     if not self.is_from_directory:
       raise ValueError('Precomputed surface samples are only'
                        ' available with a from_directory example.')
     if not os.path.isfile(self.precomputed_surface_samples_from_dodeca_path):
       raise ValueError('Dodeca surface samples have not been precomputed at '
                        f'{self.precomputed_surface_samples_from_dodeca_path}')
     full_samples = gaps_util.read_pts_file(
         self.precomputed_surface_samples_from_dodeca_path)
     orig_count = 100000
     assert full_samples.shape[0] == orig_count
     assert full_samples.shape[1] == 6
     assert full_samples.dtype == np.float32
     assert full_samples.shape[0] > 1
     while full_samples.shape[0] < self.surface_sample_count:
       log.verbose(f'Doubling samples from {full_samples.shape[0]} to'
                   f' {2*full_samples.shape[0]}')
       full_samples = np.tile(full_samples, [2, 1])
     self._precomputed_surface_samples_from_dodeca = (
         full_samples[np.random.choice(orig_count, self.surface_sample_count, replace=False), :])
   return self._precomputed_surface_samples_from_dodeca
Code example #15
File: extract_mesh.py Project: yang-zhifei/ldif
def marching_cubes(volume, mcubes_extent):
    """Maps from a voxel grid of implicit surface samples to a Trimesh mesh."""
    volume = np.squeeze(volume)
    length, height, width = volume.shape
    resolution = length
    # This function doesn't support non-cube volumes:
    assert resolution == height and resolution == width
    thresh = -0.07
    try:
        vertices, faces, normals, _ = measure.marching_cubes_lewiner(
            volume, thresh)
        del normals
        x, y, z = [np.array(x) for x in zip(*vertices)]
        xyzw = np.stack([x, y, z, np.ones_like(x)], axis=1)
        # Center the volume around the origin:
        xyzw += np.array(
            [[-resolution / 2.0, -resolution / 2.0, -resolution / 2.0, 0.]])
        # This assumes the world is right handed with y up; matplotlib's renderer
        # has z up and is left handed:
        # Reflect across z, rotate about x, and rescale to [-0.5, 0.5].
        xyzw *= np.array([[(2.0 * mcubes_extent) / resolution,
                           (2.0 * mcubes_extent) / resolution,
                           -1.0 * (2.0 * mcubes_extent) / resolution, 1]])
        y_up_to_z_up = np.array([[0., 0., -1., 0.], [0., 1., 0., 0.],
                                 [1., 0., 0., 0.], [0., 0., 0., 1.]])
        xyzw = np.matmul(y_up_to_z_up, xyzw.T).T
        faces = np.stack([faces[..., 0], faces[..., 2], faces[..., 1]],
                         axis=-1)
        world_space_xyz = np.copy(xyzw[:, :3])
        mesh = trimesh.Trimesh(vertices=world_space_xyz, faces=faces)
        log.verbose('Generated mesh successfully.')
        return True, mesh
    except (ValueError, RuntimeError) as e:
        log.warning(
            'Failed to extract mesh with error %s. Setting to unit sphere.' %
            repr(e))
        return False, trimesh.primitives.Sphere(radius=0.5)
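
To exercise the marching-cubes step without a trained network, the same extraction can be run on a synthetic signed-distance grid. A sketch that uses skimage's current measure.marching_cubes in place of the deprecated marching_cubes_lewiner; the sphere radius, grid resolution, and level are illustrative:

import numpy as np
import trimesh
from skimage import measure

resolution = 64
coords = np.linspace(-0.5, 0.5, resolution)
x, y, z = np.meshgrid(coords, coords, coords, indexing='ij')
volume = np.sqrt(x**2 + y**2 + z**2) - 0.35  # Signed distance to a sphere.

vertices, faces, _, _ = measure.marching_cubes(volume, level=0.0)
mesh = trimesh.Trimesh(vertices=vertices, faces=faces)
print(mesh.is_watertight, len(mesh.faces))
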
Code example #16
File: train.py Project: Sumith1896/ldif
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')
    tf.disable_v2_behavior()
    log.set_level(FLAGS.log_level)

    log.info('Making dataset...')
    if not FLAGS.dataset_directory:
        raise ValueError('A dataset directory must be provided.')
    if not os.path.isdir(FLAGS.dataset_directory):
        raise ValueError(
            f'No dataset directory found at {FLAGS.dataset_directory}')
    # TODO(kgenova) This batch size should match.
    dataset = local_inputs.make_dataset(FLAGS.dataset_directory,
                                        mode='train',
                                        batch_size=FLAGS.batch_size,
                                        split=FLAGS.split)

    # Sets up the hyperparameters and tf.Dataset
    model_config = build_model_config(dataset)

    # Generates the graph for a single train step, including summaries
    shared_launcher.sif_transcoder(model_config)
    summary_op = tf.summary.merge_all()
    global_step_op = tf.compat.v1.train.get_global_step()

    saver = tf.train.Saver(max_to_keep=5,
                           pad_step_number=False,
                           save_relative_paths=True)

    init_op = tf.initialize_all_variables()

    model_root = get_model_root()

    experiment_dir = f'{model_root}/sif-transcoder-{FLAGS.experiment_name}'
    checkpoint_dir = f'{experiment_dir}/1-hparams/train/'

    if FLAGS.reserve_memory_for_inference_kernel and sys.platform != "darwin":
        current_free = gpu_util.get_free_gpu_memory(gpu_id_to_use)
        allowable = current_free - (1024 + 512)  # Reserve ~1.5GB.
        allowable_fraction = allowable / current_free
        if allowable_fraction <= 0.0:
            raise ValueError(
                f"Can't leave ~1.5GB over for the inference kernel, because"
                f" there is only {current_free} MB of free GPU memory.")
        log.info(
            f'TensorFlow can use up to {allowable_fraction*100}% of the total'
            ' GPU memory.')
    else:
        allowable_fraction = 1.0
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=allowable_fraction)

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as session:
        writer = tf.summary.FileWriter(f'{experiment_dir}/log', session.graph)
        log.info('Initializing variables...')
        session.run([init_op])

        if FLAGS.visualize:
            visualize_data(session, model_config.inputs['dataset'])

        # Check whether the checkpoint directory already exists (resuming) or
        # needs to be created (new model).
        if not os.path.isdir(checkpoint_dir):
            log.info('No previous checkpoint detected, training from scratch.')
            os.makedirs(checkpoint_dir)
            # Serialize hparams so eval can load them:
            hparam_path = f'{checkpoint_dir}/hparam_pickle.txt'
            if not file_util.exists(hparam_path):
                hparams.write_hparams(model_config.hparams, hparam_path)
            initial_index = 0
        else:
            log.info(
                f'Checkpoint root {checkpoint_dir} exists, attempting to resume.'
            )
            latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
            log.info(f'Latest checkpoint: {latest_checkpoint}')
            saver.restore(session, latest_checkpoint)
            initial_index = session.run(global_step_op)
            log.info(f'The global step is {initial_index}')
            initial_index = int(initial_index)
            log.info(f'Parsed to {initial_index}')
        start_time = time.time()
        log_every = 10
        for i in range(initial_index, FLAGS.train_step_count):
            log.verbose(f'Starting step {i}...')
            is_summary_step = i % FLAGS.summary_step_interval == 0
            if is_summary_step:
                _, summaries, loss = session.run(
                    [model_config.train_op, summary_op, model_config.loss])
                writer.add_summary(summaries, i)
            else:
                _, loss = session.run(
                    [model_config.train_op, model_config.loss])
            if not (i % log_every):
                end_time = time.time()
                steps_per_second = float(log_every) / (end_time - start_time)
                start_time = end_time
                log.info(
                    f'Step: {i}\tLoss: {loss}\tSteps/second: {steps_per_second}'
                )

            is_checkpoint_step = i % FLAGS.checkpoint_interval == 0
            if is_checkpoint_step or i == FLAGS.train_step_count - 1:
                ckpt_path = os.path.join(checkpoint_dir, 'model.ckpt')
                log.info(f'Writing checkpoint to {ckpt_path}...')
                saver.save(session, ckpt_path, global_step=i)
        log.info('Done training!')
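
The memory-splitting logic near the top of main() reduces to a small fraction computation: subtract the reserve from the currently free memory and hand TensorFlow the remaining share. A sketch of just that arithmetic; tf_memory_fraction and the 8000 MB figure are hypothetical:

def tf_memory_fraction(free_mb, reserve_mb=1024 + 512):
    """Fraction of free GPU memory TensorFlow may claim after reserving ~1.5GB."""
    allowable = free_mb - reserve_mb
    if allowable <= 0:
        raise ValueError(f'Only {free_mb} MB free; cannot reserve {reserve_mb} MB.')
    return allowable / free_mb

assert abs(tf_memory_fraction(8000) - 6464 / 8000) < 1e-9
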
Code example #17
File: cnn.py Project: yang-zhifei/ldif
def early_fusion_cnn(observation, element_count, element_length, model_config):
  """A cnn that maps 1+ images with 1+ chanels to a feature vector."""
  inputs = observation.tensor
  if model_config.hparams.cnna == 'cnn':
    embedding = net_util.inputs_to_feature_vector(inputs, 1024, model_config)
  elif model_config.hparams.cnna in ['r18', 'r50', 'h50', 'k50', 's50']:
    batch_size, image_count, height, width, channel_count = (
        inputs.get_shape().as_list())
    log.verbose('Input shape to early-fusion cnn: %s' %
                str(inputs.get_shape().as_list()))
    if image_count == 1:
      im = tf.reshape(inputs, [batch_size, height, width, channel_count])
    else:
      im = tf.reshape(
          tf.transpose(inputs, perm=[0, 2, 3, 1, 4]),
          [batch_size, height, width, image_count * channel_count])
    if model_config.hparams.cnna == 'r18':
      raise ValueError('ResNet18 is no longer supported.')
    elif model_config.hparams.cnna == 'r50':
      raise ValueError('r50 network is no longer supported.')
    elif model_config.hparams.cnna == 'k50':
      log.warning('Using a keras based model.')
      resnet = tf.compat.v1.keras.applications.ResNet50V2(
          include_top=False,
          weights=None,
          input_tensor=None,
          input_shape=(height, width, image_count * channel_count),
          pooling=None)
      embedding = resnet(im)
    elif model_config.hparams.cnna == 's50':
      with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        embedding_prenorm, _ = contrib_slim_resnet_v2.resnet_v2_50(
            inputs=im,
            num_classes=2048,
            is_training=model_config.train,
            global_pool=True,
            reuse=tf.AUTO_REUSE,
            scope='resnet_v2_50')
        embedding_prenorm = tf.reshape(embedding_prenorm,
                                       [model_config.hparams.bs, 2048])
        embedding = tf.nn.l2_normalize(embedding_prenorm, axis=1)
    elif model_config.hparams.cnna == 'h50':
      log.warning('TF Hub not tested externally.')
      resnet = hub.Module(
          'https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/1',
          trainable=True)
      expected_height, expected_width = hub.get_expected_image_size(resnet)
      if channel_count == 1:
        im = tf.tile(im, [1, 1, 1, 3])
      if height != expected_height or width != expected_width:
        raise ValueError(
            ('The input tensor has shape %s, but this tf.Hub()'
             ' r50 expects [%i, %i, 3].') %
            (repr(im.get_shape().as_list()), expected_height, expected_width))
      embedding = resnet(im)
    log.verbose('Embedding shape: %s' % repr(embedding.get_shape().as_list()))
    current_total_dimensionality = functools.reduce(
        lambda x, y: x * y,
        embedding.get_shape().as_list()[1:])
    embedding = tf.reshape(
        embedding, [model_config.hparams.bs, current_total_dimensionality])

  net = embedding
  normalizer, normalizer_params = net_util.get_normalizer_and_mode(model_config)
  for _ in range(2):
    net = contrib_layers.fully_connected(
        inputs=net,
        num_outputs=2048,
        activation_fn=tf.nn.leaky_relu,
        normalizer_fn=normalizer,
        normalizer_params=normalizer_params)
  prediction = contrib_layers.fully_connected(
      inputs=net,
      num_outputs=element_count * element_length,
      activation_fn=None,
      normalizer_fn=None)
  batch_size = inputs.get_shape().as_list()[0]
  prediction = tf.reshape(prediction,
                          [batch_size, element_count, element_length])
  return prediction, embedding
Code example #18
def load_example_dict(example_directory, log_level=None):
    """Loads an example from disk and makes a str:numpy dictionary out of it."""
    if log_level:
        log.set_level(log_level)
    entry_t = time.time()
    start_t = entry_t  # Keep the function entry time around for a cumulative print.
    e = example.InferenceExample.from_directory(example_directory,
                                                verbose=False)
    end_t = time.time()
    log.verbose(f'Make example: {end_t - start_t}')
    start_t = end_t

    # The from_directory method should probably optionally take in a synset.
    bounding_box_samples = e.uniform_samples
    end_t = time.time()
    log.verbose(f'Bounding box: {end_t - start_t}')
    start_t = end_t
    # TODO(kgenova) There is a pitfall here where the depth is divided by 1000,
    # after this. So if some other depth images are provided, they would either
    # need to also be stored in the GAPS format or be artificially multiplied
    # by 1000.
    depth_renders = e.depth_images  # [20, 224, 224, 1]. 1 or 1000? trailing 1?
    assert depth_renders.shape[0] == 1
    depth_renders = depth_renders[0, ...]
    end_t = time.time()
    log.verbose(f'Depth renders: {end_t - start_t}')
    start_t = end_t

    mesh_name = e.mesh_name
    end_t = time.time()
    log.verbose(f'Mesh name: {end_t - start_t}')
    start_t = end_t

    log.verbose(f'Loading {mesh_name} from split {e.split}')
    near_surface_samples = e.near_surface_samples
    end_t = time.time()
    log.verbose(f'NSS: {end_t - start_t}')

    start_t = end_t
    grid = e.grid
    end_t = time.time()
    log.verbose(f'Grid: {end_t - start_t}')
    start_t = end_t

    world2grid = e.world2grid
    end_t = time.time()
    log.verbose(f'world2grid: {end_t - start_t}')
    start_t = end_t

    surface_point_samples = e.precomputed_surface_samples_from_dodeca
    end_t = time.time()
    log.verbose(f'surface points: {end_t - start_t}')
    log.verbose(f'load_example_dict total time: {end_t - entry_t}')
    return {
        'bounding_box_samples': bounding_box_samples,
        'depth_renders': depth_renders,
        'mesh_name': mesh_name,
        'near_surface_samples': near_surface_samples,
        'grid': grid,
        'world2grid': world2grid,
        'surface_point_samples': surface_point_samples,
    }
Code example #19
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    log.set_level(FLAGS.log_level)
    tf.disable_v2_behavior()

    gpu_util.get_free_gpu_memory(0)
    if FLAGS.use_gpu_for_tensorflow and FLAGS.use_inference_kernel:
        log.info('Limiting TensorFlow memory by 1GB so the inference kernel'
                 ' has enough left over to run.')

    if not FLAGS.dataset_directory:
        raise ValueError('A dataset directory must be provided.')
    if not FLAGS.result_directory:
        if FLAGS.save_results or FLAGS.save_meshes or FLAGS.save_ldifs:
            raise ValueError(
                'A result directory must be provided to save results.')
    else:
        if not os.path.isdir(FLAGS.result_directory):
            os.makedirs(FLAGS.result_directory)
    if not FLAGS.use_gpu_for_tensorflow:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

    log.info('Loading model...')
    # Try to detect the most common error early for a good warning message:
    if not os.path.isdir(get_model_root()):
        raise ValueError(
            f"Couldn't find a trained model at {get_model_root()}")
    encoder, decoder = load_newest_model()

    log.info('Evaluating metrics...')
    splits = [x for x in FLAGS.split.split(',') if x]
    log.info(f'Will evaluate on splits: {splits}')
    for split in splits:
        log.info(f'Starting evaluation for split {split}.')
        dataset_items = get_evaluation_directories(split)
        log.info(f'The split has {len(dataset_items)} elements.')
        results = []
        to_eval = filter_by_class(dataset_items)
        to_eval = filter_by_eval_frac(to_eval)
        for path in tqdm.tqdm(to_eval):
            e = examples.InferenceExample.from_directory(path)
            embedding = encoder.run_example(e)
            iou = decoder.iou(embedding, e)
            gt_mesh = e.gt_mesh
            mesh = decoder.extract_mesh(embedding, resolution=FLAGS.resolution)
            if FLAGS.visualize:
                # Visualize in the normalized_coordinate frame, so the camera is
                # always reasonable. Metrics are computed in the original frame.
                gaps_util.mshview([e.normalized_gt_mesh, mesh])

            # TODO(kgenova) gaps2occnet is poorly named, it is really normalized ->
            # unnormalized (where 'gaps' is the normalized training frame and 'occnet'
            # is whatever the original frame of the input mesh was)
            post_extract_start = time.time()
            mesh.apply_transform(e.gaps2occnet)

            if FLAGS.save_meshes:
                path = (f'{FLAGS.result_directory}/meshes/{split}/{e.cat}/'
                        f'{e.mesh_hash}.ply')
                if not os.path.isdir(os.path.dirname(path)):
                    os.makedirs(os.path.dirname(path))
                mesh.export(path)
            if FLAGS.save_ldifs:
                path = (f'{FLAGS.result_directory}/ldifs/{split}/{e.cat}/'
                        f'{e.mesh_hash}.txt')
                if not os.path.isdir(os.path.dirname(path)):
                    os.makedirs(os.path.dirname(path))
                decoder.savetxt(embedding, path)

            nc, fst, fs2t, chamfer = metrics.all_mesh_metrics(mesh, gt_mesh)
            log.verbose(f'Mesh: {e.mesh_name}')
            log.verbose(f'IoU: {iou}.')
            log.verbose(f'F-Score (tau): {fst}')
            log.verbose(f'Chamfer: {chamfer}')
            log.verbose(f'F-Score (2*tau): {fs2t}')
            log.verbose(f'Normal Consistency: {nc}')
            results.append({
                'key': e.mesh_name,
                'Normal Consistency': nc,
                'F-Score (tau)': fst,
                'F-Score (2*tau)': fs2t,
                'Chamfer': chamfer,
                'IoU': iou
            })
            post_extract_end = time.time()
            log.verbose(
                f'Time post extract: {post_extract_end - post_extract_start}')
        results = pd.DataFrame(results)
        if FLAGS.save_results:
            complete_csv = results.to_csv()
            result_path = f'{FLAGS.result_directory}/full_results_{split}.csv'
            file_util.writetxt(result_path, complete_csv)
        final_results = metrics.aggregate_extracted(results)
        if FLAGS.save_results:
            summary_out_path = f'{FLAGS.result_directory}/result_summary_{split}.csv'
            file_util.writetxt(summary_out_path, final_results.to_csv())
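
The per-mesh dictionaries collected in the loop above become the full and summary CSVs through pandas. A minimal sketch; the numbers are invented and the mean-based summary only stands in for metrics.aggregate_extracted:

import pandas as pd

results = [
    {'key': 'mesh_a', 'IoU': 0.81, 'Chamfer': 0.012},
    {'key': 'mesh_b', 'IoU': 0.74, 'Chamfer': 0.020},
]
df = pd.DataFrame(results)
full_csv = df.to_csv(index=False)  # Analogue of full_results_<split>.csv
summary_csv = df.drop(columns=['key']).mean().to_frame('mean').to_csv()
print(full_csv)
print(summary_csv)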