Example #1
import torch
from tqdm import tqdm

from bop_toolkit_lib import inout


def convert_results(results_path, out_csv_path, method):
    predictions = torch.load(results_path)['predictions']
    predictions = predictions[method]
    print("Predictions from:", results_path)
    print("Method:", method)
    print("Number of predictions:", len(predictions))

    preds = []
    for n in tqdm(range(len(predictions))):
        TCO_n = predictions.poses[n]
        t = TCO_n[:3, -1] * 1e3  # m -> mm conversion
        R = TCO_n[:3, :3]
        row = predictions.infos.iloc[n]
        obj_id = int(row.label.split('_')[-1])
        score = row.score
        time = row.time
        pred = dict(scene_id=row.scene_id,
                    im_id=row.view_id,
                    obj_id=obj_id,
                    score=score,
                    t=t, R=R, time=time)
        preds.append(pred)
    print("Wrote:", out_csv_path)
    inout.save_bop_results(out_csv_path, preds)
    return out_csv_path
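A minimal usage sketch; the file paths and the method key below are placeholders, and the key must match one actually stored in the loaded predictions dict:

# Hypothetical call; 'refiner/final' is an assumed method key.
convert_results('predictions.pth', 'submission.csv', method='refiner/final')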
Example #2
    def evaluate(self, res_file, out_dir=None, renderer_type='python'):
        """Evaluates BOP metrics given a result file.

        Args:
          res_file: Path to the result file.
          out_dir: Path to the output directory.
          renderer_type: Renderer type. 'python' or 'cpp'.

        Returns:
          A dictionary holding the results.

        Raises:
          RuntimeError: If BOP evaluation failed.
        """
        if out_dir is None:
            out_dir = self._out_dir

        ests = inout.load_bop_results(res_file)
        ests = [self._convert_pose_to_bop(est) for est in ests]
        res_name = os.path.splitext(os.path.basename(res_file))[0]
        bop_res_name = 'bop-{}_{}-{}'.format(res_name.replace('_', '-'),
                                             self._setup, self._split)
        bop_res_file = os.path.join(out_dir, "{}.csv".format(bop_res_name))
        inout.save_bop_results(bop_res_file, ests)

        eval_cmd = [
            'python',
            os.path.join('scripts', 'eval_bop19.py'),
            '--renderer_type={}'.format(renderer_type),
            '--result_filenames={}'.format(bop_res_file),
            '--results_path={}'.format(out_dir),
            '--eval_path={}'.format(out_dir),
        ]
        cwd = "bop_toolkit"
        env = os.environ.copy()
        env['PYTHONPATH'] = "."
        env['BOP_PATH'] = self._bop_dir

        if subprocess.run(eval_cmd, cwd=cwd, env=env).returncode != 0:
            raise RuntimeError('BOP evaluation failed.')

        log_file = os.path.join(
            out_dir, "bop_eval_{}_{}.log".format(self._name, res_name))
        logger = get_logger(log_file)

        results = {}
        results['all'] = self._derive_bop_results(out_dir, bop_res_name, False,
                                                  logger)
        results['grasp_only'] = self._derive_bop_results(
            out_dir, bop_res_name, True, logger)

        logger.info('Evaluation complete.')

        return results
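A usage sketch; the enclosing class and its constructor are not shown in this snippet, so `evaluator` below stands for an already-constructed instance:

# `evaluator` is assumed to be an instance of the enclosing class.
results = evaluator.evaluate('my-method_results.csv', renderer_type='python')
print(results['all'])         # derived with grasp_only=False
print(results['grasp_only'])  # derived with grasp_only=True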
Example #3
from bop_toolkit_lib import inout


def tc_to_csv(predictions, csv_path):
    preds = []
    for n in range(len(predictions)):
        TCO_n = predictions.poses[n]
        t = TCO_n[:3, -1] * 1e3  # m -> mm conversion
        R = TCO_n[:3, :3]
        row = predictions.infos.iloc[n]
        obj_id = int(row.label.split('_')[-1])
        score = row.score
        time = -1.0  # runtime not measured
        pred = dict(scene_id=row.scene_id,
                    im_id=row.view_id,
                    obj_id=obj_id,
                    score=score,
                    t=t,
                    R=R,
                    time=time)
        preds.append(pred)
    inout.save_bop_results(csv_path, preds)
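For reference, a self-contained sketch of the row format that inout.save_bop_results expects (dummy values; t is in millimeters and R is a 3x3 rotation matrix):

import numpy as np
from bop_toolkit_lib import inout

row = dict(scene_id=1, im_id=1, obj_id=1, score=1.0,
           R=np.eye(3), t=np.array([0.0, 0.0, 1000.0]),  # translation in mm
           time=-1.0)  # -1 signals an unmeasured runtime
inout.save_bop_results('example.csv', [row])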
Example #4
        time_spend = time.time() - t1  # stop timing the computation for this image
        total_inst = 0
        n_inst = np.sum(inst_counts)
    else:
        continue    
    
    for result_id in sorted_id:
        total_inst += 1
        if task_type == '2' and total_inst > n_inst:  # for the ViVo task
            break
        obj_id = result_objid[result_id]
        R = result_R[result_id].flatten()
        t = result_t[result_id].flatten()
        score = result_score[result_id]
        obj_gt_no = obj_id_targets.index(obj_id)
        inst_count_est[obj_gt_no] += 1
        if task_type == '2' and inst_count_est[obj_gt_no] > inst_counts[obj_gt_no]:
            # skip if the result exceeds the number of target instances (ViVo task)
            continue
        result_temp = {'scene_id': scene_id, 'im_id': im_id, 'obj_id': obj_id,
                       'score': score, 'R': R, 't': t, 'time': time_spend}
        result_dataset.append(result_temp)

if dataset == 'tless':
    output_path = os.path.join(output_dir, "pix2pose-iccv19_" + dataset + "-test-primesense.csv")
else:
    output_path = os.path.join(output_dir, "pix2pose-iccv19_" + dataset + "-test.csv")

print("Saving the result to", output_path)
inout.save_bop_results(output_path, result_dataset)
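The BOP19 format requires all estimates of one image to report the same runtime, which is why time_spend is computed once per image above; a quick sanity check over the collected results (assuming result_dataset from the snippet):

from collections import defaultdict

times_per_image = defaultdict(set)
for res in result_dataset:
    times_per_image[(res['scene_id'], res['im_id'])].add(res['time'])
assert all(len(ts) == 1 for ts in times_per_image.values())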

Example #5
        aae_start = time.time()
        pose_est = mp_pose_estimator.process(det, img_masked, cam_K, mm=True)
        aae_time += (time.time() - aae_start)

        # Pose refinement (ICP on the depth image).
        if pose_refiner_method:
            # cv2.imshow('depth_mask', depth_masked / depth_img.max())
            # cv2.imshow('depth_img', depth_img / depth_img.max())
            # cam_K[0,2] = depth_img.shape[1] / 2
            # cam_K[1,2] = depth_img.shape[0] / 2
            icp_start = time.time()
            pose_est = pose_refiner.process(pose_est,
                                            depth_img=depth_img,
                                            camK=cam_K,
                                            masks=inst_mask)
            icp_time += (time.time() - icp_start)

        if pose_est:
            img_bop_res.append(
                convert_rmc2bop(pose_est[0], det[0], scene_id, im_id))

        img_pose_ests += pose_est
        img_dets += det
        # except:
        #     print((im_id,'not found'))

res_path = os.path.join(
    result_folder, 'sundermeyer-{}_{}-{}.csv'.format(args.eval_name,
                                                     dataset_name, split))
inout.save_bop_results(res_path, bop_results)
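convert_rmc2bop is not shown in this snippet; a plausible sketch, assuming pose_est entries are 4x4 pose matrices already in millimeters (mm=True above) and det entries are dicts with obj_id and score fields (all field names are assumptions):

def convert_rmc2bop(pose, det, scene_id, im_id):
    # Field names below are assumptions about the detection format.
    return dict(scene_id=scene_id, im_id=im_id,
                obj_id=det['obj_id'], score=det.get('score', 1.0),
                R=pose[:3, :3], t=pose[:3, 3],  # translation kept in mm
                time=-1.0)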
Example #6
def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)

  # Model folder.
  model_dir = os.path.join(config.TF_MODELS_PATH, FLAGS.model)

  # Update flags with parameters loaded from the model folder.
  common.update_flags(os.path.join(model_dir, common.PARAMS_FILENAME))

  # Print the flag values.
  common.print_flags()

  # Folder from which the latest model checkpoint will be loaded.
  checkpoint_dir = os.path.join(model_dir, 'train')

  # Folder for the inference output.
  infer_dir = os.path.join(model_dir, 'infer')
  tf.gfile.MakeDirs(infer_dir)

  # Folder for the visualization output.
  vis_dir = os.path.join(model_dir, 'vis')
  tf.gfile.MakeDirs(vis_dir)

  # TFRecord files used for training.
  tfrecord_names = FLAGS.infer_tfrecord_names
  if not isinstance(FLAGS.infer_tfrecord_names, list):
    tfrecord_names = [FLAGS.infer_tfrecord_names]

  # Stride of the final output.
  if FLAGS.upsample_logits:
    # The stride is 1 if the logits are upsampled to the input resolution.
    output_stride = 1
  else:
    assert (len(FLAGS.decoder_output_stride) == 1)
    output_stride = FLAGS.decoder_output_stride[0]

  with tf.Graph().as_default():

    return_gt_orig = np.any([
      FLAGS.task_type == common.LOCALIZATION,
      FLAGS.vis_gt_poses])

    return_gt_maps = np.any([
      FLAGS.vis_pred_obj_labels,
      FLAGS.vis_pred_obj_confs,
      FLAGS.vis_pred_frag_fields])

    # Dataset provider.
    dataset = datagen.Dataset(
      dataset_name=FLAGS.dataset,
      tfrecord_names=tfrecord_names,
      model_dir=model_dir,
      model_variant=FLAGS.model_variant,
      batch_size=1,
      max_height_before_crop=FLAGS.infer_max_height_before_crop,
      crop_size=list(map(int, FLAGS.infer_crop_size)),
      num_frags=FLAGS.num_frags,
      min_visib_fract=None,
      gt_knn_frags=1,
      output_stride=output_stride,
      is_training=False,
      return_gt_orig=return_gt_orig,
      return_gt_maps=return_gt_maps,
      should_shuffle=False,
      should_repeat=False,
      prepare_for_projection=FLAGS.project_to_surface,
      data_augmentations=None)

    # Initialize a renderer for visualization.
    renderer = None
    if FLAGS.vis_gt_poses or FLAGS.vis_pred_poses:
      tf.logging.info('Initializing renderer for visualization...')

      renderer = bop_renderer.Renderer()
      renderer.init(dataset.crop_size[0], dataset.crop_size[1])

      model_type_vis = 'eval'
      dp_model = dataset_params.get_model_params(
        config.BOP_PATH, dataset.dataset_name, model_type=model_type_vis)
      for obj_id in dp_model['obj_ids']:
        path = dp_model['model_tpath'].format(obj_id=obj_id)
        renderer.add_object(obj_id, path)

      tf.logging.info('Renderer initialized.')

    # Inputs.
    samples = dataset.get_one_shot_iterator().get_next()

    # A map from output type to the number of associated channels.
    outputs_to_num_channels = common.get_outputs_to_num_channels(
      dataset.num_objs, dataset.model_store.num_frags)

    # Options of the neural network model.
    model_options = common.ModelOptions(
        outputs_to_num_channels=outputs_to_num_channels,
        crop_size=list(map(int, FLAGS.infer_crop_size)),
        atrous_rates=FLAGS.atrous_rates,
        encoder_output_stride=FLAGS.encoder_output_stride)

    # Construct the inference graph.
    predictions = model.predict(
        images=samples[common.IMAGE],
        model_options=model_options,
        upsample_logits=FLAGS.upsample_logits,
        image_pyramid=FLAGS.image_pyramid,
        num_objs=dataset.num_objs,
        num_frags=dataset.num_frags,
        frag_cls_agnostic=FLAGS.frag_cls_agnostic,
        frag_loc_agnostic=FLAGS.frag_loc_agnostic)

    # Global step.
    tf.train.get_or_create_global_step()

    # Get path to the model checkpoint.
    if FLAGS.checkpoint_name is None:
      checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
    else:
      checkpoint_path = os.path.join(checkpoint_dir, FLAGS.checkpoint_name)

    time_str = time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime())
    tf.logging.info('Starting inference at: {}'.format(time_str))
    tf.logging.info('Inference with model: {}'.format(checkpoint_path))

    # Scaffold for initialization.
    scaffold = tf.train.Scaffold(
      init_op=tf.global_variables_initializer(),
      saver=tf.train.Saver(var_list=misc.get_variable_dict()))

    # TensorFlow configuration.
    if FLAGS.cpu_only:
      tf_config = tf.ConfigProto(device_count={'GPU': 0})
    else:
      tf_config = tf.ConfigProto()
      # Set allow_growth = True to allocate only as much GPU memory as needed;
      # False allocates (nearly) all GPU memory up front.
      tf_config.gpu_options.allow_growth = False

    # Nodes that can use multiple threads to parallelize their execution will
    # schedule the individual pieces into this pool.
    tf_config.intra_op_parallelism_threads = 10

    # All ready nodes are scheduled in this pool.
    tf_config.inter_op_parallelism_threads = 10

    poses_all = []
    first_im_poses_num = 0

    session_creator = tf.train.ChiefSessionCreator(
        config=tf_config,
        scaffold=scaffold,
        master=FLAGS.master,
        checkpoint_filename_with_path=checkpoint_path)
    with tf.train.MonitoredSession(
          session_creator=session_creator, hooks=None) as sess:

      im_ind = 0
      while not sess.should_stop():

        # Estimate object poses for the current image.
        poses, run_times = process_image(
            sess=sess,
            samples=samples,
            predictions=predictions,
            im_ind=im_ind,
            crop_size=dataset.crop_size,
            output_scale=(1.0 / output_stride),
            model_store=dataset.model_store,
            renderer=renderer,
            task_type=FLAGS.task_type,
            infer_name=FLAGS.infer_name,
            infer_dir=infer_dir,
            vis_dir=vis_dir)

        # Note that the first image takes longer (because of TF initialization).
        tf.logging.info(
          'Image: {}, prediction: {:.3f}, establish_corr: {:.3f}, '
          'fitting: {:.3f}, total time: {:.3f}'.format(
            im_ind, run_times['prediction'], run_times['establish_corr'],
            run_times['fitting'], run_times['total']))

        poses_all += poses
        if im_ind == 0:
          first_im_poses_num = len(poses)
        im_ind += 1

    # Set the time of pose estimates from the first image to the average time.
    # TensorFlow takes a long time on the first image (because of initialization).
    time_avg = 0.0
    for pose in poses_all:
      time_avg += pose['time']
    if len(poses_all) > 0:
      time_avg /= float(len(poses_all))
    for i in range(first_im_poses_num):
      poses_all[i]['time'] = time_avg

    # Save the estimated poses in the BOP format:
    # https://bop.felk.cvut.cz/challenges/bop-challenge-2020/#formatofresults
    if FLAGS.save_estimates:
      suffix = ''
      if FLAGS.infer_name is not None:
        suffix = '_{}'.format(FLAGS.infer_name)
      poses_path = os.path.join(
        infer_dir, 'estimated-poses{}.csv'.format(suffix))
      tf.logging.info('Saving estimated poses to: {}'.format(poses_path))
      inout.save_bop_results(poses_path, poses_all, version='bop19')

    time_str = time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime())
    tf.logging.info('Finished inference at: {}'.format(time_str))
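The first-image time-averaging step in Example #6 can be written more compactly; an equivalent sketch:

times = [pose['time'] for pose in poses_all]
time_avg = sum(times) / len(times) if times else 0.0
for pose in poses_all[:first_im_poses_num]:
    pose['time'] = time_avg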