def save_errors(_error_sign, _scene_errs):
  """Saves the calculated per-scene pose errors to a JSON file.

  :param _error_sign: Signature of the error type (used in the output path).
  :param _scene_errs: Errors to save (serializable structure).
  """
  # The output path template and the scene/result identifiers come from the
  # enclosing script scope.
  out_path = p['out_errors_tpath'].format(
    result_name=result_name, error_sign=_error_sign, scene_id=scene_id)
  misc.ensure_dir(os.path.dirname(out_path))
  misc.log('Saving errors to: {}'.format(out_path))
  inout.save_json(out_path, _scene_errs)
  # Per-threshold recall scores and their average for the current error type.
  # NOTE(review): these two log lines appear to close a per-error-type loop
  # from above this chunk — confirm the indentation level against the caller.
  misc.log('Recall scores: {}'.format(' '.join(map(str, recalls))))
  misc.log('Average recall: {}'.format(average_recalls[error['type']]))

time_total = time.time() - time_start
misc.log('Evaluation of {} took {}s.'.format(result_filename, time_total))

# Calculate the final scores.
final_scores = {}
for error in p['errors']:
  final_scores['bop19_average_recall_{}'.format(error['type'])] =\
    average_recalls[error['type']]

# Final score for the given dataset: mean of the average recalls of the three
# BOP19 pose-error functions (VSD, MSSD, MSPD).
final_scores['bop19_average_recall'] = np.mean([
  average_recalls['vsd'], average_recalls['mssd'], average_recalls['mspd']])

# Average estimation time per image.
final_scores['bop19_average_time_per_image'] = average_time_per_image

# Save the final scores.
final_scores_path = os.path.join(
  p['eval_path'], result_name, 'scores_bop19.json')
inout.save_json(final_scores_path, final_scores)

# Print the final scores.
misc.log('FINAL SCORES:')
for score_name, score_value in final_scores.items():
  misc.log('- {}: {}'.format(score_name, score_value))

misc.log('Done.')
}) # Visualization of the visibility mask. if p['vis_visibility_masks']: depth_im_vis = visualization.depth_for_vis(depth, 0.2, 1.0) depth_im_vis = np.dstack([depth_im_vis] * 3) visib_gt_vis = visib_gt.astype(np.float) zero_ch = np.zeros(visib_gt_vis.shape) visib_gt_vis = np.dstack([zero_ch, visib_gt_vis, zero_ch]) vis = 0.5 * depth_im_vis + 0.5 * visib_gt_vis vis[vis > 1] = 1 vis_path = p['vis_mask_visib_tpath'].format( delta=p['delta'], dataset=p['dataset'], split=p['dataset_split'], scene_id=scene_id, im_id=im_id, gt_id=gt_id) misc.ensure_dir(os.path.dirname(vis_path)) inout.save_im(vis_path, vis) # Save the info for the current scene. scene_gt_info_path = dp_split['scene_gt_info_tpath'].format( scene_id=scene_id) misc.ensure_dir(os.path.dirname(scene_gt_info_path)) inout.save_json(scene_gt_info_path, scene_gt_info)
for gt_id in err['errors'].keys(): err['errors'][gt_id] = [factor * e for e in err['errors'][gt_id]] # Match the estimated poses to the ground-truth poses. matches += pose_matching.match_poses_scene( scene_id, scene_gt_curr, scene_gt_valid, scene_errs, p['correct_th'][err_type], n_top) # Calculate the performance scores. # ---------------------------------------------------------------------------- # 6D object localization scores (SiSo if n_top = 1). scores = score.calc_localization_scores( dp_split['scene_ids'], dp_model['obj_ids'], matches, n_top) # Save scores. scores_path = p['out_scores_tpath'].format( eval_path=p['eval_path'], error_dir_path=error_dir_path, score_sign=score_sign) inout.save_json(scores_path, scores) # Save matches. matches_path = p['out_matches_tpath'].format( eval_path=p['eval_path'], error_dir_path=error_dir_path, score_sign=score_sign) inout.save_json(matches_path, matches) time_total = time.time() - time_start misc.log('Matching and score calculation took {}s.'.format(time_total)) misc.log('Done.')
# Convert each model to a normalized "xyz" version and collect per-object
# scale/offset factors, saved as JSON at the end.
for m_id, model_ply in enumerate(model_plys):
  model_id = model_ids[m_id]
  m_info = model_info['{}'.format(model_id)]

  # Objects with discrete symmetries keep their original origin so that the
  # symmetric poses stay consistent after normalization.
  # (FIX: removed dead local 'sym_continous', which was assigned and never
  # used, and the needless 'keys'/'fn_read' temporaries.)
  center_x = center_y = center_z = True
  if 'symmetries_discrete' in m_info:
    center_x = center_y = center_z = False
    print("keep origins of the object when it has symmetric poses")

  fname = model_ply.split("/")[-1]
  obj_id = int(fname[4:-4])  # e.g. "obj_000001.ply" -> 1
  fn_write = bop_dir + "/models_xyz/" + fname

  x_abs, y_abs, z_abs, x_ct, y_ct, z_ct = convert_unique(
    model_ply, fn_write,
    center_x=center_x, center_y=center_y, center_z=center_z)
  print(obj_id, x_abs, y_abs, z_abs, x_ct, y_ct, z_ct)

  # obj_id is already an int — no extra int() needed for the key.
  param[obj_id] = {
    'x_scale': float(x_abs),
    'y_scale': float(y_abs),
    'z_scale': float(z_abs),
    'x_ct': float(x_ct),
    'y_ct': float(y_ct),
    'z_ct': float(z_ct)
  }

# norm_factor is presumably the output JSON path — defined above this chunk.
inout.save_json(norm_factor, param)
# Load dataset parameters.
dp_model = dataset_params.get_model_params(
  p['datasets_path'], p['dataset'], p['model_type'])

models_info = {}
for obj_id in dp_model['obj_ids']:
  misc.log('Processing model of object {}...'.format(obj_id))

  model = inout.load_ply(dp_model['model_tpath'].format(obj_id=obj_id))

  # Calculate 3D bounding box.
  # FIX: map() returns a lazy iterator in Python 3, which is not
  # subscriptable and cannot be subtracted from an ndarray — keep numpy
  # arrays here and convert to float only when building the JSON dict.
  ref_pt = model['pts'].min(axis=0).flatten()
  size = (model['pts'].max(axis=0) - ref_pt).flatten()

  # Calculated diameter (maximum distance between any two model points).
  diameter = misc.calc_pts_diameter(model['pts'])

  models_info[obj_id] = {
    'min_x': float(ref_pt[0]),
    'min_y': float(ref_pt[1]),
    'min_z': float(ref_pt[2]),
    'size_x': float(size[0]),
    'size_y': float(size[1]),
    'size_z': float(size[2]),
    'diameter': float(diameter)
  }

# Save the calculated info about the object models.
inout.save_json(dp_model['models_info_path'], models_info)
'train') dp_model = dataset_params.get_model_params(p['datasets_path'], p['dataset'], p['model_type']) models_info = {} for obj_id in dp_model['obj_ids']: misc.log('Processing model of object {}...'.format(obj_id)) model = inout.load_ply(dp_model['model_tpath'].format(obj_id=obj_id)) # Calculate 3D bounding box. ref_pt = map(float, model['pts'].min(axis=0).flatten()) size = map(float, (model['pts'].max(axis=0) - ref_pt).flatten()) # Calculated diameter. diameter = misc.calc_pts_diameter(model['pts']) models_info[obj_id] = { 'min_x': ref_pt[0], 'min_y': ref_pt[1], 'min_z': ref_pt[2], 'size_x': size[0], 'size_y': size[1], 'size_z': size[2], 'diameter': diameter } # Save the calculated info about the object models. inout.save_json(dp_split['models_info_path'], models_info)