def collect_printer_info(self):
    """Build the annotation database for the 'printer' split of this class.

    For every image in the PrinterModelDB, records the rgb/mask paths, the
    aligned pose, the camera intrinsics, and the 2D projections of the
    model's 3D keypoints (corners, farthest points, center).  The result is
    pickled to ``self.printer_pkl`` and returned.

    Returns:
        list[dict]: one annotation dict per image.
    """
    pdb = PrinterModelDB(self.cls_name)
    modeldb = LineModModelDB()
    # FIX: the original called Projector.project_K(...) through the class
    # with only three arguments; every other collector in this file calls
    # it on an instance.  If project_K is an instance method the class call
    # misbinds `pts` as `self`.  Calling through an instance is correct in
    # either case (it also works if project_K is a staticmethod).
    projector = Projector()
    database = []
    for k in range(pdb.image_num):
        K = pdb.K[self.cls_name]
        data = {}
        # printer images are 1-indexed on disk
        data['rgb_pth'] = pdb.image_pattern.format(k + 1)
        data['dpt_pth'] = pdb.mask_pattern.format(k + 1)
        data['RT'] = pdb.aligned_poses[k]
        data['K'] = K
        data['cls_typ'] = self.cls_name
        data['rnd_typ'] = 'printer'
        data['corners'] = projector.project_K(
            modeldb.get_corners_3d(self.cls_name), data['RT'], K)
        data['farthest'] = projector.project_K(
            modeldb.get_farthest_3d(self.cls_name), data['RT'], K)
        for num in [4, 12, 16, 20]:
            data['farthest{}'.format(num)] = projector.project_K(
                modeldb.get_farthest_3d(self.cls_name, num), data['RT'], K)
        # [None, :] lifts the (3,) center to a (1, 3) point set
        data['center'] = projector.project_K(
            modeldb.get_centers_3d(self.cls_name)[None, :], data['RT'], K)
        database.append(data)
    save_pickle(database, self.printer_pkl)
    return database
def collect_truncated_set_info(self):
    """Assemble pose annotations for the truncated LINEMOD split.

    Reads the per-image ``{:06}_info.pkl`` (pose + shifted intrinsics)
    written by the truncation generator, projects the model keypoints with
    those intrinsics, pickles the list to ``self.pkl`` and returns it.
    """
    modeldb = LineModModelDB()
    projector = Projector()
    database = []
    trunc_dir = os.path.join(self.linemod_dir, 'truncated', self.cls_name)
    img_num = len(os.listdir(
        os.path.join(self.linemod_dir, self.cls_name, 'JPEGImages')))
    for idx in range(img_num):
        pose, K = read_pickle(
            os.path.join(trunc_dir, '{:06}_info.pkl'.format(idx)))
        entry = {
            'rgb_pth': os.path.join('truncated', self.cls_name,
                                    '{:06}_rgb.jpg'.format(idx)),
            'dpt_pth': os.path.join('truncated', self.cls_name,
                                    '{:04}_msk.png'.format(idx)),
            'RT': pose,
            'cls_typ': self.cls_name,
            'rnd_typ': 'truncated',
        }
        entry['corners'] = projector.project_K(
            modeldb.get_corners_3d(self.cls_name), pose, K)
        entry['farthest'] = projector.project_K(
            modeldb.get_farthest_3d(self.cls_name), pose, K)
        for num in (4, 12, 16, 20):
            entry['farthest{}'.format(num)] = projector.project_K(
                modeldb.get_farthest_3d(self.cls_name, num), pose, K)
        entry['small_bbox'] = projector.project_K(
            modeldb.get_small_bbox(self.cls_name), pose, K)
        entry['center'] = projector.project_K(
            modeldb.get_centers_3d(self.cls_name)[None, :], pose, K)
        entry['K'] = K
        database.append(entry)
    save_pickle(database, self.pkl)
    return database
def collect_real_set_info(self):
    """Collect annotations for the real Occlusion-LINEMOD images.

    Frames whose pose file is empty (object not annotated) are skipped.
    The database is pickled to ``self.real_pkl`` and returned.
    """
    projector = Projector()
    modeldb = LineModModelDB()
    transformer = PoseTransformer(class_type=self.cls_name)
    database = []
    img_num = len(os.listdir(os.path.join(self.linemod_dir, self.rgb_dir)))
    print(img_num)
    for idx in range(img_num):
        pose = self.read_pose(
            os.path.join(self.rt_dir, 'info_{:05}.txt'.format(idx)))
        if len(pose) == 0:
            # no annotation for this object in this frame
            continue
        RT = transformer.occlusion_pose_to_blender_pose(pose)
        entry = {
            'rgb_pth': os.path.join(self.rgb_dir,
                                    'color_{:05}.png'.format(idx)),
            'dpt_pth': os.path.join(self.mask_dir, '{}.png'.format(idx)),
            'RT': RT,
            'cls_typ': self.cls_name,
            'rnd_typ': 'real',
        }
        entry['corners'] = projector.project(
            modeldb.get_corners_3d(self.cls_name), RT, 'linemod')
        entry['farthest'] = projector.project(
            modeldb.get_farthest_3d(self.cls_name), RT, 'linemod')
        for num in (4, 12, 16, 20):
            entry['farthest{}'.format(num)] = projector.project(
                modeldb.get_farthest_3d(self.cls_name, num), RT, 'linemod')
        entry['center'] = projector.project(
            modeldb.get_centers_3d(self.cls_name)[None, :], RT, 'linemod')
        entry['small_bbox'] = projector.project(
            modeldb.get_small_bbox(self.cls_name), RT, 'linemod')
        # vanishing points of the three axes, in homogeneous form
        axis_direct = np.concatenate(
            [np.identity(3), np.zeros([3, 1])], 1).astype(np.float32)
        entry['van_pts'] = projector.project_h(axis_direct, RT, 'blender')
        database.append(entry)
    save_pickle(database, self.real_pkl)
    return database
def collect_ms_info(self):
    """Collect annotations for the multi-object render ('render_multi') split.

    Skips renders whose mask has almost no foreground.  The database is
    pickled to ``self.ms_pkl`` and returned.

    Returns:
        list[dict]: one annotation dict per usable render.
    """
    projector = Projector()
    model_db = LineModModelDB()
    database = []
    for k in range(self.ms_num):
        data = dict()
        data['rgb_pth'] = os.path.join(self.ms_dir, '{}.jpg'.format(k))
        data['dpt_pth'] = os.path.join(
            self.ms_dir, '{}_{}_mask.png'.format(k, self.cls_name))
        # if too few foreground pts then continue
        mask = imread(os.path.join(self.linemod_dir, data['dpt_pth']))
        if np.sum(mask) < 5:
            continue
        data['RT'] = read_pickle(os.path.join(
            self.linemod_dir, self.ms_dir,
            '{}_{}_RT.pkl'.format(self.cls_name, k)))['RT']
        data['cls_typ'] = self.cls_name
        data['rnd_typ'] = 'render_multi'
        data['corners'] = projector.project(
            model_db.get_corners_3d(self.cls_name), data['RT'], 'blender')
        data['farthest'] = projector.project(
            model_db.get_farthest_3d(self.cls_name), data['RT'], 'blender')
        for num in [4, 12, 16, 20]:
            # BUG FIX: the original referenced `modeldb` here (and for
            # small_bbox below), but the local variable is `model_db`,
            # which raised NameError the first time this loop ran.
            data['farthest{}'.format(num)] = projector.project(
                model_db.get_farthest_3d(self.cls_name, num),
                data['RT'], 'blender')
        data['center'] = projector.project(
            model_db.get_centers_3d(self.cls_name)[None, :],
            data['RT'], 'blender')
        data['small_bbox'] = projector.project(
            model_db.get_small_bbox(self.cls_name), data['RT'], 'blender')
        axis_direct = np.concatenate(
            [np.identity(3), np.zeros([3, 1])], 1).astype(np.float32)
        data['van_pts'] = projector.project_h(
            axis_direct, data['RT'], 'blender')
        database.append(data)
    save_pickle(database, self.ms_pkl)
    return database
def collect_fuse_info(self):
    """Collect annotations for the 'fuse' (cut-and-paste composite) split.

    Skips composites where this class occupies fewer than 400 pixels.
    The per-image intrinsics are the LINEMOD intrinsics with the principal
    point shifted by the paste offset.  Result is pickled to
    ``self.fuse_pkl`` and returned.
    """
    modeldb = LineModModelDB()
    projector = Projector()
    database = []
    for idx in range(self.fuse_num):
        entry = dict()
        entry['rgb_pth'] = os.path.join(self.fuse_dir,
                                        '{}_rgb.jpg'.format(idx))
        entry['dpt_pth'] = os.path.join(self.fuse_dir,
                                        '{}_mask.png'.format(idx))
        # if too few foreground pts then continue; the mask stores
        # class-index+1 as the pixel label
        mask = imread(os.path.join(self.linemod_dir, entry['dpt_pth']))
        label = cfg.linemod_cls_names.index(self.cls_name) + 1
        if np.sum(mask == label) < 400:
            continue
        entry['cls_typ'] = self.cls_name
        entry['rnd_typ'] = 'fuse'
        begins, poses = read_pickle(os.path.join(
            self.linemod_dir, self.fuse_dir, '{}_info.pkl'.format(idx)))
        RT = poses[self.cls_idx]
        entry['RT'] = RT
        # shift the principal point by this instance's crop offset (row, col)
        K = projector.intrinsic_matrix['linemod'].copy()
        K[0, 2] += begins[self.cls_idx, 1]
        K[1, 2] += begins[self.cls_idx, 0]
        entry['K'] = K
        entry['corners'] = projector.project_K(
            modeldb.get_corners_3d(self.cls_name), RT, K)
        # NOTE(review): unlike the sibling collectors, the center here is
        # projected without the [None, :] expansion — confirm the expected
        # downstream shape before changing it.
        entry['center'] = projector.project_K(
            modeldb.get_centers_3d(self.cls_name), RT, K)
        entry['farthest'] = projector.project_K(
            modeldb.get_farthest_3d(self.cls_name), RT, K)
        for num in (4, 12, 16, 20):
            entry['farthest{}'.format(num)] = projector.project_K(
                modeldb.get_farthest_3d(self.cls_name, num), RT, K)
        entry['small_bbox'] = projector.project_K(
            modeldb.get_small_bbox(self.cls_name), RT, K)
        database.append(entry)
    save_pickle(database, self.fuse_pkl)
    return database
def collect_real_set_info(self):  # linemod standard
    """Collect annotations for the real LINEMOD-standard images.

    Converts the original rot/tra ground-truth pose to the blender-model
    frame, projects the model keypoints with the LINEMOD intrinsics,
    pickles the list to ``self.set_pkl`` and returns it.
    """
    database = []
    projector = Projector()
    modeldb = LineModModelDB()
    img_num = len(os.listdir(os.path.join(self.linemod_dir, self.rgb_dir)))
    for k in range(img_num):
        data = {}
        data['rgb_pth'] = os.path.join(self.rgb_dir, '{:06}.jpg'.format(k))
        data['dpt_pth'] = os.path.join(self.mask_dir, '{:04}.png'.format(k))
        # original LINEMOD pose from the rot/tra file pair, converted to the
        # frame of the (re-centered) blender model used by this codebase
        pose = read_pose(os.path.join(self.rt_dir, 'rot{}.rot'.format(k)),
                         os.path.join(self.rt_dir, 'tra{}.tra'.format(k)))
        pose_transformer = PoseTransformer(class_type=self.cls_name)
        data['RT'] = pose_transformer.orig_pose_to_blender_pose(pose).astype(np.float32)
        data['cls_typ'] = self.cls_name
        data['rnd_typ'] = 'real'
        data['corners'] = projector.project(
            modeldb.get_corners_3d(self.cls_name), data['RT'], 'linemod')
        data['farthest'] = projector.project(
            modeldb.get_farthest_3d(self.cls_name), data['RT'], 'linemod')
        for num in [4, 12, 16, 20]:
            data['farthest{}'.format(num)] = projector.project(
                modeldb.get_farthest_3d(self.cls_name, num), data['RT'], 'linemod')
        # [None, :] lifts the (3,) center to a (1, 3) point set
        data['center'] = projector.project(
            modeldb.get_centers_3d(self.cls_name)[None, :], data['RT'], 'linemod')
        data['small_bbox'] = projector.project(
            modeldb.get_small_bbox(self.cls_name), data['RT'], 'linemod')
        # vanishing points of the three coordinate axes in homogeneous form.
        # NOTE(review): this collector uses 'linemod' for project_h while the
        # occlusion collector uses 'blender' — confirm which is intended.
        axis_direct = np.concatenate(
            [np.identity(3), np.zeros([3, 1])], 1).astype(np.float32)
        data['van_pts'] = projector.project_h(axis_direct, data['RT'], 'linemod')
        database.append(data)
    save_pickle(database, self.set_pkl)
    return database
def make_truncated_linemod_dataset():
    """Generate the 'truncated' LINEMOD split for every class.

    For each real image, repeatedly samples a random 256-crop until at
    least 20% of the object mask survives (giving up after 50 tries and
    keeping the last crop), then saves the cropped rgb/mask and pickles the
    blender-frame pose together with intrinsics whose principal point is
    shifted by the crop offset.
    """
    for cls_name in cfg.linemod_cls_names:
        print(cls_name)
        linemod_dir = cfg.LINEMOD
        rgb_dir = '{}/JPEGImages'.format(cls_name)
        mask_dir = '{}/mask'.format(cls_name)
        rt_dir = os.path.join(cfg.DATA_DIR, 'LINEMOD_ORIG', cls_name, 'data')
        if not os.path.exists(os.path.join(linemod_dir, 'truncated', cls_name)):
            os.mkdir(os.path.join(linemod_dir, 'truncated', cls_name))
        projector = Projector()
        img_num = len(os.listdir(os.path.join(linemod_dir, rgb_dir)))
        print(img_num)
        for k in range(img_num):
            rgb = imread(os.path.join(linemod_dir, rgb_dir, '{:06}.jpg'.format(k)))
            msk = imread(os.path.join(linemod_dir, mask_dir, '{:04}.png'.format(k)))
            # collapse the RGB mask to a binary foreground map
            msk = (np.sum(msk, 2) > 0).astype(np.uint8)
            before = np.sum(msk)
            count = 0
            # retry random crops until >= 20% of the object remains, or 50 tries
            while True:
                rgb_new, msk_new, hbeg, wbeg = LineModImageDB.crop_instance(rgb, msk, 256)
                after = np.sum(msk_new)
                count += 1
                if after / before >= 0.2 or count > 50:
                    rgb, msk = rgb_new, msk_new
                    break
            # NOTE(review): rgb uses a 6-digit name, msk a 4-digit one — this
            # matches the reader in collect_truncated_set_info, so leave as-is.
            imsave(os.path.join(linemod_dir, 'truncated', cls_name,
                                '{:06}_rgb.jpg'.format(k)), rgb)
            imsave(os.path.join(linemod_dir, 'truncated', cls_name,
                                '{:04}_msk.png'.format(k)), msk)
            pose = read_pose(os.path.join(rt_dir, 'rot{}.rot'.format(k)),
                             os.path.join(rt_dir, 'tra{}.tra'.format(k)))
            pose_transformer = PoseTransformer(class_type=cls_name)
            pose = pose_transformer.orig_pose_to_blender_pose(pose).astype(np.float32)
            # shift the principal point by the crop offsets (wbeg -> cx, hbeg -> cy)
            K = projector.intrinsic_matrix['linemod'].copy()
            K[0, 2] += wbeg
            K[1, 2] += hbeg
            save_pickle([pose, K],
                        os.path.join(linemod_dir, 'truncated', cls_name,
                                     '{:06}_info.pkl'.format(k)))
            if k % 500 == 0:
                print(k)
def fuse():
    """Compose synthetic training scenes by pasting T-LESS renders on backgrounds.

    Relies on module-level globals: ``tless_dir``, ``output_rgb_dir``,
    ``output_mask_dir``, ``output_dir``, ``num_train_imgs``,
    ``max_objects_in_scene``, ``bg_paths`` and the helper ``cut_and_paste``.
    Writes ``{i}.png`` rgb/mask pairs and pickles per-image object
    annotations to ``obj_info.pkl``.
    """
    W, H = 720, 540
    noofobjects = 30  # number of T-LESS object classes
    if not os.path.exists(output_rgb_dir):
        os.makedirs(output_rgb_dir)
    if not os.path.exists(output_mask_dir):
        os.makedirs(output_mask_dir)
    # renders available per object id (counted via their .pkl pose files)
    noofimages = {
        i + 1: len(glob.glob(os.path.join(tless_dir, str(i + 1), '*.pkl')))
        for i in range(noofobjects)
    }
    obj_info = []
    for i in tqdm.tqdm(range(num_train_imgs)):
        train_img = np.zeros((H, W, 3), dtype=np.uint8)
        train_mask = np.zeros((H, W), dtype=np.uint8)
        instance_id = 0
        ann = []
        for k in range(max_objects_in_scene):
            # pick a random object and a random render of it
            obj_id = np.random.randint(0, noofobjects) + 1
            img_id = np.random.randint(0, noofimages[obj_id])
            img_path = os.path.join(tless_dir, str(obj_id), str(img_id) + '.png')
            dpt_path = os.path.join(tless_dir, str(obj_id), str(img_id) + '_depth.png')
            rand_img = cv2.imread(img_path)
            dpt = np.array(Image.open(dpt_path))
            # 65535 marks "no depth" = background in the render
            mask = (dpt != 65535).astype(np.uint8)
            instance_id += 1
            cut_and_paste(rand_img, mask, train_img, train_mask, instance_id)
            ann.append([obj_id, instance_id])
        # random background, then overlay the pasted foreground pixels
        new_img = cv2.resize(
            cv2.imread(bg_paths[np.random.randint(0, len(bg_paths))]), (W, H))
        fg_mask = train_mask != 0
        new_img[fg_mask] = train_img[fg_mask]
        img_path = os.path.join(output_rgb_dir, str(i) + '.png')
        mask_path = os.path.join(output_mask_dir, str(i) + '.png')
        obj_info.append({
            'img_path': img_path,
            'mask_path': mask_path,
            'ann': ann
        })
        cv2.imwrite(img_path, new_img)
        cv2.imwrite(mask_path, train_mask)
    base_utils.save_pickle(obj_info, os.path.join(output_dir, 'obj_info.pkl'))
def _handle_render_train_symmetry_pose(obj_id):
    """Add a symmetry-rectified pose to every render annotation of ``obj_id``.

    Rewrites each ``{ind}_RT.pkl`` in place, storing the rectified rotation
    combined with the original translation under the 's_RT' key.
    """
    render_dir = os.path.join('data/tless', 'renders', str(obj_id))
    ann_num = len(glob.glob(os.path.join(render_dir, '*.pkl')))
    for ind in tqdm.tqdm(range(ann_num)):
        pkl_path = os.path.join(render_dir, '{}_RT.pkl'.format(ind))
        ann = read_pickle(pkl_path)
        pose = ann['RT']
        # rectify only the rotation; keep the translation column as-is
        rect_R = symmetry_utils.TLESS_rectify(obj_id, pose[:, :3])
        ann['s_RT'] = np.concatenate([rect_R, pose[:, 3:]], axis=1)
        save_pickle(ann, pkl_path)
def collect_real_set_info(self):
    '''Collect annotations for the real LINEMOD images (-> pose_real.pkl).

    The pvnet copy of LINEMOD was modified: its cat.ply differs from the
    original LINEMOD cat.ply by a model translation and rotation, so the
    ground-truth poses of the original dataset need a simple transform —
    that is why the poses stored here differ from the originals.  Although
    the images themselves are unchanged, RT changes because the 3D
    keypoints are sampled on the new model, so RT must be transformed
    accordingly.
    '''
    database = []
    projector = Projector()
    modeldb = LineModModelDB()
    img_num = len(os.listdir(os.path.join(self.linemod_dir, self.rgb_dir)))
    for k in range(img_num):
        data = {}
        data['rgb_pth'] = os.path.join(self.rgb_dir, '{:06}.jpg'.format(k))
        data['dpt_pth'] = os.path.join(self.mask_dir, '{:04}.png'.format(k))
        # original pose from the rot/tra file pair, converted to the frame
        # of the re-centered blender model (see docstring)
        pose = read_pose(os.path.join(self.rt_dir, 'rot{}.rot'.format(k)),
                         os.path.join(self.rt_dir, 'tra{}.tra'.format(k)))
        pose_transformer = PoseTransformer(class_type=self.cls_name)
        data['RT'] = pose_transformer.orig_pose_to_blender_pose(
            pose).astype(np.float32)
        data['cls_typ'] = self.cls_name
        data['rnd_typ'] = 'real'
        data['corners'] = projector.project(
            modeldb.get_corners_3d(self.cls_name), data['RT'], 'linemod')
        data['farthest'] = projector.project(
            modeldb.get_farthest_3d(self.cls_name), data['RT'], 'linemod')
        for num in [4, 12, 16, 20]:
            data['farthest{}'.format(num)] = projector.project(
                modeldb.get_farthest_3d(self.cls_name, num), data['RT'],
                'linemod')
        # [None, :] lifts the (3,) center to a (1, 3) point set
        data['center'] = projector.project(
            modeldb.get_centers_3d(self.cls_name)[None, :], data['RT'],
            'linemod')
        data['small_bbox'] = projector.project(
            modeldb.get_small_bbox(self.cls_name), data['RT'], 'linemod')
        # vanishing points of the three coordinate axes in homogeneous form
        axis_direct = np.concatenate(
            [np.identity(3), np.zeros([3, 1])], 1).astype(np.float32)
        data['van_pts'] = projector.project_h(axis_direct, data['RT'],
                                              'linemod')
        database.append(data)
    save_pickle(database, self.real_pkl)
    return database
def get_plane_height(self):
    """Return the supporting-plane height (min z of the blender model).

    The value is cached per class in the pickle at
    ``self.plane_height_path``; it is computed once and persisted on the
    first request for a class.
    """
    if os.path.exists(self.plane_height_path):
        cache = read_pickle(self.plane_height_path)
    else:
        cache = {}
    if self.class_type in cache:
        return cache[self.class_type]
    # cache miss: derive the height from the model's lowest vertex
    transformer = PoseTransformer(self.class_type)
    model = transformer.get_blender_model()
    height = np.min(model[:, -1])
    cache[self.class_type] = height
    save_pickle(cache, self.plane_height_path)
    return height
def get_rendering_model(data_root):
    """Load the 30 T-LESS CAD models, caching them as one pickle.

    Args:
        data_root: directory containing ``obj_{:03}.ply`` files.

    Returns:
        dict mapping object id (1..30) to the loaded mesh.
    """
    cache_path = os.path.join(data_root, 'rendering_model.pkl')
    if os.path.exists(cache_path):
        return base_utils.read_pickle(cache_path)
    models = {}
    for obj_id in tqdm.tqdm(range(1, 31)):
        cad_path = os.path.join(data_root, 'obj_{:03}.ply'.format(obj_id))
        models[obj_id] = opengl_renderer.load_ply(cad_path)
    base_utils.save_pickle(models, cache_path)
    return models
def get_mask():
    """Render per-instance masks for the T-LESS test_primesense scenes.

    For each scene 01-20: renders every ground-truth instance with its CAD
    model, composes an instance-labelled mask map (nearest instance wins
    via a depth buffer), saves the mask beside the rgb image, and pickles
    per-image visible-pixel counts to ``pixel_num.pkl``.
    """
    data_root = 'data/tless/test_primesense'
    scene_ids = [i + 1 for i in range(20)]
    models = get_rendering_model('data/tless/models_cad')
    for scene_id in tqdm.tqdm(scene_ids):
        scene_dir = os.path.join(data_root, '{:02}'.format(scene_id))
        rgb_dir = os.path.join(scene_dir, 'rgb')
        mask_dir = os.path.join(scene_dir, 'mask')
        # portable replacement for os.system('mkdir -p ...')
        os.makedirs(mask_dir, exist_ok=True)
        rgb_paths = glob.glob(os.path.join(rgb_dir, '*.png'))
        # FIX: use safe_load on an explicitly-closed handle; the original
        # called the deprecated yaml.load(open(...)) without a Loader and
        # leaked the file handles.  gt.yml/info.yml are plain data.
        with open(os.path.join(scene_dir, 'gt.yml')) as f:
            gt = yaml.safe_load(f)
        with open(os.path.join(scene_dir, 'info.yml')) as f:
            K_info = yaml.safe_load(f)
        a_pixel_num_dict = {}
        for rgb_path in tqdm.tqdm(rgb_paths):
            img_id = int(os.path.basename(rgb_path).replace('.png', ''))
            gt_ = gt[img_id]
            w, h = Image.open(rgb_path).size
            K = np.array(K_info[img_id]['cam_K']).reshape(3, 3)
            mask_map = np.zeros(shape=[h, w], dtype=np.int16)
            # depth buffer initialized far away (10 m) so any rendered
            # instance in front overwrites it
            depth_map = 10 * np.ones(shape=[h, w], dtype=np.float32)
            a_pixel_nums = []
            for instance_id, instance_gt in enumerate(gt_):
                R = np.array(instance_gt['cam_R_m2c']).reshape(3, 3)
                t = np.array(instance_gt['cam_t_m2c']) * 0.001  # mm -> m
                pose = np.concatenate([R, t[:, None]], axis=1)
                obj_id = instance_gt['obj_id']
                depth = opengl_renderer.render(models[obj_id], pose, K, w, h)
                update_mask(depth, w, mask_map, depth_map, obj_id, instance_id)
                a_pixel_nums.append(np.sum(depth != 0))
            mask_path = rgb_path.replace('rgb', 'mask')
            Image.fromarray(mask_map).save(mask_path, 'PNG')
            a_pixel_num_dict.update({img_id: a_pixel_nums})
        base_utils.save_pickle(a_pixel_num_dict,
                               os.path.join(scene_dir, 'pixel_num.pkl'))
def collect_render_set_info(self, pkl_file, render_dir, format='jpg'):
    """Collect annotations for the blender-rendered ('render') split.

    Reads each render's pose pickle, projects the model keypoints with the
    blender intrinsics, pickles the list to ``pkl_file`` and returns it.
    """
    projector = Projector()
    modeldb = LineModModelDB()
    database = []
    for idx in range(self.render_num):
        RT = read_pickle(os.path.join(
            self.linemod_dir, render_dir, '{}_RT.pkl'.format(idx)))['RT']
        entry = {
            'rgb_pth': os.path.join(render_dir, '{}.{}'.format(idx, format)),
            'RT': RT,
            'cls_typ': self.cls_name,
            'rnd_typ': 'render',
        }
        entry['corners'] = projector.project(
            modeldb.get_corners_3d(self.cls_name), RT, 'blender')
        entry['farthest'] = projector.project(
            modeldb.get_farthest_3d(self.cls_name), RT, 'blender')
        entry['center'] = projector.project(
            modeldb.get_centers_3d(self.cls_name)[None, :], RT, 'blender')
        for num in (4, 12, 16, 20):
            entry['farthest{}'.format(num)] = projector.project(
                modeldb.get_farthest_3d(self.cls_name, num), RT, 'blender')
        entry['small_bbox'] = projector.project(
            modeldb.get_small_bbox(self.cls_name), RT, 'blender')
        # vanishing points of the three axes in homogeneous form
        axis_direct = np.concatenate(
            [np.identity(3), np.zeros([3, 1])], 1).astype(np.float32)
        entry['van_pts'] = projector.project_h(axis_direct, RT, 'blender')
        database.append(entry)
    save_pickle(database, pkl_file)
    return database
def collect_val_render(self, pkl_file, render_dir, format='jpg'):
    """Collect annotations for a fixed slice of validation renders.

    Note: the index range 3482-3498 is hard-coded; ``render_dir`` is
    accepted for interface compatibility but not used (paths come from
    ``self.render_val_dir``).
    """
    projector = Projector()
    modeldb = HomemadeModelDB()
    database = []
    for idx in range(3482, 3499):
        print(os.path.join(self.render_val_dir, '{}.{}'.format(idx, format)))
        RT = read_pickle(os.path.join(
            self.homemade_dir, self.render_val_dir,
            '{}_RT.pkl'.format(idx)))['RT']
        entry = {
            'rgb_pth': os.path.join(self.render_val_dir,
                                    '{}.{}'.format(idx, format)),
            'dpt_pth': os.path.join(self.render_val_dir,
                                    '{}_depth.png'.format(idx)),
            'RT': RT,
            'cls_typ': self.cls_name,
            'rnd_typ': 'render',
        }
        entry['corners'] = projector.project(
            modeldb.get_corners_3d(self.cls_name), RT, 'blender')
        entry['farthest'] = projector.project(
            modeldb.get_farthest_3d(self.cls_name), RT, 'blender')
        entry['center'] = projector.project(
            modeldb.get_centers_3d(self.cls_name)[None, :], RT, 'blender')
        for num in (4, 12, 16, 20):
            entry['farthest{}'.format(num)] = projector.project(
                modeldb.get_farthest_3d(self.cls_name, num), RT, 'blender')
        entry['small_bbox'] = projector.project(
            modeldb.get_small_bbox(self.cls_name), RT, 'blender')
        axis_direct = np.concatenate(
            [np.identity(3), np.zeros([3, 1])], 1).astype(np.float32)
        entry['van_pts'] = projector.project_h(axis_direct, RT, 'blender')
        database.append(entry)
    print("collectval successful?: length = ", len(database))
    save_pickle(database, pkl_file)
    return database
def val(net, dataloader, epoch, val_prefix='val', use_camera_intrinsic=False,
        use_motion=False):
    """Run one validation pass: losses, optional pose evaluation, logging.

    Relies on module-level globals: ``recs``/``recs_names`` (loss meters),
    ``train_cfg``, ``args``, ``vote_type``, ``recorder``.  Pose evaluation
    (PnP) only runs on eval epochs or when ``args.test_model`` is set.
    """
    for rec in recs:
        rec.reset()
    test_begin = time.time()
    evaluator = Evaluator()
    # keypoint-voting head wrapper; motion variant when requested
    eval_net = DataParallel(
        EvalWrapper().cuda()) if not use_motion else DataParallel(
            MotionEvalWrapper().cuda())
    uncertain_eval_net = DataParallel(UncertaintyEvalWrapper().cuda())
    net.eval()
    for idx, data in enumerate(dataloader):
        # with per-sample intrinsics the batch carries an extra Ks tensor;
        # corner_target is unpacked but not used below
        if use_camera_intrinsic:
            image, mask, vertex, vertex_weights, pose, corner_target, Ks = [
                d.cuda() for d in data
            ]
        else:
            image, mask, vertex, vertex_weights, pose, corner_target = [
                d.cuda() for d in data
            ]
        with torch.no_grad():
            seg_pred, vertex_pred, loss_seg, loss_vertex, precision, recall = net(
                image, mask, vertex, vertex_weights)
        # reduce per-device losses; NOTE: the comprehension variable `val`
        # shadows this function's name inside the expression only
        loss_seg, loss_vertex, precision, recall = [
            torch.mean(val)
            for val in (loss_seg, loss_vertex, precision, recall)
        ]
        if (train_cfg['eval_epoch'] and epoch % train_cfg['eval_inter'] == 0
                and epoch >= train_cfg['eval_epoch_begin']) or args.test_model:
            if args.use_uncertainty_pnp:
                # uncertainty PnP consumes keypoint means + inverse covariances
                mean, cov_inv = uncertain_eval_net(seg_pred, vertex_pred)
                mean = mean.cpu().numpy()
                cov_inv = cov_inv.cpu().numpy()
            else:
                corner_pred = eval_net(seg_pred,
                                       vertex_pred).cpu().detach().numpy()
            pose = pose.cpu().numpy()
            b = pose.shape[0]
            pose_preds = []
            for bi in range(b):
                intri_type = 'use_intrinsic' if use_camera_intrinsic else 'linemod'
                K = Ks[bi].cpu().numpy() if use_camera_intrinsic else None
                if args.use_uncertainty_pnp:
                    pose_preds.append(
                        evaluator.evaluate_uncertainty(mean[bi],
                                                       cov_inv[bi],
                                                       pose[bi],
                                                       args.linemod_cls,
                                                       intri_type,
                                                       vote_type,
                                                       intri_matrix=K))
                else:
                    pose_preds.append(
                        evaluator.evaluate(corner_pred[bi],
                                           pose[bi],
                                           args.linemod_cls,
                                           intri_type,
                                           vote_type,
                                           intri_matrix=K))
            if args.save_inter_result:
                mask_pr = torch.argmax(seg_pred, 1).cpu().detach().numpy()
                mask_gt = mask.cpu().detach().numpy()
                # assume batch size = 1
                imsave(
                    os.path.join(args.save_inter_dir,
                                 '{}_mask_pr.png'.format(idx)), mask_pr[0])
                imsave(
                    os.path.join(args.save_inter_dir,
                                 '{}_mask_gt.png'.format(idx)), mask_gt[0])
                imsave(
                    os.path.join(args.save_inter_dir,
                                 '{}_rgb.png'.format(idx)),
                    imagenet_to_uint8(image.cpu().detach().numpy()[0]))
                save_pickle([pose_preds[0], pose[0]],
                            os.path.join(args.save_inter_dir,
                                         '{}_pose.pkl'.format(idx)))
        vals = [loss_seg, loss_vertex, precision, recall]
        for rec, val in zip(recs, vals):
            rec.update(val)

    # log sample predictions from the last batch of the epoch
    with torch.no_grad():
        batch_size = image.shape[0]
        nrow = 5 if batch_size > 5 else batch_size
        recorder.rec_segmentation(F.softmax(seg_pred, dim=1),
                                  num_classes=2,
                                  nrow=nrow,
                                  step=epoch,
                                  name='{}/image/seg'.format(val_prefix))
        recorder.rec_vertex(vertex_pred,
                            vertex_weights,
                            nrow=4,
                            step=epoch,
                            name='{}/image/ver'.format(val_prefix))

    losses_batch = OrderedDict()
    for name, rec in zip(recs_names, recs):
        losses_batch['{}/'.format(val_prefix) + name] = rec.avg
    if (train_cfg['eval_epoch'] and epoch % train_cfg['eval_inter'] == 0
            and epoch >= train_cfg['eval_epoch_begin']) or args.test_model:
        proj_err, add, cm = evaluator.average_precision(False)
        losses_batch['{}/scalar/projection_error'.format(
            val_prefix)] = proj_err
        losses_batch['{}/scalar/add'.format(val_prefix)] = add
        losses_batch['{}/scalar/cm'.format(val_prefix)] = cm
    recorder.rec_loss_batch(losses_batch, epoch, epoch, val_prefix)
    for rec in recs:
        rec.reset()

    print('epoch {} {} cost {} s'.format(epoch, val_prefix,
                                         time.time() - test_begin))