def save_scene(scene_id):
    """
    Saves the specified scene.
    """
    global scene_info
    global scene_gt

    # Collect poses expressed in the world coordinate system
    ref_obj_poses = []
    for obj in bpy.data.objects:
        if is_obj(obj.name):
            obj_id = obj.name.split('obj_')[1].split('.')[0]  # Get object ID
            e, t = get_pose(obj.name)
            R = transform.euler_matrix(e[0], e[1], e[2], axes='sxyz')[:3, :3]
            ref_obj_poses.append({
                'obj_id': int(obj_id),
                'cam_R_m2w': R,
                'cam_t_m2w': np.array(t).reshape((3, 1))
            })

    # Load models of objects present in the scene
    obj_ids = set([p['obj_id'] for p in ref_obj_poses])
    models = {}
    for obj_id in obj_ids:
        models[obj_id] = inout.load_ply(par['model_mpath'].format(obj_id))

    # Transform the poses to the camera coordinate systems using the known
    # camera-to-world transformations
    for im_id in scene_gt.keys():
        scene_gt[im_id] = []
        K = scene_info[im_id]['cam_K']
        R_w2c = scene_info[im_id]['cam_R_w2c']
        t_w2c = scene_info[im_id]['cam_t_w2c']
        for pose in ref_obj_poses:
            R_m2c_new = R_w2c.dot(pose['cam_R_m2w'])
            t_m2c_new = R_w2c.dot(pose['cam_t_m2w']) + t_w2c

            # Get the 2D bounding box of the projection of the object model at
            # the refined ground truth pose (use the object ID of the current
            # pose, not the leftover loop variable from the model loading above)
            pts_im = misc.project_pts(models[pose['obj_id']]['pts'], K,
                                      R_m2c_new, t_m2c_new)
            pts_im = np.round(pts_im).astype(int)
            ys, xs = pts_im[:, 1], pts_im[:, 0]
            obj_bb = misc.calc_2d_bbox(xs, ys, par['test_im_size'])

            scene_gt[im_id].append({
                'obj_id': pose['obj_id'],
                'obj_bb': obj_bb,
                'cam_R_m2c': R_m2c_new,
                'cam_t_m2c': t_m2c_new
            })

    # Save the updated ground truth poses
    scene_gt_path = par['scene_gt_mpath'].format(scene_id)
    print('Saving GT poses: ' + scene_gt_path)
    inout.save_gt(scene_gt_path, scene_gt)
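# A hedged side note (not part of the original script; it assumes numpy is
# imported as np, as above): save_scene() composes the refined model-to-world
# pose with the known world-to-camera transformation as
#   X_c = R_w2c (R_m2w X_m + t_m2w) + t_w2c
#       = (R_w2c R_m2w) X_m + (R_w2c t_m2w + t_w2c),
# hence cam_R_m2c = R_w2c.dot(R_m2w) and cam_t_m2c = R_w2c.dot(t_m2w) + t_w2c.
# Quick numeric check of that identity with illustrative values:
def _check_m2c_composition():
    rng = np.random.RandomState(0)
    R_m2w, _ = np.linalg.qr(rng.randn(3, 3))  # orthonormal stand-in for a rotation
    R_w2c, _ = np.linalg.qr(rng.randn(3, 3))
    t_m2w, t_w2c, X_m = rng.randn(3, 1), rng.randn(3, 1), rng.randn(3, 1)
    lhs = R_w2c.dot(R_m2w.dot(X_m) + t_m2w) + t_w2c
    rhs = R_w2c.dot(R_m2w).dot(X_m) + (R_w2c.dot(t_m2w) + t_w2c)
    assert np.allclose(lhs, rhs)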
depth_buf = np.zeros((shape[0], shape[1]),
                     np.float32).view(renderer.gloo.DepthTexture)
fbo = renderer.gloo.FrameBuffer(color=color_buf, depth=depth_buf)
fbo.activate()

for obj_id in obj_ids_curr:
    azimuth_range = dp['test_obj_azimuth_range']
    elev_range = dp['test_obj_elev_range']
    min_n_views = 200
    clip_near = 10  # [mm]
    clip_far = 10000  # [mm]
    ambient_weight = 0.8  # Weight of ambient light [0, 1]
    shading = 'phong'  # 'flat', 'phong'

    # Load model
    model_path = dp['model_mpath'].format(obj_id)
    model = inout.load_ply(model_path)

    # Load model texture
    if dp['model_texture_mpath']:
        model_texture_path = dp['model_texture_mpath'].format(obj_id)
        model_texture = inout.load_im(model_texture_path)
    else:
        model_texture = None

    ######################################################
    # Prepare the renderer once rather than rebuilding it for every view
    texture = model_texture
    surf_color = None
    mode = 'rgb+depth'
    K = dp['cam']['K']
    pose['t'] = yz_flip.dot(pose['t'])
    return pose

# Get list of image IDs
rgb_fpaths = sorted(
    glob.glob(os.path.dirname(pose_mpath.format('Ape', 0)) + '/*.txt'))
im_ids = sorted(
    [int(e.split('info_')[1].split('.txt')[0]) for e in rgb_fpaths])

scene_gt = {}
for obj_name in sorted(obj_names_id_map.keys()):

    # Load the object model
    obj_id = obj_names_id_map[obj_name]
    model = inout.load_ply(model_mpath.format(obj_id))

    # Transformation which was applied to the object models (its inverse will
    # be applied to the GT poses):
    # 1) Translate the bounding box center to the origin - Brachmann et al.
    #    already translated the bounding box to the center
    # 2) Rotate around the Y axis by pi + flip for some objects
    R_model = transform.rotation_matrix(math.pi, [0, 1, 0])[:3, :3]

    # Extra rotation around the Z axis by pi for some models
    if hinter_flip.obj_flip_z[obj_id]:
        R_z = transform.rotation_matrix(math.pi, [0, 0, 1])[:3, :3]
        R_model = R_z.dot(R_model)

    # The ground truth poses of Brachmann et al. are related to a different
    # model coordinate system - to get the original Hinterstoisser's orientation
else:
    model_type = ''
    cam_type = ''

# Load dataset parameters
dp = get_dataset_params(dataset, model_type=model_type, test_type=test_type,
                        cam_type=cam_type)

# Load object models
if error_type in ['vsd', 'add', 'adi', 'cou']:
    print('Loading object models...')
    models = {}
    for obj_id in range(1, dp['obj_count'] + 1):
        models[obj_id] = inout.load_ply(dp['model_mpath'].format(obj_id))

# Directories with results for individual scenes
scene_dirs = sorted([
    d for d in glob.glob(os.path.join(result_path, '*'))
    if os.path.isdir(d)
])

for scene_dir in scene_dirs:
    scene_id = int(os.path.basename(scene_dir))

    # Load info and GT poses for the current scene
    scene_info = inout.load_info(dp['scene_info_mpath'].format(scene_id))
    scene_gt = inout.load_gt(dp['scene_gt_mpath'].format(scene_id))

    res_paths = sorted(glob.glob(os.path.join(scene_dir, '*.yml')))
# Prepare output folder
# misc.ensure_dir(os.path.dirname(out_obj_info_path))

# Image size and K for SSAA (super-sampling anti-aliasing)
im_size_rgb = [int(round(x * float(ssaa_fact))) for x in par['cam']['im_size']]
K_rgb = par['cam']['K'] * ssaa_fact

for obj_id in obj_ids:

    # Prepare folders
    misc.ensure_dir(os.path.dirname(out_rgb_mpath.format(obj_id, 0)))
    misc.ensure_dir(os.path.dirname(out_depth_mpath.format(obj_id, 0)))

    # Load model
    model_path = par['model_mpath'].format(obj_id)
    model = inout.load_ply(model_path)

    # Load model texture
    if par['model_texture_mpath']:
        model_texture_path = par['model_texture_mpath'].format(obj_id)
        model_texture = inout.load_im(model_texture_path)
    else:
        model_texture = None

    obj_info = {}
    obj_gt = {}
    im_id = 0
    for radius in radii:
        # Sample views
        views, views_level = view_sampler.sample_views(min_n_views, radius,
                                                       azimuth_range,
                                                       elev_range)
lock = False
bridge = CvBridge()

readTemplFrom = './yaml/%s_templ.yaml'
readInfoFrom = './yaml/{}_info.yaml'
readModelFrom = './models/{0}/{0}.fly'
K_cam = None

detector = linemodLevelup_pybind.Detector()
poseRefine = linemodLevelup_pybind.poseRefine()
detector.readClasses(objIds, readTemplFrom)

infos = {}
models = {}
for id in objIds:
    model = inout.load_ply(readModelFrom.format(id))
    models[id] = model
    templateInfo = inout.load_info(readInfoFrom.format(id))
    infos[id] = templateInfo


def nms_norms(ts, scores, thresh):
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:  # magic: order[[]] = []
        i = order[0]
        keep.append(i)
        norms = np.linalg.norm(ts[i] - ts[order[1:]], axis=1)
        inds = np.where(norms > thresh)[0]
        order = order[inds + 1]
    return keep
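# Minimal usage sketch for nms_norms (illustrative values, not from the original
# file): detections whose estimated translations fall within `thresh` of a
# higher-scoring detection are suppressed, keeping only the best one per cluster.
ts_demo = np.array([[0., 0., 600.], [5., 0., 602.], [300., 0., 700.]])
scores_demo = np.array([0.9, 0.8, 0.7])
print(nms_norms(ts_demo, scores_demo, thresh=50))  # -> [0, 2]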
for obj_id in obj_ids_curr:
    templateInfo = dict()

    radii = [600, 700, 800, 900, 1000]
    azimuth_range = (0, 2 * math.pi)
    elev_range = (0, 0.5 * math.pi)
    min_n_views = 200
    clip_near = 10  # [mm]
    clip_far = 10000  # [mm]
    ambient_weight = 0.8  # Weight of ambient light [0, 1]
    shading = 'phong'  # 'flat', 'phong'

    # Load model
    model_path = dp['model_mpath'].format(obj_id)
    model = inout.load_ply(model_path)

    # Load model texture
    if dp['model_texture_mpath']:
        model_texture_path = dp['model_texture_mpath'].format(obj_id)
        model_texture = inout.load_im(model_texture_path)
    else:
        model_texture = None

    im_id = 0
    for radius in radii:
        # Sample views
        views, views_level = view_sampler.sample_views(
            min_n_views, radius, azimuth_range, elev_range)
        print('Sampled views: ' + str(len(views)))
    else:
        model_type = 'cad'
else:
    model_type = ''
    cam_type = ''

# Load dataset parameters
dp = get_dataset_params(dataset, model_type=model_type, test_type=test_type,
                        cam_type=cam_type)

# Load object models
if error_type in ['vsd', 'add', 'adi', 'cou']:
    print('Loading object models...')
    models = {}
    for obj_id in range(1, dp['obj_count'] + 1):
        models[obj_id] = inout.load_ply(dp['model_mpath'].format(obj_id))

# Directories with results for individual scenes
scene_dirs = sorted([d for d in glob.glob(os.path.join(result_path, '*'))
                     if os.path.isdir(d)])

for scene_dir in scene_dirs:
    scene_id = int(os.path.basename(scene_dir))

    # Load info and GT poses for the current scene
    scene_info = inout.load_info(dp['scene_info_mpath'].format(scene_id))
    scene_gt = inout.load_gt(dp['scene_gt_mpath'].format(scene_id))

    res_paths = sorted(glob.glob(os.path.join(scene_dir, '*.yml')))
    errs = []
    im_id = -1
LCHF_infos = []
LCHF_linemod_feats = []
for obj_id in obj_ids_curr:
    radii = [train_from_radius]
    azimuth_range = (0, 2 * math.pi)
    elev_range = (0, 0.5 * math.pi)
    min_n_views = 100
    clip_near = 10  # [mm]
    clip_far = 10000  # [mm]
    ambient_weight = 0.8  # Weight of ambient light [0, 1]
    shading = 'phong'  # 'flat', 'phong'

    # Load model
    model_path = dp['model_mpath'].format(obj_id)
    model = inout.load_ply(model_path)

    # Load model texture
    if dp['model_texture_mpath']:
        model_texture_path = dp['model_texture_mpath'].format(obj_id)
        model_texture = inout.load_im(model_texture_path)
    else:
        model_texture = None

    for radius in radii:
        # Sample views
        views, views_level = view_sampler.sample_views(
            min_n_views, radius, azimuth_range, elev_range,
            tilt_range=(-math.pi / 2, math.pi / 2), tilt_step=0.2 * math.pi)
        print('Sampled views: ' + str(len(views)))
# Subset of images to be considered
if use_image_subset:
    im_ids_sets = inout.load_yaml(dp['test_set_fpath'])
else:
    im_ids_sets = None

scene_ids_curr = range(1, dp['scene_count'] + 1)
if scene_ids:
    scene_ids_curr = set(scene_ids_curr).intersection(scene_ids)

for scene_id in scene_ids_curr:

    # Load scene info and GT poses
    scene_info = inout.load_info(dp['scene_info_mpath'].format(scene_id))
    scene_gt = inout.load_gt(dp['scene_gt_mpath'].format(scene_id))

    model_path = dp['model_mpath'].format(scene_id)
    model = inout.load_ply(model_path)

    # Considered subset of images for the current scene
    if im_ids_sets is not None:
        im_ids_curr = im_ids_sets[scene_id]
    else:
        im_ids_curr = sorted(scene_info.keys())

    if im_ids:
        im_ids_curr = set(im_ids_curr).intersection(im_ids)

    for im_id in im_ids_curr:
        print('scene: {}, im: {}'.format(scene_id, im_id))

        K = scene_info[im_id]['cam_K']
        render_K = K
import json

# DATA_ROOT = r'D:\SL\PoseCNN\Loc_data\DUCK\POSE_iPBnet'
DATA_ROOT = '/media/shawnle/Data0/YCB_Video_Dataset/SLM_datasets/Exhibition/DUCK'
p0 = os.path.abspath(DATA_ROOT)

# GEN_ROOT = r'D:\SL\Summer_2019\original_sixd_toolkit\sixd_toolkit\data\gen_data'
GEN_ROOT = DATA_ROOT

# model = inout.load_ply(r'D:\SL\Summer_2019\sixd_toolkit\data\sheep\textured.ply')
# model = inout.load_ply(r'D:\SL\Summer_2019\sixd_toolkit\data\ply\rotated.ply')
# model = inout.load_ply(r'D:\SL\PoseCNN\Loc_data\DUCK\015_duck_toy\textured_m_text.ply')
# model = inout.load_ply('/media/shawnle/Data0/YCB_Video_Dataset/YCB_Video_Dataset/data_syn_LOV/models/015_duck_toy/textured_dense.ply')
model = inout.load_ply('/home/shawnle/Downloads/textured.ply')
print('model keys', model.keys())

# meta_file = os.path.join(p0, '{:06d}'.format(0) + '-meta.json')
# print('opening ', meta_file)
# with open(meta_file, 'r') as f:
#     meta_json = json.load(f)
# print('keys ', meta_json.keys())
# print('poses ')
# pose = np.array(meta_json['poses']).reshape(4, 4)
# print(pose)
# print('intrinsic_matrix ')
def augmentAcPData(params):
    '''
    params.DATA_ROOT \n
    params.PLY_MODEL \n
    params.pose_tuning = [tx, ty, tz, rz] -> transl: meter, rot: deg \n
    params.frame_num
    '''
    # DATA_ROOT = r'D:\SL\PoseCNN\Loc_data\DUCK\POSE_iPBnet'
    # DATA_ROOT = '/media/shawnle/Data0/YCB_Video_Dataset/SLM_datasets/Exhibition/DUCK'
    DATA_ROOT = params.DATA_ROOT
    p0 = os.path.abspath(DATA_ROOT)

    # GEN_ROOT = r'D:\SL\Summer_2019\original_sixd_toolkit\sixd_toolkit\data\gen_data'
    GEN_ROOT = DATA_ROOT

    # model = inout.load_ply(r'D:\SL\Summer_2019\sixd_toolkit\data\sheep\textured.ply')
    # model = inout.load_ply(r'D:\SL\Summer_2019\sixd_toolkit\data\ply\rotated.ply')
    # model = inout.load_ply(r'D:\SL\PoseCNN\Loc_data\DUCK\015_duck_toy\textured_m_text.ply')
    # model = inout.load_ply('/media/shawnle/Data0/YCB_Video_Dataset/YCB_Video_Dataset/data_syn_LOV/models/015_duck_toy/textured_dense.ply')
    # model = inout.load_ply('/home/shawnle/Downloads/textured.ply')
    model = inout.load_ply(params.PLY_MODEL)
    print('model keys', model.keys())

    # Sanity check: the model extents should be below 1, i.e. the model is in meters
    max_pt = np.amax(model['pts'], axis=0)
    min_pt = np.amin(model['pts'], axis=0)
    extents = np.abs(max_pt) + np.abs(min_pt)
    max_all_dim = np.amax(extents)
    assert max_all_dim < 1., 'Unit is millimeter? Meter should be used instead.'
    # exit()  # debug leftover; commented out so the frame loop below is reached

    # meta_file = os.path.join(p0, '{:06d}'.format(0) + '-meta.json')
    # print('opening ', meta_file)
    # with open(meta_file, 'r') as f:
    #     meta_json = json.load(f)
    # print('keys ', meta_json.keys())
    # print('poses ')
    # pose = np.array(meta_json['poses']).reshape(4, 4)
    # print(pose)
    # print('intrinsic_matrix ')
    # print(np.array(meta_json['intrinsic_matrix']).reshape(3, 3))

    # Pose tuning
    tx = params.pose_tuning[0]  # e.g. -.001 [m]
    ty = params.pose_tuning[1]  # e.g. -.005
    tz = params.pose_tuning[2]  # e.g. -.001
    rz = params.pose_tuning[3] / 180. * math.pi  # e.g. 2./180.*math.pi [rad]

    xaxis, yaxis, zaxis = [1, 0, 0], [0, 1, 0], [0, 0, 1]
    Tt = tf.translation_matrix([tx, ty, tz])
    Rt = tf.rotation_matrix(rz, zaxis)
    TT = np.eye(4)
    TT[:3, :3] = Rt[:3, :3]
    TT[:3, 3] = Tt[:3, 3]
    # print('Tt = ')
    # print(Tt)
    # print('Rt = ')
    # print(Rt)
    print('TT = ')
    print(TT)
    # TT1 = np.dot(Tt, Rt)
    # print('TT1 = ')
    # print(TT1)

    for i in range(params.frame_num):
        file_name = os.path.join(p0, '{:06d}'.format(i) + '-color.png')
        print(file_name)
        rgb = cv2.imread(file_name, cv2.IMREAD_UNCHANGED)
        im_size = [rgb.shape[1], rgb.shape[0]]
        # cv2.imshow("rgb", rgb)
        # cv2.waitKey(1)

        # meta_file = os.path.join(p0, '{:06d}'.format(i) + '-meta.mat')
        # meta = scipy.io.loadmat(meta_file)
        meta_file = os.path.join(p0, '{:06d}'.format(i) + '-meta.json')
        print('opening ', meta_file)
        with open(meta_file, 'r') as f:
            meta = json.load(f)

        K = np.array(meta['intrinsic_matrix']).reshape(3, 3)
        # print('K', K)
        poses = np.array(meta['poses']).reshape(4, 4)
        R = poses[:3, :3]
        # print('R', R)
        t = poses[:3, 3]
        t /= 1000.  # to meters
        # print('t', t)

        # Update the pose with the tuning transform
        Rt44 = np.eye(4)
        Rt44[:3, :3] = R
        Rt44[:3, 3] = t
        Rt44 = np.dot(Rt44, TT)
        R = Rt44[:3, :3]
        t = Rt44[:3, 3]

        mdl_proj, mdl_proj_depth = renderer.render(
            model, im_size, K, R, t, mode='rgb+depth',
            clip_near=.3, clip_far=6., shading='flat')
        # print("dtype", mdl_proj.dtype)
        # print("max min", np.amax(mdl_proj), np.amin(mdl_proj))
        # cv2.imshow('model', mdl_proj)
        # cv2.waitKey(1)

        # The depth image is written as uint16.
        # Convert the rendered depth buffer to metric depth (see PoseCNN train_net.py)
        factor_depth = 10000
        zfar = 6.0
        znear = 0.25
        im_depth_raw = factor_depth * 2 * zfar * znear / (
            zfar + znear - (zfar - znear) * (2 * mdl_proj_depth - 1))
        I = np.where(mdl_proj_depth == 1)
        im_depth_raw[I[0], I[1]] = 0

        depth_file = os.path.join(GEN_ROOT, '{:06d}-depth.png'.format(i))
        cv2.imwrite(depth_file, im_depth_raw.astype(np.uint16))
        print('writing depth ' + depth_file)

        label_file = os.path.join(GEN_ROOT, '{:06d}-label.png'.format(i))
        # Build the label image, i.e. set the pixels covered by the rendered
        # model to the class ID value
        I = np.where(mdl_proj_depth > 0)
        # print('I shape', I.shape)
        label = np.zeros((rgb.shape[0], rgb.shape[1]))
        if len(I[0]) > 0:
            print('len I0', len(I[0]))
            print('label is exported')
        label[I[0], I[1]] = 1
        cv2.imwrite(label_file, label.astype(np.uint8))
        print('writing label ' + label_file)

        blend_name = os.path.join(GEN_ROOT, "{:06d}-blend.png".format(i))
        gray = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
        mdl_proj_g = cv2.cvtColor(mdl_proj, cv2.COLOR_BGR2GRAY)
        alf = .5
        bet = 1 - alf
        bld = cv2.addWeighted(mdl_proj_g, alf, gray, bet, 0.)
        cv2.imwrite(blend_name, bld)
        cv2.imshow('blend', bld)
        cv2.waitKey(1)
        print('writing blend ' + blend_name)

        # Revise the pose JSON -> the pose is now expressed in meters
        # Save the meta data
        meta_file_rev = os.path.join(p0, '{:06d}'.format(i) + '-meta_rev.json')
        meta['poses'] = Rt44.flatten().tolist()
        with open(meta_file_rev, 'w') as fp:
            json.dump(meta, fp)
        print('writing meta ', meta_file_rev)
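# A short aside on the depth conversion above (a sketch, not from the original
# file; it assumes mdl_proj_depth holds normalized depth-buffer values in [0, 1]):
# the expression inverts the standard perspective depth mapping,
#   z = 2 * zfar * znear / (zfar + znear - (zfar - znear) * (2 * d - 1)),
# and factor_depth = 10000 stores the result in 0.1 mm units so it fits in uint16.
def _linearize_depth(d, znear=0.25, zfar=6.0):
    return 2 * zfar * znear / (zfar + znear - (zfar - znear) * (2 * d - 1))

assert abs(_linearize_depth(0.0) - 0.25) < 1e-9  # d = 0 maps to the near plane [m]
assert abs(_linearize_depth(1.0) - 6.0) < 1e-9   # d = 1 maps to the far plane [m]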
import os
import sys
import glob

import numpy as np
import matplotlib.pyplot as plt
import cv2
import scipy.io

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from pysixd import inout
from pysixd import misc
from pysixd import renderer

DATA_ROOT = r'D:\SL\PoseCNN\Loc_data'
p0 = os.path.abspath(DATA_ROOT)

model = inout.load_ply(r'D:\SL\Summer_2019\sixd_toolkit\data\ply\sheep_meshlab.ply')
print('mdl keys', model.keys())
# print('model points', model['pts'])
print('model normals', model['normals'])
print('model colors', model['colors'])
print('model texture_uv', model['texture_uv'])
print('model faces', model['faces'])

for i in range(1):
    file_name = os.path.join(p0, '{:06d}'.format(i) + '-color.png')
    print(file_name)
    rgb = cv2.imread(file_name, cv2.IMREAD_UNCHANGED)
    im_size = [rgb.shape[1], rgb.shape[0]]
    cv2.imshow("rgb", rgb)
    cv2.waitKey(0)
import scipy.io

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from pysixd import inout
from pysixd import misc
from pysixd import renderer
from pysixd import transform as tf

import cfg

rgb = cv2.imread(cfg.SAMPLE_RGB, cv2.IMREAD_UNCHANGED)
num_inst = cfg.NUM_INST

model = inout.load_ply(cfg.MESH_ROOT + '/textured.ply')

K = np.array(cfg.INTRINSICS).reshape(3, 3)
im_size = cfg.IMAGE_SHAPE
im_size = (int(im_size[0]), int(im_size[1]))

gt_poses = cfg.GT_POSES
Rs = []
ts = []
for i in range(len(gt_poses)):
    RT = np.array(gt_poses[i]).reshape(4, 4)
    R = RT[:3, :3]
    t = RT[:3, 3] * .001  # to meters
    Rs.append(R)
    ts.append(t)
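# A minimal follow-on sketch, not part of the original script: it assumes
# renderer.render accepts the same arguments used elsewhere in this repo and
# that the clip planes below (in meters) are reasonable placeholders. It renders
# each ground-truth instance once for a quick visual check of the loaded poses.
for i, (R, t) in enumerate(zip(Rs, ts)):
    ren_rgb, ren_depth = renderer.render(model, im_size, K, R, t,
                                         mode='rgb+depth',
                                         clip_near=.3, clip_far=6.,
                                         shading='flat')
    cv2.imwrite('gt_render_{:02d}.png'.format(i), ren_rgb)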