    del rgb, depth, mask
    print('success: {}'.format(success))

    for i in range(len(dep_anchors)):
        if success[i] != -1:
            aTemplateInfo = dict()
            aTemplateInfo['cam_K'] = copy.deepcopy(dp['cam']['K'])
            aTemplateInfo['cam_R_w2c'] = copy.deepcopy(view['R'])
            aTemplateInfo['cam_t_w2c'] = copy.deepcopy(view['t'])
            aTemplateInfo['cam_t_w2c'][2] = dep_anchors[i]

            templateInfo = templateInfo_radius[dep_anchors[i]]
            templateInfo[success[i]] = aTemplateInfo

    for radius in dep_anchors:
        inout.save_info(tempInfo_saved_to.format(obj_id, radius),
                        templateInfo_radius[radius])

    detector.writeClasses(template_saved_to)
    # clear to save RAM
    detector.clear_classes()
else:
    for radius in dep_anchors:
        # Sample views
        # with camera tilt
        # tilt_factor = (80 / 180)
        tilt_factor = 1
        views, views_level = view_sampler.sample_views(min_n_views, radius,
                                                       azimuth_range, elev_range,
                                                       tilt_range=(-math.pi * tilt_factor,
                                                                   math.pi * tilt_factor),
    # Get 2D bounding box of the object model at the ground truth pose
    obj_bb = misc.calc_pose_2d_bbox(models[obj_id], par['cam']['im_size'],
                                    par['cam']['K'], R_m2c, t_m2c)

    # Visualisation
    if False:
        print(R_m2c)
        print(t_m2c)
        ren_rgb = renderer.render(models[obj_id], par['cam']['im_size'],
                                  par['cam']['K'], R_m2c, t_m2c, mode='rgb')
        vis_rgb = 0.4 * rgb.astype(np.float32) + 0.6 * ren_rgb.astype(np.float32)
        vis_rgb = vis_rgb.astype(np.uint8)
        vis_rgb = misc.draw_rect(vis_rgb, obj_bb)
        plt.imshow(vis_rgb)
        plt.show()

    scene_gt[im_id_out].append({
        'obj_id': obj_id,
        'cam_R_m2c': R_m2c,
        'cam_t_m2c': t_m2c,
        'obj_bb': obj_bb
    })

    im_id_out += 1

inout.save_info(scene_info_mpath.format(scene_id), scene_info)
inout.save_gt(scene_gt_mpath.format(scene_id), scene_gt)
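# --- Hedged sketch, not part of the original script ---------------------
# The scene_gt entries appended above pair an object id with its
# model-to-camera pose and 2D bounding box. Assuming a SIXD-style YAML
# layout for the saved ground truth, one serialized record would look
# roughly like the following (all values below are made up for
# illustration only):
example_gt_record = {
    'obj_id': 1,
    'cam_R_m2c': [1.0, 0.0, 0.0,
                  0.0, 1.0, 0.0,
                  0.0, 0.0, 1.0],      # 3x3 rotation, row-major
    'cam_t_m2c': [0.0, 0.0, 400.0],    # translation (typically mm)
    'obj_bb': [120, 80, 64, 48],       # [x, y, width, height]
}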
aTemplateInfo = dict()
aTemplateInfo['cam_K'] = K
aTemplateInfo['cam_R_w2c'] = R
aTemplateInfo['cam_t_w2c'] = t
aTemplateInfo['width'] = int(xmax - xmin)
aTemplateInfo['height'] = int(ymax - ymin)

# Binary foreground mask: every pixel with nonzero rendered depth
mask = (depth > 0).astype(np.uint8) * 255

# Optional visual check of the rendered template
visual = False
if visual:
    cv2.namedWindow('rgb')
    cv2.imshow('rgb', rgb)
    cv2.namedWindow('depth')
    cv2.imshow('depth', depth)
    cv2.namedWindow('mask')
    cv2.imshow('mask', mask)
    cv2.waitKey(1000)

success = detector.addTemplate([rgb, depth], '{}_templ'.format(obj_id), mask)
print('success {}'.format(success))
del rgb, depth, mask

# addTemplate returns the template index, or -1 on failure
if success != -1:
    templateInfo[success] = aTemplateInfo

inout.save_info(tempInfo_saved_to.format(obj_id), templateInfo)
detector.writeClasses(template_saved_to)

elapsed_time = time.time() - start_time
print('train time: {}\n'.format(elapsed_time))
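# --- Hedged sketch, not part of the original script ---------------------
# Illustrates the two derived quantities used above: the mask is simply
# "depth > 0" scaled to 0/255 for OpenCV, and the width/height fields
# suggest a box spanning the nonzero depth pixels (xmin/xmax/ymin/ymax are
# assumed to come from such an extent; this is an illustration only).
import numpy as np

toy_depth = np.array([[0,   532, 540],
                      [610, 605, 0],
                      [0,   598, 0]], dtype=np.uint16)   # made-up depth (mm)
toy_mask = (toy_depth > 0).astype(np.uint8) * 255        # foreground = 255

ys, xs = np.nonzero(toy_depth)                           # rows/cols of object pixels
ymin, ymax = int(ys.min()), int(ys.max())
xmin, xmax = int(xs.min()), int(xs.max())
toy_width, toy_height = int(xmax - xmin), int(ymax - ymin)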