def finish_calibration(self, calib_type='camera'):
    """
    Finds the final transform between parent_frame and hydra_base.

    Solves for the offset transform Tas from the paired observation
    lists, turns each observation pair into an estimate of the
    parent -> hydra_base transform, averages those estimates, and
    stores the result in self.transforms[calib_type].

    Returns True on success; False when no observations were collected
    or the two observation lists are not paired one-to-one.
    """
    # Guard: need at least one observation from each source.
    if not self.calib_transforms or not self.hydra_transforms:
        return False
    # The two lists must be in lockstep -- each index is one paired reading.
    if len(self.calib_transforms) != len(self.hydra_transforms):
        return False

    # Offset between the calibration sensor frame and the hydra frame.
    Tas = solve4(self.calib_transforms, self.hydra_transforms)

    # Every observation pair yields one estimate of parent -> hydra_base;
    # average them all for the final transform.
    avg_hydra_base_transforms = [calib_tfm.dot(Tas).dot(np.linalg.inv(hydra_tfm))
                                 for calib_tfm, hydra_tfm
                                 in zip(self.calib_transforms, self.hydra_transforms)]

    tfm = {}
    tfm['tfm'] = utils.avg_transform(avg_hydra_base_transforms)

    # BUG FIX: original compared with "is 'camera'" -- an identity check
    # that only happens to work through CPython string interning. Use
    # value equality instead.
    if calib_type == 'camera':
        pf = self.parent_frame[calib_type]
        # presumably pf is something like 'cameraN_<suffix>'; the prefix
        # before the first '_' names the camera -- TODO confirm with caller
        cname = pf.split('_')[0]
        tfm['parent'] = cname + '_link'
        # Re-express the transform relative to the camera link frame.
        tfm['tfm'] = tfm_link_rof.dot(tfm['tfm'])
    else:
        tfm['parent'] = self.parent_frame[calib_type]

    tfm['child'] = 'hydra_base'
    self.transforms[calib_type] = tfm
    return True
def finish_calibration_pycb(self, avg=False): cb_transforms = {i:[] for i in range(self.num_cameras)} I = np.eye(4) for i in self.image_list: yellowprint("Getting transform data for camera %i." % (i + 1)) ind = 1 for img in self.image_list[i]: blueprint("... Observation %i." % ind) ind += 1 tfm = cu.get_checkerboard_transform(img, chessboard_size=chessboard_size) if tfm is None: return False cb_transforms[i].append(tfm) rel_tfms = {i:[] for i in range(1, self.num_cameras)} for i in range(1, self.num_cameras): cam_transform = {} cam_transform['parent'] = 'camera1_link' cam_transform['child'] = 'camera%d_link' % (i + 1) for j in range(len(cb_transforms[i])): rtfm = cb_transforms[0][j].dot(nlg.inv(cb_transforms[i][j])) rel_tfms[i].append(rtfm) if avg: tfm = utils.avg_transform(rel_tfms[i]) # convert from in to cm tfm[0:3, 3] *= 1.0 # 0.0254 cam_transform['tfm'] = tfm_link_rof.dot(tfm).dot(np.linalg.inv(tfm_link_rof)) else: scores = [] for j, rtfm in enumerate(rel_tfms[i]): scores.append(0) rtfm = rel_tfms[i][j] for k, rtfm2 in enumerate(rel_tfms[i]): if j == k: continue scores[j] += nlg.norm(rtfm.dot(nlg.inv(rtfm2)) - I) print "Scores:", scores tfm = rel_tfms[i][np.argmin(scores)] # convert from in to m tfm[0:3, 3] *= 1.0 # 0.0254 cam_transform['tfm'] = tfm_link_rof.dot(tfm).dot(np.linalg.inv(tfm_link_rof)) self.camera_transforms[0, i] = cam_transform return True
def finish_calibration(self, use_icp):
    """
    Average out transforms and store final values.
    Return true/false based on whether transforms were found.

    For every camera pair (c1, c2) with collected point correspondences,
    computes the rigid transform between them and stores it under
    self.camera_transforms[c1, c2], then marks the camera set calibrated
    and persists the transforms.

    NOTE(review): the use_icp argument is currently unused -- the ICP
    refinement path below is entirely commented out.
    """
    # No correspondences collected at all -> nothing to calibrate.
    if not self.point_list:
        return False
    for c1, c2 in self.point_list:
        # Corresponding 3D points observed by each camera of the pair.
        points_c1 = self.point_list[c1, c2][c1]
        points_c2 = self.point_list[c1, c2][c2]
        # NOTE(review): find_rigid_tfm's failure behavior is not visible
        # here; if it can return None, the .dot() below would raise.
        tfm = find_rigid_tfm(points_c1, points_c2)
        cam_transform = {}
        cam_transform['parent'] = 'camera%d_link' % (c1 + 1)
        cam_transform['child'] = 'camera%d_link' % (c2 + 1)
        # Conjugation by tfm_link_rof re-expresses the transform between
        # the cameras' link frames rather than the frames the points were
        # measured in.
        cam_transform['tfm'] = tfm_link_rof.dot(tfm).dot(np.linalg.inv(tfm_link_rof))

        # --- Disabled ICP refinement, kept for reference ---
        # if use_icp:
        #     if self.icpService is None:
        #         self.icpService = rospy.ServiceProxy("icpTransform", ICPTransform)
        #
        #     greenprint("Refining calibration with ICP.")
        #     req = ICPTransformRequest()
        #
        #     # Interchange pc1 and pc2 or use inv(cam_transform) as guess.
        #     raw_input(colorize("Cover camera %i and hit enter!" % (c2 + 1), 'yellow', True))
        #     pc2 = self.cameras.get_pointcloud(c1)
        #     pc2_points = ru.pc2xyz(pc2)
        #     pc2_points = np.reshape(pc2_points, (640 * 480, 3), order='F')
        #     pc2_points = pc2_points[np.bitwise_not(np.isnan(pc2_points).any(axis=1)), :]
        #     req.pc2 = ru.xyz2pc(pc2_points, pc2.header.frame_id)
        #
        #     raw_input(colorize("Cover camera %i and hit enter!" % (c1 + 1), 'yellow', True))
        #     pc1 = self.cameras.get_pointcloud(c2)
        #     pc1_points = ru.pc2xyz(pc1)
        #     pc1_points = np.reshape(pc1_points, (640 * 480, 3), order='F')
        #     pc1_points = pc1_points[np.bitwise_not(np.isnan(pc1_points).any(axis=1)), :]
        #     pc1_points = (np.c_[pc1_points, np.ones((pc1_points.shape[0], 1))].dot(tfm.T))[:, 0:3]
        #     req.pc1 = ru.xyz2pc(pc1_points, pc1.header.frame_id)
        #
        #     req.guess = conversions.hmat_to_pose(np.eye(4))
        #
        #     try:
        #         res = self.icpService(req)
        #         print res
        #         res_tfm = conversions.pose_to_hmat(res.pose)
        #         cam_transform['tfm'] = tfm_link_rof.dot(res_tfm.dot(tfm)).dot(np.linalg.inv(tfm_link_rof))
        #     except:
        #         redprint("ICP failed. Using AR-only calibration.")

        self.camera_transforms[c1, c2] = cam_transform
    self.cameras.calibrated = True
    self.cameras.store_calibrated_transforms(self.camera_transforms)
    return True
def quick_calibrate(NUM_CAM, N_AVG): rospy.init_node('quick_calibrate') cameras = RosCameras(NUM_CAM) tfm_pub = tf.TransformBroadcaster() frames = {i:'/camera%i_link'%(i+1) for i in xrange(NUM_CAM)} calib_tfms = {(frames[0],frames[i]):None for i in xrange(1,NUM_CAM)} sleeper = rospy.Rate(30) try: while True: tfms_found = {i:{} for i in xrange(NUM_CAM)} for _ in xrange(N_AVG): for i in xrange(NUM_CAM): mtfms = cameras.get_ar_markers(camera=i) for m in mtfms: if m not in tfms_found[i]: tfms_found[i][m] = [] tfms_found[i][m].append(mtfms[m]) for i in tfms_found: for m in tfms_found[i]: tfms_found[i][m] = utils.avg_transform(tfms_found[i][m]) for i in xrange(1,NUM_CAM): rof_tfm = find_transform(tfms_found[0], tfms_found[i]) if rof_tfm is not None: calib_tfms[(frames[0], frames[i])] = tfm_link_rof.dot(rof_tfm).dot(nlg.inv(tfm_link_rof)) for parent, child in calib_tfms: if calib_tfms[parent, child] is not None: trans, rot = conversions.hmat_to_trans_rot(calib_tfms[parent, child]) tfm_pub.sendTransform(trans, rot, rospy.Time.now(), child, parent) sleeper.sleep() except KeyboardInterrupt: print "got ctrl-c"