def landmark_test(lm1, lm2, xyz, angs):
    cam = pu.CameraHelper()
    _x, _y, _z = xyz
    _azi, _ele = angs

    # for the two landmarks:
    # - translate landmark by camera offset
    # - rotate by azimuth and elevation
    # - project into image
    cam_xyz = np.float32([_x, _y, _z])

    # determine pixel location of fixed LM
    xyz1 = lm1.xyz - cam_xyz
    xyz1_rot = pu.calc_xyz_after_rotation_deg(xyz1, _ele, _azi, 0)
    uv1 = cam.project_xyz_to_uv(xyz1_rot)

    # determine pixel location of left/right LM
    xyz2 = lm2.xyz - cam_xyz
    xyz2_rot = pu.calc_xyz_after_rotation_deg(xyz2, _ele, _azi, 0)
    uv2 = cam.project_xyz_to_uv(xyz2_rot)

    if not (cam.is_visible(uv1) and cam.is_visible(uv2)):
        print
        print "Image Landmark #1:", uv1
        print "Image Landmark #2:", uv2
        print "At least one landmark is NOT visible!"
        return False, 0., 0., 0.

    # all is well so proceed with test...
    # landmarks have been acquired
    # camera elevation and world Y also need updating
    cam.elev = _ele * pu.DEG2RAD
    cam.world_y = _y

    lm1.set_current_uv(uv1)
    lm2.set_current_uv(uv2)
    world_x, world_z, world_azim = cam.triangulate_landmarks(lm1, lm2)

    # this integer coordinate stuff is disabled for now...
    if False:
        print "Now try with integer pixel coords and known Y coords..."
        lm1.set_current_uv((int(uv1[0] + 0.5), int(uv1[1] + 0.5)))
        lm2.set_current_uv((int(uv2[0] + 0.5), int(uv2[1] + 0.5)))
        print lm1.uv
        print lm2.uv
        world_x, world_z, world_azim = cam.triangulate_landmarks(lm1, lm2)
        print "Robot is at", world_x, world_z, world_azim * pu.RAD2DEG
        print

    return True, world_x, world_z, world_azim * pu.RAD2DEG
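

# A minimal usage sketch for landmark_test(), not called anywhere in this
# module.  It assumes pu provides a Landmark-style object exposing a .xyz
# attribute and a set_current_uv() method, as used above; the constructor
# name and argument shown here (pu.Landmark taking a world xyz point) are
# assumptions for illustration, not a confirmed API.
def _landmark_test_example():
    # two hypothetical landmarks 2 units apart on a wall at z = 10
    lm_fixed = pu.Landmark(np.float32([-1., 2., 10.]))
    lm_other = pu.Landmark(np.float32([1., 2., 10.]))
    # camera at world (x, y, z) = (0, 1, 0), azimuth 30 deg, elevation 0 deg
    ok, world_x, world_z, world_azim_deg = landmark_test(
        lm_fixed, lm_other, (0., 1., 0.), (30., 0.))
    if ok:
        print "Triangulated pose:", world_x, world_z, world_azim_deg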


def pnp_test(key, xyz, angs):
    cam = pu.CameraHelper()
    _x, _y, _z = xyz
    _azi, _ele = angs
    cam_xyz = np.float32([_x, _y, _z])

    # world landmark positions
    xyz1_o = mark1[key].xyz
    xyz2_o = mark2[key].xyz
    xyz3_o = mark3[key].xyz
    xyzb_o = markb[key].xyz

    # rotate and offset landmark positions as camera will see them
    xyz1_rot = pu.calc_xyz_after_rotation_deg(xyz1_o - cam_xyz, _ele, _azi, 0)
    xyz2_rot = pu.calc_xyz_after_rotation_deg(xyz2_o - cam_xyz, _ele, _azi, 0)
    xyz3_rot = pu.calc_xyz_after_rotation_deg(xyz3_o - cam_xyz, _ele, _azi, 0)
    xyzb_rot = pu.calc_xyz_after_rotation_deg(xyzb_o - cam_xyz, _ele, _azi, 0)

    # project them to camera plane
    uv1 = cam.project_xyz_to_uv(xyz1_rot)
    uv2 = cam.project_xyz_to_uv(xyz2_rot)
    uv3 = cam.project_xyz_to_uv(xyz3_rot)
    uvb = cam.project_xyz_to_uv(xyzb_rot)

    if (cam.is_visible(uv1) and cam.is_visible(uv2) and
            cam.is_visible(uv3) and cam.is_visible(uvb)):
        objectPoints = np.array([xyz1_o, xyz2_o, xyz3_o, xyzb_o])
        imagePoints = np.array([uv1, uv2, uv3, uvb])
        rvecR, tvecR, inliers = cv2.solvePnPRansac(
            objectPoints, imagePoints, cam.camA, cam.distCoeff)
        if inliers is not None:
            newImagePoints, _ = cv2.projectPoints(
                objectPoints, rvecR, tvecR, cam.camA, cam.distCoeff)
            # print newImagePoints
            # camera world position is -R^T * t from the PnP solution
            rotM, _ = cv2.Rodrigues(rvecR)
            q = -np.matrix(rotM).T * np.matrix(tvecR)
            print q
        else:
            print "*** PnP failed ***"
    else:
        print "a PnP coord is not visible"
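

# A minimal usage sketch for pnp_test(), not called anywhere in this module.
# It assumes the module-level dictionaries mark1, mark2, mark3 and markb
# (used above) share a common key naming one set of landmarks; the key "1"
# and the camera pose below are illustrative assumptions only.
def _pnp_test_example():
    # camera at world (x, y, z) = (0., 1., 2.), azimuth 30 deg, elevation 0 deg
    # prints the camera world position recovered by solvePnPRansac
    pnp_test("1", (0., 1., 2.), (30., 0.))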


def perspective_test(_y, _z, _ele, _azi):
    print "--------------------------------------"
    print "Perspective Transform tests"
    print

    cam = pu.CameraHelper()

    # some landmarks in a 3x3 grid pattern
    p0 = np.float32([-1., _y - 1.0, _z])
    p1 = np.float32([0., _y - 1.0, _z])
    p2 = np.float32([1., _y - 1.0, _z])
    p3 = np.float32([-1., _y + 1.0, _z])
    p4 = np.float32([0., _y + 1.0, _z])
    p5 = np.float32([1., _y + 1.0, _z])
    p6 = np.float32([-1., _y, _z])
    p7 = np.float32([0., _y, _z])
    p8 = np.float32([1., _y, _z])

    # 3x3 grid array
    ppp = np.array([p0, p1, p2, p3, p4, p5, p6, p7, p8])
    print "Here are some landmarks in world"
    print ppp

    puv_acc = []
    quv_acc = []
    for vp in ppp:
        # original view of landmarks
        u, v = cam.project_xyz_to_uv(vp)
        puv_acc.append(np.float32([u, v]))
        # rotated view of landmarks
        xyz_r = pu.calc_xyz_after_rotation_deg(vp, _ele, _azi, 0)
        u, v = cam.project_xyz_to_uv(xyz_r)
        quv_acc.append(np.float32([u, v]))
    puv = np.array(puv_acc)
    quv = np.array(quv_acc)

    # 4-pt "diamond" array
    quv4 = np.array([quv[1], quv[4], quv[6], quv[8]])
    puv4 = np.array([puv[1], puv[4], puv[6], puv[8]])

    print
    print "Landmark img coords before rotate:"
    print puv
    print "Landmark img coords after rotate:"
    print quv
    print quv4
    print

    # h, _ = cv2.findHomography(puv, quv)
    # hh = cv2.getPerspectiveTransform(puv4, quv4)
    # print h
    # print hh

    # perspectiveTransform needs an extra dimension
    puv1 = np.expand_dims(puv, axis=0)