def calcTransformation(self, mp1, mp2):
    if self.use_H:
        # If we found enough matches, do a RANSAC search to find inliers
        # corresponding to one homography
        H, mask = cv2.findHomography(mp1, mp2, cv2.RANSAC, 1.0)
        if not self.use_E:
            self.nb_transform_solutions, self.rotations, self.translations, _ = \
                cv2.decomposeHomographyMat(H, self.intrinsic_matrix)
            return mask
        else:
            mask_H = mask.ravel().astype(bool)
            mp1 = mp1[mask_H]
            mp2 = mp2[mask_H]
    if self.use_E:
        E, mask = cv2.findEssentialMat(mp1, mp2, self.intrinsic_matrix,
                                       cv2.RANSAC, 0.99, 1.0, None)
        _, self.rotations, self.translations, mask_cheirality = cv2.recoverPose(
            E, mp1, mp2, self.intrinsic_matrix, mask)
        self.nb_transform_solutions = 1
        if not self.use_H:
            # The cheirality mask ensures that the solutions make sense
            return mask_cheirality
        else:
            # Scatter the essential-matrix mask back into the slots that
            # survived the homography RANSAC
            i = 0
            j = 0
            while i < mask_H.shape[0]:
                if mask_H[i]:
                    mask_H[i] = mask[j]
                    j += 1
                i += 1
            return mask_H
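# Editor's note (a sketch, not part of the original): the index-walking
# merge loop above can be written as a single NumPy boolean-indexing
# assignment that scatters the essential-matrix mask into the True slots
# of the homography inlier mask:
#
#     mask_H[mask_H] = mask.ravel().astype(bool)
#     return mask_H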
def get_pose(self, start_R, start_t, s):
    retval, Rs, ts, norms = cv2.decomposeHomographyMat(
        self.est_H, self.camera_matrix)
    print("We've got {} options".format(len(Rs)))
    if s < len(Rs):
        R = Rs[s]
        t = ts[s]
        norm = norms[s]
    else:
        print('RAN OUT OF AXES')
        R, t, norm = self.pickHomography(Rs, ts, norms)
    # t[0] *= -1
    # t[1] *= -1
    # t[2] *= -1
    print('ANGLE', np.linalg.norm(R - np.identity(3)), 'MAGNITUDE',
          np.linalg.norm(t), 'NORM', norm[2])
    print('ROTATION\n', R)
    print('TRANSLATION\n', t)
    R_tmp = copy.deepcopy(R)
    R_tmp[0][1] *= -1
    R_tmp[1][0] *= -1
    R_tmp[0][2] *= -1
    R_tmp[2][0] *= -1
    # R_tmp[1][2] *= -1
    # R_tmp[2][1] *= -1
    new_R = np.dot(start_R, R_tmp)
    diff_t = np.dot(new_R.T, t) * start_t[2]
    # diff_t[0] *= -1
    new_t = start_t + diff_t
    pos = self.compose_pose(new_R, new_t)
    return pos
def get_vel_and_z(self):
    if self.prev_time is None:
        self.prev_time = rospy.get_time()
        return np.array([0, 0, 0]), self.curr_z
    else:
        try:
            retval, Rs, ts, norms = cv2.decomposeHomographyMat(
                self.est_H, self.camera_matrix)
        except Exception:
            return np.array([0, 0, 0]), self.curr_z
        # Pick the solution whose plane normal has the most negative z
        min_index = 0
        min_z_norm = 0
        for i in range(len(Rs)):
            if norms[i][2] < min_z_norm:
                min_z_norm = norms[i][2]
                min_index = i
        # self.update_z(ts[min_index].T[0])  # uncomment for z estimation from camera
        # print('raw', ts[min_index].T)
        T = ts[min_index] * self.curr_z
        curr_time = rospy.get_time()
        timestep = curr_time - self.prev_time
        ret_val = T.T[0] / timestep, self.curr_z
        self.prev_time = curr_time
        return ret_val
def get_pose_alt(self, start_R, start_t, s):
    retval, Rs, ts, norms = cv2.decomposeHomographyMat(
        self.est_H / self.z_average, self.camera_matrix)
    # print("We've got {} options".format(len(Rs)))
    if s < len(Rs):
        R = Rs[s]
        t = ts[s]
        norm = norms[s]
    else:
        print('RAN OUT OF AXES')
        R, t, norm = self.pickHomography(Rs, ts, norms)
    # if np.absolute(norm[2]) < np.linalg.norm(norm[0:2]):
    #     print('NOT USING THIS ONE')
    # print('ANGLE', np.linalg.norm(R - np.identity(3)), 'MAGNITUDE',
    #       np.linalg.norm(t), 'NORM', norm.T)
    # print('TRANSLATION\n', t)
    # R[0][2] *= -1
    # R[2][0] *= -1
    flip_around_x = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
    flip_R = np.dot(R, flip_around_x)
    diff_R = np.dot(start_R, flip_around_x)
    new_R = np.dot(start_R, R)
    diff_t = np.dot(diff_R, t) * start_t[2]
    # diff_t[2] *= -1
    new_t = start_t + diff_t
    pos = self.compose_pose(new_R, new_t)
    return pos
def run_bf_matcher(self):
    # set up BFMatcher with default params
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
    matches = bf.knnMatch(self.__marker_desc, self.__input_desc, k=2)

    # apply Lowe's ratio test (append the match itself, not a one-element
    # list, so queryIdx/trainIdx lookups below work)
    good = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append(m)

    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([self.__marker_kp[m.queryIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([self.__input_kp[m.trainIdx].pt
                              for m in good]).reshape(-1, 1, 2)

        # find homography and decompose
        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()
        h, w, _ = self.__in_image.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, H)
        num, self.__r_vec, self.__t_vec, Ns = cv2.decomposeHomographyMat(
            H, self.__cam_mat)

        if self.__json_params["debug_draw"]:
            # draw the axes and the warped marker outline
            aruco.drawAxis(self.__in_image, self.__cam_mat, self.__dist_mat,
                           self.__r_vec[0], self.__t_vec[0], 0.05)
            self.__in_image = cv2.polylines(self.__in_image, [np.int32(dst)],
                                            True, 255, 3, cv2.LINE_AA)
def EstimateMotion(E, H, K, cur_kp, prev_kp, Score_EH, Score_EH_threshold,
                   list_R, list_t, list_Rs, list_Ts, list_Ns):
    if Score_EH > Score_EH_threshold:  # choosing E
        print('E is chosen.')
        # Estimate rotation and translation vectors
        _, R, t, mask = cv2.recoverPose(E, cur_kp, prev_kp, K)
        t = Norm_Vector(t)
        list_R.append(R)
        list_t.append(t)
    else:  # choosing H
        print('H is chosen.')
        # num possible solutions will be returned:
        # Rs contains a list of the rotation matrices,
        # Ts contains a list of the translation vectors,
        # Ns contains a list of the normal vectors of the plane.
        num, Rs, Ts, Ns = cv2.decomposeHomographyMat(H, K)
        for iter_Ts in range(num):
            Ts[iter_Ts] = Norm_Vector(Ts[iter_Ts])
        list_Rs.append(Rs)
        list_Ts.append(Ts)
        list_Ns.append(Ns)
        R = Rs[0]
        t = Ts[0]
    return R, t, list_R, list_t, list_Rs, list_Ts, list_Ns
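# Norm_Vector is referenced above but not defined in this snippet. Given
# how it is used (the scale of t is unobservable from E, and from H it is
# only relative to the plane distance), a plausible implementation is:

import numpy as np

def Norm_Vector(v):
    # unit-normalize the translation so candidate solutions are comparable
    n = np.linalg.norm(v)
    return v / n if n > 0 else v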
def get_decomposed_homo_matrix(homo_matrix, cam_matrix):
    """
    Extract the number of solutions, rotation matrices, translation
    vectors (unused), and plane normals (unused) from a homography
    """
    retval, rotation_matrix, trans_matrix, normals = cv2.decomposeHomographyMat(
        homo_matrix, cam_matrix)
    return retval, rotation_matrix, trans_matrix, normals
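# A minimal call sketch for the helper above (the H and K values are
# hypothetical, chosen only to show the shapes involved):

import numpy as np
import cv2

K = np.array([[700.0, 0, 320], [0, 700.0, 240], [0, 0, 1]])  # assumed intrinsics
H = np.eye(3)  # identity homography: no camera motion
n_solutions, Rs, ts, normals = get_decomposed_homo_matrix(H, K)
# up to four candidates: each Rs[i] is 3x3, ts[i] and normals[i] are 3x1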
def getRt(img1, img2, kp1, des1):
    # Initiate ORB detector
    orb = cv2.ORB_create()
    # find the keypoints and descriptors with ORB
    if kp1 is None:
        kp1, des1 = orb.detectAndCompute(img1, None)
        print("redoing frame 1")
    kp2, des2 = orb.detectAndCompute(img2, None)

    FLANN_INDEX_LSH = 6  # LSH index for binary descriptors
    index_params = dict(
        algorithm=FLANN_INDEX_LSH,
        table_number=6,       # 12
        key_size=12,          # 20
        multi_probe_level=1)  # 2
    search_params = dict(checks=50)
    good = []
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    if des1 is not None and des2 is not None and len(des1) > 3 and len(des2) > 3:
        matches = flann.knnMatch(des1, des2, k=2)

        # store all the good matches as per Lowe's ratio test
        for test in matches:
            if len(test) > 1:
                m, n = test
                if m.distance < 0.7 * n.distance:
                    good.append(m)

    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        # normalize point positions
        # src_pts = cv2.undistortPoints(src_pts, cameraMatrix=cameraMatrix, distCoeffs=distCoeffs)
        # dst_pts = cv2.undistortPoints(dst_pts, cameraMatrix=cameraMatrix, distCoeffs=distCoeffs)

        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        retval, Rs, ts, norms = cv2.decomposeHomographyMat(H, cameraMatrix)
        R, t, norm = pickHomography(Rs, ts, norms)
        # t[1] *= -1
        t[0] *= -1
        R[2][0] *= -1
        R[0][2] *= -1
        return R, t, kp2, des2
    else:
        print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
        return None, None, None, None
def getRt(self, img2, img1=None):
    assert img1 is not None or (self.kp1 is not None and self.des1 is not None)
    # Initiate ORB detector
    orb = cv2.ORB_create(nfeatures=500, nlevels=8, scaleFactor=2)
    # find the keypoints and descriptors with ORB
    if self.kp1 is None:
        self.kp1, self.des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)

    # Match keypoints
    good = []
    flann = cv2.FlannBasedMatcher(self.index_params, self.search_params)
    if self.des1 is not None and des2 is not None and len(self.des1) > 3 and len(des2) > 3:
        matches = flann.knnMatch(self.des1, des2, k=2)

        # store all the good matches as per Lowe's ratio test
        for test in matches:
            if len(test) > 1:
                m, n = test
                if m.distance < 0.7 * n.distance:
                    good.append(m)

    if len(good) > self.min_match_count:
        src_pts = np.float32([self.kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        # Find homography
        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        retval, Rs, ts, norms = cv2.decomposeHomographyMat(H, self.camera_matrix)
        self.est_H = H
        # FIRST FRAME
        # self.est_H = np.dot(H, self.est_H)
        R, t, norm = self.pickHomography(Rs, ts, norms)
        t[0] *= -1

        # average the projective depth of the matched keypoints
        z_sum = 0
        z_num = 0
        for kp in src_pts:
            z_sum += np.dot(self.est_H, np.concatenate((kp, np.array([[1]])), 1).T)[2]
            z_num += 1
        self.z_average = z_sum / z_num
        print(self.z_average)
        # self.est_H = self.est_H / z_average
        # R[2][0] *= -1
        # R[0][2] *= -1
        # for kp in src_pts:
        #     print(np.dot(self.est_H, np.concatenate((kp, np.array([[1]])), 1).T))
        # FIRST FRAME
        # self.kp1 = kp2
        # self.des1 = des2
        return R, t
    else:
        print("Not enough matches are found - %d/%d" % (len(good), self.min_match_count))
        return None
def derive_transform(img1, img2,
                     K=numpy.array([[1000, 0, 0], [0, 1000, 0], [0, 0, 1]])):
    H = get_homography_by_keypoints(img1, img2).astype('float')
    n, R, T, normal = cv2.decomposeHomographyMat(H, K)
    # keep only the first of the (up to four) candidate solutions
    R = numpy.array(R[0])
    T = numpy.array(T[0])
    normal = numpy.array(normal[0])
    HH = compose_homography(R, T, normal, K)
    return R, T, normal, HH, K
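# compose_homography is not defined in these snippets. Under the
# plane-induced homography model that cv2.decomposeHomographyMat inverts
# (H ~ K (R + t n^T) K^-1, with t already scaled by the inverse plane
# distance), a plausible reconstruction is:

def compose_homography(R, T, normal, K):
    # reassemble H from one (R, T, n) decomposition candidate
    H = K.dot(R + T.dot(normal.T)).dot(numpy.linalg.inv(K))
    return H / H[2, 2]  # fix the projective scale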
def process(img2):
    img1 = cv2.imread("sampleImages/img-center.png")
    src_points = extract_points(img1)
    dst_points = extract_points(img2)
    # retval = cv2.estimateRigidTransform(src_points, dst_points, fullAffine=False)
    # matrix = cv2.getPerspectiveTransform(src_points, dst_points)
    retval, mask = cv2.findHomography(src_points, dst_points)
    # print(retval)
    time.sleep(1)
    number, rotations, translations, normals = cv2.decomposeHomographyMat(
        retval, MATRIX)
    print(number)
    for possible_rotation in rotations:
        rot_vector = cv2.Rodrigues(possible_rotation)[0]
        print(list(map(math.degrees, rot_vector)))
    # print(translations)
    # print(normals)

    new_src = np.int32(src_points)
    new_dst = np.int32(dst_points)
    # NOTE: findFundamentalMat returns the fundamental matrix; treating it
    # as an essential matrix below is only valid if the points were first
    # normalized with the camera intrinsics.
    fundamentalMatrix, mask = cv2.findFundamentalMat(new_src, new_dst,
                                                     cv2.FM_LMEDS)
    # pose = cv2.recoverPose(fundamentalMatrix, src_points, dst_points, MATRIX)
    R1, R2, t = cv2.decomposeEssentialMat(fundamentalMatrix)
    vector1 = cv2.Rodrigues(R1)[0]
    vector2 = cv2.Rodrigues(R2)[0]
    pos1 = [math.degrees(item)
            for sublist in vector1.tolist() for item in sublist]
    pos2 = [math.degrees(item)
            for sublist in vector2.tolist() for item in sublist]
    print(pos1)
    print(pos2)
def get_RT(H):
    retval, Rs, ts, norms = cv2.decomposeHomographyMat(H, camera_matrix)
    min_index = 0
    min_z_norm = 0
    for i in range(len(Rs)):
        if norms[i][2] < min_z_norm:
            min_z_norm = norms[i][2]
            min_index = i
    T = ts[min_index] * z
    return Rs[min_index], T, norms[min_index]
def select_model(_):
    """ Select model and return possible transform permutations """
    if _['model'] == 'H':
        # decompose_H()
        res_h, Hr, Ht, Hn = cv2.decomposeHomographyMat(_['H'], _['K'])
        Ht = np.float32(Ht)
        Ht /= np.linalg.norm(Ht, axis=(1, 2), keepdims=True)
        T_perm = list(zip(Hr, Ht))
    else:
        # decompose_E()
        R1, R2, t = cv2.decomposeEssentialMat(_['E'])
        T_perm = [(R1, t), (R2, t), (R1, -t), (R2, -t)]
    return T_perm
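# When the essential-matrix model is selected, the four (R, t)
# permutations above can be disambiguated with the cheirality check in
# cv2.recoverPose, as other snippets in this section do. A sketch,
# assuming pts1/pts2 are the matched point arrays that produced _['E']:
#
#     _, R, t, mask = cv2.recoverPose(_['E'], pts1, pts2, _['K'])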
def match_image(self, candidate_image):
    if len(np.shape(candidate_image)) == 3:
        candidate_image = cv.cvtColor(candidate_image, cv.COLOR_BGR2GRAY)
    print("match_image")
    self.train_image = candidate_image
    self.train_features, self.train_descriptors = self.orb.detectAndCompute(
        candidate_image, None)
    self.train_image_kp = cv.drawKeypoints(self.train_image, self.train_features,
                                           self.train_image_kp, color=(255, 0, 0))
    cv.imshow("train_image", self.train_image_kp)
    cv.waitKey(20)

    raw_matches = self.bf_matcher.match(self.query_descriptors,
                                        self.train_descriptors)
    # raw_matches = sorted(raw_matches, key=lambda x: x.distance)
    good_matches = []
    for match in raw_matches:
        if match.distance < 20:
            good_matches.append(match)
    print("raw matches: ", len(raw_matches))
    print("matched feature number: ", len(good_matches))
    if len(good_matches) < 5:
        return None, None

    self.query_good_features, self.train_good_features = self.filter_features(good_matches)
    matched_image = cv.drawMatches(self.query_image, self.query_image_features,
                                   self.train_image, self.train_features,
                                   good_matches, None)
    cv.imwrite("candidate_image.png", candidate_image)
    cv.imwrite("matched_image.png", matched_image)

    # conduct perspective transform
    H, mask = self.perspectiveTransform(self.query_good_features,
                                        self.train_good_features)
    print("calculated matrix: ", H)

    # find rotation and translation from the homography matrix
    _, Rotation, translation, _ = cv.decomposeHomographyMat(H, self.K)
    print("Rotation: ", Rotation)
    print("translation: ", translation)
    return Rotation, translation
def get_roll(H):
    data = np.load('calib.npz')
    K = data['mtx']
    num, Rs, Ts, Ns = cv2.decomposeHomographyMat(H, K)
    # print(num)
    for i in range(num):
        R = Rs[i]
        if isRotationMatrix(R):
            angles = rotationMatrixToEulerAngles(R)
            # print(angles)
            return abs(angles[2])
    return 0
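# isRotationMatrix and rotationMatrixToEulerAngles are referenced above
# but not defined here. The math matches the inline euler extraction in
# the corner-angle fragment further down, so a standard implementation
# would be:

import math
import numpy as np

def isRotationMatrix(R):
    # R is a rotation iff R^T R = I (within numerical tolerance)
    return np.linalg.norm(np.identity(3) - np.dot(R.T, R)) < 1e-6

def rotationMatrixToEulerAngles(R):
    # ZYX euler extraction with a gimbal-lock fallback
    sy = math.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2)
    if sy > 1e-6:
        x = math.atan2(R[2, 1], R[2, 2])
        y = math.atan2(-R[2, 0], sy)
        z = math.atan2(R[1, 0], R[0, 0])
    else:
        x = math.atan2(-R[1, 2], R[1, 1])
        y = math.atan2(-R[2, 0], sy)
        z = 0
    return np.array([x, y, z])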
def getAngles(self):
    if self.StringToList is None:
        # return "No April Tag detected!"
        return None
    STL = StringToList(self.results)
    homography = STL.getHomography()
    homography = np.array(homography)
    _, Rs, Ts, Ns = cv2.decomposeHomographyMat(homography, self.camera_cal_matrix)
    angles = []
    for rotation_matrix in Rs:
        r = R.from_dcm(rotation_matrix)  # scipy < 1.4; use from_matrix on newer versions
        angle = r.as_euler('xyz', degrees=True)
        angles.append(angle)
    return angles
def get_RT(self, start_RT, H):
    if H is None:
        return None
    retval, Rs, ts, norms = cv2.decomposeHomographyMat(
        H, self.camera_matrix)
    min_index = 0
    min_z_norm = 0
    for i in range(len(Rs)):
        if norms[i][2] < min_z_norm:
            min_z_norm = norms[i][2]
            min_index = i
    T = ts[min_index] * start_RT[2, 3]
    return Rs[min_index], T, norms[min_index]
def get_vel(self, start_RT, timestep):
    retval, Rs, ts, norms = cv2.decomposeHomographyMat(
        self.est_H, self.camera_matrix)
    # print("We've got {} options".format(len(Rs)))
    # if s < len(Rs):
    min_index = 0
    min_z_norm = 0
    for i in range(len(Rs)):
        if norms[i][2] < min_z_norm:
            min_z_norm = norms[i][2]
            min_index = i
    T = ts[min_index] * start_RT[2, 3]
    return T / timestep
def bend_homography(H, alpha,
                    K=numpy.array([[1000, 0, 0], [0, 1000, 0], [0, 0, 1]])):
    n, R, T, normal = cv2.decomposeHomographyMat(H, K)
    R = R[0]
    T = T[0]
    normal = normal[0]
    # scale both the rotation (via its euler angles) and the translation
    # by alpha, then recompose the homography
    angles = rotationMatrixToEulerAngles(R) * alpha
    R = eulerAnglesToRotationMatrix(angles)
    T = T * alpha
    H = compose_homography(R, T, normal, K)
    return H
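# Usage sketch: alpha interpolates between the identity warp (alpha = 0)
# and the full homography (alpha = 1), e.g. for animating a warp.
# H and img are assumed to come from the surrounding pipeline:
#
#     frames = [cv2.warpPerspective(img, bend_homography(H, a / 10.0),
#                                   (img.shape[1], img.shape[0]))
#               for a in range(11)]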
def run_flann_matcher(self):
    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)  # or pass an empty dictionary
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(np.asarray(self.__marker_desc, np.float32),
                             np.asarray(self.__input_desc, np.float32), k=2)

    # Need to draw only good matches, so create a mask
    matchesMask = [[0, 0] for i in range(len(matches))]

    # store all the good matches as per Lowe's ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)

    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([self.__marker_kp[m.queryIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([self.__input_kp[m.trainIdx].pt
                              for m in good]).reshape(-1, 1, 2)

        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()
        h, w, _ = self.in_image.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, H)
        num, self.r_vec, self.t_vec, Ns = cv2.decomposeHomographyMat(
            H, self.__cam_mat)
        print(self.__in_image.shape)

        if self.__json_params["debug_draw"]:
            # draw the axes and the warped marker outline
            aruco.drawAxis(self.in_image, self.__cam_mat, self.__dist_mat,
                           self.r_vec[1], self.t_vec[1], 0.1)
            self.in_image = cv2.polylines(self.in_image, [np.int32(dst)],
                                          True, 255, 3, cv2.LINE_AA)
def get_pose(self, start_R, start_t, s):
    # print(self.est_H)
    retval, Rs, ts, norms = \
        cv2.decomposeHomographyMat(self.est_H, self.camera_matrix)
    # print("We've got {} options".format(len(Rs)))
    if s < len(Rs):
        R = Rs[s]
        t = ts[s]
        norm = norms[s]
    else:
        print('RAN OUT OF AXES')
        R, t, norm = self.pickHomography(Rs, ts, norms)
    # if np.absolute(norm[2]) < np.linalg.norm(norm[0:2]):
    #     print('NOT USING THIS ONE')
    # t[0] *= -1
    # t[1] *= -1
    # t[2] *= -1
    # R_tmp = copy.deepcopy(R)
    # R_tmp[0][1] *= -1
    # R_tmp[1][0] *= -1
    # R_tmp[0][2] *= -1
    # R_tmp[2][0] *= -1
    # R_tmp[1][2] *= -1
    # R_tmp[2][1] *= -1
    # print('ANGLE', np.linalg.norm(R - np.identity(3)), 'MAGNITUDE',
    #       np.linalg.norm(t), 'NORM', norm.T)
    # print('ROTATION\n', R)
    # print('TRANSLATION\n', t)
    flip_around_x = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
    # flipped_R = np.dot(flip_around_x, R)
    # R[1][2] *= -1
    # R[2][1] *= -1
    # R[0][1] *= -1
    # R[1][0] *= -1
    flip_R = np.dot(R, flip_around_x)
    # t = np.dot(flip_around_x, t)
    new_R = np.dot(start_R, R)
    diff_t = np.dot(new_R.T, t) * start_t[2]
    diff_t[2] *= -1
    new_t = start_t + diff_t
    pos = self.compose_pose(new_R, new_t)
    return pos
def calculate_pyramid(homography, image_test, pos_x, pos_y):
    '''
    Calculates the position of the pyramid and places it
    :param homography: homography matrix relating the marker to the image
    :param image_test: image to draw the pyramid on
    :param pos_x: x position of the marker in the image
    :param pos_y: y position of the marker in the image
    :return:
    '''
    print("Calculating pyramid position")
    # get camera calibrations
    mtx, dist = get_calibrations()
    # decompose the homography into its various components
    retval, rotations, translations, normals = cv.decomposeHomographyMat(homography, mtx)
    # get the rotation vector from the first candidate rotation matrix
    # (Rodrigues expects a single 3x3 matrix, not the whole solution list)
    rot = cv.Rodrigues(np.array(rotations[0]))
    rotV = rot[0]

    # define the points in a 3D space
    objp = np.zeros((5, 3), np.float32)
    objp[0] = [0, 0, 1]    # vertex
    objp[1] = [-1, -1, 0]  # top-left 1
    objp[2] = [-1, 1, 0]   # bottom-left 2
    objp[3] = [1, 1, 0]    # bottom-right 3
    objp[4] = [1, -1, 0]   # top-right 4
    # define the points of the image in a 2D space
    image_corners = get_corners(pos_x, pos_y)

    # find the object pose from the 3D-2D correspondences
    retval, rotVector, translVector = cv.solvePnP(objp, image_corners, mtx, dist,
                                                  rotV, np.array(translations[0]))
    # project the points from 3D to 2D
    scene_corners, _ = cv.projectPoints(objp, rotV, np.array(translVector), mtx, dist)
    # draw the pyramid based on the calculated points
    print("Drawing pyramid in the image")
    draw_pyramid(image_test, scene_corners)
def findPoseTransform(self, frame1_gray, frame2_gray):
    warp_matrix = np.eye(2, 3, dtype=np.float32)
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 1e-5)
    # Estimate a euclidean warp between the frames with ECC
    (cc, warp_matrix) = cv2.findTransformECC(frame2_gray, frame1_gray,
                                             warp_matrix, cv2.MOTION_EUCLIDEAN,
                                             criteria)
    # Homogeneous-coordinate (3x3) version of the warp
    last_row = np.array([0, 0, 1], dtype=np.float32)
    warp_matrix_ext = np.vstack((warp_matrix, last_row))
    # Get the (up to four) candidate decompositions
    _, Rs, Ts, Ns = cv2.decomposeHomographyMat(
        warp_matrix_ext, self.cam_params.get_intrinsic_matrix())
    # Select the best candidate and accumulate the trajectory
    self.transf_last = self.getFeasibleTranformation(Ns, Rs, Ts)
    self.transf_trajectory.append(self.transf_last)
    self.transf_accum = self.transf_accum.dot(self.transf_last)
    return self.transf_last, warp_matrix
def toCoords(self, detection, pos, dist, cameraMatrix):
    """
    Calculates the planar coordinates from calculated distance and homography
    :param detection: apriltag detection object
    :param pos: translation vector of the robot
    :param dist: calculated distance from apriltag
    :param cameraMatrix: intrinsic camera matrix
    :return: x,y planar coordinates as a top-down view with 0,0 at the center
        of the apriltag
    """
    # retrieve rotation vectors from homography
    num, rotvec, tvec, normvec = cv2.decomposeHomographyMat(
        detection.homography, cameraMatrix)
    # in experimentation, the solution at index 3 always yielded the
    # correct result
    optI = 3
    goalrotvec = rotvec[optI]
    fourdmtx = np.matrix(
        [[goalrotvec[0, 0], goalrotvec[0, 1], goalrotvec[0, 2], pos[0]],
         [goalrotvec[1, 0], goalrotvec[1, 1], goalrotvec[1, 2], pos[1]],
         [goalrotvec[2, 0], goalrotvec[2, 1], goalrotvec[2, 2], pos[2]],
         [0., 0., 0., 1.]])

    # decompose rotation matrix into euler angles
    vals = decomposeSO3(rotvec[optI])
    inverted = -np.linalg.inv(fourdmtx) * np.sign(1 - abs(vals[2]))
    theta = (vals[1] * np.sign(1 - abs(vals[2]))) - math.atan2(pos[0], pos[2])
    subtract = -vals[0]
    if np.sign(1 - abs(vals[2])) > 0:
        subtract = -(vals[0] + math.pi)
    phi = subtract - math.atan2(pos[1], pos[2])

    # calculate planar coordinates as a function of the distance and the y rotation
    x = abs(dist * math.sin(theta))
    y = dist * math.cos(theta)
    z = dist * math.sin(phi)
    return inverted[0], inverted[2], inverted[1], theta, (
        vals[1] * np.sign(1 - vals[2])), subtract
def decompose_homography(K, H):
    '''OpenCV camera-localization helper; the reference camera and the
    current camera must share the same intrinsics.
    K: camera intrinsic matrix. H: homography matrix.
    Returns:
        rotations     Array of rotation matrices.
        translations  Array of translation matrices.
        normals       Array of plane normal matrices.

    This function extracts relative camera motion between two views
    observing a planar object from the homography H induced by the plane.
    The intrinsic camera matrix K must also be provided.

    The function may return up to four mathematical solution sets. At
    least two of the solutions may further be invalidated if point
    correspondences are available, by applying the positive depth
    constraint (all points must be in front of the camera). The
    decomposition method is described in detail in [104].
    '''
    retval, rotations, translations, normals = cv2.decomposeHomographyMat(H, K)
    return rotations, translations, normals
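# To apply the positive-depth pruning the docstring mentions, OpenCV
# provides cv2.filterHomographyDecompByVisibleRefpoints (the last
# fragment in this section uses it the same way). A sketch, assuming
# pts1/pts2 are the Nx1x2 float32 matched points that produced H:
#
#     rotations, translations, normals = decompose_homography(K, H)
#     possible = cv2.filterHomographyDecompByVisibleRefpoints(
#         rotations, normals, pts1, pts2)
#     # 'possible' holds the indices of solutions consistent with all
#     # reference points lying in front of the camera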
def get_pose_alt(self, start_RT):
    retval, Rs, ts, norms = cv2.decomposeHomographyMat(
        self.est_H, self.camera_matrix)
    # print("We've got {} options".format(len(Rs)))
    # if s < len(Rs):
    min_index = 0
    min_z_norm = 0
    for i in range(len(Rs)):
        if norms[i][2] < min_z_norm:
            min_z_norm = norms[i][2]
            min_index = i
    T = ts[min_index] * start_RT[2, 3]
    RT = np.identity(4)
    if np.linalg.norm(T) < 10:
        RT[0:3, 3] = T.T
    est_RT = np.dot(RT, self.est_RT)  # comment out for first frame
    return est_RT[0:3, 0:3], est_RT[0:3, 3], norms[min_index]  # comment out for first frame
def example_03_find_homography_manual():
    folder_input = 'images/ex_homography_manual/'
    im_target = cv2.imread(folder_input + 'background.jpg')
    im_source = cv2.imread(folder_input + 'rect.jpg')

    folder_output = 'images/output/'
    if not os.path.exists(folder_output):
        os.makedirs(folder_output)
    else:
        tools_IO.remove_files(folder_output)

    c1, r1 = 296, 95
    c2, r2 = 570, 180
    c3, r3 = 404, 614
    c4, r4 = 130, 531
    p_target = numpy.array([[c1, r1], [c2, r2], [c3, r3], [c4, r4]])
    p_source = numpy.array(
        [[0, 0], [im_source.shape[1], 0],
         [im_source.shape[1], im_source.shape[0]],
         [0, im_source.shape[0]]], dtype=numpy.float64)

    # via affine
    homography_afine, status = tools_pr_geom.fit_affine(p_source, p_target)
    homography = numpy.vstack((homography_afine, numpy.array([0, 0, 1])))
    cv2.imwrite(
        folder_output + 'affine.png',
        tools_image.put_layer_on_image(
            im_target,
            cv2.warpPerspective(im_source, homography,
                                (im_target.shape[1], im_target.shape[0])),
            background_color=(0, 0, 0)))

    # homography itself
    homography2, status = tools_pr_geom.fit_homography(
        p_source.reshape((-1, 1, 2)), p_target.reshape((-1, 1, 2)))
    cv2.imwrite(
        folder_output + 'homography.png',
        tools_image.put_layer_on_image(
            im_target,
            cv2.warpPerspective(im_source, homography2,
                                (im_target.shape[1], im_target.shape[0])),
            background_color=(0, 0, 0)))

    # homography normalized
    K = numpy.array([[im_target.shape[1], 0, 0], [0, im_target.shape[0], 0],
                     [0, 0, 1]])
    n, R, T, normal = cv2.decomposeHomographyMat(homography, K)
    R = numpy.array(R[0])
    T = numpy.array(T[0])
    normal = numpy.array(normal[0])
    homography3 = tools_calibrate.compose_homography(R, T, normal, K)
    cv2.imwrite(
        folder_output + 'homography_normalized.png',
        tools_image.put_layer_on_image(
            im_target,
            cv2.warpPerspective(im_source, homography3,
                                (im_target.shape[1], im_target.shape[0])),
            background_color=(0, 0, 0)))
    return
                       [points[1][0], points[1][1]],
                       [points[2][0], points[2][1]],
                       [points[3][0], points[3][1]]])

    if w >= h:
        pts_dst = np.array([[x, y], [x, y + w], [x + w, y + w], [x + w, y]])
    else:
        pts_dst = np.array([[x, y], [x, y + h], [x + h, y + h], [x + h, y]])

    h, status = cv2.findHomography(pts_src, pts_dst)  # note: shadows the height 'h'
    # print(h)
    k = np.array([[1280, 0, 629], [0, 1280, 372], [0, 0, 1]])
    retval, rotations, translations, normals = cv2.decomposeHomographyMat(h, k)
    # print(rotations)
    for rotation in rotations:
        # extract euler angles from each candidate rotation matrix
        sy = math.sqrt(rotation[0, 0] * rotation[0, 0] +
                       rotation[1, 0] * rotation[1, 0])
        singular = sy < 1e-6
        if not singular:
            ang_x = math.atan2(rotation[2, 1], rotation[2, 2])
            ang_y = math.atan2(-rotation[2, 0], sy)
            ang_z = math.atan2(rotation[1, 0], rotation[0, 0])
        else:
            ang_x = math.atan2(-rotation[1, 2], rotation[1, 1])
            ang_y = math.atan2(-rotation[2, 0], sy)
            ang_z = 0
def get_H(conn, table_name, id=7, fmt='d'):
    rows = get_rows(conn, table_name)
    Hs = {}
    for row in rows:
        if row[id] is not None:
            data1 = get_data(row[id], fmt)
            Hs[row[0]] = np.array(data1).reshape((3, 3))
    return Hs


if __name__ == "__main__":
    project_dir = '/home/weihao/Projects'
    key = 'heads'  # office, heads
    mode = 'Test'

    db = '{}/colmap_features/{}_{}/proj.db'.format(project_dir, key, mode)
    conn = sqlite3.connect(db)
    Hs = get_H(conn, 'two_view_geometries')
    focal = 525.0
    cam = PinholeCamera(640.0, 480.0, focal, focal, 320.0, 240.0)

    nt = 0
    for idx in Hs:
        As, Rs, Ts, Ns = cv2.decomposeHomographyMat(Hs[idx], cam.mx)
        for a in range(As):
            print(idx, Utils.rotationMatrixToEulerAngles(Rs[a]))
        nt += 1
        if nt > 10:
            break
def reproject(imgname1, imgname2, corners1, corners2, plane_depth):
    '''return the coordinates in the frame of the first image'''
    img1 = cv2.imread(imgname1)
    img2 = cv2.imread(imgname2)
    # print('image shape: ', img1.shape)
    # # dummy
    # pts1 = np.array([[184,272],[476,557],[1102,308],[743,128]])
    # pts2 = np.array([[305,275],[384,545],[1178,361],[889,154]])
    pts1 = np.array(corners1)
    pts2 = np.array(corners2)
    kp1, des1 = computeDesc(img1, pts1)
    kp2, des2 = computeDesc(img2, pts2)
    # lower the threshold to pass the test!!
    matches = fE.getMatches(des1, des2, threshold=0.8)  # , kp1, kp2, img1, img2)
    # print(len(matches), ' is length of matches!')
    if len(matches) < 4:
        raise NameError('all 4 corners are not matched!')
    pts1, pts2 = fE.getMatchPts(matches, kp1, kp2)
    P1 = np.concatenate((pts1, np.array([[1, 1, 1, 1]]).T), axis=1)
    P2 = np.concatenate((pts2, np.array([[1, 1, 1, 1]]).T), axis=1)
    # print(pts1, pts2)
    H = fE.getHomography(matches, kp1, kp2)

    # normalize H by its second-largest singular value
    U, S, V = np.linalg.svd(H)
    # print(S)
    H = H / S[1]
    # flip the sign of H if x2^T H x1 < 0
    if np.dot(P2[0, :], np.dot(H, P1[0, :].T)) < 0:
        H = -H
    # print(H)
    # A = np.dot(H.T, H)
    # U, S, V = np.linalg.svd(A)
    # print(S)
    # v1, v2, v3 = V[:, 0], V[:, 1], V[:, 2]

    # K is needed because x needs to be multiplied by K^-1
    retval, rotations, translations, normals = cv2.decomposeHomographyMat(H, K)
    # H = np.dot(np.dot(K_inv, H), K)
    # print(np.dot(H - rotations[2], normals[2]))

    ###################################################################
    # calculate 3d; "T" is in units of the plane depth d
    Rs = []
    Ts = []
    for i, n in enumerate(normals):
        if n[2] > 0:  # positive depth constraint
            Rs.append(rotations[i])
            Ts.append(translations[i])
    # print(Rs, Ts)
    if len(Rs) == 0:
        # nothing more we can do right now; we need more correspondences!
        raise NameError('Cannot decompose Homography Matrix -- correspondences are wrong!')

    iResult = SelectionXtrans(img1, img2, Rs, Ts)
    R = Rs[iResult]
    T = Ts[iResult]
    print('The real R is ', R)
    print('unit translation length: ', np.linalg.norm(T))

    # finally calculate the coordinates of all 4 corners
    Xs = []
    for i in range(0, 4):
        x1 = np.dot(K_inv, P1[i, :].T)
        x2 = np.dot(K_inv, P2[i, :].T)
        Xs.append(plane_depth * lstsql3d(R, T, x1, x2))
    # print(np.linalg.norm(Xs[0] - Xs[1]))
    return Xs
#print("dp:", dp1, dp2) if uv1[0] < cu + xoff: r = -np.arccos(dp1) * fps else: r = np.arccos(dp1) * fps if uv2[1] < cv + yoff: q = -np.arccos(dp2) * fps else: q = np.arccos(dp2) * fps print("A ypr: %.2f %.2f %.2f" % (r, q, p)) # alternative method for determining pose change from previous frame if filter_method == "homography" and not M is None: print("M:\n", M) (result, Rs, tvecs, norms) = cv2.decomposeHomographyMat(M, K) possible = cv2.filterHomographyDecompByVisibleRefpoints( Rs, norms, np.array([newp1]), np.array([newp2])) #print("R:", Rs) print("Num:", len(Rs), "poss:", possible) best = 100000 best_index = None best_val = None for i, R in enumerate(Rs): (Hpsi, Hthe, Hphi) = transformations.euler_from_matrix(R, 'rzyx') hp = Hpsi * fps hq = Hphi * fps hr = Hthe * fps d = np.linalg.norm(np.array([p, q, r]) - np.array([hp, hq, hr])) if d < best: best = d