def EstimateExterior(gcpCoo_file, imgCoo_GCP_file, interior_orient, estimate_exterior,
                     unit_gcp, max_orientation_deviation, ransacApprox, angles_eor,
                     pos_eor, directoryOutput):
    """Derive the camera exterior orientation (4x4 homogeneous pose matrix).

    Either estimates the pose from ground control points (GCPs) or assembles it
    from a user-supplied rotation and projection centre.

    Parameters
    ----------
    gcpCoo_file : str
        Tab-separated table of GCP object-space coordinates (point ID + coordinates).
    imgCoo_GCP_file : str
        Tab-separated table of GCP image measurements (point ID, x, y in pixels).
    interior_orient : object
        Interior camera geometry as expected by photogrF.
    estimate_exterior : bool
        True -> estimate pose from GCPs; False -> use angles_eor / pos_eor directly.
    unit_gcp : float
        Unit scale factor of the GCP coordinates.
    max_orientation_deviation, ransacApprox :
        Forwarded to photogrF.getExteriorCameraGeometry.
    angles_eor : array-like
        Camera rotation angles in radians.
    pos_eor : array-like
        Projection centre, shape (3, 1).
    directoryOutput : str
        Directory for diagnostic output written during estimation.

    Returns
    -------
    numpy.ndarray
        4x4 exterior orientation matrix.

    Raises
    ------
    Exception
        Re-raised after a diagnostic print when file reading or the pose
        estimation fails (the original code printed and then crashed later
        with an unrelated NameError).
    """
    try:
        # read object coordinates of GCP (including point ID)
        gcpObjPts_table = np.asarray(pd.read_table(gcpCoo_file, header=None, delimiter='\t'))
    except Exception:
        print('failed reading GCP file (object space)')
        raise  # continuing without the table would fail later with a NameError

    try:
        # read pixel coordinates of image points of GCPs (including ID)
        gcpImgPts_table = np.asarray(pd.read_table(imgCoo_GCP_file, header=None, delimiter='\t'))
    except Exception:
        print('failed reading GCP file (imgage space)')
        raise

    gcpPts_ids = gcpImgPts_table[:, 0]
    gcpPts_ids = gcpPts_ids.reshape(gcpPts_ids.shape[0], 1)
    gcpImgPts_to_undist = gcpImgPts_table[:, 1:3]

    # undistort image measurements of GCP and re-attach the point IDs
    gcpImgPts_undist = photogrF.undistort_img_coos(gcpImgPts_to_undist, interior_orient, False)
    gcpImgPts_undist = np.hstack((gcpPts_ids, gcpImgPts_undist))

    # get exterior orientation
    try:
        if estimate_exterior:
            # estimate exterior orientation from GCPs
            if ransacApprox:
                # RANSAC derives its own approximation -> start from zeros
                exteriorApprox = np.asarray([0, 0, 0, 0, 0, 0]).reshape(6, 1)
            else:
                exteriorApprox = np.vstack((pos_eor, angles_eor)) * unit_gcp
            eor_mat = photogrF.getExteriorCameraGeometry(
                gcpImgPts_undist, gcpObjPts_table, interior_orient, unit_gcp,
                max_orientation_deviation, ransacApprox, exteriorApprox, True,
                directoryOutput)
        else:
            # ...or use predefined camera pose information
            rot_mat = photogrF.rot_Matrix(angles_eor[0], angles_eor[1], angles_eor[2],
                                          'radians').T
            # element-wise sign flip of the first and third rows
            rot_mat = rot_mat * np.array([[-1, -1, -1], [1, 1, 1], [-1, -1, -1]])
            # if rotation matrix received from opencv transpose rot_mat
            eor_mat = np.hstack((rot_mat.T, pos_eor))
            eor_mat = np.vstack((eor_mat, [0, 0, 0, 1]))
            print(eor_mat)
            # scale the projection centre into GCP units
            eor_mat[0:3, 3] = eor_mat[0:3, 3] * unit_gcp
    except Exception as e:
        print(e)
        print('Referencing image failed\n')
        raise

    return eor_mat
def LineWaterSurfaceIntersect(imgPts, cameraGeometry_interior, cameraGeometry_exterior,
                              pointCloud, epsilon=1e-6):
    """Intersect image rays with a fitted (possibly tilted) water-surface plane.

    Fits a plane to *pointCloud*, transforms the (distorted) image measurements
    *imgPts* into object space and intersects each ray with that plane.

    Parameters
    ----------
    imgPts : numpy.ndarray
        Image measurements (pixels), one point per row.
    cameraGeometry_interior : object
        Interior camera geometry as expected by photo_tool.
    cameraGeometry_exterior : array-like
        Exterior camera geometry; indices [0..2] are used as the projection
        centre (NOTE(review): with a 4x4 pose matrix this would select rows,
        not the translation column — confirm the expected input shape).
    pointCloud : numpy.ndarray
        3D points of the water surface used for plane fitting.
    epsilon : float
        Rays whose direction is closer than this to parallel with the plane
        raise a RuntimeError.

    Returns
    -------
    numpy.ndarray or None
        Intersected 3D points (one per input ray), or None if plane fitting failed.

    Raises
    ------
    RuntimeError
        If a ray is (nearly) parallel to the water plane.
    """
    # get water plane with plane fitting (when water surface not horizontal)
    planeParam = ausgl_ebene(pointCloud)  # planeParam = [a, b, c, d]
    try:
        # sanity check: raises if plane fitting returned None / non-numeric values.
        # NOTE: the original code also returned planeParam here (debug leftover),
        # which made the intersection below unreachable — removed.
        np.sum(np.asarray(planeParam))
    except Exception as e:
        _, _, exc_tb = sys.exc_info()
        print(e, 'line ' + str(exc_tb.tb_lineno))
        print('plane fitting failed')
        return

    # plane in normal form: normal vector and a support point on the plane
    planeNormal = np.array([planeParam[0], planeParam[1], planeParam[2]])
    planePoint = np.array([0, 0, -1 * planeParam[3] / planeParam[2]])

    # report the tilt of the fitted plane relative to a horizontal plane.
    # Euclidean (L2) norms — the original used sum(|v_i|), i.e. the L1 norm,
    # which gives a wrong angle for non-axis-aligned normals.
    PlanarPlaneNorm = np.asarray([0, 0, 1])
    len_NivelPlaneNorm = np.sqrt(np.sum(PlanarPlaneNorm ** 2))
    len_planeNormal = np.sqrt(np.sum(planeNormal ** 2))
    zaehler = planeNormal.dot(PlanarPlaneNorm)
    angleNormVec = np.arccos(zaehler / (len_NivelPlaneNorm * len_planeNormal)) * 180 / np.pi
    print('angle of plane: ' + str(angleNormVec))

    # origin of ray is projection center
    rayPoint = np.asarray([cameraGeometry_exterior[0], cameraGeometry_exterior[1],
                           cameraGeometry_exterior[2]])

    # transform image rays into object space
    imgPts_undist_mm = photo_tool.undistort_img_coos(imgPts, cameraGeometry_interior)
    # NOTE(review): these are object-space points used directly as ray
    # directions — confirm imgDepthPts_to_objSpace returns directions here.
    rayDirections = photo_tool.imgDepthPts_to_objSpace(
        imgPts_undist_mm, cameraGeometry_exterior,
        cameraGeometry_interior.resolution_x, cameraGeometry_interior.resolution_y,
        cameraGeometry_interior.sensor_size_x / cameraGeometry_interior.resolution_x,
        cameraGeometry_interior.ck)

    PtsIntersectedWaterPlane = []
    for ray in rayDirections:
        # standard parametric line-plane intersection
        ndotu = planeNormal.dot(ray)
        if abs(ndotu) < epsilon:
            raise RuntimeError("no intersection with plane possible")
        w = rayPoint - planePoint
        si = -planeNormal.dot(w) / ndotu
        Psi = w + si * ray + planePoint
        PtsIntersectedWaterPlane.append(Psi)

    return np.asarray(PtsIntersectedWaterPlane)
def LinePlaneIntersect(imgPts, waterlevel, cameraGeometry_interior, cameraGeometry_exterior,
                       unit_gcp=1, epsilon=1e-6):
    """Intersect image rays with a horizontal water plane at height *waterlevel*.

    Parameters
    ----------
    imgPts : numpy.ndarray
        Image measurements (pixels), one point per row.
    waterlevel : float
        Height of the (horizontal) water surface; scaled by unit_gcp.
    cameraGeometry_interior : object
        Interior camera geometry as expected by photo_tool (resolution,
        sensor size, principal distance ck, ...).
    cameraGeometry_exterior : numpy.ndarray
        4x4 exterior orientation matrix; column 3 holds the projection centre.
    unit_gcp : float, optional
        Unit scale factor applied to the water level (default 1).
    epsilon : float, optional
        Rays closer than this to parallel with the plane raise a RuntimeError.

    Returns
    -------
    numpy.ndarray
        Intersected 3D points on the water plane, one per input image point.

    Raises
    ------
    RuntimeError
        If a ray is (nearly) parallel to the water plane.
    """
    # assume water is a horizontal plane: normal [0,0,1], support point at z = waterlevel
    planeNormal = np.array([0, 0, 1])
    planePoint = np.array([0, 0, waterlevel * unit_gcp])
    planeNormal_norm = planeNormal * (1 / np.linalg.norm(planeNormal))

    # origin of ray is the projection centre (translation column of the pose matrix)
    rayPoint = np.asarray([cameraGeometry_exterior[0, 3], cameraGeometry_exterior[1, 3],
                           cameraGeometry_exterior[2, 3]])

    # undistort image measurements and build camera-system coordinates
    # (x mirrored, z set to the principal distance ck)
    imgPts_undist_mm = photo_tool.undistort_img_coos(imgPts, cameraGeometry_interior)
    imgPts_undist_forObj_x = imgPts_undist_mm[:, 0] * -1
    imgPts_undist_forObj_y = imgPts_undist_mm[:, 1]
    imgPts_undist_forObj = np.hstack((
        imgPts_undist_forObj_x.reshape(imgPts_undist_forObj_x.shape[0], 1),
        imgPts_undist_forObj_y.reshape(imgPts_undist_forObj_y.shape[0], 1)))
    imgPts_undist_forObj = np.hstack((
        imgPts_undist_forObj,
        np.ones((imgPts_undist_mm.shape[0], 1)) * cameraGeometry_interior.ck))

    # transform into object space via homogeneous coordinates
    # (plain array matmul instead of the deprecated np.matrix)
    imgPts_XYZ = np.asarray(cameraGeometry_exterior).dot(
        np.vstack((imgPts_undist_forObj.T, np.ones(imgPts_undist_forObj.shape[0]))))
    rayPts = imgPts_XYZ.T[:, 0:3]

    # ray directions from the object-space image points towards the camera
    rayDirections = np.ones((rayPts.shape)) * rayPoint - rayPts
    # normalize each ray individually. The original divided by the norm of the
    # whole array; the intersection result is scale-invariant, so results are
    # unchanged, but per-ray norms yield correct unit vectors.
    rayDirections_norm = rayDirections / np.linalg.norm(rayDirections, axis=1, keepdims=True)

    PtsIntersectedWaterPlane = []
    for ray in rayDirections_norm:
        # standard parametric line-plane intersection
        ndotu = planeNormal_norm.dot(ray)
        if abs(ndotu) < epsilon:
            raise RuntimeError("no intersection with plane possible")
        w = rayPoint - planePoint
        si = -planeNormal_norm.dot(w) / ndotu
        Psi = w + si * ray + planePoint
        PtsIntersectedWaterPlane.append(Psi)

    return np.asarray(PtsIntersectedWaterPlane)
def getWaterborderXYZ(borderPts, ptCloud, exteriorOrient, interiorOrient):
    """Assign 3D object-space coordinates to detected water-border image points.

    Projects *ptCloud* into the image to obtain a depth map, finds the nearest
    projected depth value for each (undistorted) border point and transforms
    the resulting x/y/depth triples back into object space.

    Parameters
    ----------
    borderPts : numpy.ndarray
        Water-border image measurements (pixels), one point per row.
    ptCloud : numpy.ndarray
        3D point cloud used to derive per-pixel depth.
    exteriorOrient, interiorOrient : objects
        Camera exterior/interior geometry as expected by photo_tool.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray) or None
        Object-space coordinates of the border points and the matched
        undistorted pixel positions; None if projection or the nearest-
        neighbour search failed.
    """
    # project point cloud into the image to get an x/y/depth(+rgb) map
    xyd_rgb_map = photo_tool.project_pts_into_img(exteriorOrient, interiorOrient, ptCloud, False)
    # original check was `xyd_rgb_map.any() == None`, which is always False
    # (and crashes on a None return) — use an identity test instead
    if xyd_rgb_map is None:
        print('point projection into image failed')
        return

    # undistort border points and convert to pixel coordinates
    borderPts_undist = photo_tool.undistort_img_coos(borderPts, interiorOrient, False)
    borderPts_undist_px = photo_tool.metric_to_pixel(
        borderPts_undist, interiorOrient.resolution_x, interiorOrient.resolution_y,
        interiorOrient.sensor_size_x, interiorOrient.sensor_size_y)

    # find nearest depth value to border points in the depth image
    borderPts_xyd, borderPtsNN_undist_px = NN_pts(xyd_rgb_map, borderPts_undist_px, 5, False)
    if borderPts_xyd is None:
        print('no NN for border found')
        return

    # convert matched pixel positions back to metric image coordinates
    borderPts_xyd_mm = photo_tool.pixel_to_metric(
        borderPts_xyd[:, 0:2], interiorOrient.resolution_x, interiorOrient.resolution_y,
        interiorOrient.sensor_size_x, interiorOrient.sensor_size_y)
    borderPts_mm_d = borderPts_xyd[:, 2]
    xyd_map = np.hstack((borderPts_xyd_mm,
                         borderPts_mm_d.reshape(borderPts_mm_d.shape[0], 1)))

    # lift x/y/depth into object space
    xyd_map_mm = photo_tool.imgDepthPts_to_objSpace(
        xyd_map, exteriorOrient, interiorOrient.resolution_x, interiorOrient.resolution_y,
        interiorOrient.sensor_size_x / interiorOrient.resolution_x, interiorOrient.ck)

    return xyd_map_mm, borderPtsNN_undist_px
def FeatureTracking(template_width, template_height, search_area_x_CC, search_area_y_CC,
                    shiftSearchFromCenter_x, shiftSearchFromCenter_y, frameCount,
                    FT_forNthNberFrames, TrackEveryNthFrame, dir_imgs, img_list,
                    featuresToTrack, interior_orient, performLSM, lsmBuffer, threshLSM,
                    subpixel, trackedFeaturesOutput_undist, save_gif, imagesForGif,
                    directoryOutput, lk, initialEstimatesLK, maxDistBackForward_px=1):
    """Track features through an image sequence with LK or NCC template matching.

    Starting at frame *frameCount*, tracks *featuresToTrack* for
    *FT_forNthNberFrames* frames, stepping *TrackEveryNthFrame* frames at a
    time. Tracked positions are undistorted, converted to pixels and appended
    to *trackedFeaturesOutput_undist* (mutated in place); an arrow plot per
    frame pair is written to *directoryOutput* (and collected in
    *imagesForGif* when *save_gif* is True).

    Parameters
    ----------
    template_width, template_height : int
        Template patch size for NCC matching.
    search_area_x_CC, search_area_y_CC : int
        Search-window size (NCC) / window size (LK).
    shiftSearchFromCenter_x, shiftSearchFromCenter_y : int
        Expected flow used to shift the search window / initial LK guesses.
    frameCount : int
        Index of the first frame in img_list.
    FT_forNthNberFrames, TrackEveryNthFrame : int
        Tracking span and frame step.
    dir_imgs : str
        Directory containing the images named in img_list.
    featuresToTrack : numpy.ndarray
        Rows of (feature id, x, y) in pixels.
    interior_orient : object
        Interior camera geometry for undistortion/conversion (photogrF).
    performLSM, lsmBuffer, threshLSM, subpixel : 
        NCC refinement options forwarded to trackF.performFeatureTracking.
    lk : bool
        True -> Lucas-Kanade tracking; False -> NCC template matching.
    initialEstimatesLK : bool
        Use the shifted feature positions as LK initial guesses.
    maxDistBackForward_px : float, optional
        Maximum allowed backward-forward tracking deviation (default 1 px).

    Returns
    -------
    (list, list)
        The (extended) trackedFeaturesOutput_undist and imagesForGif.
    """
    # prepare function input
    template_size = np.asarray([template_width, template_height])
    search_area = np.asarray([search_area_x_CC, search_area_y_CC])
    shiftSearchArea = np.asarray([shiftSearchFromCenter_x, shiftSearchFromCenter_y])

    # save initial pixel position of features (undistorted, converted to pixels)
    trackedFeatures0_undist = photogrF.undistort_img_coos(featuresToTrack[:, 1:3], interior_orient)
    trackedFeatures0_undist_px = photogrF.metric_to_pixel(
        trackedFeatures0_undist, interior_orient.resolution_x, interior_orient.resolution_y,
        interior_orient.sensor_size_x, interior_orient.sensor_size_y)
    frame_name0 = np.asarray([img_list[frameCount] for x in range(featuresToTrack.shape[0])])
    # build rows of (frame name, feature id, x, y) via stacked columns + reshape
    trackedFeaturesOutput_undist0 = np.hstack((frame_name0, featuresToTrack[:, 0]))
    trackedFeaturesOutput_undist0 = np.hstack((trackedFeaturesOutput_undist0,
                                               trackedFeatures0_undist_px[:, 0]))
    trackedFeaturesOutput_undist0 = np.hstack((trackedFeaturesOutput_undist0,
                                               trackedFeatures0_undist_px[:, 1]))
    trackedFeaturesOutput_undist0 = trackedFeaturesOutput_undist0.reshape(4, frame_name0.shape[0]).T
    trackedFeaturesOutput_undist.extend(trackedFeaturesOutput_undist0)

    # loop through images
    img_nbr_tracking = frameCount
    while img_nbr_tracking < frameCount + FT_forNthNberFrames:
        # read template (current) and search (next) frame as grayscale
        templateImg = cv2.imread(dir_imgs + img_list[img_nbr_tracking], 0)
        searchImg = cv2.imread(dir_imgs + img_list[img_nbr_tracking + TrackEveryNthFrame], 0)
        print('template image: ' + img_list[img_nbr_tracking] + ', search image: '
              + img_list[img_nbr_tracking + TrackEveryNthFrame] + '\n')

        if lk:
            # tracking (matching templates) with Lucas Kanade
            try:
                if initialEstimatesLK:
                    # consider knowledge about flow velocity and direction
                    # (shift initial guesses by the expected displacement)
                    featureEstimatesNextFrame = featuresToTrack[:, 1:]
                    x_initialGuess = featureEstimatesNextFrame[:, 0]
                    y_initialGuess = featureEstimatesNextFrame[:, 1]
                    x_initialGuess = (x_initialGuess.reshape(x_initialGuess.shape[0], 1)
                                      + np.ones((featureEstimatesNextFrame.shape[0], 1))
                                      * shiftSearchFromCenter_x)
                    y_initialGuess = (y_initialGuess.reshape(y_initialGuess.shape[0], 1)
                                      + np.ones((featureEstimatesNextFrame.shape[0], 1))
                                      * shiftSearchFromCenter_y)
                    featureEstimatesNextFrame = np.hstack((x_initialGuess, y_initialGuess))
                else:
                    featureEstimatesNextFrame = None

                # perform tracking
                trackedFeaturesLK, status = trackF.performFeatureTrackingLK(
                    templateImg, searchImg, featuresToTrack[:, 1:], initialEstimatesLK,
                    featureEstimatesNextFrame, search_area_x_CC, search_area_y_CC,
                    maxDistBackForward_px)
                featuresId = featuresToTrack[:, 0]
                trackedFeaturesLKFiltered = np.hstack(
                    (featuresId.reshape(featuresId.shape[0], 1), trackedFeaturesLK))
                trackedFeaturesLKFiltered = np.hstack((trackedFeaturesLKFiltered, status))
                # remove points with erroneous LK tracking (all-zero rows)
                trackedFeaturesLK_px = trackedFeaturesLKFiltered[
                    ~np.all(trackedFeaturesLKFiltered == 0, axis=1)]
                # drop rows with nan values (features that failed the
                # back-forward tracking test)
                trackedFeaturesLK_pxDF = pd.DataFrame(trackedFeaturesLK_px)
                trackedFeaturesLK_pxDF = trackedFeaturesLK_pxDF.dropna()
                trackedFeaturesLK_px = np.asarray(trackedFeaturesLK_pxDF)
                trackedFeatures = trackedFeaturesLK_px[:, 0:3]

                # undistort tracked feature measurements
                trackedFeature_undist = photogrF.undistort_img_coos(
                    trackedFeaturesLK_px[:, 1:3], interior_orient)
                trackedFeature_undist_px = photogrF.metric_to_pixel(
                    trackedFeature_undist, interior_orient.resolution_x,
                    interior_orient.resolution_y, interior_orient.sensor_size_x,
                    interior_orient.sensor_size_y)
                frameName = np.asarray([img_list[img_nbr_tracking + TrackEveryNthFrame]
                                        for x in range(trackedFeaturesLK_px.shape[0])])
                trackedFeaturesOutput_undistArr = np.hstack((frameName, trackedFeaturesLK_px[:, 0]))
                trackedFeaturesOutput_undistArr = np.hstack((trackedFeaturesOutput_undistArr,
                                                             trackedFeature_undist_px[:, 0]))
                trackedFeaturesOutput_undistArr = np.hstack((trackedFeaturesOutput_undistArr,
                                                             trackedFeature_undist_px[:, 1]))
                trackedFeaturesOutput_undistArr = trackedFeaturesOutput_undistArr.reshape(
                    4, frameName.shape[0]).T
                trackedFeaturesOutput_undist.extend(trackedFeaturesOutput_undistArr)
            except Exception as e:
                print(e)
                print('stopped tracking features with LK after frame ' + img_list[img_nbr_tracking])
        else:
            # tracking (matching templates) with NCC
            trackedFeatures = []
            for featureToTrack in featuresToTrack:
                try:
                    # perform tracking
                    trackedFeature_px = trackF.performFeatureTracking(
                        template_size, search_area, featureToTrack[1:], templateImg,
                        searchImg, shiftSearchArea, performLSM, lsmBuffer, threshLSM,
                        subpixel, False)
                    # check backwards (track result back into the template image)
                    trackedFeature_pxCheck = trackF.performFeatureTracking(
                        template_size, search_area, trackedFeature_px, searchImg,
                        templateImg, -1 * shiftSearchArea, performLSM, lsmBuffer,
                        threshLSM, subpixel, False)
                    # drop features that fail the backward-forward tracking test
                    distBetweenBackForward = abs(
                        featureToTrack[1:] - trackedFeature_pxCheck).reshape(-1, 2).max(-1)
                    if distBetweenBackForward > maxDistBackForward_px:
                        print('feature ' + str(featureToTrack[0]) + ' failed backward test.')
                        # explicit raise replaces the original `x = 1/0` trick;
                        # caught below, skipping this feature
                        raise RuntimeError('backward-forward tracking test failed')

                    # join tracked feature and id of feature
                    trackedFeatures.append([featureToTrack[0], trackedFeature_px[0],
                                            trackedFeature_px[1]])

                    # undistort tracked feature measurement
                    trackedFeature_undist = photogrF.undistort_img_coos(
                        trackedFeature_px.reshape(1, 2), interior_orient)
                    trackedFeature_undist_px = photogrF.metric_to_pixel(
                        trackedFeature_undist, interior_orient.resolution_x,
                        interior_orient.resolution_y, interior_orient.sensor_size_x,
                        interior_orient.sensor_size_y)
                    trackedFeaturesOutput_undist.append(
                        [img_list[img_nbr_tracking + TrackEveryNthFrame],
                         int(featureToTrack[0]),
                         trackedFeature_undist_px[0, 0], trackedFeature_undist_px[0, 1]])
                except Exception:
                    print('stopped tracking feature ' + str(featureToTrack[0])
                          + ' after frame ' + img_list[img_nbr_tracking] + '\n')

        trackedFeatures = np.asarray(trackedFeatures)
        print('nbr of tracked features: ' + str(trackedFeatures.shape[0]) + '\n')

        # for visualization of tracked features in gif
        featuers_end, featuers_start, _ = drawF.assignPtsBasedOnID(trackedFeatures, featuresToTrack)
        arrowsImg = drawF.drawArrowsOntoImg(templateImg, featuers_start, featuers_end)
        if save_gif:
            arrowsImg.savefig(directoryOutput + 'temppFT.jpg', dpi=150, pad_inches=0)
            imagesForGif.append(cv2.imread(directoryOutput + 'temppFT.jpg'))
        else:
            arrowsImg.savefig(directoryOutput + 'temppFT' + str(frameCount) + '.jpg',
                              dpi=150, pad_inches=0)
        arrowsImg.close()
        del arrowsImg

        # tracked positions become the start positions of the next step
        featuresToTrack = trackedFeatures
        img_nbr_tracking = img_nbr_tracking + TrackEveryNthFrame

    return trackedFeaturesOutput_undist, imagesForGif
#read point cloud pt_cloud_table = pd.read_table(ptCloud_file, header=None, delimiter=ptCloud_separator) ptCloud = np.asarray(pt_cloud_table) del pt_cloud_table #read pixel coordinates of image points of GCPs (including ID) gcpImgPts_table = pd.read_table(imgCoo_GCP_file, header=None) gcpImgPts_table = np.asarray(gcpImgPts_table) gcpPts_ids = gcpImgPts_table[:, 0] gcpPts_ids = gcpPts_ids.reshape(gcpPts_ids.shape[0], 1) gcpImgPts_to_undist = gcpImgPts_table[:, 1:3] #undistort image measurements of GCP gcpImgPts_undist = photogrF.undistort_img_coos(gcpImgPts_to_undist, interior_orient, False) gcpImgPts_undist = np.hstack((gcpPts_ids, gcpImgPts_undist)) #read object coordinates of GCP (including point ID) gcpObjPts_table = pd.read_table(gcpCoo_file, header=None) gcpObjPts_table = np.asarray(gcpObjPts_table) #read image names in folder img_list = [] for img_file in os.listdir(dir_imgs): if '.png' in img_file: img_list.append(img_file) img_list = sorted(img_list) #prepare output if not os.path.exists(directoryOutput):