Example 1
    def run(self, save_clouds, show_clouds):
        """
		Args:
			save_clouds (bool): Default False
			show_clouds (bool): Default False

		Returns: void
		"""
        Logger._log("Pipeline")

        ###
        # Stage 0: Load RGB-D images from the dataset folder
        ##
        '''
        NOTE: Each stage returns the objects it modifies so that scoping stays explicit,
              unless someone finds a clean way to split this into separate classes in Python.
              The approaches looked at so far were not really "Pythonic".
        '''
        images = []
        images = self.load_images.load_images(self.folder_path, images)

        ###
        # Stage 1: Detect features in loaded images
        ##
        cam_Frames = []
        descriptors_vec = []
        images, cam_Frames, descriptors_vec = self.feature_extraction.extract_features(
            images, cam_Frames, descriptors_vec, self.low_thresh,
            self.high_thresh)

        ###
        # Stage 2: Calculate descriptors and find image pairs through matching
        ##
        image_pairs = []
        image_pairs, cam_Frames = self.find_matching_pairs.find_matching_pairs(
            images, cam_Frames, descriptors_vec, image_pairs)

        ###
        # Stage 3: Compute pairwise R and t
        ##
        image_pairs, cam_Frames = self.registration.register_camera(
            image_pairs, cam_Frames, self.cameraMatrix, self.distCoeffs)

        ###
        # Stage 4: Construct associativity matrix and spanning tree
        ##
        assocMat = associativity(len(cam_Frames))

        for pair in image_pairs:
            i = pair.pair_index[0]
            j = pair.pair_index[1]

            ## Skip pairs whose registration failed (no rotation estimated)
            if pair.R.size == 0:
                continue

            assocMat.assignPair(i, j, pair)
            assocMat.assignPair(j, i, pair)

        tree = associativity()
        camera_num = self.spanning_tree.build_spanning_tree(
            image_pairs, assocMat, tree)

        ## NOTE: the spanning tree built above is currently discarded; the full
        ## associativity matrix is used downstream instead.
        tree = assocMat

        ###
        # Stage 5: Compute global Rs and ts
        ##
        gCameraPoses = []
        gCameraPoses = self.global_cam_poses.glo_cam_poses(
            images, gCameraPoses, image_pairs, tree)

        ###
        # Stage 6: Find depth-point clusters and map them from the local camera frames to the global frame
        ##
        pointClusters = []
        pointMap = {}
        pointClusters, pointMap = self.find_clust.find_clusters(
            assocMat, gCameraPoses, cam_Frames, pointClusters, pointMap,
            self.cameraMatrix, image_pairs)

        ###
        # Stage 7: Get the center of mass of each cluster
        ##
        pointCloud = []
        pointCloud = self.findCoM.find_CoM(pointClusters, pointCloud)

        ## Save the point cloud before bundle adjustment (BA)
        view = viewer("Before BA", self.low_thresh, self.high_thresh)
        cloud = view.createPointCloud(images, gCameraPoses, self.cameraMatrix)
        fname = "./" + self.filename + ".pcd"

        if save_clouds:
            view.saveCloud(cloud, fname)
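
A minimal standalone sketch of the Stage 4 idea, using a hypothetical PairMatrix in place of the project's associativity class (whose internals are not shown here) and a plain breadth-first search in place of build_spanning_tree: pairwise registrations fill a symmetric matrix indexed by camera id, and a traversal from a root camera yields a spanning tree over the connected cameras.

# Hypothetical sketch only; PairMatrix and bfs_spanning_tree are stand-ins,
# not the project's actual associativity / spanning_tree implementations.
from collections import deque


class PairMatrix:
    """Symmetric matrix of registered image pairs, indexed by camera id."""

    def __init__(self, n):
        self.n = n
        self.cells = [[None] * n for _ in range(n)]

    def assignPair(self, i, j, pair):
        self.cells[i][j] = pair


def bfs_spanning_tree(mat, root=0):
    """Return {camera: parent} edges reaching every camera connected to root."""
    parent = {root: None}
    queue = deque([root])
    while queue:
        i = queue.popleft()
        for j in range(mat.n):
            if mat.cells[i][j] is not None and j not in parent:
                parent[j] = i
                queue.append(j)
    return parent


m = PairMatrix(3)
m.assignPair(0, 1, "pair01"); m.assignPair(1, 0, "pair01")
m.assignPair(1, 2, "pair12"); m.assignPair(2, 1, "pair12")
print(bfs_spanning_tree(m))  # {0: None, 1: 0, 2: 1}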
Example 2
import os
import re

import cv2 as cv  # requires opencv-contrib-python for the cv.xphoto module
import numpy as np

## `Logger` and `structures` are project-local modules; their import paths are
## not shown in this snippet.


def load_images(folder_path, images):
    Logger._log("\nStep 0 (preprocess)")

    ## Get list of image files from dataset path
    if not os.path.exists(folder_path):
        raise ValueError("Invalid data path: {}".format(folder_path))

    ## Reset images list
    images = []

    # Regex to parse image names
    fname_regex = r"^.*/frame_([\dT\.]+)_(rgb|depth)\.png$"

    ## Create intermediate map structure
    _tstamps = {}

    for filename in os.listdir(folder_path):
        path = os.path.abspath(os.path.join(folder_path, filename))
        match = re.match(fname_regex, path)

        if match is not None:
            """
            Match groups, e.g.:
            * 0: ../data/frame_20150312T172452.627702_depth.png
            * 1: 20150312T172452.627702
            * 2: depth
            """
            time_str = match.group(1)
            _tstamps[time_str] = True

    ## Convert to a sorted list of timestamps (deterministic, chronological order)
    n = len(_tstamps)
    tstamps = sorted(_tstamps.keys())

    new_n = 0
    for i in range(n):
        ## Get time string
        time_str = tstamps[i]

        ## Get file paths
        rgb_path = "{}/frame_{}_rgb.png".format(folder_path, time_str)
        dep_path = "{}/frame_{}_depth.png".format(folder_path, time_str)
        rgb_img = cv.imread(rgb_path)
        ## Load the 16-bit depth image without converting its bit depth
        _depth_img = cv.imread(dep_path, cv.IMREAD_ANYDEPTH)

        ## White-balance the RGB image (grayworld assumption)
        wb = cv.xphoto.createGrayworldWB()
        wb.setSaturationThreshold(0.9)  # threshold set manually
        rgb_undist_img = wb.balanceWhite(rgb_img)

        ## Get grayscale image from the white-balanced RGB image
        gray_undist_img = cv.cvtColor(rgb_undist_img, cv.COLOR_BGR2GRAY)

        ## Bm1: override with grayscale computed from the raw (non-white-balanced) image
        gray_undist_img = cv.cvtColor(rgb_img, cv.COLOR_BGR2GRAY)

        ## Convert depth map to floats
        # depth_img = (_depth_img.astype(float)) / 65535
        # depth_img = img_as_float(_depth_img)
        depth_img = np.float32(_depth_img)

        ## Smooth depth map
        depth_smooth_img = cv.bilateralFilter(depth_img, 5, 10, 10)

        image = structures.Image(i, time_str, rgb_undist_img, gray_undist_img,
                                 depth_smooth_img, rgb_path, dep_path)
        images.append(image)

        new_n += 1

    print("Loaded {} RGB_D images.".format(new_n))
    return images
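
As a quick sanity check of the filename regex used above, the standalone snippet below matches the sample path quoted in the inline comment and prints the two capture groups (timestamp string and image type):

import re

fname_regex = r"^.*/frame_([\dT\.]+)_(rgb|depth)\.png$"
sample_path = "../data/frame_20150312T172452.627702_depth.png"

match = re.match(fname_regex, sample_path)
if match is not None:
    print(match.group(1))  # 20150312T172452.627702 -> timestamp string
    print(match.group(2))  # depth -> image type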