Example #1
    def setUp(self):
        self.cam = Camera(None, Camera.extrinsic_matrix(np.eye(3), [1, 0, 0]),
                          Camera.intrinsic_matrix([1, 1], [0, 0]),
                          np.zeros(5), (2,2))
        theta = np.radians(22.5)
        rot = [[np.cos(theta), 0, np.sin(theta)],
               [0, 1, 0],
               [-np.sin(theta), 0, np.cos(theta)]]
        self.proj = DLPProjector(DLPProjector.extrinsic_matrix(rot, [0, 0, 0]),
                                  DLPProjector.intrinsic_matrix([1, 1], [0, 0]),
                                  np.zeros(5), (2,2))

        # Get the Gray code patterns that were projected
        patterns = [DLPPattern(np.ones(self.proj.resolution, dtype=np.uint8) * 255),
                    DLPPattern(np.zeros(self.proj.resolution, dtype=np.uint8))]
        patterns.extend(gray_code_patterns(self.proj.resolution))

        self.gen_patterns = []
        for p in patterns:
            self.gen_patterns.append([GeneratedPattern([(p, self.proj)])])

        # calculate the vertical stripe point locations
        h1 = 1 / np.sin(theta)
        p1 = np.array([-1, 0, h1 * np.cos(theta)])

        h2 = np.sin(np.radians(45)) / np.sin(theta)
        p2 = np.array([h2 * np.sin(theta), 0, h2 * np.cos(theta)])

        p3 = p1.copy()
        p3[1] = p3[2]

        p4 = p2.copy()
        p4[1] = p4[2]
        
        self.expected_points = [p1, p2, p3, p4]
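
The extrinsic matrices above are built from a plain rotation matrix and translation vector. As a point of reference, a pose matrix of the form [R | t] can be assembled as below; the exact convention of Camera.extrinsic_matrix and DLPProjector.extrinsic_matrix (world-to-camera vs. camera-to-world) is an assumption here, not something stated in this snippet.

import numpy as np

def make_extrinsic(R, t):
    """Stack a 3x3 rotation and a length-3 translation into a 3x4 [R | t] pose matrix.
    Hypothetical helper for illustration; the library's own extrinsic_matrix may differ."""
    R = np.asarray(R, dtype=float).reshape(3, 3)
    t = np.asarray(t, dtype=float).reshape(3, 1)
    return np.hstack((R, t))  # shape (3, 4)

# The projector pose used above: a 22.5 degree rotation about the y-axis, no translation.
theta = np.radians(22.5)
rot = [[np.cos(theta), 0, np.sin(theta)],
       [0, 1, 0],
       [-np.sin(theta), 0, np.cos(theta)]]
print(make_extrinsic(rot, [0, 0, 0]))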
Example #2
    def setUp(self):
        self.width = 4
        self.height = 16
        patterns = gray_code_patterns((self.width, self.height), vertical_stripes=False)
        images = []
        for p in patterns:
            data = p.image
            images.append(Image(grayscale_to_RGB(data), None, [GeneratedPattern([(p, None)])]))
        (self.gray_code_values, self.valid_pixel_mask) = gray_code_estimates(images)
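
gray_code_estimates returns a per-pixel array of decoded code values and a mask of pixels that decoded reliably. If those values are still Gray-coded (an assumption; the function may already return plain binary), the standard Gray-to-binary conversion is a prefix-XOR, sketched below.

import numpy as np

def gray_to_binary(g):
    """Convert Gray-coded integers (scalar or NumPy array) to plain binary
    by repeatedly XOR-ing with right-shifted copies (the prefix-XOR trick)."""
    b = np.array(g, copy=True)
    shift = 1
    while np.any(b >> shift):
        b ^= b >> shift
        shift <<= 1
    return b

# e.g. stripe_index = gray_to_binary(self.gray_code_values)  # hypothetical usage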
Example #3
    def setUp(self):
        self.cam = Camera(None, Camera.extrinsic_matrix(np.eye(3), np.zeros(3)),
                          Camera.intrinsic_matrix([1, 1], [1, 0]),
                          np.zeros(5), (3,1))
        self.proj = DLPProjector(DLPProjector.extrinsic_matrix(np.eye(3), [1, 0, 0]),
                                  DLPProjector.intrinsic_matrix([0.5, 1], [0, 0]),
                                  np.zeros(5), (2,2))

        # Get the Gray code patterns that were projected
        patterns = [DLPPattern(np.ones(self.proj.resolution, dtype=np.uint8) * 255),
                    DLPPattern(np.zeros(self.proj.resolution, dtype=np.uint8))]
        patterns.extend(gray_code_patterns(self.proj.resolution))
        patterns.extend(gray_code_patterns(self.proj.resolution, vertical_stripes=False))

        self.gen_patterns = []
        for p in patterns:
            self.gen_patterns.append([GeneratedPattern([(p, self.proj)])])

        self.expected_points = [[-1, 0, 1], [0, 0, 0.5], [1, 0, 1]]
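
Example #3 builds its intrinsics from focal lengths and a principal point, mirroring the (fc, cc, alpha_c) parameters loaded from the calibration file in Example #5. Below is a sketch of the usual pinhole intrinsic matrix built from those parameters; whether Camera.intrinsic_matrix follows exactly this layout is an assumption.

import numpy as np

def make_intrinsic(f, c, alpha=0.0):
    """Pinhole intrinsic matrix from focal lengths f=(fx, fy), principal point
    c=(cx, cy), and an optional skew coefficient alpha (Bouguet-style convention).
    Hypothetical helper for illustration."""
    fx, fy = f
    cx, cy = c
    return np.array([[fx, alpha * fx, cx],
                     [0.0, fy, cy],
                     [0.0, 0.0, 1.0]])

# The camera above: unit focal lengths, principal point at (1, 0), no skew.
print(make_intrinsic([1, 1], [1, 0]))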
Example #4
    def setUp(self):
        # four simulated 2x4 grayscale captures (one per projected pattern),
        # with bright pixels near 255, dark pixels near 0, and a little noise
        data = [[[8, 240, 12, 225],
                 [240, 12, 225, 12]],
                [[234, 12, 25, 8],
                 [12, 25, 8, 250]],
                [[12, 219, 248, 21],
                 [219, 248, 21, 8]],
                [[243, 5, 8, 252],
                 [174, 8, 252, 230]]]
        patterns = gray_code_patterns((2,4), vertical_stripes=False)

        images = []
        for i in range(len(data)):
            images.append(Image(grayscale_to_RGB(data[i]), None, [GeneratedPattern([(patterns[i], None)])]))

        (self.gray_code_values, self.valid_pixel_mask) = gray_code_estimates(images)
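
The hand-built data above stands in for four captured frames of a horizontal-stripe Gray code sequence. One common way such captures are reduced to bits is to compare each pattern frame against its inverse and keep only pixels with enough contrast; whether gray_code_estimates works exactly this way is an assumption, but the sketch below shows the idea.

import numpy as np

def decode_bit_plane(img, img_inv, min_contrast=10):
    """Threshold one Gray-code bit from a pattern frame and its inverse.
    Returns (bit, valid): bit is 1 where the pattern frame is brighter than the
    inverse frame; valid marks pixels whose contrast exceeds min_contrast."""
    img = np.asarray(img, dtype=np.int32)
    img_inv = np.asarray(img_inv, dtype=np.int32)
    bit = (img > img_inv).astype(np.uint8)
    valid = np.abs(img - img_inv) >= min_contrast
    return bit, valid

# e.g. for the first two frames of the data above (the pattern/inverse pairing
# is an assumption about how the frames are ordered):
bit0, valid0 = decode_bit_plane([[8, 240, 12, 225], [240, 12, 225, 12]],
                                [[234, 12, 25, 8], [12, 25, 8, 250]])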
Example #5
    @classmethod
    def setUpClass(cls):
        # get the relevant data
        structured_light_dir = rel_to_file(os.path.join("test-data", "structured-light"),
                                               __file__)
        cls.calib = scipy.io.loadmat(os.path.join(structured_light_dir, "calib",
                                                  "calib_results", "calib_cam_proj.mat"),
                                      squeeze_me=True)


        # Initialize the camera
        cam_extrinsic_matrix = Camera.extrinsic_matrix(cls.calib['Rc_1_cam'],
                                                       cls.calib['Tc_1_cam'])
        cam_intrinsic_matrix = Camera.intrinsic_matrix(cls.calib['fc_cam'],
                                                       cls.calib['cc_cam'],
                                                       cls.calib['alpha_c_cam'])
        cam_distortion = cls.calib['kc_cam']
        cam_resolution = (cls.calib['nx_cam'], cls.calib['ny_cam'])
        cls.cam = Camera(None, cam_extrinsic_matrix, cam_intrinsic_matrix, cam_distortion, cam_resolution)

        # Initialize the projector
        proj_extrinsic_matrix = DLPProjector.extrinsic_matrix(cls.calib['Rc_1_proj'],
                                                           cls.calib['Tc_1_proj'])
        proj_intrinsic_matrix = DLPProjector.intrinsic_matrix(cls.calib['fc_proj'],
                                                           cls.calib['cc_proj'],
                                                           cls.calib['alpha_c_proj'])
        proj_distortion = cls.calib['kc_proj']
        proj_resolution = (cls.calib['nx_proj'], cls.calib['ny_proj'])
        cls.proj = DLPProjector(proj_extrinsic_matrix, proj_intrinsic_matrix, proj_distortion, proj_resolution)

        # Get the Images
        img_folder = os.path.join(structured_light_dir, "data", "Gray", "man", "v1")
        loaded_images = load_from_directory(img_folder)

        # Get the Gray code patterns that were projected
        pattern_shape = (cls.calib['nx_proj'], cls.calib['ny_proj'])
        patterns = [DLPPattern(np.ones(pattern_shape, dtype=np.uint8) * 255),
                    DLPPattern(np.zeros(pattern_shape, dtype=np.uint8))]
        patterns.extend(gray_code_patterns(pattern_shape))
        patterns.extend(gray_code_patterns(pattern_shape, vertical_stripes=False))

        cls.gen_patterns = []
        for p in patterns:
            cls.gen_patterns.append([GeneratedPattern([(p, cls.proj)])])

        # make sure patterns and images are the same length
        if len(cls.gen_patterns) != len(loaded_images):
            raise Exception("Pattern and Image list should be the same length. Please figure out why they aren't")

        # reconstruct the images
        images = []
        for i in range(len(loaded_images)):
            images.append(Image(loaded_images[i].data, cls.cam, cls.gen_patterns[i]))

        # integer division so the result can be used as a slice index:
        # half of the remaining images are vertical-stripe patterns, half horizontal
        n = (len(images) - 2) // 2
            
        # finally at the point we would have been at if our machine scanned the
        # object :)
        cls.point_cloud = extract_point_cloud(images[0:2], images[2:2+n], images[2+n:])

        viewer = pcl.PCLVisualizer("View Gray Scan")
        viewer.add_point_cloud(cls.point_cloud)
        viewer.add_coordinate_system(translation=cls.proj.T, scale=10)
        viewer.add_text_3d("projector", cls.proj.T, text_id="projector")
        viewer.add_coordinate_system(translation=cls.cam.T, scale=10)
        viewer.add_text_3d("camera", cls.cam.T, text_id="camera")
        viewer.init_camera_parameters()

        viewer.spin()
        viewer.close()
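
extract_point_cloud is where the decoded camera/projector correspondences become 3D points. Exactly how it triangulates is not shown in this snippet, but a standard way to triangulate a single camera-pixel / projector-pixel correspondence from two 3x4 projection matrices (K @ [R | t]) is linear DLT triangulation, sketched below under that assumption.

import numpy as np

def triangulate_dlt(P_cam, P_proj, x_cam, x_proj):
    """Linear (DLT) triangulation of one correspondence.
    P_cam, P_proj: 3x4 projection matrices; x_cam, x_proj: (u, v) pixel coordinates.
    Returns the Euclidean 3D point that best satisfies both projections."""
    A = np.vstack([
        x_cam[0] * P_cam[2] - P_cam[0],
        x_cam[1] * P_cam[2] - P_cam[1],
        x_proj[0] * P_proj[2] - P_proj[0],
        x_proj[1] * P_proj[2] - P_proj[1],
    ])
    _, _, vt = np.linalg.svd(A)
    X = vt[-1]
    return X[:3] / X[3]  # homogeneous -> Euclidean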