def setUpClass(cls):
    # excess epsilon to add margin
    cls.eps = np.nextafter(0, 1)
    # set up input data dir, parameters
    interior_corners = (8, 6)
    side_length = 30  # in mm
    # use results from matlab script to test
    calib_folder = rel_to_file(os.path.join("test-data", "swept-plane", "calib"),
                               __file__)
    calib_images = load_from_directory(calib_folder)
    cls.calib_results = scipy.io.loadmat(
        os.path.join(calib_folder, "Calib_Results.mat"), squeeze_me=True)
    # run the calibration
    (cls.focal_length, cls.principal_point, cls.alpha,
     cls.distortion) = intrinsic_calibration_with_checkerboard(
        calib_images, interior_corners, side_length)
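# Not part of the original file: a minimal sketch of how the fixtures above could
# be exercised. It assumes the MATLAB Calib_Results.mat stores the focal length
# under the 'fc' key (Bouguet toolbox convention) and that a relative tolerance of
# 1e-2 is acceptable for this data set; both are assumptions, not project facts.
def test_focal_length_matches_matlab(self):
    np.testing.assert_allclose(self.focal_length, self.calib_results['fc'],
                               rtol=1e-2, atol=self.eps)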
def setUpClass(cls):
    # get the relevant data
    structured_light_dir = rel_to_file(os.path.join("test-data",
                                                    "structured-light"), __file__)
    cls.calib = scipy.io.loadmat(os.path.join(structured_light_dir, "calib",
                                              "calib_results",
                                              "calib_cam_proj.mat"),
                                 squeeze_me=True)
    # Initialize the camera
    cam_extrinsic_matrix = Camera.extrinsic_matrix(cls.calib['Rc_1_cam'],
                                                   cls.calib['Tc_1_cam'])
    cam_intrinsic_matrix = Camera.intrinsic_matrix(cls.calib['fc_cam'],
                                                   cls.calib['cc_cam'],
                                                   cls.calib['alpha_c_cam'])
    cam_distortion = cls.calib['kc_cam']
    cam_resolution = (cls.calib['nx_cam'], cls.calib['ny_cam'])
    cls.cam = Camera(None, cam_extrinsic_matrix, cam_intrinsic_matrix,
                     cam_distortion, cam_resolution)
    # Initialize the projector
    proj_extrinsic_matrix = DLPProjector.extrinsic_matrix(cls.calib['Rc_1_proj'],
                                                          cls.calib['Tc_1_proj'])
    proj_intrinsic_matrix = DLPProjector.intrinsic_matrix(cls.calib['fc_proj'],
                                                          cls.calib['cc_proj'],
                                                          cls.calib['alpha_c_proj'])
    proj_distortion = cls.calib['kc_proj']
    proj_resolution = (cls.calib['nx_proj'], cls.calib['ny_proj'])
    cls.proj = DLPProjector(proj_extrinsic_matrix, proj_intrinsic_matrix,
                            proj_distortion, proj_resolution)
    # Get the Images
    img_folder = os.path.join(structured_light_dir, "data", "Gray", "man", "v1")
    loaded_images = load_from_directory(img_folder)
    # Get the Gray code patterns that were projected
    pattern_shape = (cls.calib['nx_proj'], cls.calib['ny_proj'])
    patterns = [DLPPattern(np.ones(pattern_shape, dtype=np.uint8) * 255),
                DLPPattern(np.zeros(pattern_shape, dtype=np.uint8))]
    patterns.extend(gray_code_patterns(pattern_shape))
    patterns.extend(gray_code_patterns(pattern_shape, vertical_stripes=False))
    cls.gen_patterns = []
    for p in patterns:
        cls.gen_patterns.append([GeneratedPattern([(p, cls.proj)])])
    # make sure patterns and images are the same length
    if len(cls.gen_patterns) != len(loaded_images):
        raise Exception("Pattern and image lists should be the same length; "
                        "check that the captures match the projected patterns.")
    # reconstruct the images
    images = []
    for i in range(len(loaded_images)):
        images.append(Image(loaded_images[i].data, cls.cam, cls.gen_patterns[i]))
    # the first two images are the all-white/all-black captures; the rest split
    # evenly into the vertical- and horizontal-stripe Gray code sequences
    n = (len(images) - 2) // 2  # integer division so the slices below stay ints
    # finally at the point we would have been at if our machine scanned the
    # object :)
    cls.point_cloud = extract_point_cloud(images[0:2], images[2:2 + n],
                                          images[2 + n:])
    viewer = pcl.PCLVisualizer("View Gray Scan")
    viewer.add_point_cloud(cls.point_cloud)
    viewer.add_coordinate_system(translation=cls.proj.T, scale=10)
    viewer.add_text_3d("projector", cls.proj.T, text_id="projector")
    viewer.add_coordinate_system(translation=cls.cam.T, scale=10)
    viewer.add_text_3d("camera", cls.cam.T, text_id="camera")
    viewer.init_camera_parameters()
    viewer.spin()
    viewer.close()
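# Not part of the original file: a hypothetical smoke test for the scan fixture
# set up above. It only checks that the reconstruction produced some points; the
# `size` attribute is an assumption about the pcl point-cloud bindings in use,
# so adjust it to whatever the actual point-cloud type exposes.
def test_point_cloud_is_not_empty(self):
    self.assertGreater(self.point_cloud.size, 0)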