# Esempio n. 1 (Italian: "Example no. 1") -- separator left over from the
# source this file was scraped from; commented out so the file parses.
# 0
class Camera:
    """A rig of Intel RealSense cameras that can be calibrated against each
    other and polled for a single stitched point cloud.

    If a calibration file is given to the constructor it is loaded and the
    rig is immediately usable; otherwise ``calibrate()`` must be run first.

                                                            Sindre Skaar
    """

    def __init__(self, init_file_camera=None):
        """Enable all connected RealSense devices and optionally restore a
        saved calibration.

        Args:
            init_file_camera: Path to a ``.npy`` file (written by
                ``np.save``) holding the dict of camera-to-camera transfer
                matrices. If ``None`` or unreadable, the rig starts
                uncalibrated.
        """
        self.resolution_width = 1280  # pixels
        self.resolution_height = 720  # pixels
        self.frame_rate = 30  # fps
        dispose_frames_for_stablisation = 30  # frames skipped for auto-exposure
        self.coordinate_dimentions = 3
        self.cameras_calibrated = False
        self.charuco_boards = {}
        self.transformation_devices = {}

        # Enable the streams from all the intel realsense devices
        rs_config = rs.config()
        rs_config.enable_stream(rs.stream.depth, self.resolution_width, self.resolution_height, rs.format.z16, self.frame_rate)
        rs_config.enable_stream(rs.stream.infrared, 1, self.resolution_width, self.resolution_height, rs.format.y8, self.frame_rate)
        rs_config.enable_stream(rs.stream.color, self.resolution_width, self.resolution_height, rs.format.bgr8, self.frame_rate)

        # Use the device manager class to enable the devices and get the frames
        self.device_manager = DeviceManager(rs.context(), rs_config)
        self.device_manager.enable_all_devices()
        # Sorting makes the lowest serial number the reference camera.
        self.device_manager._available_devices.sort()
        self.device_list = self.device_manager._available_devices

        # Pre-allocate the point-cloud buffers, one rs.pointcloud per camera.
        self.pcs = {camera: rs.pointcloud() for camera in self.device_list}
        self.pixels = self.resolution_width * self.resolution_height
        self.total_pixels = self.pixels * len(self.device_list)
        self.cloud = np.zeros((3, self.total_pixels))
        # Homogeneous coordinates: the 4th row stays 1 for 4x4 transforms.
        self.transformed_pixels = np.ones((4, self.pixels))

        # Allow some frames for the auto-exposure controller to stabilise
        for _ in range(dispose_frames_for_stablisation):
            frames = self.device_manager.poll_frames_keep()

        assert len(self.device_manager._available_devices) > 0

        # Then we define the charuco boards used in the rig
        self.set_charuco()

        # Get the intrinsics of the realsense devices
        self.intrinsics_devices = self.device_manager.get_device_intrinsics(frames)

        # Try to restore a saved calibration. `except Exception` (instead of
        # the previous bare `except:`) keeps KeyboardInterrupt/SystemExit
        # working while preserving the best-effort fallback.
        try:
            saved_matrices = np.load(init_file_camera, allow_pickle=True)
            transfer_matrices = saved_matrices[()]  # unwrap the 0-d object array into the dict
            transformation_devices = {}
            for serial in transfer_matrices:
                pose = transfer_matrices[serial]
                transformation_devices[serial] = Transformation(pose[:3, :3], pose[:3, 3])
        except Exception:
            print('No such file in directory: "', init_file_camera, '"')
            print("Rig not calibrated\n")
            return

        self.cameras_calibrated = True
        self.transformation_devices = transformation_devices
        print("Calibration completed...\n")

    def set_charuco(self):
        """Create the charuco boards used during the experiment and store
        them in ``self.charuco_boards``.

        Keys: ``'charuco_board'`` (camera-to-camera calibration),
        ``'charuco_robot'`` and ``'charuco_target'`` (robot calibration).

                                                            Sindre Skaar
        """
        charuco_boards = {}

        # Board used for camera-to-camera calibration
        aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_100)
        charuco_width = 8
        charuco_height = 5
        square_length = 0.0392  # meters
        marker_length = square_length * 0.8
        charuco_boards['charuco_board'] = aruco.CharucoBoard_create(
            charuco_width, charuco_height, square_length, marker_length, aruco_dict)

        # Board mounted on the robot (4x4 dictionary, so it cannot be
        # confused with the two 5x5 boards)
        aruco_dict_r = aruco.Dictionary_get(aruco.DICT_4X4_250)
        charuco_boards['charuco_robot'] = aruco.CharucoBoard_create(
            3, 3, 0.0311, .0247, aruco_dict_r)

        # Board marking the robot's target
        aruco_dict_ro = aruco.Dictionary_get(aruco.DICT_5X5_100)
        charuco_boards['charuco_target'] = aruco.CharucoBoard_create(
            3, 3, 0.0311, .0247, aruco_dict_ro)

        self.charuco_boards = charuco_boards

    def calibrate(self, amount_frames=150):
        """Calibrate the rig by estimating a transformation from every
        camera to the camera with the lowest serial number.

        Keeps capturing batches of images until Dijkstra's algorithm
        (``least_error_transfroms``) can connect every camera, so the
        function does not return until all matrices are found.

        Args:
            amount_frames: Number of image sets captured per attempt.

        Returns:
            Dict mapping each "from"-camera serial number to a
            ``Transformation`` into the lowest-serial camera's frame.

                                                            Sindre Skaar
        """
        # Best transform and RMS error found so far for every camera pair;
        # np.inf marks pairs that have not been estimated yet.
        frame_dict = {}
        transform_dict = {}
        rms_dict = {}
        for from_device in self.device_list:
            transform_dict[from_device] = {}
            rms_dict[from_device] = {}
            for to_device in self.device_list:
                transform_dict[from_device][to_device] = {}
                rms_dict[from_device][to_device] = np.inf

        print("Starting to take images in 5 seconds")
        time.sleep(5)

        devices_stitched = False
        while not devices_stitched:
            print("taking new set of  images")
            for frame_count in range(amount_frames):
                print("taking image")
                print(amount_frames - frame_count, "images left")
                frames = self.device_manager.poll_frames_keep()
                time.sleep(0.5)
                frame_dict[frame_count] = {}
                for device in self.device_list:
                    ir_frame = np.asanyarray(frames[device][(rs.stream.infrared, 1)].get_data())
                    depth_frame = np.asanyarray(
                        post_process_depth_frame(frames[device][rs.stream.depth]).get_data())
                    frame_dict[frame_count][device] = {'ir_frame': ir_frame, 'depth_frame': depth_frame}
                del frames  # release the RealSense frame references promptly

            # Estimate transfer matrices between every unordered camera pair.
            # (The slicing guarantees from_device != to_device, so the old
            # explicit check was redundant.)
            for idx, from_device in enumerate(self.device_list[:-1]):
                for to_device in self.device_list[idx + 1:]:
                    temp_transform, temp_rms = get_transformation_matrix_wout_rsobject(
                        frame_dict,
                        [from_device, to_device],
                        self.intrinsics_devices,
                        self.charuco_boards['charuco_board'])
                    # Keep only the lowest-error estimate per pair
                    if temp_rms < rms_dict[from_device][to_device]:
                        rms_dict[from_device][to_device] = temp_rms
                        rms_dict[to_device][from_device] = temp_rms
                        transform_dict[from_device][to_device] = temp_transform
                        transform_dict[to_device][from_device] = temp_transform.inverse()

            # Use Dijkstra to find the least-error paths; returns 0 while
            # some camera is still unreachable from the reference camera.
            transformation_matrices = least_error_transfroms(transform_dict, rms_dict)
            if transformation_matrices != 0:
                devices_stitched = True
            # Print the RMS error between camera transforms
            print(matrix_viewer(rms_dict))

        # Turn the raw 4x4 matrices into Transformation objects
        transformation_devices = {}
        for serial in transformation_matrices:
            pose = transformation_matrices[serial]
            transformation_devices[serial] = Transformation(pose[:3, :3], pose[:3, 3])
        self.transformation_devices = transformation_devices

        save_calibration = input('Press "y" if you want to save calibration \n')
        if save_calibration == "y":
            saved = False
            while not saved:
                name = input("Type in name of file to save. remember to end name with '.npy' \n")
                try:
                    np.save(name, transformation_matrices)
                    saved = True
                except OSError:  # was a bare except; only I/O failures are expected here
                    print("could not save, try another name and remember '.npy' in the end")

        frame_dict.clear()
        self.cameras_calibrated = True
        return self.transformation_devices

    def visualise(self):
        """Interactive visualisation menu: RMS errors, chessboard points,
        point-cloud stream or RGBD stream.

        Each option runs as an infinite loop inside the called helper, so
        this function never returns once an option is chosen. Returns
        immediately if the rig is not calibrated.

                                                            Sindre Skaar
        """
        if not self.cameras_calibrated:
            print("Cameras not calibrated")
            return
        self.device_manager.enable_emitter(True)
        key_list = self.device_manager.poll_frames().keys()

        while True:
            visualisation = input('Presss "1" for RMS error, "2" for chessboard visualisation and "3" for 3d pointcloud and "4" for robot to camea calibration\n')

            if visualisation == '1':
                calculate_avg_rms_error(self.device_list, self.device_manager,
                                        self.transformation_devices,
                                        self.charuco_boards['charuco_board'],
                                        self.intrinsics_devices)
            elif visualisation == '2':
                # NOTE(review): indexing transformation_devices with the
                # string 'charuco_board' looks inconsistent with the other
                # branches, which pass the whole dict -- verify intent.
                visualise_chessboard(self.device_manager, self.device_list,
                                     self.intrinsics_devices,
                                     self.transformation_devices['charuco_board'],
                                     self.transformation_devices)
            elif visualisation == '3':
                visualise_point_cloud(key_list, self.resolution_height,
                                      self.resolution_width, self.device_manager,
                                      self.coordinate_dimentions,
                                      self.transformation_devices)
            elif visualisation == '4':
                visualise_rgbd_cloud(key_list, self.resolution_height,
                                     self.resolution_width, self.device_manager,
                                     self.coordinate_dimentions,
                                     self.transformation_devices)
            elif visualisation == '5':
                # NOTE(review): option "5" is not mentioned in the prompt string
                create_point_cloud(key_list, self.resolution_height,
                                   self.resolution_width, self.device_manager,
                                   self.coordinate_dimentions,
                                   self.transformation_devices)
            else:
                print("Input not recognised")

    def get_robot_data(self):
        """Return the data needed for the robot calibration: the camera
        transforms, the device manager, and the target and robot boards."""
        return self.transformation_devices, self.device_manager, \
               self.charuco_boards['charuco_target'], self.charuco_boards['charuco_robot']

    def get_transform_matrix(self):
        """Return the transfer matrices used to go from all cameras to the
        lowest-serial camera, or -1 if the rig is not calibrated."""
        if self.cameras_calibrated:
            return self.transformation_devices
        print('Cameras not calibrated')
        return -1

    def poll_depth_frame(self):
        """Poll a depth frame from every connected camera, transform each
        point cloud into the lowest-serial camera's coordinate system and
        concatenate them.

        Returns:
            A (3, total_pixels) ndarray with the stitched point cloud, or
            -1 if the cameras are not calibrated.

                                                            Sindre Skaar
        """
        if not self.cameras_calibrated:
            print("Cameras not calibrated")
            return -1

        the_frames = self.device_manager.poll_frames()
        for idx, camera in enumerate(self.device_list):
            frame = the_frames[camera][rs.stream.depth]
            points = self.pcs[camera].calculate(frame)
            vert = np.transpose(np.asanyarray(points.get_vertices(2)))
            # Fill the first 3 rows; the 4th stays 1 (homogeneous coords)
            self.transformed_pixels[:3, :] = vert
            calibrated_points = np.matmul(self.transformation_devices[camera].pose_mat,
                                          self.transformed_pixels)
            self.cloud[:, self.pixels * idx: self.pixels * (idx + 1)] = calibrated_points[:3, :]
        return self.cloud
def main():
    """Calibrate a RealSense rig with a charuco board, then stream the
    stitched point cloud from all cameras in an Open3D window."""

    # First we set up the cameras to start streaming
    resolution_width = 1280  # pixels
    resolution_height = 720  # pixels
    frame_rate = 30  # fps
    dispose_frames_for_stablisation = 30  # frames

    # Enable the streams from all the intel realsense devices
    rs_config = rs.config()
    rs_config.enable_stream(rs.stream.depth, resolution_width,
                            resolution_height, rs.format.z16, frame_rate)
    rs_config.enable_stream(rs.stream.infrared, 1, resolution_width,
                            resolution_height, rs.format.y8, frame_rate)
    rs_config.enable_stream(rs.stream.color, resolution_width,
                            resolution_height, rs.format.bgr8, frame_rate)

    # Use the device manager class to enable the devices and get the frames
    device_manager = DeviceManager(rs.context(), rs_config)
    device_manager.enable_all_devices()
    device_manager._available_devices.sort()

    # Allow some frames for the auto-exposure controller to stabilise
    for _ in range(dispose_frames_for_stablisation):
        frames = device_manager.poll_frames()

    assert len(device_manager._available_devices) > 0

    # Get the intrinsics of the realsense devices
    intrinsics_devices = device_manager.get_device_intrinsics(frames)

    # Set the charuco board parameters for calibration
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    charuco_width = 8
    charuco_height = 5
    square_length = 0.03425  # meters
    marker_length = .026  # meters

    coordinate_dimentions = 3

    charuco_board = aruco.CharucoBoard_create(charuco_width, charuco_height,
                                              square_length, marker_length,
                                              aruco_dict)

    # Pairwise transformation estimates and their RMS errors; np.inf marks
    # pairs that have not been estimated yet.
    transformation_matrix = {}
    rms_matrix = {}
    for device in device_manager._available_devices:
        transformation_matrix[device] = {}
        rms_matrix[device] = {}
        for device2 in device_manager._available_devices:
            rms_matrix[device][device2] = np.inf

    # Keep estimating poses until Dijkstra (least_error_transfroms) can
    # connect every camera to the reference camera.
    devices_stitched = False
    while not devices_stitched:
        frames = device_manager.poll_frames()
        pose_estimator = PoseEstimation(frames, intrinsics_devices,
                                        charuco_board)
        transformation_result_kabsch = pose_estimator.perform_pose_estimation()
        object_point = pose_estimator.get_chessboard_corners_in3d()
        calibrated_device_count = 0
        for device in device_manager._available_devices:
            if not transformation_result_kabsch[device][0]:
                print("Device", device, "needs to be calibrated")
            else:
                if calibrated_device_count == 0:
                    # First successfully-estimated camera becomes the source
                    source_matrix = transformation_result_kabsch[device][1]
                    source_device = device
                    source_rms = transformation_result_kabsch[device][3]
                else:
                    combined_rms = source_rms + transformation_result_kabsch[device][3]
                    # Only keep the new estimate if it beats the stored one
                    if combined_rms < rms_matrix[device][source_device]:
                        rms_matrix[source_device][device] = combined_rms
                        rms_matrix[device][source_device] = combined_rms
                        slave_transform = transformation_result_kabsch[device][1].inverse()
                        pose = np.matmul(source_matrix.pose_mat,
                                         slave_transform.pose_mat)
                        transformation_matrix[device][source_device] = \
                            Transformation(pose[:3, :3], pose[:3, 3])
                        # The reverse direction is the inverse of the same
                        # pose (the original recomputed the identical matmul)
                        transformation_matrix[source_device][device] = \
                            Transformation(pose[:3, :3], pose[:3, 3]).inverse()
                calibrated_device_count += 1

        # least_error_transfroms returns 0 while any camera is unreachable
        transformation_devices = least_error_transfroms(
            transformation_matrix, rms_matrix)
        if transformation_devices != 0:
            devices_stitched = True
        print(matrix_viewer(rms_matrix))

    print("Calibration completed... \n")

    # Enable the emitter of the devices and extract serial numbers to identify cameras
    device_manager.enable_emitter(True)
    key_list = device_manager.poll_frames().keys()
    pcd = o3d.geometry.PointCloud()

    # enable visualiser
    vis = o3d.visualization.Visualizer()
    vis.create_window()
    first_image = True

    # Buffers for stitching the per-camera point clouds together
    pcs = {camera: rs.pointcloud() for camera in key_list}
    pixels = resolution_width * resolution_height
    total_pixels = pixels * len(key_list)
    cloud = np.zeros((3, total_pixels))
    transformed_pixels = np.ones((4, pixels))  # homogeneous coordinates
    idxe = np.random.permutation(cloud.shape[1])

    # BUG FIX: the render loop runs forever, so the cleanup calls below
    # never executed. try/finally guarantees the streams are disabled and
    # the window destroyed (e.g. on KeyboardInterrupt).
    try:
        while True:
            the_frames = device_manager.poll_frames()

            for idx, camera in enumerate(key_list):
                frame = the_frames[camera][rs.stream.depth]
                points = pcs[camera].calculate(frame)
                vert = np.transpose(np.asanyarray(points.get_vertices(2)))
                transformed_pixels[:coordinate_dimentions, :] = vert
                calibrated_points = np.matmul(transformation_devices[camera],
                                              transformed_pixels)
                cloud[:, pixels * idx:pixels * (idx + 1)] = \
                    calibrated_points[:coordinate_dimentions, :]

            # Reduces rendered points and removes points with extreme z values
            keep_ratio = 0.01
            cloud_filtered = cloud[:, idxe[0:math.floor(cloud.shape[1] * keep_ratio)]]
            dist_thresh = 3
            cloud_filtered = -cloud_filtered[:, cloud_filtered[2, :] < dist_thresh]

            # Render the stitched, down-sampled cloud
            pcd.points = o3d.utility.Vector3dVector(np.transpose(cloud_filtered))
            if first_image:
                vis.add_geometry(pcd)
                first_image = False
            vis.update_geometry()
            vis.poll_events()
            vis.update_renderer()
    finally:
        device_manager.disable_streams()
        vis.destroy_window()
# Esempio n. 3 (Italian: "Example no. 3") -- separator left over from the
# source this file was scraped from; commented out so the file parses.
# 0
def main():
    """Track a charuco board with a single RealSense camera and accumulate
    its point cloud, transformed back to the board's starting pose, in an
    Open3D window.

    Pseudo code:
        take an image to get the starting position
        loop forever:
            get the board's new position from the camera
            extract the point cloud at that position and transform it
            back to the starting position
    """

    # First we set up the cameras to start streaming
    resolution_width = 1280  # pixels
    resolution_height = 720  # pixels
    frame_rate = 30  # fps
    dispose_frames_for_stablisation = 30  # frames

    # Enable the streams from all the intel realsense devices
    rs_config = rs.config()
    rs_config.enable_stream(rs.stream.depth, resolution_width,
                            resolution_height, rs.format.z16, frame_rate)
    rs_config.enable_stream(rs.stream.infrared, 1, resolution_width,
                            resolution_height, rs.format.y8, frame_rate)
    rs_config.enable_stream(rs.stream.color, resolution_width,
                            resolution_height, rs.format.bgr8, frame_rate)

    # Use the device manager class to enable the devices and get the frames
    device_manager = DeviceManager(rs.context(), rs_config)
    device_manager.enable_all_devices()
    device_manager._available_devices.sort()
    device_list = device_manager._available_devices

    # Allow some frames for the auto-exposure controller to stabilise
    for _ in range(dispose_frames_for_stablisation):
        frames = device_manager.poll_frames()

    assert len(device_manager._available_devices) > 0

    # Get the intrinsics of the realsense device
    intrinsics_devices = device_manager.get_device_intrinsics(frames)

    # Set the charuco board parameters for calibration
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    charuco_width = 8
    charuco_height = 5
    square_length = 0.03425  # meters
    marker_length = .026  # meters

    coordinate_dimentions = 3

    charuco_board = aruco.CharucoBoard_create(charuco_width, charuco_height,
                                              square_length, marker_length,
                                              aruco_dict)

    # Wait until every corner of the board is detected; that first full
    # detection defines the reference ("starting") pose.
    starting_points_found = False
    while not starting_points_found:
        frames = device_manager.poll_frames()
        corners = get_corners(device_list[0], frames, intrinsics_devices,
                              charuco_board)
        print("Show camera charuco board")
        time.sleep(1)
        if all(corners[3]):
            starting_points = corners
            starting_points_found = True
    print("Charuco board found")
    device_manager.enable_emitter(True)
    key_list = device_manager.poll_frames().keys()
    pcd = o3d.geometry.PointCloud()

    # enable visualiser
    vis = o3d.visualization.Visualizer()
    vis.create_window()
    first_image = True
    visualised_cloud = np.transpose(np.array([[0, 0, 0], [0, 0, 0]]))
    # BUG FIX: time.clock() was deprecated since Python 3.3 and removed in
    # 3.8; perf_counter() is the documented replacement and measures the
    # wall-clock interval this rate limiter actually wants.
    last_used = time.perf_counter()
    prev_transform = [cc.Transformation(np.eye(3), np.array([0, 0, 0])), 0]
    while True:
        if first_image:
            time.sleep(1)
        corners_new_point = get_corners(device_list[0], frames,
                                        intrinsics_devices, charuco_board)

        # Re-image at most every 2 seconds (plus once immediately)
        if time.perf_counter() - last_used > 2 or first_image:
            print("Starting imaging")
            from_camera = np.transpose(np.array([0, 0, 0]))
            to_camera = np.transpose(np.array([0, 0, 0]))
            frames = device_manager.poll_frames()
            # Collect the corners visible in both the reference image and
            # the current image
            for corner in range(len(charuco_board.nearestMarkerCorners)):
                if starting_points[3][corner] and corners_new_point[3][corner]:
                    to_camera = np.vstack(
                        (to_camera, np.array(starting_points[2][:, corner])))
                    from_camera = np.vstack(
                        (from_camera, np.array(corners_new_point[2][:,
                                                                    corner])))
            # size > 25 means at least 8 shared corners (9 rows x 3 coords,
            # including the initial zero row)
            if np.size(to_camera) > 25:
                print("update")
                last_used = time.perf_counter()
                transformation = get_transform_short(from_camera, to_camera)
                difference = np.sum(
                    np.absolute(transformation[0].pose_mat -
                                prev_transform[0].pose_mat))
                # Skip frames where the estimated pose jumped a lot relative
                # to the previous one
                if difference < 0.1:
                    the_frames = device_manager.poll_frames()
                    # NOTE(review): the depth frame is read from `frames`,
                    # not from the freshly polled `the_frames` -- verify
                    # whether the extra poll above is intentional.
                    frame = frames[device_list[0]][rs.stream.depth]
                    pc = rs.pointcloud()
                    points = pc.calculate(frame)
                    vert = np.transpose(np.asanyarray(points.get_vertices(2)))
                    calibrated_points = transformation[0].apply_transformation(
                        vert)
                    cloud = calibrated_points
                    idxe = np.random.permutation(cloud.shape[1])

                    # Keep a random 1% of the points for rendering
                    keep_ratio = 0.01
                    cloud_filtered = cloud[:,
                                           idxe[0:math.floor(cloud.shape[1] *
                                                             keep_ratio)]]
                    visualised_cloud = np.hstack(
                        (visualised_cloud, cloud_filtered))

                    pcd.points = o3d.utility.Vector3dVector(
                        np.transpose(visualised_cloud))

                if first_image:
                    vis.add_geometry(pcd)
                    first_image = False
                vis.update_geometry()
                vis.poll_events()
                vis.update_renderer()
                prev_transform = transformation
        if not first_image:
            vis.update_geometry()
            vis.poll_events()
            vis.update_renderer()
def run_demo():
    """Run the multi-camera box-measurement demo.

    Phase 1 calibrates all connected Intel RealSense devices to a common
    world coordinate frame using a chessboard printout (Kabsch method).
    Phase 2 continuously builds a cumulative point cloud inside the
    calibrated region and visualises the object's bounding box and its
    measured length/width/height.

    Runs until interrupted with Ctrl+C; streams are closed on exit.
    """
    # Define some constants
    resolution_width = 1280  # pixels
    resolution_height = 720  # pixels
    frame_rate = 15  # fps
    dispose_frames_for_stablisation = 30  # frames

    chessboard_width = 6  # squares
    chessboard_height = 9  # squares
    square_size = 0.0253  # meters

    try:
        # Enable the streams from all the intel realsense devices
        rs_config = rs.config()
        rs_config.enable_stream(rs.stream.depth, resolution_width, resolution_height, rs.format.z16, frame_rate)
        rs_config.enable_stream(rs.stream.infrared, 1, resolution_width, resolution_height, rs.format.y8, frame_rate)
        rs_config.enable_stream(rs.stream.color, resolution_width, resolution_height, rs.format.bgr8, frame_rate)

        # Use the device manager class to enable the devices and get the frames
        device_manager = DeviceManager(rs.context(), rs_config)
        device_manager.enable_all_devices()

        # Allow some frames for the auto-exposure controller to stabilise
        for frame in range(dispose_frames_for_stablisation):
            frames = device_manager.poll_frames()

        assert len(device_manager._available_devices) > 0

        """
        1: Calibration
        Calibrate all the available devices to the world co-ordinates.
        For this purpose, a chessboard printout for use with opencv based calibration process is needed.
        """
        # Get the intrinsics of the realsense device
        intrinsics_devices = device_manager.get_device_intrinsics(frames)

        # Set the chessboard parameters for calibration
        chessboard_params = [chessboard_height, chessboard_width, square_size]

        # Estimate the pose of the chessboard in the world coordinate using
        # the Kabsch Method. Keep polling until every available device has
        # successfully seen the chessboard.
        # NOTE(review): this block was previously commented out, which made
        # transformation_devices and roi_2D below undefined (NameError).
        calibrated_device_count = 0
        while calibrated_device_count < len(device_manager._available_devices):
            frames = device_manager.poll_frames()
            pose_estimator = PoseEstimation(frames, intrinsics_devices, chessboard_params)
            transformation_result_kabsch = pose_estimator.perform_pose_estimation()
            object_point = pose_estimator.get_chessboard_corners_in3d()
            calibrated_device_count = 0
            for device in device_manager._available_devices:
                if not transformation_result_kabsch[device][0]:
                    print("Place the chessboard on the plane where the object needs to be detected..")
                else:
                    calibrated_device_count += 1

        # Save the transformation object for all devices in an array to use for measurements
        transformation_devices = {}
        chessboard_points_cumulative_3d = np.array([-1, -1, -1]).transpose()
        for device in device_manager._available_devices:
            transformation_devices[device] = transformation_result_kabsch[device][1].inverse()
            points3D = object_point[device][2][:, object_point[device][3]]
            points3D = transformation_devices[device].apply_transformation(points3D)
            chessboard_points_cumulative_3d = np.column_stack((chessboard_points_cumulative_3d, points3D))

        # Extract the bounds between which the object's dimensions are needed.
        # The object's length and breadth must be smaller than the chessboard.
        # Drop the [-1,-1,-1] seed column before computing the ROI.
        chessboard_points_cumulative_3d = np.delete(chessboard_points_cumulative_3d, 0, 1)
        roi_2D = get_boundary_corners_2D(chessboard_points_cumulative_3d)

        print("Calibration completed... \nPlace the box in the field of view of the devices...")

        """
        2: Measurement and display
        Measure the dimension of the object using depth maps from multiple RealSense devices
        The information from Phase 1 will be used here
        """
        # Enable the emitter of the devices
        device_manager.enable_emitter(True)

        # Load the JSON settings file in order to enable High Accuracy preset for the realsense
        device_manager.load_settings_json("./HighResHighAccuracyPreset.json")

        # Get the extrinsics of the device to be used later
        extrinsics_devices = device_manager.get_depth_to_color_extrinsics(frames)

        # Collect per-device calibration info (transform, intrinsics,
        # extrinsics) so measurements can be drawn on the color image
        # instead of the infrared image.
        calibration_info_devices = defaultdict(list)
        for calibration_info in (transformation_devices, intrinsics_devices, extrinsics_devices):
            for key, value in calibration_info.items():
                calibration_info_devices[key].append(value)

        # Continue acquisition until terminated with Ctrl+C by the user
        while 1:
            # Get the frames from all the devices
            frames_devices = device_manager.poll_frames()

            # Calculate the pointcloud using the depth frames from all the devices
            point_cloud = calculate_cumulative_pointcloud(frames_devices, calibration_info_devices, roi_2D)

            # Get the bounding box for the pointcloud in image coordinates of the color imager
            bounding_box_points_color_image, length, width, height = calculate_boundingbox_points(point_cloud, calibration_info_devices)

            # Draw the bounding box points on the color image and visualise the results
            visualise_measurements(frames_devices, bounding_box_points_color_image, length, width, height)

    except KeyboardInterrupt:
        print("The program was interupted by the user. Closing the program...")

    finally:
        device_manager.disable_streams()
        cv2.destroyAllWindows()
def main():
    """Calibrate multiple RealSense cameras with a ChArUco board and
    visualise detected board corners in a shared world frame.

    Cameras are stitched pairwise: for every camera pair the transform
    with the lowest RMS error over a batch of frames is kept, and capture
    repeats until every adjacent pair in the sorted device list is
    connected. The first device defines the world origin. Detected board
    points are then streamed into an Open3D viewer until Ctrl+C.
    """
    # First we set up the cameras to start streaming
    resolution_width = 1280  # pixels
    resolution_height = 720  # pixels
    frame_rate = 30  # fps
    dispose_frames_for_stablisation = 30  # frames

    # Enable the streams from all the intel realsense devices
    rs_config = rs.config()
    rs_config.enable_stream(rs.stream.depth, resolution_width,
                            resolution_height, rs.format.z16, frame_rate)
    rs_config.enable_stream(rs.stream.infrared, 1, resolution_width,
                            resolution_height, rs.format.y8, frame_rate)
    rs_config.enable_stream(rs.stream.color, resolution_width,
                            resolution_height, rs.format.bgr8, frame_rate)

    # Use the device manager class to enable the devices and get the frames
    device_manager = DeviceManager(rs.context(), rs_config)
    device_manager.enable_all_devices()
    # Sort so the pairwise stitching below has a deterministic order.
    device_manager._available_devices.sort()
    device_list = device_manager._available_devices

    # Allow some frames for the auto-exposure controller to stabilise
    for frame in range(dispose_frames_for_stablisation):
        frames = device_manager.poll_frames()

    assert len(device_manager._available_devices) > 0

    # Then we calibrate the images

    # Get the intrinsics of the realsense device
    intrinsics_devices = device_manager.get_device_intrinsics(frames)

    # Set the charuco board parameters for calibration
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    charuco_width = 8  # squares
    charuco_height = 5  # squares
    square_length = 0.03425  # meters
    marker_length = .026  # meters

    charuco_board = aruco.CharucoBoard_create(charuco_width, charuco_height,
                                              square_length, marker_length,
                                              aruco_dict)

    # Choose amount of frames to average per calibration attempt
    amount_frames = 12
    frame_dict = {}
    transform_dict = {}
    rms_dict = {}
    # rms_dict[a][b] holds the best RMS error seen so far for the a->b
    # transform; np.inf marks "no transform found yet".
    for from_device in device_list:
        transform_dict[from_device] = {}
        rms_dict[from_device] = {}
        for to_device in device_list:
            transform_dict[from_device][to_device] = {}
            rms_dict[from_device][to_device] = np.inf

    devices_stitched = False
    while not devices_stitched:
        print("taking new set of  images")
        for frame_count in range(amount_frames):
            print("taking image")
            print(amount_frames - frame_count, "images left")
            frames = device_manager.poll_frames()
            print("Next image in 1 seconds")
            time.sleep(1)
            frame_dict[frame_count] = frames

        # Estimate the transform for every unordered camera pair and keep
        # the lowest-RMS result (stored in both directions).
        for idx, from_device in enumerate(device_list[:-1]):
            for to_device in device_list[idx + 1:]:
                if to_device != from_device:
                    temp_transform, temp_rms = get_transformation_matrix(
                        frame_dict, [from_device, to_device],
                        intrinsics_devices, charuco_board)
                    if temp_rms < rms_dict[from_device][to_device]:
                        rms_dict[from_device][to_device] = temp_rms
                        rms_dict[to_device][from_device] = temp_rms
                        transform_dict[from_device][to_device] = temp_transform
                        transform_dict[to_device][
                            from_device] = temp_transform.inverse()

        # Show the RMS matrix and retry until every device is connected to
        # its predecessor in the sorted device list.
        test = matrix_viewer(rms_dict)
        print(test)
        devices_stitched = True
        for idx, from_device in enumerate(device_list[1:]):
            if rms_dict[from_device][device_list[idx]] == np.inf:
                devices_stitched = False

    # Chain the pairwise transforms; the first device is the world origin.
    transformation_devices = {}
    identity = np.identity(4)
    transformation_devices[device_list[0]] = Transformation(
        identity[:3, :3], identity[:3, 3])
    for idx, from_device in enumerate(device_list[1:]):
        temp_transform = np.matmul(
            transformation_devices[device_list[idx]].pose_mat,
            transform_dict[from_device][device_list[idx]].pose_mat)
        transformation_devices[from_device] = Transformation(
            temp_transform[:3, :3], temp_transform[:3, 3])

    # Printing
    print("Calibration completed... \n")

    # Enable the emitter of the devices and extract serial numbers to identify cameras
    device_manager.enable_emitter(True)
    key_list = device_manager.poll_frames().keys()
    pcd = o3d.geometry.PointCloud()

    # enable visualiser
    vis = o3d.visualization.Visualizer()
    vis.create_window()
    first_image = True

    # Acquire and visualise until interrupted with Ctrl+C. The try/finally
    # is needed: the cleanup below was previously unreachable because the
    # loop never exits on its own.
    try:
        while True:
            frames = device_manager.poll_frames()
            # Seed with a few zero points so vstack always has a base array.
            displayed_points = np.zeros((10, 3))
            for camera in device_list:
                added_points = get_charuco_points(frames[camera],
                                                  transformation_devices[camera],
                                                  intrinsics_devices[camera],
                                                  charuco_board)
                if added_points.any():
                    displayed_points = np.vstack(
                        (displayed_points, np.transpose(added_points)))

            pcd.points = o3d.utility.Vector3dVector(displayed_points)
            if first_image:
                vis.add_geometry(pcd)
                first_image = False
            vis.update_geometry()
            vis.poll_events()
            vis.update_renderer()
    except KeyboardInterrupt:
        print("The program was interupted by the user. Closing the program...")
    finally:
        device_manager.disable_streams()
        vis.destroy_window()
def run_demo():
    """Run the multi-camera OpenPose keypoint-triangulation demo.

    Calibrates all connected RealSense cameras with a chessboard, then in
    a loop: colorizes each depth map, runs the OpenPose keypoint detector
    on each color view, triangulates 3D keypoints across views, reprojects
    them onto the depth maps, and displays everything in one window.

    Controls: space pauses/resumes acquisition; 'q' or Esc quits.
    On exit, camera matrices (optional) and 3D points are pickled to disk.
    """
    # Define some constants
    resolution_width = 640  # pixels
    resolution_height = 480  # pixels
    frame_rate = 15  # fps
    dispose_frames_for_stablisation = 30  # frames

    chessboard_width = 6  # squares
    chessboard_height = 9  # squares
    square_size = 0.0253  # meters
    # Align depth frames to the color stream so depth and color pixels match.
    align_to = rs.stream.color
    align = rs.align(align_to)
    try:
        # Enable the streams from all the intel realsense devices
        rs_config = rs.config()
        rs_config.enable_stream(rs.stream.depth, resolution_width, resolution_height, rs.format.z16, frame_rate)
        rs_config.enable_stream(rs.stream.infrared, 1, resolution_width, resolution_height, rs.format.y8, frame_rate)
        rs_config.enable_stream(rs.stream.color, resolution_width, resolution_height, rs.format.bgr8, frame_rate)

        # Use the device manager class to enable the devices and get the frames
        device_manager = DeviceManager(rs.context(), rs_config)
        device_manager.enable_all_devices()

        # Allow some frames for the auto-exposure controller to stabilise
        for frame in range(dispose_frames_for_stablisation):
            frames, maps = device_manager.poll_frames(align)

        assert len(device_manager._available_devices) > 0

        """
        1. Calibrate cameras and return transformation matrix (rotation matrix + translation vectors)
        """
        chessboard_params = [chessboard_height, chessboard_width, square_size]
        cameras = calibrateCameras(align, device_manager, frames, chessboard_params)

        """
        2. Run OpenPose on each view frame
        """
        # Enable the emitter of the devices
        device_manager.enable_emitter(True)

        # Load the JSON settings file in order to enable High Accuracy preset for the realsense
        device_manager.load_settings_json("./HighResHighAccuracyPreset.json")

        # Get the extrinsics of the device to be used later
        extrinsics_devices = device_manager.get_depth_to_color_extrinsics(frames)

        depth_list = {}   # per-device history of raw depth maps
        color_list = {}   # per-device history of annotated color images
        frame_id = 0
        points = {}       # frame_id -> triangulated 3D keypoints
        switch = True     # True while acquiring; space toggles pause
        # Continue acquisition until 'q'/Esc or Ctrl+C
        while 1:
            if switch:
                # Get the frames from all the devices
                frames_devices, maps = device_manager.poll_frames(align, 500)

                # List collectors for display
                depth_color = []
                color = []
                devices = sorted(maps)
                project_depth = []
                for i in devices:
                    # 1. Get depth map and colorize
                    temp = maps[i]['depth']
                    depth_list.setdefault(i, [])
                    depth_list[i].append(np.array(temp))
                    depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(temp, alpha=0.03), cv2.COLORMAP_JET)
                    depth_color.append(depth_colormap)
                    project_depth.append((cameras[i]['matrix'], depth_colormap))

                    # 2. Run OpenPose detector on image
                    box = bbox[i] if FLAGS_USE_BBOX else None
                    joints, img = predict_keypoints(maps[i]['color'], box)

                    # 3. Save annotated color image for display
                    color.append(img)
                    color_list.setdefault(i, [])
                    color_list[i].append(img)

                    # 4. Save keypoints for that camera viewpoint
                    cameras[i][frame_id] = joints

                    # 5. Save images to folder
                    if FLAGS_SAVE_IMGS:
                        cv2.imwrite('./images/depth_{}_{}.png'.format(i, frame_id), temp)
                        cv2.imwrite('./images/color_{}_{}.png'.format(i, frame_id), img)

                # Triangulate 3d keypoints; on success, reproject them onto
                # the colorized depth maps for display.
                points[frame_id] = find3dpoints_rt(cameras, 0.2, frame_id)
                if points[frame_id] != 'Invalid Frame':
                    depth_projected = []
                    for img in project_depth:
                        points2d = project_2d(img[0], points[frame_id])
                        img_draw = draw_pose(points2d, 'openpose', img[1], True, points[frame_id])
                        depth_projected.append(img_draw)
                    depth_color = depth_projected
                frame_id += 1
                # Top row: annotated color views; bottom row: depth views.
                images = np.vstack((np.hstack(color), np.hstack(depth_color)))

            # Show images for debugging
            cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('RealSense', images)

            key = cv2.waitKey(1)

            # Press esc or 'q' to close the image window. Checked before the
            # space toggle: previously this was unreachable because every
            # non-space key hit a `continue` first.
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break

            if key == 32:  # space toggles pause/resume
                switch = not switch

    except KeyboardInterrupt:
        print("The program was interupted by the user. Closing the program...")

    finally:
        device_manager.disable_streams()
        cv2.destroyAllWindows()
        # Persist results; context managers ensure the files are closed
        # (the originals were opened and never closed).
        if FLAGS_SAVE_MATRIX:
            with open('cameras.pkl', 'wb') as cam_pkl:
                pkl.dump(cameras, cam_pkl)
        with open('3dpoints.pkl', 'wb') as points_pkl:
            pkl.dump(points, points_pkl)
def main():
    """Interactive multi-camera ChArUco calibration and visualisation tool.

    Either loads a saved calibration (``.npy``) or captures fresh ChArUco
    frames, builds pairwise camera transforms, and selects the least-error
    transform chain (Dijkstra-style shortest path over RMS error). It then
    offers a menu of visualisations: RMS error, chessboard view, 3D point
    cloud, and robot-to-camera calibration. Loops until Ctrl+C.
    """
    # First we set up the cameras to start streaming
    resolution_width = 1280  # pixels
    resolution_height = 720  # pixels
    frame_rate = 30  # fps
    dispose_frames_for_stablisation = 30  # frames

    # Enable the streams from all the intel realsense devices
    rs_config = rs.config()
    rs_config.enable_stream(rs.stream.depth, resolution_width,
                            resolution_height, rs.format.z16, frame_rate)
    rs_config.enable_stream(rs.stream.infrared, 1, resolution_width,
                            resolution_height, rs.format.y8, frame_rate)
    rs_config.enable_stream(rs.stream.color, resolution_width,
                            resolution_height, rs.format.bgr8, frame_rate)

    # Use the device manager class to enable the devices and get the frames
    device_manager = DeviceManager(rs.context(), rs_config)
    device_manager.enable_all_devices()
    # Sort so device order (and hence the transform chain) is deterministic.
    device_manager._available_devices.sort()
    device_list = device_manager._available_devices

    # Allow some frames for the auto-exposure controller to stabilise
    for frame in range(dispose_frames_for_stablisation):
        frames = device_manager.poll_frames_keep()

    assert len(device_manager._available_devices) > 0

    # Then we calibrate the images

    # Get the intrinsics of the realsense device
    intrinsics_devices = device_manager.get_device_intrinsics(frames)

    # Set the charuco board parameters for calibration
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    charuco_width = 8  # squares
    charuco_height = 5  # squares
    square_length = 0.03425  # meters
    marker_length = .026  # meters

    coordinate_dimentions = 3
    charuco_board = aruco.CharucoBoard_create(charuco_width, charuco_height,
                                              square_length, marker_length,
                                              aruco_dict)

    # Set the charuco board parameters for the robot target (4x4 dictionary)
    aruco_dict_r = aruco.Dictionary_get(aruco.DICT_4X4_250)
    charuco_width_r = 3
    charuco_height_r = 3
    square_length_r = 0.0311
    marker_length_r = .0247

    charuco_board_robot = aruco.CharucoBoard_create(charuco_width_r,
                                                    charuco_height_r,
                                                    square_length_r,
                                                    marker_length_r,
                                                    aruco_dict_r)

    # Second robot board, using a distinct (5x5) dictionary so the two
    # boards cannot be confused with each other.
    aruco_dict_ro = aruco.Dictionary_get(aruco.DICT_5X5_100)
    charuco_width_ro = 3
    charuco_height_ro = 3
    square_length_ro = 0.0311
    marker_length_ro = .0247

    charuco_board_robot_2 = aruco.CharucoBoard_create(charuco_width_ro,
                                                      charuco_height_ro,
                                                      square_length_ro,
                                                      marker_length_ro,
                                                      aruco_dict_ro)

    # Decide if you want to use previous calibration or make a new one
    calibration_decision = input(
        'To use previous calibration, press "y". For new calibration press any key \n'
    )
    if calibration_decision != 'y':
        # Choose amount of frames to average
        correct_input = False
        while not correct_input:
            the_input = input(
                "Write amount of frames you want to use to calibrate \n")
            try:
                amount_frames = int(the_input)
                if amount_frames > 0:
                    correct_input = True
                else:
                    print("Frame count has to be positive")
            except ValueError:
                print('Input has to be int')

        # Make a dict to store all images for calibration.
        # rms_dict[a][b] holds the best RMS error seen so far for the a->b
        # transform; np.inf marks "no transform found yet".
        frame_dict = {}
        transform_dict = {}
        rms_dict = {}
        for from_device in device_list:
            transform_dict[from_device] = {}
            rms_dict[from_device] = {}
            for to_device in device_list:
                transform_dict[from_device][to_device] = {}
                rms_dict[from_device][to_device] = np.inf
        print("Starting to take images in 5 seconds")
        time.sleep(5)
        devices_stitched = False

        while not devices_stitched:
            print("taking new set of  images")
            for frame_count in range(amount_frames):
                print("taking image")
                print(amount_frames - frame_count, "images left")
                frames = device_manager.poll_frames_keep()
                time.sleep(0.5)
                frame_dict[frame_count] = {}
                for device in device_list:
                    ir_frame = np.asanyarray(
                        frames[device][(rs.stream.infrared, 1)].get_data())
                    depth_frame = np.asanyarray(
                        post_process_depth_frame(
                            frames[device][rs.stream.depth]).get_data())
                    frame_dict[frame_count][device] = {
                        'ir_frame': ir_frame,
                        'depth_frame': depth_frame
                    }
                # Release the realsense frame objects promptly.
                del frames

            # Make transfer matrices between all possible camera pairs,
            # keeping the lowest-RMS transform (stored in both directions).
            for idx, from_device in enumerate(device_list[:-1]):
                for to_device in device_list[idx + 1:]:
                    if to_device != from_device:
                        temp_transform, temp_rms = get_transformation_matrix_wout_rsobject(
                            frame_dict, [from_device, to_device],
                            intrinsics_devices, charuco_board)
                        if temp_rms < rms_dict[from_device][to_device]:
                            rms_dict[from_device][to_device] = temp_rms
                            rms_dict[to_device][from_device] = temp_rms
                            transform_dict[from_device][
                                to_device] = temp_transform
                            transform_dict[to_device][
                                from_device] = temp_transform.inverse()

            # Use Dijkstra's to find the shortest path and check if all
            # cameras are connected (returns 0 when the graph is incomplete).
            transformation_matrices = least_error_transfroms(
                transform_dict, rms_dict)
            if transformation_matrices != 0:
                devices_stitched = True
            # Prints rms error between camera transforms
            test = matrix_viewer(rms_dict)
            print(test)

        # Turn transformation matrices into Transformation objects
        transformation_devices = {}
        for matrix in transformation_matrices:
            the_matrix = transformation_matrices[matrix]
            transformation_devices[matrix] = Transformation(
                the_matrix[:3, :3], the_matrix[:3, 3])

        # Save calibration transforms if the user wants to.
        # (A stray `calibration_name = input` line -- assigning the builtin
        # without calling it -- was removed here.)
        save_calibration = input(
            'Press "y" if you want to save calibration \n')
        if save_calibration == "y":
            saved = False
            while not saved:
                name = input(
                    "Type in name of file to save. remember to end name with '.npy' \n"
                )
                try:
                    np.save(name, transformation_matrices)
                    saved = True
                except Exception:  # bad path/name: report and re-prompt
                    print(
                        "could not save, try another name and remember '.npy' in the end"
                    )

        frame_dict.clear()
    else:
        # Load a previously saved calibration; np.save wraps the dict in a
        # 0-d object array, hence the [()] unwrap.
        correct_filename = False
        while not correct_filename:
            file_name = input('Type in calibration file name \n')
            try:
                transfer_matrices_save = np.load(file_name, allow_pickle=True)
                transfer_matrices = transfer_matrices_save[()]
                correct_filename = True
            except (OSError, ValueError):  # missing or unreadable file
                print('No such file in directory: "', file_name, '"')
        transformation_devices = {}
        for matrix in transfer_matrices:
            the_matrix = transfer_matrices[matrix]
            transformation_devices[matrix] = Transformation(
                the_matrix[:3, :3], the_matrix[:3, 3])

    print("Calibration completed...")

    # Enable the emitter of the devices and extract serial numbers to identify cameras
    device_manager.enable_emitter(True)
    key_list = device_manager.poll_frames().keys()

    # Visualisation menu; loops until the process is interrupted.
    while True:
        visualisation = input(
            'Presss "1" for RMS error, "2" for chessboard visualisation and "3" for 3d pointcloud and "4" for robot to camea calibration\n'
        )

        if visualisation == '1':

            calculate_avg_rms_error(device_list, device_manager,
                                    transformation_devices, charuco_board,
                                    intrinsics_devices)

        elif visualisation == '2':

            visualise_chessboard(device_manager, device_list,
                                 intrinsics_devices, charuco_board,
                                 transformation_devices)

        elif visualisation == '3':

            visualise_point_cloud(key_list, resolution_height,
                                  resolution_width, device_manager,
                                  coordinate_dimentions,
                                  transformation_devices)
        elif visualisation == '4':
            robot_calibration = CameraRobotCalibration(transformation_devices,
                                                       device_manager,
                                                       charuco_board_robot_2,
                                                       charuco_board_robot)
            input("Set target in position for calibration\n")
            test = robot_calibration.test_functions()
            print(test)

        else:
            print("Input not recognised")
def run_demo():
	"""Measure the dimensions of a box seen by multiple RealSense cameras.

	Phase 1 calibrates every connected device into a shared world frame
	using a chessboard printout (Kabsch method). Phase 2 streams depth
	frames, builds a cumulative point cloud inside the calibrated region,
	and visualises the object's bounding box with its measured
	length/width/height on the color images.

	Runs until interrupted with Ctrl+C; streams are closed on exit.
	"""
	# Define some constants 
	resolution_width = 1280 # pixels
	resolution_height = 720 # pixels
	frame_rate = 15  # fps
	dispose_frames_for_stablisation = 30  # frames
	
	chessboard_width = 6 # squares
	chessboard_height = 9 	# squares
	square_size = 0.0253 # meters

	try:
		# Enable the streams from all the intel realsense devices
		rs_config = rs.config()
		rs_config.enable_stream(rs.stream.depth, resolution_width, resolution_height, rs.format.z16, frame_rate)
		rs_config.enable_stream(rs.stream.infrared, 1, resolution_width, resolution_height, rs.format.y8, frame_rate)
		rs_config.enable_stream(rs.stream.color, resolution_width, resolution_height, rs.format.bgr8, frame_rate)

		# Use the device manager class to enable the devices and get the frames
		device_manager = DeviceManager(rs.context(), rs_config)
		device_manager.enable_all_devices()
		
		# Allow some frames for the auto-exposure controller to stablise
		for frame in range(dispose_frames_for_stablisation):
			frames = device_manager.poll_frames()

		assert( len(device_manager._available_devices) > 0 )
		"""
		1: Calibration
		Calibrate all the available devices to the world co-ordinates.
		For this purpose, a chessboard printout for use with opencv based calibration process is needed.

		"""
		# Get the intrinsics of the realsense device 
		intrinsics_devices = device_manager.get_device_intrinsics(frames)
		
		# Set the chessboard parameters for calibration 
		chessboard_params = [chessboard_height, chessboard_width, square_size] 
		
		# Estimate the pose of the chessboard in the world coordinate using the Kabsch Method.
		# Keep polling until every available device has seen the chessboard.
		calibrated_device_count = 0
		while calibrated_device_count < len(device_manager._available_devices):
			frames = device_manager.poll_frames()
			pose_estimator = PoseEstimation(frames, intrinsics_devices, chessboard_params)
			transformation_result_kabsch  = pose_estimator.perform_pose_estimation()
			object_point = pose_estimator.get_chessboard_corners_in3d()
			calibrated_device_count = 0
			for device in device_manager._available_devices:
				if not transformation_result_kabsch[device][0]:
					print("Place the chessboard on the plane where the object needs to be detected..")
				else:
					calibrated_device_count += 1

		# Save the transformation object for all devices in an array to use for measurements
		transformation_devices={}
		# Seed column [-1,-1,-1] lets column_stack accumulate; it is deleted below.
		chessboard_points_cumulative_3d = np.array([-1,-1,-1]).transpose()
		for device in device_manager._available_devices:
			transformation_devices[device] = transformation_result_kabsch[device][1].inverse()
			points3D = object_point[device][2][:,object_point[device][3]]
			points3D = transformation_devices[device].apply_transformation(points3D)
			chessboard_points_cumulative_3d = np.column_stack( (chessboard_points_cumulative_3d,points3D) )

		# Extract the bounds between which the object's dimensions are needed
		# 	It is necessary for this demo that the object's length and breath is smaller than that of the chessboard
		chessboard_points_cumulative_3d = np.delete(chessboard_points_cumulative_3d, 0, 1)
		roi_2D = get_boundary_corners_2D(chessboard_points_cumulative_3d)

		print("Calibration completed... \nPlace the box in the field of view of the devices...")


		"""
		2: Measurement and display
		Measure the dimension of the object using depth maps from multiple RealSense devices
		The information from Phase 1 will be used here

		"""

		# Enable the emitter of the devices
		device_manager.enable_emitter(True)

		# Load the JSON settings file in order to enable High Accuracy preset for the realsense
		device_manager.load_settings_json("./HighResHighAccuracyPreset.json")

		# Get the extrinsics of the device to be used later
		extrinsics_devices = device_manager.get_depth_to_color_extrinsics(frames)

		# Collect per-device (transform, intrinsics, extrinsics) so measurements
		# can be drawn on the color image instead of the infra red image
		calibration_info_devices = defaultdict(list)
		for calibration_info in (transformation_devices, intrinsics_devices, extrinsics_devices):
			for key, value in calibration_info.items():
				calibration_info_devices[key].append(value)

		# Continue acquisition until terminated with Ctrl+C by the user
		while 1:
				# Get the frames from all the devices
				frames_devices = device_manager.poll_frames()

				# Calculate the pointcloud using the depth frames from all the devices
				point_cloud = calculate_cumulative_pointcloud(frames_devices, calibration_info_devices, roi_2D)

				# Get the bounding box for the pointcloud in image coordinates of the color imager
				bounding_box_points_color_image, length, width, height = calculate_boundingbox_points(point_cloud, calibration_info_devices )

				# Draw the bounding box points on the color image and visualise the results
				visualise_measurements(frames_devices, bounding_box_points_color_image, length, width, height)

	except KeyboardInterrupt:
		print("The program was interupted by the user. Closing the program...")
	
	finally:
		device_manager.disable_streams()
		cv2.destroyAllWindows()
# Esempio n. 9
class RealSense:
    def __init__(self):
        """Initialize viewer state, configure RealSense streams and warm up.

        Enables depth, infrared and color streams on every connected
        device via DeviceManager, then polls and discards a number of
        frames so the auto-exposure controller can stabilise before
        calibration/measurement begins.
        """
        # Viewer / pointcloud display state.
        self.WIN_NAME = 'RealSense'
        self.pitch, self.yaw = math.radians(-10), math.radians(-15)
        self.translation = np.array([0, 0, -1], dtype=np.float32)
        self.distance = 2
        self.paused = False
        self.decimate = 1
        self.scale = True
        self.color = True

        # Stream constants.
        self.resolution_width = 1280  # pixels
        self.resolution_height = 720  # pixels
        self.frame_rate = 15  # fps
        self.dispose_frames_for_stablisation = 25  # frames

        # Chessboard used later for calibration (see calibaration()).
        self.chessboard_width = 6  # squares
        self.chessboard_height = 9  # squares
        self.square_size = 0.0253  # meters

        # Filled in by calibaration().
        self.intrinsics_devices = None
        self.chessboard_params = None

        self.rs_config = rs.config()
        self.rs_config.enable_stream(rs.stream.depth, self.resolution_width,
                                     self.resolution_height, rs.format.z16,
                                     self.frame_rate)
        self.rs_config.enable_stream(rs.stream.infrared, 1,
                                     self.resolution_width,
                                     self.resolution_height, rs.format.y8,
                                     self.frame_rate)
        self.rs_config.enable_stream(rs.stream.color, self.resolution_width,
                                     self.resolution_height, rs.format.bgr8,
                                     self.frame_rate)

        # Use the device manager class to enable the devices and get the frames
        self.device_manager = DeviceManager(rs.context(), self.rs_config)
        self.device_manager.enable_all_devices()
        print('0')
        # Allow some frames for the auto-exposure controller to stabilise.
        # BUG FIX: the original looped `for self.frame in range(...)`,
        # accidentally storing the loop counter as an instance attribute;
        # use a throwaway local instead.
        for _ in range(self.dispose_frames_for_stablisation):
            self.frames = self.device_manager.poll_frames()

        print('realsense initialized!!')

    def calibaration(self):
        """Estimate each device's pose from a chessboard (Kabsch method).

        Keeps polling frames until every connected device reports a
        successful pose estimate, stores the inverse transforms in
        ``self.transformation_devices`` and derives ``self.roi_2D``, the
        2D boundary of the chessboard corners on the calibration plane.
        """
        self.intrinsics_devices = self.device_manager.get_device_intrinsics(
            self.frames)
        # Chessboard parameters used by the pose estimator.
        self.chessboard_params = [
            self.chessboard_height, self.chessboard_width, self.square_size
        ]

        # Poll until the chessboard pose is found on every device.
        calibrated_device_count = 0
        while calibrated_device_count < len(
                self.device_manager._available_devices):
            self.frames = self.device_manager.poll_frames()
            pose_estimator = PoseEstimation(self.frames,
                                            self.intrinsics_devices,
                                            self.chessboard_params)
            transformation_result_kabsch = pose_estimator.perform_pose_estimation(
            )
            object_point = pose_estimator.get_chessboard_corners_in3d()
            calibrated_device_count = 0
            for device in self.device_manager._available_devices:
                if transformation_result_kabsch[device][0]:
                    calibrated_device_count += 1
                else:
                    print(
                        "Place the chessboard on the plane where the object needs to be detected.."
                    )
        print('calibrated_device_count =', calibrated_device_count)

        # Accumulate the transformed chessboard corners from every device;
        # start with a dummy column that is removed again further down.
        self.transformation_devices = {}
        corner_cloud = np.array([-1, -1, -1]).transpose()
        for device in self.device_manager._available_devices:
            inverse_pose = transformation_result_kabsch[device][1].inverse()
            self.transformation_devices[device] = inverse_pose
            valid_idx = object_point[device][3]
            points3D = inverse_pose.apply_transformation(
                object_point[device][2][:, valid_idx])
            corner_cloud = np.column_stack((corner_cloud, points3D))

        print(
            ' Extract the bounds between which the objects dimensions are needed'
        )
        # It is necessary for this demo that the object's length and breath is smaller than that of the chessboard
        # Drop the dummy first column before computing the boundary.
        corner_cloud = np.delete(corner_cloud, 0, 1)
        self.roi_2D = get_boundary_corners_2D(corner_cloud)
        print(
            "Calibration completed... \nPlace the box in the field of view of the devices..."
        )

    def setEmitter(self):
        """Enable the IR emitter and build per-device calibration info.

        Gathers depth-to-color extrinsics and combines them with the
        previously computed transformations and intrinsics into
        ``self.calibration_info_devices`` (device key -> [transformation,
        intrinsics, extrinsics]) for projecting measurements onto the
        color image. Requires calibaration() to have run first.
        """
        self.device_manager.enable_emitter(True)
        # Load the JSON settings file in order to enable High Accuracy preset for the realsense
        # (deliberately disabled here; re-enable if the preset file is present)
        #self.device_manager.load_settings_json("HighResHighAccuracyPreset.json")

        # Get the extrinsics of the device to be used later.
        extrinsics_devices = self.device_manager.get_depth_to_color_extrinsics(
            self.frames)

        print(
            ' Get the calibration info as a dictionary to help with display of the measurements onto the color image instead of infra red image'
        )
        self.calibration_info_devices = defaultdict(list)
        for calibration_info in (self.transformation_devices,
                                 self.intrinsics_devices, extrinsics_devices):
            for key, value in calibration_info.items():
                self.calibration_info_devices[key].append(value)
        # BUG FIX: completion message was garbled
        # ("CalibrasetEmittertion completed... ").
        print("setEmitter completed... ")

    def processing(self):
        """Measure the object inside the calibrated ROI and print its size.

        Polls one set of frames from all devices, fuses them into a
        cumulative pointcloud restricted to ``self.roi_2D``, and prints
        the bounding-box length/width/height. Requires calibaration()
        and setEmitter() to have run first.
        """
        devices_frames = self.device_manager.poll_frames()
        print('get frames')
        # Fuse the depth frames of every device into one pointcloud.
        cloud = calculate_cumulative_pointcloud(
            devices_frames, self.calibration_info_devices, self.roi_2D)
        # Bounding box of the cloud, plus its corners in color-image coords.
        bbox_corners_color, length, width, height = calculate_boundingbox_points(
            cloud, self.calibration_info_devices)
        print('길이=', length, ' 폭=', width, ' 높이', height)
        # Visualisation is deliberately disabled in this class:
        # visualise_measurements(devices_frames, bbox_corners_color, length, width, height)

    def closeSense(self):
        """Disable all RealSense streams; call when done with the cameras."""
        self.device_manager.disable_streams()