class CozmoWarehouseWorker:
    """Drives a Cozmo robot around a small warehouse arena.

    Localizes on a grid map with a particle filter, then shuttles light
    cubes between a pick-up zone and a drop-off zone, dead-reckoning the
    arena-frame pose from the robot's odometry between localizations.
    """

    def __init__(self, robot: cozmo.robot.Robot):
        # Pose bookkeeping: arena frame (grid units) vs. robot odometry frame (mm).
        self.current_arena_pose = None
        self.current_robot_pose = robot.pose
        self.robot = robot

        # Start streaming grayscale frames (marker detection runs on grayscale).
        robot.camera.image_stream_enabled = True
        robot.camera.color_image_enabled = False
        robot.camera.enable_auto_exposure()

        # Camera intrinsics matrix, consumed by the marker detector.
        fx, fy = robot.camera.config.focal_length.x_y
        cx, cy = robot.camera.config.center.x_y
        # np.float was deprecated and removed in NumPy 1.24; it was only an
        # alias for the builtin float, which is used here instead.
        self.camera_settings = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]],
                                        dtype=float)

        # Key arena locations (grid units).
        self.pick_up_pose = Pose(x=4.5, y=13.75, z=0, angle_z=degrees(90))
        self.drop_off_pose = Pose(x=21.75, y=13.75, z=0, angle_z=degrees(90))

        # Waypoint routes between the two zones.
        self.drop_off_directions = [
            Pose(x=3, y=4.5, z=0, angle_z=degrees(0)),
            Pose(x=21.75, y=4.5, z=0, angle_z=degrees(90)), self.drop_off_pose
        ]
        self.pick_up_directions = [
            Pose(x=21.75, y=4.5, z=0, angle_z=degrees(90)),
            Pose(x=3, y=4.5, z=0, angle_z=degrees(0)), self.pick_up_pose
        ]

        self.drive_speed = speed_mmps(50)
        self.grid = CozGrid("map_arena.json")
        self.pf = ParticleFilter(self.grid)
        print("Robot initialized!")  # was printed twice; once is enough

        # The GUI owns its own thread so this one can keep driving the robot.
        threading.Thread(target=self.runGUI).start()

    def runGUI(self):
        """Build the debug GUI and run its (blocking) main loop."""
        self.gui = GUIWindow(self.grid, show_camera=True)
        self.gui.show_particles(self.pf.particles)
        self.gui.show_mean(0, 0, 0)
        self.gui.start()

    async def drive_to(self, directions):
        """Drive to a single arena Pose or along a list of waypoint Poses."""
        print("-" * 20 + "DRIVING" + "-" * 20)
        if isinstance(directions, list):
            for pose in directions:
                await self.__drive_to_pose(pose)
        else:
            await self.__drive_to_pose(directions)

    async def __drive_to_pose(self, pose):
        """Drive to one arena pose, then refresh the arena pose estimate."""
        print("We are at ", self.current_arena_pose, " and we are driving to ",
              pose)
        directions = pose - self.current_arena_pose
        print("We will follow these directions: ", directions, "\n\n")
        # NOTE(review): degrees() normally takes a number, but pose.rotation
        # is a rotation object — confirm the intended value (probably
        # pose.rotation.angle_z.degrees).  Left as-is pending verification.
        directions.rotation = degrees(pose.rotation)
        await self.__execute_directions(directions)
        print("Directions followed!", "\n\n")
        self.update_current_arena_pose()

    def update_current_arena_pose(self):
        """Fold the odometry accumulated since the last checkpoint into the
        arena-frame pose estimate."""
        print("-" * 20 + "UPDATING POSE" + "-" * 20)
        # Heading offset between the robot's odometry frame and the arena frame.
        coordinate_systems_diff = diff_heading_deg(
            self.current_robot_pose.rotation.angle_z.degrees,
            self.current_arena_pose.rotation.angle_z.degrees)
        # Rotate both endpoints into the arena frame, then convert the delta
        # from odometry millimeters to grid inches.
        arena_initial_pose_mm = rotate_point(
            self.current_robot_pose.position.x,
            self.current_robot_pose.position.y, coordinate_systems_diff)
        arena_final_pose_mm = rotate_point(self.robot.pose.position.x,
                                           self.robot.pose.position.y,
                                           coordinate_systems_diff)
        print(
            "We think we moved ",
            convertPoseFromMmToInches(arena_final_pose_mm -
                                      arena_initial_pose_mm), "\n\n")
        self.current_arena_pose = self.current_arena_pose + convertPoseFromMmToInches(
            arena_final_pose_mm - arena_initial_pose_mm)
        print("Current pose is now ", self.current_arena_pose, "\n\n")

    async def pick_up_cube(self, tries=5):
        """Wait for a light cube to be observed and pick it up, retrying up
        to `tries` times; asks for help out loud if the pickup fails."""
        print("-" * 20 + "GETTING CUBE" + "-" * 20)
        cube = await self.robot.world.wait_for_observed_light_cube(timeout=30)
        print("Found cube: %s" % cube)
        # Run the pickup action to completion, then inspect its outcome.
        # (The original chained `.obj` onto the un-awaited coroutine returned
        # by wait_for_completed(), which raised AttributeError before the
        # pickup ever ran.)
        pickup_action = self.robot.pickup_object(cube, num_retries=tries)
        await pickup_action.wait_for_completed()
        if not pickup_action.has_succeeded:
            print("Could not get the cube.")
            await self.robot.say_text("Help me!").wait_for_completed()
            await asyncio.sleep(5)  # was missing `await`, so it never slept
        else:
            print("Picked up cube!")

    async def set_down_cube(self):
        """Lower the lift to release the cube and reset the head angle."""
        print("-" * 20 + "SETTING DOWN CUBE" + "-" * 20)
        await self.robot.set_lift_height(0.0).wait_for_completed()
        await self.robot.set_head_angle(degrees(3)).wait_for_completed()

    async def __execute_directions(self, directions):
        """Execute a relative move as: align with the arena +X axis, drive
        the x component, turn to +Y, drive the y component, then turn to the
        final heading."""
        print("Current arena pose is:", self.current_arena_pose, "\n\n")
        print("Current robot pose is:", self.robot.pose, "\n\n")
        # Undo the current heading so the robot faces the arena's +X axis.
        await self.robot.turn_in_place(
            angle=degrees(-self.current_arena_pose.rotation.angle_z.degrees)
        ).wait_for_completed()
        print("ROBOT is at AFTER TURNING to be parallel to X: ",
              self.robot.pose, "\n\n")
        # Distances are in grid units; grid.scale converts to millimeters.
        await self.robot.drive_straight(
            distance=distance_mm(directions.position.x * self.grid.scale),
            speed=self.drive_speed).wait_for_completed()
        print("ROBOT is at AFTER DRIVING in the X direction: ",
              self.robot.pose, "\n\n")
        await self.robot.turn_in_place(angle=degrees(90)).wait_for_completed()
        print("ROBOT is at AFTER TURNING to be parallel to Y: ",
              self.robot.pose, "\n\n")
        await self.robot.drive_straight(
            distance=distance_mm(directions.position.y * self.grid.scale),
            speed=self.drive_speed).wait_for_completed()
        print("ROBOT is at AFTER DRIVING in the Y direction: ",
              self.robot.pose, "\n\n")
        print("ROBOT is TURNING ",
              diff_heading_deg(directions.rotation.angle_z.degrees, 90),
              "degrees.", "\n\n")
        # Final turn from the +Y heading (90 deg) to the requested heading.
        await self.robot.turn_in_place(angle=degrees(
            diff_heading_deg(directions.rotation.angle_z.degrees, 90))
                                       ).wait_for_completed()
        print("ROBOT is at AFTER FINAL TURN", self.robot.pose, "\n\n")

    async def localize(self, turn_angle=20):
        """Spin in place `turn_angle` degrees at a time until the particle
        filter converges, then record the converged arena pose."""
        print("-" * 20 + "LOCALIZING" + "-" * 20)
        # Reset our location estimates.
        conf = False
        self.current_arena_pose = Pose(0, 0, 0, angle_z=degrees(0))
        self.pf = ParticleFilter(self.grid)

        # Reset lift and head so markers are visible to the camera.
        await self.robot.set_lift_height(0.0).wait_for_completed()
        await self.robot.set_head_angle(degrees(3)).wait_for_completed()

        while not conf:
            # Move a little so new markers come into view.
            self.current_robot_pose = self.robot.pose
            await self.robot.turn_in_place(angle=degrees(turn_angle)
                                           ).wait_for_completed()
            odometry = self.__compute_odometry()
            detected_markers, camera_image = await self.__marker_processing()

            # Motion and measurement update with the odometry and marker data.
            curr_x, curr_y, curr_h, conf = self.pf.update(
                odometry, detected_markers)

            # Update the debug GUI.
            self.gui.show_particles(self.pf.particles)
            self.gui.show_mean(curr_x, curr_y, curr_h)
            self.gui.show_camera_image(camera_image)
            self.gui.updated.set()

        self.current_arena_pose = Pose(curr_x,
                                       curr_y,
                                       0,
                                       angle_z=degrees(curr_h))
        print("We localized to arena location ", self.current_arena_pose)

    def __compute_odometry(self, cvt_inch=True):
        '''
        Compute the odometry given the current pose of the robot (use robot.pose)

        Input:
            - curr_pose: a cozmo.robot.Pose representing the robot's current location
            - cvt_inch: converts the odometry into grid units
        Returns:
            - 3-tuple (dx, dy, dh) representing the odometry
        '''
        last_x, last_y, last_h = self.current_robot_pose.position.x, self.current_robot_pose.position.y, \
            self.current_robot_pose.rotation.angle_z.degrees
        curr_x, curr_y, curr_h = self.robot.pose.position.x, self.robot.pose.position.y, \
            self.robot.pose.rotation.angle_z.degrees

        # Express the displacement in the robot's previous body frame.
        dx, dy = rotate_point(curr_x - last_x, curr_y - last_y, -last_h)
        if cvt_inch:
            dx, dy = dx / self.grid.scale, dy / self.grid.scale

        return (dx, dy, diff_heading_deg(curr_h, last_h))

    async def __marker_processing(self, show_diagnostic_image=False):
        '''
        Obtain the visible markers from the current frame from Cozmo's camera.
        Since this is an async function, it must be called using await, for example:

            markers, camera_image = await marker_processing(robot, camera_settings, show_diagnostic_image=False)

        Input:
            - robot: cozmo.robot.Robot object
            - camera_settings: 3x3 matrix representing the camera calibration settings
            - show_diagnostic_image: if True, shows what the marker detector sees after processing
        Returns:
            - a list of detected markers, each being a 3-tuple (rx, ry, rh)
            (as expected by the particle filter's measurement update)
            - a PIL Image of what Cozmo's camera sees with marker annotations
        '''
        # Wait for the latest image from Cozmo.
        image_event = await self.robot.world.wait_for(
            cozmo.camera.EvtNewRawCameraImage, timeout=30)

        # Convert the image to grayscale for the detector.
        image = np.array(image_event.image)
        image = color.rgb2gray(image)

        # Detect the markers.
        markers, diag = detect.detect_markers(image,
                                              self.camera_settings,
                                              include_diagnostics=True)

        # Measured marker list for the particle filter, scaled by the grid scale.
        marker_list = [marker['xyh'] for marker in markers]
        marker_list = [(x / self.grid.scale, y / self.grid.scale, h)
                       for x, y, h in marker_list]

        # Annotate the camera image (or the detector's diagnostic view).
        if not show_diagnostic_image:
            annotated_image = image_event.image.resize(
                (image.shape[1] * 2, image.shape[0] * 2))
            annotator.annotate_markers(annotated_image, markers, scale=2)
        else:
            diag_image = color.gray2rgb(diag['filtered_image'])
            diag_image = Image.fromarray(np.uint8(diag_image * 255)).resize(
                (image.shape[1] * 2, image.shape[0] * 2))
            annotator.annotate_markers(diag_image, markers, scale=2)
            annotated_image = diag_image

        return marker_list, annotated_image
# ===== Example 2 (scraped-example separator; original residue: "Ejemplo n.º 2" / "0") =====
                        cozmo.anim.Triggers.CodeLabUnhappy
                    ).wait_for_completed()
                    pf = ParticleFilter(grid)
                    converged = False
            else:
                # actively look around
                # random_degree = np.random.randint(low=85, high=93)
                # await robot.turn_in_place(degrees(random_degree),speed=Angle(1080)).wait_for_completed()
                robot.drive_wheel_motors(30, 0)


class CozmoThread(threading.Thread):
    """Background thread that owns the Cozmo SDK event loop."""

    def __init__(self):
        # Non-daemon so the process waits for the robot program to finish.
        super().__init__(daemon=False)

    def run(self):
        # Keep Cozmo on the charger instead of driving off on connect.
        cozmo.robot.Robot.drive_off_charger_on_connect = False
        cozmo.run_program(run, use_viewer=False)


if __name__ == '__main__':

    # Run the Cozmo SDK in a background thread so the GUI can own this one
    # (GUI toolkits generally require the main thread).
    cozmo_thread = CozmoThread()
    cozmo_thread.start()

    # Seed the GUI with the initial particle cloud and a zero mean estimate,
    # then enter the GUI's blocking main loop.
    # NOTE(review): `gui` and `pf` are not defined anywhere in this file —
    # presumably module-level globals from the original script; verify.
    gui.show_particles(pf.particles)
    gui.show_mean(0, 0, 0)
    gui.start()
class CozmoWarehouseWorker:
    """Particle-filter localization and waypoint driving for a Cozmo robot.

    This revision fixes several errors: `await` inside the synchronous
    `__init__` (a SyntaxError), bare positional `0` after keyword arguments
    in `Pose(...)` calls (a SyntaxError), a missing `self` parameter on
    `drive_to`, an un-awaited coroutine, and references to undefined
    globals `grid`/`pf` where the instance attributes were intended.
    """

    def __init__(self, robot: cozmo.robot.Robot):
        self.current_arena_pose = None
        self.last_robot_pose = robot.pose
        self.robot = robot

        # __init__ is synchronous, so it cannot await; start the head move
        # without blocking on it.
        robot.set_head_angle(degrees(3))

        # Start streaming grayscale frames for marker detection.
        robot.camera.image_stream_enabled = True
        robot.camera.color_image_enabled = False
        robot.camera.enable_auto_exposure()

        # Camera intrinsics matrix used by the marker detector.
        # (np.float was removed in NumPy 1.24; builtin float is equivalent.)
        fx, fy = robot.camera.config.focal_length.x_y
        cx, cy = robot.camera.config.center.x_y
        self.camera_settings = np.array([
            [fx,  0, cx],
            [ 0, fy, cy],
            [ 0,  0,  1]
        ], dtype=float)

        self.grid = CozGrid("map_arena.json")
        self.pf = ParticleFilter(self.grid)
        self.gui = GUIWindow(self.grid, show_camera=True)

        # Key arena locations (grid units); `z` must be passed by keyword.
        self.pick_up_pose = Pose(x=4.5, y=20, z=0, angle_z=degrees(90))
        self.drop_off_pose = Pose(x=21.75, y=13.75, z=0, angle_z=degrees(90))

        # Waypoint routes between the two zones.
        self.drop_off_directions = [
            Pose(x=3, y=4.5, z=0, angle_z=degrees(0)),
            Pose(x=21.75, y=4.5, z=0, angle_z=degrees(90)),
            self.drop_off_pose,
        ]
        self.pick_up_directions = [
            Pose(x=21.75, y=4.5, z=0, angle_z=degrees(90)),
            Pose(x=3, y=4.5, z=0, angle_z=degrees(0)),
            self.pick_up_pose,
        ]

    async def drive_to(self, pose):
        """Drive from the current arena pose estimate to `pose`."""
        print("We are at ", self.current_arena_pose, " and we are driving to ", pose)
        directions = pose - self.current_arena_pose
        # The coroutine was previously never awaited, so nothing ran.
        await self.__execute_directions(directions)

    async def __execute_directions(self, directions):
        """Execute a relative move: turn, drive x, turn 90 degrees, drive y."""
        print("Robot is at: ", self.robot.pose)
        await self.robot.turn_in_place(angle=directions.rotation.angle_z).wait_for_completed()
        print("ROBOT is at AFTER TURNING to be parallel to X: ", self.robot.pose)
        # Distances are grid units; self.grid.scale converts to millimeters
        # (`grid` alone was an undefined global).
        await self.robot.drive_straight(
            distance=distance_mm(directions.position.x * self.grid.scale),
            speed=speed_mmps(80)).wait_for_completed()
        print("ROBOT is at AFTER DRIVING in the X direction: ", self.robot.pose)
        await self.robot.turn_in_place(angle=degrees(90)).wait_for_completed()
        print("ROBOT is at AFTER TURNING to be parallel to Y: ", self.robot.pose)
        await self.robot.drive_straight(
            distance=distance_mm(directions.position.y * self.grid.scale),
            speed=speed_mmps(80)).wait_for_completed()
        print("ROBOT is at AFTER DRIVING in the Y direction: ", self.robot.pose)

    async def localize(self):
        """Spin in place until the particle filter converges, then record
        the converged arena pose."""
        # Reset our location estimates.
        conf = False
        self.current_arena_pose = Pose(0, 0, 0, angle_z=degrees(0))
        self.pf = ParticleFilter(self.grid)  # was the undefined global `grid`

        # Reset lift and head so markers are visible to the camera.
        await self.robot.set_lift_height(0.0).wait_for_completed()
        await self.robot.set_head_angle(degrees(3)).wait_for_completed()

        while not conf:
            # Move a little so new markers come into view.
            self.last_robot_pose = self.robot.pose
            await self.robot.turn_in_place(angle=degrees(20)).wait_for_completed()
            odometry = self.__compute_odometry()
            detected_markers, camera_image = await self.__marker_processing()

            # Motion and measurement update with the odometry and marker data
            # (was the undefined global `pf`).
            curr_x, curr_y, curr_h, conf = self.pf.update(odometry, detected_markers)

            # Update the debug GUI.
            self.gui.show_particles(self.pf.particles)
            self.gui.show_mean(curr_x, curr_y, curr_h)
            self.gui.show_camera_image(camera_image)
            self.gui.updated.set()

        self.current_arena_pose = Pose(curr_x, curr_y, 0, angle_z=degrees(curr_h))

    def __compute_odometry(self, cvt_inch=True):
        '''
        Compute the odometry given the current pose of the robot (use robot.pose)

        Input:
            - curr_pose: a cozmo.robot.Pose representing the robot's current location
            - cvt_inch: converts the odometry into grid units
        Returns:
            - 3-tuple (dx, dy, dh) representing the odometry
        '''
        last_x, last_y, last_h = self.last_robot_pose.position.x, self.last_robot_pose.position.y, \
            self.last_robot_pose.rotation.angle_z.degrees
        curr_x, curr_y, curr_h = self.robot.pose.position.x, self.robot.pose.position.y, \
            self.robot.pose.rotation.angle_z.degrees

        # Express the displacement in the robot's previous body frame.
        dx, dy = rotate_point(curr_x - last_x, curr_y - last_y, -last_h)
        if cvt_inch:
            # was the undefined global `grid`
            dx, dy = dx / self.grid.scale, dy / self.grid.scale

        return (dx, dy, diff_heading_deg(curr_h, last_h))

    async def __marker_processing(self, show_diagnostic_image=False):
        '''
        Obtain the visible markers from the current frame from Cozmo's camera.
        Since this is an async function, it must be called using await, for example:

            markers, camera_image = await marker_processing(robot, camera_settings, show_diagnostic_image=False)

        Input:
            - robot: cozmo.robot.Robot object
            - camera_settings: 3x3 matrix representing the camera calibration settings
            - show_diagnostic_image: if True, shows what the marker detector sees after processing
        Returns:
            - a list of detected markers, each being a 3-tuple (rx, ry, rh)
            (as expected by the particle filter's measurement update)
            - a PIL Image of what Cozmo's camera sees with marker annotations
        '''
        # Wait for the latest image from Cozmo.
        image_event = await self.robot.world.wait_for(cozmo.camera.EvtNewRawCameraImage, timeout=30)

        # Convert the image to grayscale for the detector.
        image = np.array(image_event.image)
        image = color.rgb2gray(image)

        # Detect the markers.
        markers, diag = detect.detect_markers(image, self.camera_settings, include_diagnostics=True)

        # Measured marker list for the particle filter, scaled by the grid scale.
        marker_list = [marker['xyh'] for marker in markers]
        marker_list = [(x / self.grid.scale, y / self.grid.scale, h) for x, y, h in marker_list]

        # Annotate the camera image (or the detector's diagnostic view).
        if not show_diagnostic_image:
            annotated_image = image_event.image.resize((image.shape[1] * 2, image.shape[0] * 2))
            annotator.annotate_markers(annotated_image, markers, scale=2)
        else:
            diag_image = color.gray2rgb(diag['filtered_image'])
            diag_image = Image.fromarray(np.uint8(diag_image * 255)).resize((image.shape[1] * 2, image.shape[0] * 2))
            annotator.annotate_markers(diag_image, markers, scale=2)
            annotated_image = diag_image

        return marker_list, annotated_image
class CozmoWarehouseWorker:
    """Particle-filter localization and waypoint driving for a Cozmo robot.

    This revision fixes several errors: `await` inside the synchronous
    `__init__` (a SyntaxError), bare positional `0` after keyword arguments
    in `Pose(...)` calls (a SyntaxError), a missing `self` parameter on
    `drive_to`, references to undefined globals `grid`/`pf`, and a
    truncated `update_current_arena_pose` with unbalanced parentheses,
    reconstructed from the working version earlier in this file.
    """

    def __init__(self, robot: cozmo.robot.Robot):
        self.current_arena_pose = None
        self.last_robot_pose = robot.pose
        self.robot = robot

        # __init__ is synchronous, so it cannot await; start the head move
        # without blocking on it.
        robot.set_head_angle(degrees(3))

        # Start streaming grayscale frames for marker detection.
        robot.camera.image_stream_enabled = True
        robot.camera.color_image_enabled = False
        robot.camera.enable_auto_exposure()

        # Camera intrinsics matrix used by the marker detector.
        # (np.float was removed in NumPy 1.24; builtin float is equivalent.)
        fx, fy = robot.camera.config.focal_length.x_y
        cx, cy = robot.camera.config.center.x_y
        self.camera_settings = np.array([
            [fx,  0, cx],
            [ 0, fy, cy],
            [ 0,  0,  1]
        ], dtype=float)

        self.grid = CozGrid("map_arena.json")
        self.pf = ParticleFilter(self.grid)
        self.gui = GUIWindow(self.grid, show_camera=True)

        # Key arena locations (grid units); `z` must be passed by keyword.
        self.pick_up_pose = Pose(x=4.5, y=20, z=0, angle_z=degrees(90))
        self.drop_off_pose = Pose(x=21.75, y=13.75, z=0, angle_z=degrees(90))

        # Waypoint routes between the two zones.
        self.drop_off_directions = [
            Pose(x=3, y=4.5, z=0, angle_z=degrees(0)),
            Pose(x=21.75, y=4.5, z=0, angle_z=degrees(90)),
            self.drop_off_pose,
        ]
        self.pick_up_directions = [
            Pose(x=21.75, y=4.5, z=0, angle_z=degrees(90)),
            Pose(x=3, y=4.5, z=0, angle_z=degrees(0)),
            self.pick_up_pose,
        ]

    async def drive_to(self, pose):
        """Drive from the current arena pose estimate to `pose`."""
        print("-" * 20)
        print("We are at ", self.current_arena_pose, " and we are driving to ", pose)
        directions = pose - self.current_arena_pose
        print("We will follow these directions: ", directions)
        await self.__execute_directions(directions)
        print("Directions followed!")
        print("-" * 20)

    def update_current_arena_pose(self):
        """Fold the odometry accumulated since the last checkpoint into the
        arena-frame pose estimate.

        Reconstructed from the complete implementation earlier in this file:
        the original two lines here were truncated mid-expression (unbalanced
        parentheses) and referenced a nonexistent `self.cu` attribute.
        """
        # Heading offset between the odometry frame and the arena frame.
        coordinate_systems_diff = diff_heading_deg(
            self.last_robot_pose.rotation.angle_z.degrees,
            self.current_arena_pose.rotation.angle_z.degrees)
        # Rotate both endpoints into the arena frame, then convert the delta
        # from odometry millimeters to grid inches.
        arena_initial_pose_mm = rotate_point(
            self.last_robot_pose.position.x,
            self.last_robot_pose.position.y, coordinate_systems_diff)
        arena_final_pose_mm = rotate_point(
            self.robot.pose.position.x,
            self.robot.pose.position.y, coordinate_systems_diff)
        self.current_arena_pose = self.current_arena_pose + convertPoseFromMmToInches(
            arena_final_pose_mm - arena_initial_pose_mm)

    async def __execute_directions(self, directions):
        """Execute a relative move: turn, drive x, turn 90 degrees, drive y."""
        print("Robot is at: ", self.robot.pose)
        await self.robot.turn_in_place(angle=directions.rotation.angle_z).wait_for_completed()
        print("ROBOT is at AFTER TURNING to be parallel to X: ", self.robot.pose)
        # Distances are grid units; self.grid.scale converts to millimeters
        # (`grid` alone was an undefined global).
        await self.robot.drive_straight(
            distance=distance_mm(directions.position.x * self.grid.scale),
            speed=speed_mmps(80)).wait_for_completed()
        print("ROBOT is at AFTER DRIVING in the X direction: ", self.robot.pose)
        await self.robot.turn_in_place(angle=degrees(90)).wait_for_completed()
        print("ROBOT is at AFTER TURNING to be parallel to Y: ", self.robot.pose)
        await self.robot.drive_straight(
            distance=distance_mm(directions.position.y * self.grid.scale),
            speed=speed_mmps(80)).wait_for_completed()
        print("ROBOT is at AFTER DRIVING in the Y direction: ", self.robot.pose)

    async def localize(self):
        """Spin in place until the particle filter converges, then record
        the converged arena pose."""
        # Reset our location estimates.
        conf = False
        self.current_arena_pose = Pose(0, 0, 0, angle_z=degrees(0))
        self.pf = ParticleFilter(self.grid)  # was the undefined global `grid`

        # Reset lift and head so markers are visible to the camera.
        await self.robot.set_lift_height(0.0).wait_for_completed()
        await self.robot.set_head_angle(degrees(3)).wait_for_completed()

        while not conf:
            # Move a little so new markers come into view.
            self.last_robot_pose = self.robot.pose
            await self.robot.turn_in_place(angle=degrees(20)).wait_for_completed()
            odometry = self.__compute_odometry()
            detected_markers, camera_image = await self.__marker_processing()

            # Motion and measurement update with the odometry and marker data
            # (was the undefined global `pf`).
            curr_x, curr_y, curr_h, conf = self.pf.update(odometry, detected_markers)

            # Update the debug GUI.
            self.gui.show_particles(self.pf.particles)
            self.gui.show_mean(curr_x, curr_y, curr_h)
            self.gui.show_camera_image(camera_image)
            self.gui.updated.set()

        self.current_arena_pose = Pose(curr_x, curr_y, 0, angle_z=degrees(curr_h))

    def __compute_odometry(self, cvt_inch=True):
        '''
        Compute the odometry given the current pose of the robot (use robot.pose)

        Input:
            - curr_pose: a cozmo.robot.Pose representing the robot's current location
            - cvt_inch: converts the odometry into grid units
        Returns:
            - 3-tuple (dx, dy, dh) representing the odometry
        '''
        last_x, last_y, last_h = self.last_robot_pose.position.x, self.last_robot_pose.position.y, \
            self.last_robot_pose.rotation.angle_z.degrees
        curr_x, curr_y, curr_h = self.robot.pose.position.x, self.robot.pose.position.y, \
            self.robot.pose.rotation.angle_z.degrees

        # Express the displacement in the robot's previous body frame.
        dx, dy = rotate_point(curr_x - last_x, curr_y - last_y, -last_h)
        if cvt_inch:
            # was the undefined global `grid`
            dx, dy = dx / self.grid.scale, dy / self.grid.scale

        return (dx, dy, diff_heading_deg(curr_h, last_h))

    async def __marker_processing(self, show_diagnostic_image=False):
        '''
        Obtain the visible markers from the current frame from Cozmo's camera.
        Since this is an async function, it must be called using await, for example:

            markers, camera_image = await marker_processing(robot, camera_settings, show_diagnostic_image=False)

        Input:
            - robot: cozmo.robot.Robot object
            - camera_settings: 3x3 matrix representing the camera calibration settings
            - show_diagnostic_image: if True, shows what the marker detector sees after processing
        Returns:
            - a list of detected markers, each being a 3-tuple (rx, ry, rh)
            (as expected by the particle filter's measurement update)
            - a PIL Image of what Cozmo's camera sees with marker annotations
        '''
        # Wait for the latest image from Cozmo.
        image_event = await self.robot.world.wait_for(cozmo.camera.EvtNewRawCameraImage, timeout=30)

        # Convert the image to grayscale for the detector.
        image = np.array(image_event.image)
        image = color.rgb2gray(image)

        # Detect the markers.
        markers, diag = detect.detect_markers(image, self.camera_settings, include_diagnostics=True)

        # Measured marker list for the particle filter, scaled by the grid scale.
        marker_list = [marker['xyh'] for marker in markers]
        marker_list = [(x / self.grid.scale, y / self.grid.scale, h) for x, y, h in marker_list]

        # Annotate the camera image (or the detector's diagnostic view).
        if not show_diagnostic_image:
            annotated_image = image_event.image.resize((image.shape[1] * 2, image.shape[0] * 2))
            annotator.annotate_markers(annotated_image, markers, scale=2)
        else:
            diag_image = color.gray2rgb(diag['filtered_image'])
            diag_image = Image.fromarray(np.uint8(diag_image * 255)).resize((image.shape[1] * 2, image.shape[0] * 2))
            annotator.annotate_markers(diag_image, markers, scale=2)
            annotated_image = diag_image

        return marker_list, annotated_image

async def run(robot: cozmo.robot.Robot):
    """Cozmo program entry point: localize, drive to the pick-up zone, then
    loop picking up cubes and delivering them to the drop-off zone."""
    # Fixes: the worker requires the robot instance, and localize()/drive_to()
    # are coroutines that must be awaited (they previously never ran).
    cosimo = CozmoWarehouseWorker(robot)
    await cosimo.localize()
    await cosimo.drive_to(cosimo.pick_up_pose)

    # NOTE(review): `goal_pose` and `last_pose` are not defined anywhere in
    # this file — this section appears to be leftover from an earlier
    # revision of the script; verify before running.
    directions = goal_pose - last_pose
    current_pose = last_pose
    last_robot_pose = robot.pose
    print("SETTING LAST ROBOT POSE TO: ", last_robot_pose)
    print("SO WE GOING TO FOLLOW THIS TO PICKUP ZONE:", directions)

    # NOTE(review): `execute_directions`, `convertPoseToInches`,
    # `convertInchesToPose` and `drop_off_directions` are module-level
    # helpers/globals not visible in this file.
    await execute_directions(robot, directions)
    await robot.turn_in_place(angle=degrees(45)).wait_for_completed()
    print("LAST ROBOT POSE IS: ", last_robot_pose)
    print("CURRENT POSE IS:", robot.pose)
    print("WE THINK WE MOVED THIS MUCH TO GO TO PICKUP ZONE: ",
          convertPoseToInches(robot.pose - last_robot_pose))

    last_robot_pose = robot.pose
    print("COZMO THINKS IT IS AT AFTER DRIVING TO PICKUPZONE: ", current_pose)

    while True:
        # Wait until a cube is visible, then grab it.
        cube = await robot.world.wait_for_observed_light_cube(timeout=30)
        print("Found cube: %s" % cube)

        await robot.pickup_object(cube, num_retries=5).wait_for_completed()
        # Dead-reckon the arena pose from the odometry delta.
        current_pose = current_pose + convertPoseToInches(robot.pose - last_robot_pose)
        print("WE THINK WE MOVED THIS MUCH TO PICK UP CUBE: ",
              convertPoseToInches(robot.pose - last_robot_pose))
        last_robot_pose = robot.pose
        print("COZMO THINKS IT IS AT AFTER PICKING UP CUBE: ", current_pose)

        # Follow the waypoint route to the drop-off zone, updating the
        # dead-reckoned pose after each leg.
        for destination in drop_off_directions:
            directions = convertInchesToPose(destination) - current_pose
            await execute_directions(robot, directions)
            current_pose = current_pose + convertPoseToInches(robot.pose - last_robot_pose)
            print("WE THINK WE MOVED THIS MUCH TO FOLLOW DIRECTIONS: ",
                  convertPoseToInches(robot.pose - last_robot_pose))
            last_robot_pose = robot.pose
            print("COZMO THINKS IT IS AT AFTER FOLLOWING DIRECTIONS: ", current_pose)

        # Lower the lift to release the cube.
        await robot.set_lift_height(0.0).wait_for_completed()


class CozmoThread(threading.Thread):
    """Background thread that owns the Cozmo SDK event loop."""

    def __init__(self):
        # Non-daemon so the process waits for the robot program to finish.
        super().__init__(daemon=False)

    def run(self):
        # Keep Cozmo on the charger instead of driving off on connect.
        cozmo.robot.Robot.drive_off_charger_on_connect = False
        cozmo.run_program(run, use_viewer=False)

if __name__ == '__main__':
    # Run the Cozmo SDK in a background thread so the GUI can own this one
    # (GUI toolkits generally require the main thread).
    cozmo_thread = CozmoThread()
    cozmo_thread.start()

    # Seed the GUI and enter its blocking main loop.
    # NOTE(review): `gui` and `pf` are not defined anywhere in this file —
    # presumably module-level globals from the original script; verify.
    gui.show_particles(pf.particles)
    gui.show_mean(0, 0, 0)
    gui.start()
# ===== Example 5 (scraped-example separator; original residue: "Ejemplo n.º 5" / "0") =====
class CozmoWarehouseWorker:

    def __init__(self, robot: cozmo.robot.Robot):
        """Set up camera streaming, camera intrinsics, waypoint routes, and
        the particle filter + GUI used for localization.

        Args:
            robot: connected cozmo.robot.Robot instance to control.
        """
        self.current_arena_pose = None        # pose in arena (grid) coordinates, set by localize()
        self.current_robot_pose = robot.pose  # odometry checkpoint in the robot's own frame
        self.robot = robot
        # start streaming (grayscale frames are enough for marker detection)
        robot.camera.image_stream_enabled = True
        robot.camera.color_image_enabled = False
        robot.camera.enable_auto_exposure()

        # Obtain the camera intrinsics matrix
        fx, fy = robot.camera.config.focal_length.x_y
        cx, cy = robot.camera.config.center.x_y
        # BUG fixed: `np.float` was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin float (== np.float64 here) is the replacement.
        self.camera_settings = np.array([
            [fx,  0, cx],
            [ 0, fy, cy],
            [ 0,  0,  1]
        ], dtype=float)

        # Waypoints in arena/grid units. Drop-off cycles through three slots;
        # set_down_cube advances current_drop_off_pose_idx after each drop.
        self.current_drop_off_pose_idx = 0
        self.pick_up_pose = Pose(x=4.5, y=11, z=0, angle_z=degrees(90))
        self.drop_off_poses = [Pose(x=22, y=11, z=0, angle_z=degrees(90)),  Pose(x=20, y=11, z=0, angle_z=degrees(90)),  Pose(x=18, y=11, z=0, angle_z=degrees(90))]

        self.drop_off_directions = [Pose(x=5, y=6, z=0, angle_z=degrees(0)), Pose(x=22, y=6, z=0, angle_z=degrees(90)), self.drop_off_poses[0]]
        self.pick_up_directions = [Pose(x=21.75, y=4.5, z=0, angle_z=degrees(0)), Pose(x=5, y=4.5, z=0, angle_z=degrees(90)), self.pick_up_pose]

        self.drive_speed = speed_mmps(100)

        self.grid = CozGrid("map_arena.json")
        self.pf = ParticleFilter(self.grid)
        # GUI runs on its own thread; runGUI() blocks inside gui.start().
        threading.Thread(target=self.runGUI).start()

        # Angular offset between the robot's odometry frame and the arena
        # frame; set by localize() and required by update_current_arena_pose().
        self.pose_offset = None

    def runGUI(self):
        """Build the particle-filter GUI and enter its main loop.

        Intended to run on a dedicated thread (started in __init__);
        GUIWindow.start() blocks until the window closes.
        """
        window = GUIWindow(self.grid, show_camera=True)
        self.gui = window
        window.show_particles(self.pf.particles)
        window.show_mean(0, 0, 0)
        window.start()

    async def drive_to(self, directions):
        """Drive to a single pose, or through a list of waypoint poses in order.

        Args:
            directions: a Pose, or a list of Poses in arena coordinates.
        """
        print_debug_header("DRIVING")
        waypoints = directions if isinstance(directions, (list,)) else [directions]
        for waypoint in waypoints:
            await self.__drive_to_pose(waypoint)

    async def __drive_to_pose(self, pose):
        """Drive from the current arena pose to `pose`, then fold the
        resulting odometry into our arena-pose estimate."""
        delta = pose - self.current_arena_pose
        offset = delta.position
        # Keep the target's absolute heading; only the translation is relative.
        move = Pose(x=offset.x, y=offset.y, z=0, angle_z=pose.rotation.angle_z)
        await self.__execute_directions(move)
        self.update_current_arena_pose()
        
    async def __execute_directions(self, directions):
        """Execute a relative move as an axis-aligned "staircase": align with
        the arena x-axis, drive the x-leg, turn 90°, drive the y-leg, then
        turn to the final target heading.

        Args:
            directions: Pose whose position is the (x, y) displacement in grid
                units (scaled to mm via self.grid.scale) and whose angle_z is
                the final target heading in arena coordinates.
        """
        print("Current arena pose is:", self.current_arena_pose, "\n\n")
        #print("Current robot pose is:", self.robot.pose, "\n\n")
        print("We will follow these directions: ", directions, "\n\n")
        initial_position = self.robot.pose.position
        # Undo our current arena heading so the robot faces the arena x-axis.
        await self.robot.turn_in_place(angle=degrees(-self.current_arena_pose.rotation.angle_z.degrees)).wait_for_completed()
        #print("ROBOT is at AFTER TURNING to be parallel to X: ", self.robot.pose, "\n\n")
        #self.update_current_arena_pose()    
        await self.robot.drive_straight(distance=distance_mm(directions.position.x * self.grid.scale), speed=self.drive_speed).wait_for_completed()     
        #print("ROBOT is at AFTER DRIVING in the X direction: ", self.robot.pose, "\n\n")
        #self.update_current_arena_pose()    
        await self.robot.turn_in_place(angle=degrees(90)).wait_for_completed()
        #self.update_current_arena_pose()    
        #print("ROBOT is at AFTER TURNING to be parallel to Y: ", self.robot.pose, "\n\n")
        # 25.0: presumably mm per grid unit (matches self.grid.scale?) — TODO confirm.
        print("TOTAL MOVEMENT IN X: ", math.sqrt((self.robot.pose.position.x - initial_position.x) ** 2 + (self.robot.pose.position.y - initial_position.y) ** 2) / 25.0, "\n")
        initial_position = self.robot.pose.position
        await self.robot.drive_straight(distance=distance_mm(directions.position.y * (self.grid.scale)), speed=self.drive_speed).wait_for_completed()
        #self.update_current_arena_pose()    
        #print("ROBOT is at AFTER DRIVING in the Y direction: ", self.robot.pose, "\n\n")
        #print("ROBOT is TURNING ", diff_heading_deg(directions.rotation.angle_z.degrees, 90), "degrees.", "\n\n")
        # After the 90° turn above the robot faces the arena y-axis (heading 90°),
        # so turn by the difference between the target heading and 90°.
        await self.robot.turn_in_place(angle=degrees(diff_heading_deg(directions.rotation.angle_z.degrees, 90))).wait_for_completed()
        #print("ROBOT is at AFTER FINAL TURN", self.robot.pose, "\n\n") 
        print("TOTAL MOVEMENT IN Y: ", math.sqrt((self.robot.pose.position.x - initial_position.x) ** 2 + (self.robot.pose.position.y - initial_position.y) ** 2)/ 25.0, "\n")
            
    def update_current_arena_pose(self):
        """Fold the robot's odometry since the last checkpoint into
        self.current_arena_pose and reset the checkpoint.

        Uses self.pose_offset (set by localize()) to rotate robot-frame
        displacements into the arena frame.
        """
        print_debug_header("UPDATING CURRENT POSES")
        # A changed origin_id means the robot was picked up / delocalized and
        # its odometry frame no longer matches the one we localized in.
        # BUG fixed: the original compared origin_id (an int) against the
        # whole Pose object, which was always unequal, so this branch fired
        # on every single update.
        if (self.robot.pose.origin_id != self.current_robot_pose.origin_id):
            print("WE WERE DELOCALIZED!")
            # NOTE(review): localize() is a coroutine; calling it without
            # await only creates it and never runs it. The caller must await
            # localize() after a delocalization — TODO restructure.
            self.localize()

        # Rotation taking robot-frame displacements into the arena frame.
        coordinate_systems_diff = -self.pose_offset.degrees
        print("Started at arena pose: ", self.current_arena_pose, "\n\n")
        d_x, d_y = rotate_point(
            self.robot.pose.position.x - self.current_robot_pose.position.x,
            self.robot.pose.position.y - self.current_robot_pose.position.y,
            coordinate_systems_diff)
        d_heading = diff_heading_deg(self.robot.pose.rotation.angle_z.degrees,
                                     self.current_robot_pose.rotation.angle_z.degrees)
        difference_pose = convertPoseFromMmToInches(Pose(x=d_x, y=d_y, z=0, angle_z=degrees(d_heading)))
        print("We think we moved ", difference_pose, "\n\n")
        self.current_arena_pose = self.current_arena_pose + difference_pose
        self.current_robot_pose = self.robot.pose
        print("Current arena pose is now: ", self.current_arena_pose, "\n\n")
    
    async def pick_up_cube(self, tries=5):
        """Sweep in place until a light cube is seen, then pick it up.

        Args:
            tries: retry count forwarded to robot.pickup_object.

        Raises:
            Exception: if no cube is observed during the whole sweep.
        """
        print_debug_header("PICKING UP CUBE")
        cube = None
        # Alternate left/right sweeps until a cube enters the camera's view.
        turns = [-30, 30, 30, -30] * 20
        for angle in turns:
            try:
                cube = await self.robot.world.wait_for_observed_light_cube(timeout=1)
                # NOTE(review): the turn still runs after a cube is seen,
                # nudging the view before pickup — confirm this is intended.
                await self.robot.turn_in_place(angle=degrees(angle)).wait_for_completed()
                if cube is not None:
                    print("Found a cube! ", cube)
                    break
            except Exception:  # best-effort: a timeout just means keep sweeping
                continue
        if cube is None:
            raise Exception("Could not find cube!")

        picked_up_cube = await self.robot.pickup_object(cube, num_retries=tries).wait_for_completed()
        self.update_current_arena_pose()
        if picked_up_cube is None:
            print("Could not get the cube.")
            await self.robot.say_text("Help me!").wait_for_completed()
            await self.robot.set_lift_height(1.0).wait_for_completed()
            # BUG fixed: asyncio.sleep must be awaited; the bare call was a
            # no-op that produced a "coroutine was never awaited" warning.
            await asyncio.sleep(5)
        else:
            print("Picked up cube!")
       
    async def set_down_cube(self):
        """Lower the cube, then advance the drop-off route to the next slot."""
        print_debug_header("DROPPED")
        # Drop the cube and tilt the head back down so markers stay visible.
        await self.robot.set_lift_height(0.0).wait_for_completed()
        await self.robot.set_head_angle(degrees(3)).wait_for_completed()
        print("Dropped off at pose ", self.current_drop_off_pose_idx)

        # Store the old drop position
        old_drop_off_position = self.drop_off_poses[self.current_drop_off_pose_idx].position

        # Advance to the next drop slot, wrapping around.
        # BUG fixed: `idx + 1 % 3` parses as `idx + (1 % 3)` == idx + 1, so the
        # index never wrapped and ran past the end after three drop-offs.
        self.current_drop_off_pose_idx = (self.current_drop_off_pose_idx + 1) % len(self.drop_off_poses)
        new_drop_off_position = self.drop_off_poses[self.current_drop_off_pose_idx].position

        # Set the second step to stop perpendicular to the next drop off location
        self.drop_off_poses[1] = Pose(x=new_drop_off_position.x, y=old_drop_off_position.y, z=0, angle_z=degrees(90))

        # set the drop off location
        self.drop_off_poses[2] = self.drop_off_poses[self.current_drop_off_pose_idx]
        
    async def localize(self, turn_angle=20):
        """Spin in place, feeding odometry and observed markers to the
        particle filter until it converges; then record the arena pose and
        the angular offset between the robot's frame and the arena frame.

        Args:
            turn_angle: degrees rotated between successive filter updates.
        """
        print_debug_header("LOCALIZING")
        # Reset our location estimates before starting a fresh search.
        converged = False
        self.current_arena_pose = Pose(0, 0, 0, angle_z=degrees(0))
        self.pf = ParticleFilter(self.grid)

        # Reset lift and head so the camera can see the wall markers.
        await self.robot.set_lift_height(0.0).wait_for_completed()
        await self.robot.set_head_angle(degrees(3)).wait_for_completed()

        while not converged:
            # Rotate a little, then run one motion + measurement update.
            self.current_robot_pose = self.robot.pose
            await self.robot.turn_in_place(angle=degrees(turn_angle)).wait_for_completed()
            odometry = self.__compute_odometry()
            detected_markers, camera_image = await self.__marker_processing()
            curr_x, curr_y, curr_h, converged = self.pf.update(odometry, detected_markers)

            # Push the new estimate to the GUI.
            self.gui.show_particles(self.pf.particles)
            self.gui.show_mean(curr_x, curr_y, curr_h)
            self.gui.show_camera_image(camera_image)
            self.gui.updated.set()

        self.current_robot_pose = self.robot.pose
        self.current_arena_pose = Pose(curr_x, curr_y, 0, angle_z=degrees(curr_h))
        # Offset between the arena heading and the robot's odometry heading.
        self.pose_offset = degrees(-(diff_heading_deg(curr_h, self.robot.pose.rotation.angle_z.degrees)))
        print("We localized to arena location ", self.current_arena_pose)
    
    def __compute_odometry(self, cvt_inch=True):
        """Return the robot's motion since self.current_robot_pose.

        Args:
            cvt_inch: when True, scale the translation into grid units
                using self.grid.scale.

        Returns:
            (dx, dy, dh): translation expressed in the previous pose's local
            frame (grid units or mm) and heading change in degrees.
        """
        prev = self.current_robot_pose
        curr = self.robot.pose

        # Express the world-frame displacement in the previous pose's frame.
        dx, dy = rotate_point(curr.position.x - prev.position.x,
                              curr.position.y - prev.position.y,
                              -prev.rotation.angle_z.degrees)
        if cvt_inch:
            dx /= self.grid.scale
            dy /= self.grid.scale

        return (dx, dy, diff_heading_deg(curr.rotation.angle_z.degrees,
                                         prev.rotation.angle_z.degrees))


    async def __marker_processing(self, show_diagnostic_image=False):
        '''
        Obtain the visible markers from the current frame from Cozmo's camera,
        using the intrinsics in self.camera_settings. Being async, it must be
        awaited:

            markers, camera_image = await self.__marker_processing()

        Input:
            - show_diagnostic_image: if True, the returned image shows what the
              marker detector sees after filtering instead of the raw frame
        Returns:
            - a list of detected markers, each being a 3-tuple (rx, ry, rh)
              scaled into grid units (as expected by the particle filter's
              measurement update)
            - a PIL Image (2x upscaled) of what Cozmo's camera sees, with
              marker annotations drawn on it
        '''
        # Wait for the latest image from Cozmo
        image_event = await self.robot.world.wait_for(cozmo.camera.EvtNewRawCameraImage, timeout=30)

        # Convert the image to grayscale
        image = np.array(image_event.image)
        image = color.rgb2gray(image)
        
        # Detect the markers
        markers, diag = detect.detect_markers(image, self.camera_settings, include_diagnostics=True)

        # Measured marker list for the particle filter, scaled by the grid scale
        marker_list = [marker['xyh'] for marker in markers]
        marker_list = [(x/self.grid.scale, y/self.grid.scale, h) for x,y,h in marker_list]

        # Annotate the camera image with the markers
        if not show_diagnostic_image:
            annotated_image = image_event.image.resize((image.shape[1] * 2, image.shape[0] * 2))
            annotator.annotate_markers(annotated_image, markers, scale=2)
        else:
            # Grayscale diagnostic image is in [0, 1]; convert to 8-bit RGB.
            diag_image = color.gray2rgb(diag['filtered_image'])
            diag_image = Image.fromarray(np.uint8(diag_image * 255)).resize((image.shape[1] * 2, image.shape[0] * 2))
            annotator.annotate_markers(diag_image, markers, scale=2)
            annotated_image = diag_image

        return marker_list, annotated_image