def _setup_vision_service(self, first_robot_orientation=AN_ORIENTATION):
    """Stub the vision service to yield two successive vision states.

    The first reading carries the caller-supplied orientation, the second
    falls back to the fixture's default orientation; both share A_POSITION.
    """
    poses = [
        RobotPose(self.A_POSITION, first_robot_orientation),
        RobotPose(self.A_POSITION, self.AN_ORIENTATION),
    ]
    self._vision_service.get_vision_state.side_effect = [
        (self.AN_IMAGE, pose) for pose in poses
    ]
def _find_movements_to_command_panel_zone(self, robot_pose: RobotPose):
    """Plan a path to the command panel zone and turn it into movements."""
    start_position = robot_pose.get_position()
    planned_path = self._path_service.find_path_to_command_panel_zone(start_position)
    # Publish the planned trajectory so it can be rendered/consumed elsewhere.
    GameState.get_instance().set_current_planned_trajectory(planned_path)
    heading_in_degree = robot_pose.get_orientation_in_degree()
    return self._movement_factory.create_movements(planned_path, heading_in_degree)
def find_movements_to_goal(self, robot_pose: RobotPose, goal_position):
    """Compute the movements that take the robot from its pose to goal_position.

    Raises:
        NotImplementedError: when FIND_MOVEMENTS_FOR_REAL is disabled; the
            hardcoded fallback movements are not implemented yet.
    """
    if not FIND_MOVEMENTS_FOR_REAL:
        # The original branch was a bare `pass`, so callers silently got
        # None back and failed later with an obscure TypeError. Fail loudly
        # until the fallback exists.
        # TODO return hardcoded movements
        raise NotImplementedError("hardcoded movements are not implemented yet")
    # The X offset shifts the planning start point ahead of the robot
    # center — presumably to plan from the gripper; TODO confirm why 140.
    path = self._path_service.find_path(
        robot_pose.get_position() + Position(140, 0),
        goal_position,
    )
    return MovementFactory().create_movements(
        path, robot_pose.get_orientation_in_degree())
def _go_to_starting_zone_center(self, first_movement=False):
    """Orient the robot east, locate it, and drive it to the starting zone."""
    # TODO Maybe get this out of here to now look clanky at the beginning of the stage
    self._rotation_service.rotate(CardinalOrientation.EAST.value)
    robot_pose = self._find_robot_pose()
    if first_movement:
        # On the very first move, nudge the detected position 20 units on Y
        # before planning.
        detected_position = robot_pose.get_position()
        adjusted_position = Position(
            detected_position.get_x_coordinate(),
            detected_position.get_y_coordinate() + 20,
        )
        robot_pose = RobotPose(
            adjusted_position, robot_pose.get_orientation_in_degree()
        )
    movements = self._find_movements_to_starting_zone(robot_pose)
    self._move_robot(movements)
def detect(self, image) -> RobotPose:
    """Locate the robot's ArUco marker in *image* and return its pose.

    Raises:
        RobotNotFoundException: when the raw marker center sits on the
            wrong side of the height-corrected robot center, i.e. the
            detection geometry is inconsistent.
    """
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    corners, ids, _ = aruco.detectMarkers(
        grayscale, self._aruco_dictionary, parameters=self._detector_parameters)
    robot_marker_corners = self._get_robot_marker_corners(corners, ids)
    marker_position = self.get_quadrangle_center_coordinates_from_corner_coordinates(
        robot_marker_corners[0].reshape(-1, 2))
    image_points = self._get_robot_image_points(robot_marker_corners)
    projection_points = self._get_projection_points(image_points)
    robot_orientation = self._get_robot_orientation_in_degree(robot_marker_corners)
    robot_position = self.get_quadrangle_center_coordinates_from_corner_coordinates(
        projection_points[0])
    # Sanity check: depending on which half of the frame the marker is in
    # (640 is presumably the horizontal midpoint of the image — TODO
    # confirm), the projected robot center must lie on the expected side.
    marker_x = marker_position.get_x_coordinate()
    robot_x = robot_position.get_x_coordinate()
    if marker_x < 640:
        inconsistent = marker_x > robot_x
    else:
        inconsistent = marker_x < robot_x
    if inconsistent:
        raise RobotNotFoundException()
    return RobotPose(robot_position, robot_orientation)
class VisionWorker:
    """Background worker that continuously refreshes the shared vision state."""

    # Fallback pose served whenever detection fails; starts at the
    # starting-zone center facing west and then tracks the last good reading.
    last_known_pose = RobotPose(
        Position(STARTING_ZONE_CENTER_POSITION[0], STARTING_ZONE_CENTER_POSITION[1]),
        CardinalOrientation.WEST.value,
    )

    def __init__(self, vision_service: VisionService):
        self._vision_service = vision_service

    def run(self):
        """Poll the vision service forever, roughly ten times per second."""
        while True:
            self.update_vision_state(self._vision_service)
            time.sleep(0.1)

    @staticmethod
    def update_vision_state(vision_service: VisionService):
        """Fetch one vision state and publish it to the game state.

        When the robot cannot be found, republish the last known pose so
        consumers never observe a missing pose and the worker keeps running.
        """
        try:
            table_image, robot_pose = vision_service.get_vision_state()
        except RobotNotFoundException:
            GameState.get_instance().set_robot_pose(VisionWorker.last_known_pose)
        else:
            game_state = GameState.get_instance()
            game_state.set_table_image(table_image)
            game_state.set_robot_pose(robot_pose)
            VisionWorker.last_known_pose = robot_pose
def test_givenAnImage_whenDetectRobot_thenReturnCorrectRobotPose(self):
    """The detector must report the known pose for the reference image."""
    image_under_test = cv2.imread(self.AN_IMAGE)
    expected_robot_pose = RobotPose(Position(705, 664), Orientation(11))

    actual_robot_pose = self.robot_detector.detect(image_under_test)

    self.assertEqual(expected_robot_pose, actual_robot_pose)
def _create_straight_movement(
    self,
    direction: Direction,
    goal_position: Position,
    robot_pose: RobotPose,
):
    """Build a single straight movement taking the robot to goal_position."""
    # TODO Fix it so we calculate diagonal path to avoid it looking dumb in the UI
    path = self._path_service.find_path(robot_pose.get_position(), goal_position)
    GameState.get_instance().set_current_planned_trajectory(path)
    # Forward motion is referenced from the gripper, any other direction
    # from the robot's base position.
    if direction is Direction.FORWARD:
        reference_position = robot_pose.get_gripper_position()
    else:
        reference_position = robot_pose.get_position()
    movement = self._movement_factory.create_movement_to_get_to_point_with_direction(
        reference_position,
        goal_position,
        direction,
    )
    return [movement]
def find_movements_to_puck_zone(self, robot_pose: RobotPose):
    """Plan a path to the puck zone center and convert it to movements."""
    start_position = robot_pose.get_position()
    puck_zone_path = self._path_service.find_path_to_puck_zone_center(start_position)
    return MovementFactory().create_movements(
        puck_zone_path,
        robot_pose.get_orientation_in_degree(),
    )
def find_orientation_to_puck(self, puck_position: Position, robot_pose: RobotPose):
    """Return the angle from the robot's position toward *puck_position*."""
    return GeometryUtils.calculate_angle_between_positions(
        robot_pose.get_position(),
        puck_position,
    )
def _get_robot_position(self, robot_pose: RobotPose) -> dict:
    """Serialize the robot position, falling back to BASE_POSITION when unknown."""
    if robot_pose is None:
        # No detection yet — report the configured default position.
        return self.BASE_POSITION.to_dictionary()
    return robot_pose.get_position().to_dictionary()
calibrator = OpenCvCalibrator(CALIBRATION_FILE_PATH)
camera = OpenCvWorldCamera(LAPTOP_CAMERA_INDEX, calibrator)
robot_detector = OpenCvRobotDetector(
    DICT_4X4_50,
    ROBOT_ARUCO_MARKER_ID,
    ROBOT_ARUCO_MARKER_SIZE,
    CAMERA_MATRIX,
    DISTORTION_COEFFICIENTS,
    ROBOT_HEIGHT,
)

if __name__ == "__main__":
    # Live debug loop: overlay the detected gripper position on the feed.
    while True:
        image = camera.take_world_image()
        # Default pose keeps the overlay drawable when detection fails.
        robot_pose = RobotPose(Position(800, 600), Orientation(0))
        try:
            robot_pose = robot_detector.detect(image)
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit — the loop could not be stopped with Ctrl-C.
            pass
        cv2.circle(
            image,
            robot_pose.get_gripper_position().to_tuple(),
            3,
            [255, 0, 0],
            thickness=10,
        )
        cv2.imshow("robot", image)
        # imshow only renders once the HighGUI event loop is pumped.
        cv2.waitKey(1)
def _create_robot_pose(x_position: int, y_position: int, orientation: int):
    """Build a RobotPose from raw integer coordinates and an orientation."""
    return RobotPose(
        Position(x_position, y_position),
        Orientation(orientation),
    )