def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.target_selection = TargetSelection()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.inner_target_exists = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)

        # ROS Publisher for the path
        self.path_publisher = rospy.Publisher(rospy.get_param('path_pub_topic'), \
            Path, queue_size = 10)
        # ROS Publisher for the subtargets
        self.subtargets_publisher = rospy.Publisher(rospy.get_param('subgoals_pub_topic'),\
            MarkerArray, queue_size = 10)
        # ROS Publisher for the current target
        self.current_target_publisher = rospy.Publisher(rospy.get_param('curr_target_pub_topic'),\
            Marker, queue_size = 10)
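
This constructor, and every example below, converts between OGM grid cells and map-frame coordinates using the perception module's resolution (meters per pixel) and origin (meters). The following is a minimal standalone sketch of those two conversions; the function names and argument layout are illustrative assumptions, not part of the original RobotPerception API.

# Minimal sketch of the grid <-> map-frame conversions used throughout these
# examples. `resolution` is assumed to be in meters/pixel and `origin` a dict
# holding the map origin in meters, as in RobotPerception above.

def grid_to_world(px, py, resolution, origin):
    # A grid cell times the resolution gives meters; adding the map origin
    # expresses the point in the map frame.
    return [px * resolution + origin['x'],
            py * resolution + origin['y']]

def world_to_grid(x, y, resolution, origin):
    # Inverse of grid_to_world: shift by the origin, then divide by resolution.
    return [int((x - origin['x']) / resolution),
            int((y - origin['y']) / resolution)]

# Example: cell (120, 80) on a 0.05 m/px map with origin (-10, -10)
# maps to (-4.0, -6.0) in the map frame.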
Example #2
class Navigation:

    # Constructor
    def __init__(self):

        # Initializations of Objects Needed
        self.robot_perception = RobotPerception()
        self.path_planning = PathPlanning()

        # Parameters from YAML File
        self.move_with_target = rospy.get_param("calculate_target")
        self.target_selector = rospy.get_param("target_selector")
        self.turtlebot_path_topic = rospy.get_param('turtlebot_path_topic')
        self.turtlebot_subgoals_topic = rospy.get_param(
            'turtlebot_subgoals_topic')
        self.turtlebot_curr_target_topic = rospy.get_param(
            'turtlebot_curr_target_topic')
        self.map_frame = rospy.get_param('map_frame')
        self.debug = rospy.get_param('debug')
        self.turtlebot_save_progress_images = rospy.get_param(
            'turtlebot_save_progress_images')
        self.limit_exploration_flag = rospy.get_param(
            'limit_exploration')  # Flag to enable/disable OGM enhancement

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.inner_target_exists = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0
        self.count_limit = 100  # 10 sec (the timer below fires every 0.1 s)
        self.counter_to_next_sub = self.count_limit

        # Timer Thread (initialization)
        self.time_limit = 150  # in seconds
        self.timerThread = TimerThread(self.time_limit)

        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)

        # Read the target function
        self.target_selection = TargetSelection(self.target_selector)

        # ROS Publisher for the path
        self.path_publisher = rospy.Publisher(self.turtlebot_path_topic,
                                              Path,
                                              queue_size=10)
        # ROS Publisher for the subtargets
        self.subtargets_publisher = rospy.Publisher(
            self.turtlebot_subgoals_topic, MarkerArray, queue_size=10)

        # ROS Publisher for the current target
        self.current_target_publisher = rospy.Publisher(
            self.turtlebot_curr_target_topic, Marker, queue_size=10)

        # For Testing Purpose
        self.timer_flag = True  # True to Enable Timer for resetting Navigation

        # Speed Parameter
        self.max_linear_velocity = rospy.get_param('max_linear_speed')
        self.max_angular_velocity = rospy.get_param('max_angular_speed')

        # Parameters of OGM Enhancement
        self.first_run_flag = True  # Flag to Control first Enhancement in OGM Map

        # Map Container Visualize Message
        self.map_size = np.array(
            [[rospy.get_param('x_min_size'),
              rospy.get_param('y_min_size')],
             [rospy.get_param('x_min_size'),
              rospy.get_param('y_max_size')],
             [rospy.get_param('x_max_size'),
              rospy.get_param('y_min_size')],
             [rospy.get_param('x_max_size'),
              rospy.get_param('y_max_size')]],
            dtype=np.float64)

        if self.limit_exploration_flag:

            # Create CV Bridge Object
            self.bridge = CvBridge()
            # Parameters
            self.drone_image = []  # Hold the Drone OGM Image converted in CV!
            self.ogm_in_cv = []  # Hold the OGM in CV Compatible Version
            self.drone_yaw = 0.0  # Holds the current Drone Yaw
            self.drone_map_pose = []  # Holds the current Drone Position in Map
            # Service
            self.drone_explore_limit_srv_name = rospy.get_param(
                'drone_explore_limit_srv')
            self.limits_exploration_service = rospy.ServiceProxy(
                self.drone_explore_limit_srv_name, OgmLimits)
            # Subscriber
            self.drone_pub_image_topic = rospy.get_param(
                'drone_pub_image_topic')
            self.drone_image_sub = rospy.Subscriber(self.drone_pub_image_topic,
                                                    Image, self.drone_image_cb)
            # Flags
            self.done_enhancement = False  # Flag for Controlling OGM Enhancement Loop
            self.drone_take_image = False  # Flag for Controlling When to Read Image From Drone Camera
            # FIXME: Testing
            self.match_with_limit_pose = True  # True/False to Match with Known Location/Template Matching
            self.match_with_drone_pose = False  # True if you match with Drone Real Pose

    def checkTarget(self, event):

        # Check if we have a target or if the robot just wanders
        if not self.inner_target_exists or not self.move_with_target or self.next_subtarget == len(
                self.subtargets):
            return

        # Check if timer has expired
        if self.timer_flag:
            if self.timerThread.expired:
                Print.art_print('\n~~~~ Time reset ~~~~', Print.RED)
                self.inner_target_exists = False
                self.target_exists = False
                return

        # Get the robot pose in pixels
        [rx, ry] = [
            self.robot_perception.robot_pose['x_px'] -
            self.robot_perception.origin['x'] /
            self.robot_perception.resolution,
            self.robot_perception.robot_pose['y_px'] -
            self.robot_perception.origin['y'] /
            self.robot_perception.resolution
        ]
        theta_robot = self.robot_perception.robot_pose['th']

        # Clear achieved targets
        self.subtargets = self.subtargets[self.next_subtarget:]
        self.next_subtarget = 0

        # If the distance between the robot pose and the next subtarget is < 7 pixels, consider the target reached
        dist = math.hypot(rx - self.subtargets[self.next_subtarget][0],
                          ry - self.subtargets[self.next_subtarget][1])
        if dist < 7:
            self.next_subtarget += 1
            self.counter_to_next_sub = self.count_limit
            # Check if the final subtarget has been approached
            if self.next_subtarget == len(self.subtargets):
                self.target_exists = False

        # Publish the current target
        if self.next_subtarget >= len(self.subtargets):
            return

        subtarget = [
            self.subtargets[self.next_subtarget][0] *
            self.robot_perception.resolution +
            self.robot_perception.origin['x'],
            self.subtargets[self.next_subtarget][1] *
            self.robot_perception.resolution +
            self.robot_perception.origin['y']
        ]

        RvizHandler.printMarker([subtarget], 1, 0, "map", "art_next_subtarget",
                                [0, 0, 0.8, 0.8], 0.2)

    # Function that selects the next target, produces the path and updates
    # the coverage field. This is called from the speeds assignment code, since
    # it contains timer callbacks
    def selectTarget(self):

        # IMPORTANT: The robot must be stopped if you call this function until
        # it is over

        # Cancel previous goal timer
        self.timerThread.stop()
        # Check if we have a map
        while not self.robot_perception.have_map:
            Print.art_print("Navigation: No map yet", Print.RED)
            return

        print "Clearing all markers !\n\n"
        RvizHandler.printMarker([[0, 0]], 1, 3, "map", "null", [0, 0, 0, 0],
                                0.1)
        # Visualize Map Container
        map_container_mark = []
        for s in self.map_size:
            map_container_mark.append([
                s[0] * self.robot_perception.resolution +
                self.robot_perception.origin['x'],
                s[1] * self.robot_perception.resolution +
                self.robot_perception.origin['y']
            ])
        RvizHandler.printMarker(map_container_mark, 1, 0, "map",
                                "art_map_container", [1.0, 0.0, 0.0, 1.0], 0.3)

        print '----------------------------------------------------------'
        print 'Navigation: Producing new target'
        # We are good to continue the exploration
        # Make this true in order not to call it again from the speeds assignment
        self.target_exists = True

        ########################################################################
        # Get OGM Map and Coverage
        start_ = time.time()

        # Gets copies of the map and coverage
        start = time.time()
        local_ogm = self.robot_perception.getMap()
        if self.debug:
            print str('Robot Perception: Got the map in ') + str(
                time.time() - start) + str(' seconds.')

        start = time.time()
        local_ros_ogm = self.robot_perception.getRosMap()
        if self.debug:
            print str('Robot Perception: Got the ros map in ') + str(
                time.time() - start) + str(' seconds.')

        start = time.time()
        local_coverage = self.robot_perception.getCoverage()
        if self.debug:
            print str('Robot Perception: Got the coverage in ') + str(
                time.time() - start) + str(' seconds.')

        print str('Navigation: Got the map and Coverage in ') + str(
            time.time() - start_) + str(' seconds.')

        ########################################################################
        # Part 1 - OGM Enhancement
        no_points = False
        ogm_limits_before = []

        if self.turtlebot_save_progress_images:
            ####################### Save OGM and Coverage ##########################
            scipy.misc.imsave(
                '/home/vvv/pictures_slam/ogm.png',
                cv2.bitwise_not(
                    exposure.rescale_intensity(np.array(local_ogm,
                                                        dtype=np.uint8),
                                               in_range=(0, 100),
                                               out_range=(0, 255))))
            scipy.misc.imsave('/home/vvv/pictures_slam/coverage.png',
                              local_coverage)

        start_ = time.time()

        print str(self.robot_perception.percentage_explored)

        if self.limit_exploration_flag and self.robot_perception.percentage_explored < 1.0 and not self.first_run_flag:

            #############################################################################
            # Subpart 1 - Find the Useful boundaries of OGM
            #############################################################################
            start = time.time()
            (points, ogm_limits_before) = self.ogm_limit_calculation(
                local_ogm, self.robot_perception.resolution,
                self.robot_perception.origin, 20, 20, 12, self.map_size)
            if points is None or len(points) == 0:
                print str('No limit points calculated')
                no_points = True
            else:
                print str('Number of Limit Points Calculated is ') + str(len(points)) + str(' in ')\
                      + str(time.time() - start) + str(' seconds.')
                no_points = False

            if self.debug:
                print str('\n') + str(points) + str('\n')
            #############################################################################

            #########################################################################
            # Subpart 2 - Send Drone to Limits and Match Them to the OGM Map
            #########################################################################
            startt = time.time()

            if not no_points:

                index = 0
                p = points[index, :]
                while len(points):

                    points = np.delete(
                        points, [index],
                        axis=0)  # Delete the point that's about to be explored

                    # Send Drone to OGM Limits
                    start = time.time()
                    while not self.sent_drone_to_limit(p[0], p[1]):
                        pass

                    if self.debug:
                        Print.art_print(
                            str('Navigation: Get Image From Drone took ') +
                            str(time.time() - start) + str(' seconds.'),
                            Print.ORANGE)

                    # Image Matching
                    start = time.time()

                    if self.match_with_limit_pose:

                        if self.match_with_drone_pose:
                            [x_p, y_p] = [
                                int((self.drone_map_pose[0] -
                                     self.robot_perception.origin['x']) /
                                    self.robot_perception.resolution),
                                int((self.drone_map_pose[1] -
                                     self.robot_perception.origin['y']) /
                                    self.robot_perception.resolution)
                            ]

                            local_ogm = ImageMatching.ogm_match_with_known_image_pose(
                                ogm=local_ogm,
                                new_data=self.drone_image,
                                coordinates=[x_p, y_p, self.drone_yaw],
                                map_boundries=[
                                    self.map_size[0][0], self.map_size[3][0],
                                    self.map_size[0][1], self.map_size[3][1]
                                ],
                                debug=self.debug)
                        else:
                            local_ogm = ImageMatching.ogm_match_with_known_image_pose(
                                ogm=local_ogm,
                                new_data=self.drone_image,
                                coordinates=[
                                    int(p[2]),
                                    int(p[3]), self.drone_yaw
                                ],
                                map_boundries=[
                                    int(self.map_size[0][0]),
                                    int(self.map_size[3][0]),
                                    int(self.map_size[0][1]),
                                    int(self.map_size[3][1])
                                ],
                                debug=self.debug)

                    else:
                        local_ogm = ImageMatching.template_matching(
                            ogm=local_ogm,
                            drone_image=self.drone_image,
                            lim_x=int(p[2]),
                            lim_y=int(p[3]),
                            drone_yaw=self.drone_yaw,
                            window=200,
                            s_threshold=0.8,
                            debug=self.debug)

                    if self.debug:
                        Print.art_print(
                            str('Navigation: OGM Matching Function  took ') +
                            str(time.time() - start) + str(' seconds.\n'),
                            Print.ORANGE)

                    # Calculate Point for Next Loop!
                    if len(points) > 0:
                        index = self.closest_limit_point(p[:2], points[:, :2])
                        p = points[index, :]
                '''
                print('\x1b[35;1m' + str('Navigation: Taking Image and Matching took ') +
                      str(time.time() - startt) + str(' seconds.') + '\x1b[0m')
                '''
            ########################################################################

            ########################################################################
            # Subpart 3 - Copy Enhanced Data to ROS OGM
            ########################################################################
            if not no_points:

                start = time.time()
                local_ros_ogm.data = cp_data_to_ros_ogm(
                    np.array(local_ros_ogm.data), local_ogm,
                    local_ros_ogm.info.width, local_ros_ogm.info.height)
                if self.debug:
                    print('\x1b[35;1m' +
                          str('Navigation: Copying OGM Data took ') +
                          str(time.time() - start) + str(' seconds.') +
                          '\x1b[0m')
            ########################################################################

        print('\x1b[38;1m' + str('Navigation: OGM Enhancement took ') +
              str(time.time() - start_) + str(' seconds.') + '\x1b[0m')

        if self.turtlebot_save_progress_images and self.limit_exploration_flag and not no_points:
            ########################## Save Enhanced OGM ############################
            scipy.misc.imsave(
                '/home/vvv/pictures_slam/ogm_enhanced.png',
                cv2.bitwise_not(
                    exposure.rescale_intensity(np.array(local_ogm,
                                                        dtype=np.uint8),
                                               in_range=(0, 100),
                                               out_range=(0, 255))))

        ########################################################################
        # Part 2 - Set Map, Get Robot Position and Choose Target

        start = time.time()
        self.target_selection.path_planning.setMap(local_ros_ogm)
        print str('Navigation: Set Map in ') + str(time.time() -
                                                   start) + str(' seconds.')

        # Once the target has been found, find the path to it
        # Get the global robot pose
        start = time.time()
        g_robot_pose = self.robot_perception.getGlobalCoordinates([
            self.robot_perception.robot_pose['x_px'],
            self.robot_perception.robot_pose['y_px']
        ])
        print str('Navigation: Got Robot Pose in ') + str(
            time.time() - start) + str(' seconds.')

        # Call the target selection function to select the next best goal
        self.path = []
        force_random = False
        while len(self.path) == 0:

            # Get Target and Path to It!
            start = time.time()
            self.path = self.target_selection.selectTarget(
                local_ogm, local_coverage, self.robot_perception.robot_pose,
                self.robot_perception.origin, self.robot_perception.resolution,
                g_robot_pose, ogm_limits_before, force_random)
            #self.target_selection.path_planning.createPath(g_robot_pose, target, self.robot_perception.resolution)

            if len(self.path) == 0:
                Print.art_print(
                    'Navigation: Path planning failed. Fallback to random target selection',
                    Print.RED)
                print str('\n')
                force_random = True
            else:
                print str('Navigation: Target Selected and Path to Target found with ') + str(len(self.path)) \
                      + str(' points in ') + str(time.time() - start) + str(' seconds.')

        ########################################################################
        # Part 3 - Create Subgoals and Print Path to RViz

        start = time.time()

        # Reverse the path to start from the robot
        self.path = self.path[::-1]
        # Break the path into subgoals every step pixels (here step = 1; 1 m = 20 px)
        step = 1
        n_subgoals = int(len(self.path) / step)
        self.subtargets = []
        for i in range(0, n_subgoals):
            self.subtargets.append(self.path[i * step])
        self.subtargets.append(self.path[-1])
        self.next_subtarget = 0

        if self.debug:
            print str('Navigation: The path produced ') + str(len(self.subtargets)) + str(' subgoals in ') \
                  + str(time.time() - start) + str(' seconds.')

        # Start timer thread
        self.timerThread.start()

        # Publish the path for visualization purposes
        ros_path = Path()
        ros_path.header.frame_id = self.map_frame
        for p in self.path:
            ps = PoseStamped()
            ps.header.frame_id = self.map_frame
            ps.pose.position.x = p[0] * self.robot_perception.resolution + \
                self.robot_perception.origin['x']
            ps.pose.position.y = p[1] * self.robot_perception.resolution + \
                self.robot_perception.origin['y']
            ros_path.poses.append(ps)
        self.path_publisher.publish(ros_path)

        # Publish the subtargets for visualization purposes
        subtargets_mark = []
        for s in self.subtargets:
            subtargets_mark.append([
                s[0] * self.robot_perception.resolution +
                self.robot_perception.origin['x'],
                s[1] * self.robot_perception.resolution +
                self.robot_perception.origin['y']
            ])
        RvizHandler.printMarker(subtargets_mark, 2, 0, 'map', 'art_subtargets',
                                [0, 0.8, 0.0, 0.8], 0.2)

        # Update number of explored targets
        self.robot_perception.num_of_explored_targets += 1

        self.inner_target_exists = True

        # Reset First Run Flag
        if self.first_run_flag:
            self.first_run_flag = False

    # Function that calculates Velocity to next Target
    def velocitiesToNextSubtarget(self):

        # Initialize Speeds
        [linear, angular] = [0, 0]
        # Get Robot Position in Map
        [rx, ry] = [self.robot_perception.robot_pose['x_px'] - self.robot_perception.origin['x'] / self.robot_perception.resolution, \
                    self.robot_perception.robot_pose['y_px'] - self.robot_perception.origin['y'] / self.robot_perception.resolution]
        theta = self.robot_perception.robot_pose['th']
        # If Target Exists Calculate Speeds
        if self.subtargets and self.next_subtarget <= len(self.subtargets) - 1:
            # Calculate dx, dy, dth
            st_x = self.subtargets[self.next_subtarget][0]
            st_y = self.subtargets[self.next_subtarget][1]

            th_rg = np.arctan2(st_y - ry, st_x - rx)
            dth = (th_rg - theta)

            if dth > np.pi:
                omega = (dth - 2 * np.pi) / np.pi
            elif dth < -np.pi:
                omega = (dth + 2 * np.pi) / np.pi
            else:
                omega = (th_rg - theta) / np.pi

            # Nonlinear Relations Derived from Experimentation
            linear = self.max_linear_velocity * ((1 - np.abs(omega))**5)
            angular = self.max_angular_velocity * np.sign(omega) * (
                abs(omega) ** (1.0 / 5))  # 1.0/5 avoids Python 2 integer division
            '''
            new_theta = atan2(st_y - ry, st_x - rx)
            if new_theta <= -3:
                new_theta = -new_theta
            if theta <= -3:
                theta = -theta
            delta_theta = new_theta - theta
            if abs(delta_theta) > 3.15:
                delta_theta = -0.2 * np.sign(delta_theta)
            # Assign Speeds Accordingly
            if abs(delta_theta) < 0.1:
                linear = self.max_linear_velocity
                angular = 0
            else:
                linear = 0
                angular = self.max_angular_velocity*np.sign(delta_theta)
            '''
        # Return Speeds
        return [linear, angular]

    # Function that sends the drone to explore an OGM limit point
    def sent_drone_to_limit(self, x, y):

        # Set Flag to Make Drone Image to be Received
        self.drone_take_image = True

        # Create Request with Points in Map of OGM Limits
        request = OgmLimitsRequest()
        request.x = x
        request.y = y

        # Call Service
        rospy.wait_for_service(self.drone_explore_limit_srv_name)
        response = None
        try:
            response = self.limits_exploration_service(request)
        except rospy.ServiceException, e:
            print "Service call failed: %s" % e

        # Wait until the drone image callback clears the flag
        while self.drone_take_image:
            rospy.sleep(0.05)

        # Return Once Image has been Received
        return response
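
velocitiesToNextSubtarget above wraps the heading error into [-pi, pi], normalizes it to an omega in [-1, 1], and shapes the commands with the power laws labelled "Nonlinear Relations Derived from Experimentation". Below is a standalone sketch of that mapping, assuming the 0.3 m/s and 0.3 rad/s limits used in the later examples; the exponent is written as 1.0 / 5 so Python 2 integer division does not silence the angular term.

import numpy as np

def velocities_to_subtarget(rx, ry, theta, st_x, st_y,
                            max_linear=0.3, max_angular=0.3):
    # Heading from the robot to the subtarget and the heading error
    theta_target = np.arctan2(st_y - ry, st_x - rx)
    dtheta = theta_target - theta

    # Wrap the error into [-pi, pi] and normalize to omega in [-1, 1]
    if dtheta > np.pi:
        omega = (dtheta - 2 * np.pi) / np.pi
    elif dtheta < -np.pi:
        omega = (dtheta + 2 * np.pi) / np.pi
    else:
        omega = dtheta / np.pi

    # Nonlinear shaping: drive fast only when well aligned, turn hard when not
    linear = max_linear * (1 - abs(omega)) ** 5
    angular = max_angular * np.sign(omega) * abs(omega) ** (1.0 / 5)
    return [linear, angular]

# e.g. a robot at (0, 0) facing 0 rad with a subtarget straight ahead at (10, 0)
# gets [0.3, 0.0]; a subtarget directly behind it yields a pure rotation command.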
class Navigation:

    # Constructor
    def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.target_selection = TargetSelection()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.inner_target_exists = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)

        # ROS Publisher for the path
        self.path_publisher = rospy.Publisher(rospy.get_param('path_pub_topic'), \
            Path, queue_size = 10)
        # ROS Publisher for the subtargets
        self.subtargets_publisher = rospy.Publisher(rospy.get_param('subgoals_pub_topic'),\
            MarkerArray, queue_size = 10)
        # ROS Publisher for the current target
        self.current_target_publisher = rospy.Publisher(rospy.get_param('curr_target_pub_topic'),\
            Marker, queue_size = 10)


    def checkTarget(self, event):
        # Check if we have a target or if the robot just wanders
        if self.inner_target_exists == False or self.move_with_target == False or\
                self.next_subtarget == len(self.subtargets):
          return

        # Get the robot pose in pixels
        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]

        # YOUR CODE HERE ------------------------------------------------------
        # Here the next subtarget is checked for proximity. If the robot is too
        # close to the subtarget it is considered approached and the next
        # subtarget is assigned as next. However there are cases where the
        # robot misses one subtarget due to obstacle detection. Enhance the below
        # functionality by checking all the subgoals (and the final goal) for
        # proximity and assign the proper next subgoal (one possible approach
        # is sketched after this example)

        # Find the distance between the robot pose and the next subtarget
        dist = math.hypot(\
            rx - self.subtargets[self.next_subtarget][0], \
            ry - self.subtargets[self.next_subtarget][1])

        # Check if distance is less than 7 px (14 cm)
        if dist < 7:
          print "Sub target reached!"
          self.next_subtarget += 1

          # Check if the final subtarget has been approached
          if self.next_subtarget == len(self.subtargets):
            print "Final goal reached!"
            self.target_exists = False

        # ---------------------------------------------------------------------

        # Publish the current target
        if self.next_subtarget == len(self.subtargets):
            return
        st = Marker()
        st.header.frame_id = "map"
        st.ns = 'as_curr_namespace'
        st.id = 0
        st.header.stamp = rospy.Time(0)
        st.type = 1 # Marker type 1 is a cube (0 would be an arrow)
        st.action = 0 # add
        st.pose.position.x = self.subtargets[self.next_subtarget][0]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['x']
        st.pose.position.y = self.subtargets[self.next_subtarget][1]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['y']

        st.color.r = 0
        st.color.g = 0
        st.color.b = 0.8
        st.color.a = 0.8
        st.scale.x = 0.2
        st.scale.y = 0.2
        st.scale.z = 0.2

        self.current_target_publisher.publish(st)

    # Function that selects the next target, produces the path and updates
    # the coverage field. This is called from the speeds assignment code, since
    # it contains timer callbacks
    def selectTarget(self):
        # IMPORTANT: The robot must be stopped if you call this function until
        # it is over
        # Check if we have a map
        while self.robot_perception.have_map == False:
          return

        print "Navigation: Producing new target"
        # We are good to continue the exploration
        # Make this true in order not to call it again from the speeds assignment
        self.target_exists = True

        # Manually update the coverage field
        self.robot_perception.updateCoverage()
        print "Navigation: Coverage updated"
      
        # Gets copies of the map and coverage
        local_ogm = self.robot_perception.getMap()
        local_coverage = self.robot_perception.getCoverage()
        print "Got the map and Coverage"
  
        # Call the target selection function to select the next best goal
        target = self.target_selection.selectTarget(\
            local_ogm,\
            local_coverage,\
            self.robot_perception.robot_pose)
        print "Navigation: New target: " + str(target)

        # Once the target has been found, find the path to it
        # Get the global robot pose
        g_robot_pose = self.robot_perception.getGlobalCoordinates(\
            [self.robot_perception.robot_pose['x_px'],\
            self.robot_perception.robot_pose['y_px']])

        # Need to reverse the path??
        self.path = self.path_planning.createPath(\
            local_ogm,\
            g_robot_pose,\
            target)
        print "Navigation: Path for target found with " + str(len(self.path)) +\
            " points"
        # Reverse the path to start from the robot
        self.path = self.path[::-1]

        # Break the path to subgoals every 10 pixels (0.5m)
        step = 10
        n_subgoals = int(len(self.path) / step)
        self.subtargets = []
        for i in range(0, n_subgoals):
          self.subtargets.append(self.path[i * step])
        self.subtargets.append(self.path[-1])
        self.next_subtarget = 0
        print "The path produced " + str(len(self.subtargets)) + " subgoals"

        # Publish the path for visualization purposes
        ros_path = Path()
        ros_path.header.frame_id = "map"
        for p in self.path:
          ps = PoseStamped()
          ps.header.frame_id = "map"
          ps.pose.position.x = p[0] * self.robot_perception.resolution + \
                  self.robot_perception.origin['x']
          ps.pose.position.y = p[1] * self.robot_perception.resolution + \
                  self.robot_perception.origin['y']
          ros_path.poses.append(ps)

        self.path_publisher.publish(ros_path)

        # Publish the subtargets for visualization purposes
        ros_subgoals = MarkerArray()
        count = 0
        for s in self.subtargets:
            st = Marker()
            st.header.frame_id = "map"
            st.ns = 'as_namespace'
            st.id = count
            st.header.stamp = rospy.Time(0)
            st.type = 2 # sphere
            st.action = 0 # add
            st.pose.position.x = s[0] * self.robot_perception.resolution + \
                    self.robot_perception.origin['x']
            st.pose.position.y = s[1] * self.robot_perception.resolution + \
                    self.robot_perception.origin['y']

            st.color.r = 0
            st.color.g = 0.8
            st.color.b = 0
            st.color.a = 0.8
            st.scale.x = 0.2
            st.scale.y = 0.2
            st.scale.z = 0.2
            ros_subgoals.markers.append(st)
            count += 1

        self.subtargets_publisher.publish(ros_subgoals)

        self.inner_target_exists = True

    def velocitiesToNextSubtarget(self):
        
        [linear, angular] = [0, 0]

        # YOUR CODE HERE ------------------------------------------------------
        # The velocities of the robot regarding the next subtarget should be 
        # computed. The known parameters are the robot pose [x,y,th] from 
        # robot_perception and the next_subtarget [x,y]. From these, you can 
        # compute the robot velocities for the vehicle to approach the target.
        # Hint: Trigonometry is required

        # ---------------------------------------------------------------------

        return [linear, angular]
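
The "YOUR CODE HERE" block in checkTarget above asks for a proximity check that still works when the robot skips a subtarget while avoiding an obstacle. One possible approach, which the later examples also take, is sketched below as a free function: drop already-visited subtargets, find the closest remaining one, and advance past it when it is within a threshold. The function name, return convention and 5-pixel threshold are assumptions for illustration only.

import math

def advance_subtarget(rx, ry, subtargets, next_subtarget, threshold_px=5):
    # Discard subtargets that were already reached so indices start from 0
    subtargets = subtargets[next_subtarget:]
    if not subtargets:
        return subtargets, 0, True  # nothing left: final goal reached

    # Distance from the robot to every remaining subtarget
    dists = [math.hypot(rx - s[0], ry - s[1]) for s in subtargets]
    min_idx = min(range(len(dists)), key=lambda i: dists[i])

    # If the closest one is close enough, the next subtarget is the one after
    # it; this also skips subtargets the robot passed while avoiding obstacles
    if dists[min_idx] < threshold_px:
        next_idx = min_idx + 1
    else:
        next_idx = 0

    goal_reached = (next_idx == len(subtargets))
    return subtargets, next_idx, goal_reached

checkTarget would then assign the returned list and index back to self.subtargets and self.next_subtarget, and clear self.target_exists once the final goal is reached.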
class Navigation:

    # Constructor
    def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.select_another_target = 0
        self.inner_target_exists = False
        
        self.TimeOut = False
        self.Reset = False
        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        self.count_limit = 230 # 23 sec

        self.counter_to_next_sub = self.count_limit

        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.1), self.checkTarget)
        
        # Read the target function
        self.target_selector = rospy.get_param("target_selector")
        print "The selected target function is " + self.target_selector
        self.target_selection = TargetSelection(self.target_selector)

        # ROS Publisher for the path
        self.path_publisher = \
            rospy.Publisher(rospy.get_param('path_pub_topic'), \
            Path, queue_size = 10)
        
        # ROS Publisher for the subtargets
        self.subtargets_publisher = \
            rospy.Publisher(rospy.get_param('subgoals_pub_topic'),\
            MarkerArray, queue_size = 10)
        
        # ROS Publisher for the current target
        self.current_target_publisher = \
            rospy.Publisher(rospy.get_param('curr_target_pub_topic'),\
            Marker, queue_size = 10)
        
    def checkTarget(self, event):
        # Check if we have a target or if the robot just wanders
        if self.inner_target_exists == False or self.move_with_target == False or\
                self.next_subtarget == len(self.subtargets):
          return

        self.counter_to_next_sub -= 1
        
        if self.counter_to_next_sub == 0:
          Print.art_print('\n~~~~ Time reset ~~~~',Print.RED) 
          self.inner_target_exists = False
          self.target_exists = False
          if self.Reset == False:
              self.TimeOut = True
          else:
              self.Reset = False
          return
        # Get the robot pose in pixels
        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]
        
        
        # remove visited sub targets
        self.subtargets = self.subtargets[self.next_subtarget:]
        
        # set the next sub target as the first one that should be visited
        self.next_subtarget = 0
        
        dis_x = [rx - target[0] for target in self.subtargets]
        dis_y = [ry - target[1] for target in self.subtargets]
        
        #for i in range(0,len(dis_x)):
		#	print " x dis " + str(dis_x[i])
		#	print " y dis " + str(dis_y[i])
        
        # target distances
        dist_t = [math.hypot(dis[0],dis[1]) for dis in zip(dis_x,dis_y) ]
        
        # find the closest target
        min_target = 0
        min_dis = dist_t[0]
        for i in range(0, len(dist_t)):
            if min_dis > dist_t[i]:
                min_dis = dist_t[i]
                min_target = i

        ######################### NOTE: QUESTION  ##############################
        # What if a later subtarget or the end has been reached before the 
        # next subtarget? Alter the code accordingly.
        # print " min target " + str(min_target) +  " min dis " + str(min_dis)
        # Check if the minimum distance is less than 5 px
        if min_dis < 5:
          self.next_subtarget = min_target + 1
          self.counter_to_next_sub = self.count_limit
          # print "REACHED"
          # print " next target " + str(self.next_subtarget) + " len " +str(len(self.subtargets))
          
          # Check if the final subtarget has been approached
          if self.next_subtarget == len(self.subtargets):
            self.target_exists = False
        ########################################################################
        
        # Publish the current target
        if self.next_subtarget == len(self.subtargets):
            return

        subtarget = [\
            self.subtargets[self.next_subtarget][0]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['x'],
            self.subtargets[self.next_subtarget][1]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['y']\
            ]

        RvizHandler.printMarker(\
            [subtarget],\
            1, # Type: Arrow
            0, # Action: Add
            "map", # Frame
            "art_next_subtarget", # Namespace
            [0, 0, 0.8, 0.8], # Color RGBA
            0.2 # Scale
        )

    # Function that selects the next target, produces the path and updates
    # the coverage field. This is called from the speeds assignment code, since
    # it contains timer callbacks
    def selectTarget(self):
        # IMPORTANT: The robot must be stopped if you call this function until
        # it is over
        # Check if we have a map
        
        
        while self.robot_perception.have_map == False:
          Print.art_print("Navigation: No map yet", Print.RED)
          return

        print "\nClearing all markers"
        RvizHandler.printMarker(\
            [[0, 0]],\
            1, # Type: Arrow
            3, # Action: delete all
            "map", # Frame
            "null", # Namespace
            [0,0,0,0], # Color RGBA
            0.1 # Scale
        )

        print '\n\n----------------------------------------------------------'
        print "Navigation: Producing new target"
        # We are good to continue the exploration
        # Make this true in order not to call it again from the speeds assignment
        self.target_exists = True            
        # Gets copies of the map and coverage
        local_ogm = self.robot_perception.getMap()
        local_ros_ogm = self.robot_perception.getRosMap()
        local_coverage = self.robot_perception.getCoverage()
        print "Got the map and Coverage"
        self.path_planning.setMap(local_ros_ogm) 

        # Once the target has been found, find the path to it
        # Get the global robot pose
        g_robot_pose = self.robot_perception.getGlobalCoordinates(\
              [self.robot_perception.robot_pose['x_px'],\
              self.robot_perception.robot_pose['y_px']])
        

        # Call the target selection function to select the next best goal
        # Choose target function
        
        print "TimeOut Flag " +str(self.TimeOut)
        self.path = []
        force_random = False
        
        while len(self.path) == 0:
          start = time.time()
          target = self.target_selection.selectTarget(\
                    local_ogm,\
                    local_coverage,\
                    self.robot_perception.robot_pose,
                    self.robot_perception.origin,
                    self.robot_perception.resolution,
                    force_random)
          
          self.path = self.path_planning.createPath(\
              g_robot_pose,\
              target,
              self.robot_perception.resolution)
          print "Navigation: Path for target found with " + str(len(self.path)) +\
              " points"
          if len(self.path) == 0:
            Print.art_print(\
                "Path planning failed. Fallback to random target selection", \
                Print.RED)
            force_random = True
        # Reverse the path to start from the robot
        self.path = self.path[::-1]
        
        tolerance = 0.000001
        weight_data = 0.5
        weight_smooth = 0.1
        new = deepcopy(self.path)
        dims = 2
        change = tolerance 
        
        # Path smoothing using gradient descent
        if len(self.path) > 3:
            print "PATH SMOOTHING"
            while change >= tolerance:
                change = 0.0
                for i in range(1, len(new) - 1):
                    for j in range(dims):
                        x_i = self.path[i][j]
                        y_i, y_prev, y_next = new[i][j], new[i - 1][j], new[i + 1][j]
                        y_i_saved = y_i
                        y_i += weight_data * (x_i - y_i) + weight_smooth * (y_next + y_prev - (2 * y_i))
                        new[i][j] = y_i
                        change += abs(y_i - y_i_saved)
            self.path = new

        # Break the path into subgoals every step pixels (here step = 1; 1 m = 20 px)
        step = 1
        n_subgoals = int(len(self.path) / step)
        self.subtargets = []
        for i in range(0, n_subgoals):
          self.subtargets.append(self.path[i * step])
        self.subtargets.append(self.path[-1])
        self.next_subtarget = 0
        print "The path produced " + str(len(self.subtargets)) + " subgoals"
        
        ######################### NOTE: QUESTION  ##############################
        # The path is produced by an A* algorithm. This means that it is
        # optimal in length but 1) not smooth and 2) length optimality
        # may not be desired for coverage-based exploration
        ########################################################################

        self.counter_to_next_sub = self.count_limit

        # Publish the path for visualization purposes
        ros_path = Path()
        ros_path.header.frame_id = "map"
        for p in self.path:
          ps = PoseStamped()
          ps.header.frame_id = "map"
          ######################### NOTE: QUESTION  ##############################
          # Fill the ps.pose.position values to show the path in RViz
          # You must understand what self.robot_perception.resolution
          # and self.robot_perception.origin are.
          
          # p[0] * resolution converts the path point's x grid coordinate (pixels)
          # into meters, since resolution is in meters/pixel. Adding the map
          # origin then expresses the point in the map frame, so the published
          # path starts at the robot's position in that frame.
          ps.pose.position.x = p[0] * self.robot_perception.resolution + self.robot_perception.origin['x']
          ps.pose.position.y = p[1] * self.robot_perception.resolution + self.robot_perception.origin['y']
          
        #  print " x val " + str(ps.pose.position.x)
        #  print " y  val " + str(ps.pose.position.y )
        #  print " origin x " + str(self.robot_perception.origin['x'])
        #  print " origin y " + str(self.robot_perception.origin['y'])
        #  print " p0 " +str(p[0])
        #  print " p1 " +str(p[1])
          ########################################################################
          ros_path.poses.append(ps)
        self.path_publisher.publish(ros_path)

        # Publish the subtargets for visualization purposes
        subtargets_mark = []
        for s in self.subtargets:
          subt = [
            s[0] * self.robot_perception.resolution + \
                    self.robot_perception.origin['x'],
            s[1] * self.robot_perception.resolution + \
                    self.robot_perception.origin['y']
            ]
          subtargets_mark.append(subt)

        RvizHandler.printMarker(\
            subtargets_mark,\
            2, # Type: Sphere
            0, # Action: Add
            "map", # Frame
            "art_subtargets", # Namespace
            [0, 0.8, 0.0, 0.8], # Color RGBA
            0.2 # Scale
        )

        self.inner_target_exists = True

    def velocitiesToNextSubtarget(self):
        
        [linear, angular] = [0, 0]
        
        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]
        theta = self.robot_perception.robot_pose['th']
        ######################### NOTE: QUESTION  ##############################
        # The velocities of the robot regarding the next subtarget should be 
        # computed. The known parameters are the robot pose [x,y,th] from 
        # robot_perception and the next_subtarget [x,y]. From these, you can 
        # compute the robot velocities for the vehicle to approach the target.
        # Hint: Trigonometry is required
        # If there is another subtarget, acquire its position
        if self.subtargets and self.next_subtarget <= len(self.subtargets) - 1:
            st_x = self.subtargets[self.next_subtarget][0]
            st_y = self.subtargets[self.next_subtarget][1]

            # align robot with the target
            theta_target = np.arctan2(st_y - ry, st_x - rx)
            delta_theta = (theta_target - theta)
            # find omega
            if delta_theta > np.pi:
                omega = (delta_theta - 2*np.pi)/np.pi
            elif delta_theta < -np.pi:
                omega = (delta_theta + 2*np.pi)/np.pi
            else:
                omega = delta_theta/np.pi
            # maximum magnitude for both velocities is 0.3
            linear = 0.3 * ((1 - np.abs(omega)) ** 5)
            angular = 0.3 * np.sign(omega)

        ######################### NOTE: QUESTION  ##############################

        return [linear, angular]
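
The smoothing loop in selectTarget above pulls every interior path point toward its original position (weight_data) and toward its neighbours (weight_smooth) until the accumulated update drops below a tolerance, leaving the endpoints fixed. A compact standalone sketch of the same idea follows; the next example implements an equivalent vectorized variant with numpy.

from copy import deepcopy

def smooth_path(path, weight_data=0.5, weight_smooth=0.1, tolerance=1e-6):
    # Gradient-descent style smoothing: endpoints stay fixed, interior points
    # are pulled toward the raw path (data term) and toward their neighbours
    # (smoothness term) until the updates become negligible.
    if len(path) <= 3:
        return path
    new = deepcopy(path)
    change = tolerance
    while change >= tolerance:
        change = 0.0
        for i in range(1, len(new) - 1):
            for j in range(len(new[i])):
                old = new[i][j]
                new[i][j] += weight_data * (path[i][j] - new[i][j]) + \
                    weight_smooth * (new[i - 1][j] + new[i + 1][j] - 2 * new[i][j])
                change += abs(new[i][j] - old)
    return new

# e.g. smooth_path([[0, 0], [0, 5], [5, 5], [5, 10]]) rounds off the two
# corners while keeping the first and last points unchanged.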
Example #5
class Navigation:

    # Constructor
    def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.select_another_target = 0
        self.inner_target_exists = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        self.count_limit = 200  # 20 sec

        self.counter_to_next_sub = self.count_limit

        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)

        # Read the target function
        self.target_selector = rospy.get_param("target_selector")
        print "The selected target function is " + self.target_selector
        self.target_selection = TargetSelection(self.target_selector)

        # ROS Publisher for the path
        self.path_publisher = \
            rospy.Publisher(rospy.get_param('path_pub_topic'), \
            Path, queue_size = 10)

        # ROS Publisher for the subtargets
        self.subtargets_publisher = \
            rospy.Publisher(rospy.get_param('subgoals_pub_topic'),\
            MarkerArray, queue_size = 10)

        # ROS Publisher for the current target
        self.current_target_publisher = \
            rospy.Publisher(rospy.get_param('curr_target_pub_topic'),\
            Marker, queue_size = 10)

    def checkTarget(self, event):
        # Check if we have a target or if the robot just wanders
        if self.inner_target_exists == False or self.move_with_target == False or\
                self.next_subtarget == len(self.subtargets):
            return

        self.counter_to_next_sub -= 1

        if self.counter_to_next_sub == 0:
            Print.art_print('\n~~~~ Time reset ~~~~', Print.RED)
            self.inner_target_exists = False
            self.target_exists = False
            return

        # Get the robot pose in pixels
        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]

        # Clear visited subtargets
        self.subtargets = self.subtargets[self.next_subtarget:]
        self.next_subtarget = 0

        # Calculate distance between the robot pose and all remaining subtargets
        dx = [rx - st[0] for st in self.subtargets]
        dy = [ry - st[1] for st in self.subtargets]
        dist = [math.hypot(v[0], v[1]) for v in zip(dx, dy)]

        # Check if any target is in distance
        min_dist, min_idx = min(zip(dist, range(len(dist))))

        ######################### NOTE: QUESTION  ##############################
        # What if a later subtarget or the end has been reached before the
        # next subtarget? Alter the code accordingly.
        # Check if the minimum distance is less than 5 px
        if min_dist < 5:
            self.next_subtarget = min_idx + 1
            self.counter_to_next_sub = self.count_limit
            # Check if the final subtarget has been approached
            if self.next_subtarget == len(self.subtargets):
                self.target_exists = False
        ########################################################################

        # Publish the current target
        if self.next_subtarget >= len(self.subtargets):
            return

        subtarget = [\
            self.subtargets[self.next_subtarget][0]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['x'],
            self.subtargets[self.next_subtarget][1]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['y']\
            ]

        RvizHandler.printMarker(\
            [subtarget],\
            1, # Type: Arrow
            0, # Action: Add
            "map", # Frame
            "art_next_subtarget", # Namespace
            [0, 0, 0.8, 0.8], # Color RGBA
            0.2 # Scale
        )

    # Function that selects the next target, produces the path and updates
    # the coverage field. This is called from the speeds assignment code, since
    # it contains timer callbacks
    def selectTarget(self):
        # IMPORTANT: The robot must be stopped if you call this function until
        # it is over
        # Check if we have a map
        while self.robot_perception.have_map == False:
            Print.art_print("Navigation: No map yet", Print.RED)
            return

        print "\nClearing all markers"
        RvizHandler.printMarker(\
            [[0, 0]],\
            1, # Type: Arrow
            3, # Action: delete all
            "map", # Frame
            "null", # Namespace
            [0,0,0,0], # Color RGBA
            0.1 # Scale
        )

        print '\n\n----------------------------------------------------------'
        print "Navigation: Producing new target"
        # We are good to continue the exploration
        # Make this true in order not to call it again from the speeds assignment
        self.target_exists = True

        # Gets copies of the map and coverage
        local_ogm = self.robot_perception.getMap()
        local_ros_ogm = self.robot_perception.getRosMap()
        local_coverage = self.robot_perception.getCoverage()
        print "Got the map and Coverage"
        self.path_planning.setMap(local_ros_ogm)

        # Once the target has been found, find the path to it
        # Get the global robot pose
        g_robot_pose = self.robot_perception.getGlobalCoordinates(\
              [self.robot_perception.robot_pose['x_px'],\
              self.robot_perception.robot_pose['y_px']])

        # Call the target selection function to select the next best goal
        # Choose target function
        self.path = []
        force_random = False
        while len(self.path) == 0:
            start = time.time()
            target = self.target_selection.selectTarget(\
                      local_ogm,\
                      local_ros_ogm,\
                      local_coverage,\
                      self.robot_perception.robot_pose,
                      self.robot_perception.origin,
                      self.robot_perception.resolution,
                      force_random)

            self.path = self.path_planning.createPath(\
                g_robot_pose,\
                target,
                self.robot_perception.resolution)
            print "Navigation: Path for target found with " + str(len(self.path)) +\
                " points"
            if len(self.path) == 0:
                Print.art_print(\
                    "Path planning failed. Fallback to random target selection", \
                    Print.RED)
                force_random = True

        # Reverse the path to start from the robot
        self.path = self.path[::-1]

        # Smooth path
        if len(self.path) > 3:
            x = np.array(self.path)
            y = np.copy(x)
            a = 0.5
            b = 0.2
            e = np.sum(
                np.abs(a * (x[1:-1, :] - y[1:-1, :]) + b *
                       (y[2:, :] - 2 * y[1:-1, :] + y[:-2, :])))
            while e > 1e-3:
                y[1:-1, :] += a * (x[1:-1, :] - y[1:-1, :]) + b * (
                    y[2:, :] - 2 * y[1:-1, :] + y[:-2, :])
                e = np.sum(
                    np.abs(a * (x[1:-1, :] - y[1:-1, :]) + b *
                           (y[2:, :] - 2 * y[1:-1, :] + y[:-2, :])))
            self.path = y.tolist()

        # Break the path into subgoals every step pixels (here step = 1; 1 m = 20 px)
        step = 1
        n_subgoals = int(len(self.path) / step)
        self.subtargets = []
        for i in range(0, n_subgoals):
            self.subtargets.append(self.path[i * step])
        self.subtargets.append(self.path[-1])
        self.next_subtarget = 0
        print "The path produced " + str(len(self.subtargets)) + " subgoals"

        ######################### NOTE: QUESTION  ##############################
        # The path is produced by an A* algorithm. This means that it is
        # optimal in length but 1) not smooth (addressed by the smoothing
        # step just above) and 2) length optimality may not be desired for
        # coverage-based exploration
        ########################################################################

        self.counter_to_next_sub = self.count_limit

        # Publish the path for visualization purposes
        ros_path = Path()
        ros_path.header.frame_id = "map"
        for p in self.path:
            ps = PoseStamped()
            ps.header.frame_id = "map"

            ######################### NOTE: QUESTION  ##############################
            # Fill the ps.pose.position values to show the path in RViz
            # You must understand what self.robot_perception.resolution
            # and self.robot_perception.origin are.
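            # Each path point p is a map-cell index, so the world coordinates
            # are cell * resolution (metres per cell) plus the map origin
            # offset, as computed below.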

            ps.pose.position.x = p[0] * self.robot_perception.resolution + \
                self.robot_perception.origin['x']
            ps.pose.position.y = p[1] * self.robot_perception.resolution + \
                self.robot_perception.origin['y']

            ########################################################################
            ros_path.poses.append(ps)
        self.path_publisher.publish(ros_path)

        # Publish the subtargets for visualization purposes
        subtargets_mark = []
        for s in self.subtargets:
            subt = [
              s[0] * self.robot_perception.resolution + \
                      self.robot_perception.origin['x'],
              s[1] * self.robot_perception.resolution + \
                      self.robot_perception.origin['y']
              ]
            subtargets_mark.append(subt)

        RvizHandler.printMarker(\
            subtargets_mark,\
            2, # Type: Sphere
            0, # Action: Add
            "map", # Frame
            "art_subtargets", # Namespace
            [0, 0.8, 0.0, 0.8], # Color RGBA
            0.2 # Scale
        )

        self.inner_target_exists = True

    def velocitiesToNextSubtarget(self):

        [linear, angular] = [0, 0]

        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]
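        # rx, ry: the robot pose expressed in map-grid cells, i.e. the same
        # units as the subtargets. This assumes robot_pose['x_px'] is the
        # pose in pixels measured from the world origin, hence the
        # origin / resolution offset subtracted above.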
        theta = self.robot_perception.robot_pose['th']
        ######################### NOTE: QUESTION  ##############################
        # The velocities of the robot regarding the next subtarget should be
        # computed. The known parameters are the robot pose [x,y,th] from
        # robot_perception and the next_subtarget [x,y]. From these, you can
        # compute the robot velocities for the vehicle to approach the target.
        # Hint: Trigonometry is required

        l_max = 0.3
        a_max = 0.3

        if self.subtargets and self.next_subtarget <= len(self.subtargets) - 1:
            st_x = self.subtargets[self.next_subtarget][0]
            st_y = self.subtargets[self.next_subtarget][1]

            # Fix robot turning problem
            theta_r = np.arctan2(st_y - ry, st_x - rx)
            dtheta = (theta_r - theta)
            if dtheta > np.pi:
                omega = (dtheta - 2 * np.pi) / np.pi
            elif dtheta < -np.pi:
                omega = (dtheta + 2 * np.pi) / np.pi
            else:
                omega = (theta_r - theta) / np.pi
            # Velocities in [-0.3, 0.3] range
            linear = l_max * ((1 - np.abs(omega))**4)
            angular = a_max * np.sign(omega) * (abs(omega)**(1 / 3))
            # omega = (theta_r - theta)/np.pi
            # angular = omega * 0.3 # max angular = 0.3 rad/s
            # linear_r = 1 - np.abs(omega) #may need square
            # linear = linear_r * 0.3 # max linear = 0.3 m/s
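            # Shaping rationale: omega is the heading error normalised to
            # [-1, 1]. The quartic factor cuts the linear speed sharply when
            # the robot is badly misaligned, while the cube root boosts the
            # angular command for small errors so the heading keeps being
            # corrected.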

        ######################### NOTE: QUESTION  ##############################

        return [linear, angular]
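
# --- Usage sketch (not part of the original example) -----------------------
# A minimal outline of how a speeds-assignment node might drive the class
# above. The '/cmd_vel' topic name and the 10 Hz rate are assumptions made
# purely for illustration; the real driver code is not shown here.
import rospy
from geometry_msgs.msg import Twist

def speeds_assignment_loop(navigation):
    cmd_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)  # assumed topic
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        twist = Twist()
        if not navigation.target_exists:
            # The robot must be stopped while a new target is being selected
            cmd_pub.publish(twist)  # zero velocities
            navigation.selectTarget()
        else:
            linear, angular = navigation.velocitiesToNextSubtarget()
            twist.linear.x = linear
            twist.angular.z = angular
            cmd_pub.publish(twist)
        rate.sleep()
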
Example #6
class Navigation:

    # Constructor
    def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.select_another_target = 0
        self.inner_target_exists = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        self.count_limit = 200  # 20 sec

        self.counter_to_next_sub = self.count_limit

        self.laser_aggregation = LaserDataAggregator()
        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)

        # Read the target function
        self.target_selector = rospy.get_param("target_selector")
        print "The selected target function is" + self.target_selector
        self.target_selection = TargetSelection(self.target_selector)

        # ROS Publisher for the path
        self.path_publisher = \
            rospy.Publisher(rospy.get_param('path_pub_topic'), \
            Path, queue_size = 10)

        # ROS Publisher for the subtargets
        self.subtargets_publisher = \
            rospy.Publisher(rospy.get_param('subgoals_pub_topic'),\
            MarkerArray, queue_size = 10)

        # ROS Publisher for the current target
        self.current_target_publisher = \
            rospy.Publisher(rospy.get_param('curr_target_pub_topic'),\
            Marker, queue_size = 10)

    def checkTarget(self, event):
        # Check if we have a target or if the robot just wanders
        if self.inner_target_exists == False or self.move_with_target == False or\
                self.next_subtarget == len(self.subtargets):
            return

        self.counter_to_next_sub -= 1

        if self.counter_to_next_sub == 0:
            Print.art_print('\n~~~~ Time reset ~~~~', Print.RED)
            self.inner_target_exists = False
            self.target_exists = False
            return

        # Get the robot pose in pixels
        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]

        # Find the distance between the robot pose and the next subtarget
        dist = math.hypot(\
            rx - self.subtargets[self.next_subtarget][0], \
            ry - self.subtargets[self.next_subtarget][1])

        ######################### NOTE: QUESTION  ##############################
        # What if a later subtarget or the end has been reached before the
        # next subtarget? Alter the code accordingly.
        # Check if distance is less than 7 px (14 cm)
        # Compare the slopes of consecutive segments to count how many of the
        # upcoming subtargets lie (approximately) on the same straight line
        count = 1
        for i in range(self.next_subtarget, len(self.subtargets) - 2):
            # try/except handles vertical segments (ZeroDivisionError)
            try:
                slope1 = (self.subtargets[i + 1][1] - self.subtargets[i][1]) / \
                         (self.subtargets[i + 1][0] - self.subtargets[i][0])
            except ZeroDivisionError:
                slope1 = float('Inf')
            try:
                slope2 = (self.subtargets[i + 2][1] - self.subtargets[i + 1][1]) / \
                         (self.subtargets[i + 2][0] - self.subtargets[i + 1][0])
            except ZeroDivisionError:
                slope2 = float('Inf')

            # If the slopes differ by less than 0.3 (or both segments are
            # vertical) the points are considered collinear
            if abs(slope1 - slope2) < 0.3 or \
                    (slope1 == float('inf') and slope2 == float('inf')):
                count += 1
            else:
                break

        scan = self.laser_aggregation.laser_scan  # latest laser measurements
        # Count the forward-facing beams (indices 250-450, chosen after
        # experiments) whose range exceeds 2 m, i.e. roughly free space ahead
        count_scan = 0
        for i in range(250, 450):
            if scan[i] > 2:
                count_scan += 1
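
        # Decision logic below: once the robot is within 5 cells of the next
        # subtarget, jump to the last collinear subgoal when count > 1, skip
        # an extra subgoal when the area ahead is largely free
        # (count_scan > 10), or otherwise advance by one; the timeout counter
        # is extended in each case so the longer hop does not cause a reset.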

        if dist < 5:
            if count != 1:
                # Several upcoming subtargets lie on the same line: jump
                # straight to the last one of that line
                self.next_subtarget += count
                # Extend the timeout proportionally so the longer hop does
                # not trigger a time reset
                self.counter_to_next_sub = self.count_limit + count * 100
            elif count_scan > 10:
                # The area ahead is largely free, so skip one extra subtarget
                # to speed up the traversal (this covers the count == 1 case)
                self.next_subtarget += 2
                self.counter_to_next_sub = self.count_limit + 300
                # Check if the final subtarget has been approached
                if self.next_subtarget >= len(self.subtargets):
                    self.next_subtarget = len(self.subtargets)
            else:
                # Obstacles ahead: advance a single subtarget
                self.next_subtarget += 1
                self.counter_to_next_sub = self.count_limit + 300
                # Check if the final subtarget has been approached
                if self.next_subtarget >= len(self.subtargets):
                    self.next_subtarget = len(self.subtargets)

            if self.next_subtarget == len(self.subtargets):
                self.target_exists = False
        # Publish the current target
        if self.next_subtarget == len(self.subtargets):
            return

        subtarget = [\
            self.subtargets[self.next_subtarget][0]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['x'],
            self.subtargets[self.next_subtarget][1]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['y']\
            ]

        RvizHandler.printMarker(\
            [subtarget],\
            1, # Type: Arrow
            0, # Action: Add
            "map", # Frame
            "art_next_subtarget", # Namespace
            [0, 0, 0.8, 0.8], # Color RGBA
            0.2 # Scale
        )

    # Function that selects the next target, produces the path and updates
    # the coverage field. This is called from the speeds assignment code, since
    # it contains timer callbacks
    def selectTarget(self):
        # IMPORTANT: The robot must be stopped if you call this function until
        # it is over
        # Check if we have a map
        while self.robot_perception.have_map == False:
            Print.art_print("Navigation: No map yet", Print.RED)
            return

        print "\nClearing all markers"
        RvizHandler.printMarker(\
            [[0, 0]],\
            1, # Type: Arrow
            3, # Action: delete all
            "map", # Frame
            "null", # Namespace
            [0,0,0,0], # Color RGBA
            0.1 # Scale
        )

        print '\n\n----------------------------------------------------------'
        print "Navigation: Producing new target"
        # We are good to continue the exploration
        # Make this true in order not to call it again from the speeds assignment
        self.target_exists = True

        # Gets copies of the map and coverage
        local_ogm = self.robot_perception.getMap()
        local_ros_ogm = self.robot_perception.getRosMap()
        local_coverage = self.robot_perception.getCoverage()
        print "Got the map and Coverage"
        self.path_planning.setMap(local_ros_ogm)

        # Once the target has been found, find the path to it
        # Get the global robot pose
        g_robot_pose = self.robot_perception.getGlobalCoordinates(\
              [self.robot_perception.robot_pose['x_px'],\
              self.robot_perception.robot_pose['y_px']])

        # Call the target selection function to select the next best goal
        # Choose target function
        self.path = []
        force_random = False
        while len(self.path) == 0:
            start = time.time()
            target = self.target_selection.selectTarget(\
                      local_ogm,\
                      local_coverage,\
                      self.robot_perception.robot_pose,
                      self.robot_perception.origin,
                      self.robot_perception.resolution,
                      force_random)

            self.path = self.path_planning.createPath(\
                g_robot_pose,\
                target,
                self.robot_perception.resolution)
            print "Navigation: Path for target found with " + str(len(self.path)) +\
                " points"
            if len(self.path) == 0:
                Print.art_print(\
                    "Path planning failed. Fallback to random target selection", \
                    Print.RED)
                force_random = True

        # Reverse the path to start from the robot
        self.path = self.path[::-1]

        # Break the path into subgoals every `step` pixels (1 m = 20 px)
        step = 1
        n_subgoals = (int)(len(self.path) / step)
        self.subtargets = []
        for i in range(0, n_subgoals):
            self.subtargets.append(self.path[i * step])
        self.subtargets.append(self.path[-1])
        self.next_subtarget = 0
        print "The path produced " + str(len(self.subtargets)) + " subgoals"

        ######################### NOTE: QUESTION  ##############################
        # The path is produced by an A* algorithm. This means that it is
        # optimal in length but 1) not smooth and 2) length optimality
        # may not be desired for coverage-based exploration
        ########################################################################

        self.counter_to_next_sub = self.count_limit

        # Publish the path for visualization purposes
        ros_path = Path()
        ros_path.header.frame_id = "map"
        for p in self.path:
            ps = PoseStamped()
            ps.header.frame_id = "map"
            ps.pose.position.x = 0
            ps.pose.position.y = 0
            ######################### NOTE: QUESTION  ##############################
            # Fill the ps.pose.position values to show the path in RViz
            # You must understand what self.robot_perception.resolution
            # and self.robot_perception.origin are.

            # p[0] and p[1] are cell coordinates, so multiply by the map
            # resolution and add the origin offset to obtain world coordinates
            ps.pose.position.x = p[0] * self.robot_perception.resolution + \
                self.robot_perception.origin['x']
            ps.pose.position.y = p[1] * self.robot_perception.resolution + \
                self.robot_perception.origin['y']
            ########################################################################
            ros_path.poses.append(ps)
        self.path_publisher.publish(ros_path)

        # Publish the subtargets for visualization purposes
        subtargets_mark = []
        for s in self.subtargets:
            subt = [
              s[0] * self.robot_perception.resolution + \
                      self.robot_perception.origin['x'],
              s[1] * self.robot_perception.resolution + \
                      self.robot_perception.origin['y']
              ]
            subtargets_mark.append(subt)

        RvizHandler.printMarker(\
            subtargets_mark,\
            2, # Type: Sphere
            0, # Action: Add
            "map", # Frame
            "art_subtargets", # Namespace
            [0, 0.8, 0.0, 0.8], # Color RGBA
            0.2 # Scale
        )

        self.inner_target_exists = True

    def velocitiesToNextSubtarget(self):

        [linear, angular] = [0, 0]

        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]
        theta = self.robot_perception.robot_pose['th']
        ######################### NOTE: QUESTION  ##############################
        # The velocities of the robot regarding the next subtarget should be
        # computed. The known parameters are the robot pose [x,y,th] from
        # robot_perception and the next_subtarget [x,y]. From these, you can
        # compute the robot velocities for the vehicle to approach the target.
        # Hint: Trigonometry is required
        if self.subtargets and self.next_subtarget <= len(self.subtargets) - 1:
            st_x = self.subtargets[self.next_subtarget][0]
            st_y = self.subtargets[self.next_subtarget][1]

            ######################### NOTE: QUESTION  ##############################
            # theta is the robot yaw; express it as a unit vector and recover
            # the angle in degrees, then compute the heading error towards the
            # subtarget
            x = math.cos(theta)
            y = math.sin(theta)
            deg = math.degrees(math.atan2(y, x))
            delta_theta = math.degrees(math.atan2(st_y - ry, st_x - rx)) - deg

            if 0 <= delta_theta < 180:
                angular = delta_theta / 180
            elif delta_theta >= 180:
                angular = (delta_theta - 360) / 180
            elif -180 <= delta_theta < 0:
                angular = delta_theta / 180
            else:  # delta_theta < -180
                angular = (delta_theta + 360) / 180

            # Reduce the linear velocity sharply as the heading error grows;
            # the exponent was tuned experimentally to avoid overshooting
            linear = (1 - abs(angular))**25
            linear = linear * 0.3  # scale to the 0.3 m/s maximum
            # The angular command is deliberately not scaled by the 0.3 rad/s
            # maximum (that made the robot much slower); it is only clamped
            if angular > 0.3:
                angular = 0.3
            elif angular < -0.3:
                angular = -0.3
        ######################### NOTE: QUESTION  ##############################

        return [linear, angular]
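
# A compact alternative to the degree-range bookkeeping above (a sketch, not
# part of the original example): wrap the heading error once, in radians.
import math

def wrap_to_pi(angle):
    # Fold any angle into [-pi, pi]; atan2 of (sin, cos) does the wrapping
    return math.atan2(math.sin(angle), math.cos(angle))

# e.g. angular = wrap_to_pi(theta_target - theta) / math.pi, then clamp to
# the +/-0.3 limit as above.
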
class Navigation:

    # Constructor
    def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.select_another_target = 0
        self.inner_target_exists = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        self.count_limit = 200 # 20 sec

        self.counter_to_next_sub = self.count_limit

        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)
        
        # Read the target function
        self.target_selector = rospy.get_param("target_selector")
        print "The selected target function is " + self.target_selector
        self.target_selection = TargetSelection(self.target_selector)

        # ROS Publisher for the path
        self.path_publisher = \
            rospy.Publisher(rospy.get_param('path_pub_topic'), \
            Path, queue_size = 10)
        
        # ROS Publisher for the subtargets
        self.subtargets_publisher = \
            rospy.Publisher(rospy.get_param('subgoals_pub_topic'),\
            MarkerArray, queue_size = 10)
        
        # ROS Publisher for the current target
        self.current_target_publisher = \
            rospy.Publisher(rospy.get_param('curr_target_pub_topic'),\
            Marker, queue_size = 10)
        
    def checkTarget(self, event):
        # Check if we have a target or if the robot just wanders
        if self.inner_target_exists == False or self.move_with_target == False or\
                self.next_subtarget == len(self.subtargets):
          return

        self.counter_to_next_sub -= 1

        if self.counter_to_next_sub == 0:
          Print.art_print('\n~~~~ Time reset ~~~~',Print.RED) 
          self.inner_target_exists = False
          self.target_exists = False
          return

        # Get the robot pose in pixels
        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]

        # Find the distance between the robot pose and the next subtarget
        #dist = math.hypot(\
        #    rx - self.subtargets[self.next_subtarget][0], \
        #    ry - self.subtargets[self.next_subtarget][1])

        ######################### NOTE: QUESTION  ##############################
        # What if a later subtarget or the end has been reached before the 
        # next subtarget? Alter the code accordingly.
        # Check if distance is less than 7 px (14 cm)
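        # Iterate over the remaining subtargets from the farthest one
        # backwards, so that if the robot has already reached a later
        # subgoal all the intermediate ones are skipped in a single step.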
        for i, target in\
                reversed(list(enumerate(self.subtargets[self.next_subtarget:]))):
          # Find the distance between the robot pose and the next subtarget
          dist = math.hypot(rx - target[0], ry - target[1])

          if dist < 5:
            self.next_subtarget += i + 1
            self.counter_to_next_sub = self.count_limit

            # Check if the final subtarget has been approached
            if self.next_subtarget >= len(self.subtargets):
              self.target_exists = False
            # Stop at the farthest subtarget already reached; without this
            # break the nearer (already passed) subtargets would advance the
            # index again
            break
        ########################################################################
        
        # Publish the current target
        if self.next_subtarget >= len(self.subtargets):
            return

        subtarget = [\
            self.subtargets[self.next_subtarget][0]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['x'],
            self.subtargets[self.next_subtarget][1]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['y']\
            ]

        RvizHandler.printMarker(\
            [subtarget],\
            1, # Type: Arrow
            0, # Action: Add
            "map", # Frame
            "art_next_subtarget", # Namespace
            [0, 0, 0.8, 0.8], # Color RGBA
            0.2 # Scale
        )

    # Function that selects the next target, produces the path and updates
    # the coverage field. This is called from the speeds assignment code, since
    # it contains timer callbacks
    def selectTarget(self):
        # IMPORTANT: The robot must be stopped if you call this function until
        # it is over
        # Check if we have a map
        while self.robot_perception.have_map == False:
          Print.art_print("Navigation: No map yet", Print.RED)
          return

        print "\nClearing all markers"
        RvizHandler.printMarker(\
            [[0, 0]],\
            1, # Type: Arrow
            3, # Action: delete all
            "map", # Frame
            "null", # Namespace
            [0,0,0,0], # Color RGBA
            0.1 # Scale
        )

        print '\n\n----------------------------------------------------------'
        print "Navigation: Producing new target"
        # We are good to continue the exploration
        # Make this true in order not to call it again from the speeds assignment
        self.target_exists = True
              
        # Gets copies of the map and coverage
        local_ogm = self.robot_perception.getMap()
        local_ros_ogm = self.robot_perception.getRosMap()
        local_coverage = self.robot_perception.getCoverage()
        print "Got the map and Coverage"
        self.path_planning.setMap(local_ros_ogm) 

        # Once the target has been found, find the path to it
        # Get the global robot pose
        g_robot_pose = self.robot_perception.getGlobalCoordinates(\
              [self.robot_perception.robot_pose['x_px'],\
              self.robot_perception.robot_pose['y_px']])

        # Call the target selection function to select the next best goal
        # Choose target function
        self.path = []
        force_random = False
        while len(self.path) == 0:
          start = time.time()
          target = self.target_selection.selectTarget(\
                    local_ogm,\
                    local_coverage,\
                    self.robot_perception.robot_pose,
                    self.robot_perception.origin,
                    self.robot_perception.resolution, 
                    force_random)
          
          self.path = self.path_planning.createPath(\
              g_robot_pose,\
              target,
              self.robot_perception.resolution)
          print "Navigation: Path for target found with " + str(len(self.path)) +\
              " points"
          if len(self.path) == 0:
            Print.art_print(\
                "Path planning failed. Fallback to random target selection", \
                Print.RED)
            force_random = True
          
        # Reverse the path to start from the robot
        self.path = self.path[::-1]

        # Break the path into subgoals every `step` pixels (1 m = 20 px)
        step = 1
        n_subgoals = (int) (len(self.path)/step)
        self.subtargets = []
        for i in range(0, n_subgoals):
          self.subtargets.append(self.path[i * step])
        self.subtargets.append(self.path[-1])
        self.next_subtarget = 0
        print "The path produced " + str(len(self.subtargets)) + " subgoals"
        
        ######################### NOTE: QUESTION  ##############################
        # The path is produced by an A* algorithm. This means that it is
        # optimal in length but 1) not smooth and 2) length optimality
        # may not be desired for coverage-based exploration
        ########################################################################

        self.counter_to_next_sub = self.count_limit

        # Publish the path for visualization purposes
        ros_path = Path()
        ros_path.header.frame_id = "map"
        print(self.path)
        for p in self.path:
          ps = PoseStamped()
          ps.header.frame_id = "map"
          ps.pose.position.x = 0
          ps.pose.position.y = 0
          ######################### NOTE: QUESTION  ##############################
          # Fill the ps.pose.position values to show the path in RViz
          # You must understand what self.robot_perception.resolution
          # and self.robot_perception.origin are.
          ps.pose.position.x = p[0]*self.robot_perception.resolution + \
                               self.robot_perception.origin['x']
          ps.pose.position.y = p[1]*self.robot_perception.resolution + \
                               self.robot_perception.origin['y']
        
          ########################################################################
          ros_path.poses.append(ps)
        self.path_publisher.publish(ros_path)

        # Publish the subtargets for visualization purposes
        subtargets_mark = []
        for s in self.subtargets:
          subt = [
            s[0] * self.robot_perception.resolution + \
                    self.robot_perception.origin['x'],
            s[1] * self.robot_perception.resolution + \
                    self.robot_perception.origin['y']
            ]
          subtargets_mark.append(subt)

        RvizHandler.printMarker(\
            subtargets_mark,\
            2, # Type: Sphere
            0, # Action: Add
            "map", # Frame
            "art_subtargets", # Namespace
            [0, 0.8, 0.0, 0.8], # Color RGBA
            0.2 # Scale
        )

        self.inner_target_exists = True

    def velocitiesToNextSubtarget(self):
        
        [linear, angular] = [0, 0]
        
        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]
        theta = self.robot_perception.robot_pose['th']
        ######################### NOTE: QUESTION  ##############################
        # The velocities of the robot regarding the next subtarget should be 
        # computed. The known parameters are the robot pose [x,y,th] from 
        # robot_perception and the next_subtarget [x,y]. From these, you can 
        # compute the robot velocities for the vehicle to approach the target.
        # Hint: Trigonometry is required

        if self.subtargets and self.next_subtarget <= len(self.subtargets) - 1:
            st_x = self.subtargets[self.next_subtarget][0]
            st_y = self.subtargets[self.next_subtarget][1]
            
            # Transform the target coordinates into the robot frame
            st_xv =  (st_x - rx)*math.cos(theta) + (st_y - ry)*math.sin(theta)
            st_yv = -(st_x - rx)*math.sin(theta) + (st_y - ry)*math.cos(theta)

            # Convert target coordinates to polar
            r_g = math.sqrt(st_xv**2 + st_yv**2)
            ph_g = math.atan2(st_yv, st_xv)

            # Radius of the arc through the robot and the target that is
            # tangent to the current heading; a target dead ahead (ph_g = 0)
            # makes sin(ph_g) zero, which is caught below
            try:
                r_c = r_g / (2*math.sin(ph_g))
            except ZeroDivisionError:
                return [linear, angular]

            power = 2
            if ph_g > math.pi/2:
                # Linear percentage
                lin_prc = (1 - (math.pi - ph_g) / (math.pi/2)) ** power
                linear = min(lin_prc * 0.3 + 0.05, 0.3)

                th = 2 * (ph_g - math.pi)
                linear = -linear
            elif ph_g < -math.pi/2:
                # Linear percentage
                lin_prc = (1 - (math.pi + ph_g) / (math.pi/2)) ** power
                linear = min(lin_prc * 0.3 + 0.05, 0.3)

                th = 2 * (ph_g + math.pi)
                linear = -linear
            else:
                # Linear percentage
                lin_prc = (1 - abs(ph_g) / (math.pi/2)) ** power 
                linear = min(lin_prc * 0.3 + 0.05, 0.3)

                th = 2 * ph_g

            # The path's arc
            L = r_c*th * self.robot_perception.resolution

            # Find angular velocity from linear
            dt = L / linear
            angular = min(th / dt, 0.3)
            angular = max(angular, -0.3)
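            # Geometry recap: r_c is the radius (in cells) of the arc through
            # the robot and the subtarget that is tangent to the current
            # heading, th is the heading change along that arc and L its
            # length in metres, so angular = th / (L / linear) sweeps the arc
            # at the chosen linear speed (clamped to the +/-0.3 rad/s limit).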

        ######################### NOTE: QUESTION  ##############################

        return [linear, angular]
class TargetSelection:
    # Constructor
    def __init__(self, selection_method):
        self.goals_position = []
        self.goals_value = []
        self.path = []
        self.prev_target = [0, 0]
        self.omega = 0.0
        self.radius = 0
        self.method = selection_method

        self.brush = Brushfires()
        self.topo = Topology()
        self.path_planning = PathPlanning()
        self.robot_perception = RobotPerception()  # can I use that?

    def selectTarget(self, init_ogm, coverage, robot_pose, origin, \
                     resolution, force_random=False):

        target = [-1, -1]

        ######################### NOTE: QUESTION  ##############################
        # Implement a smart way to select the next target. You have the
        # following tools: ogm_limits, Brushfire field, OGM skeleton,
        # topological nodes.

        # Find only the useful boundaries of OGM. Only there calculations
        # have meaning
        ogm_limits = OgmOperations.findUsefulBoundaries(
            init_ogm, origin, resolution)

        # Blur the OGM to erase discontinuities due to laser rays
        ogm = OgmOperations.blurUnoccupiedOgm(init_ogm, ogm_limits)

        # Calculate Brushfire field
        tinit = time.time()
        brush = self.brush.obstaclesBrushfireCffi(ogm, ogm_limits)
        Print.art_print("Brush time: " + str(time.time() - tinit),
                        Print.ORANGE)

        # Calculate skeletonization
        tinit = time.time()
        skeleton = self.topo.skeletonizationCffi(ogm, \
                                                 origin, resolution, ogm_limits)
        Print.art_print("Skeletonization time: " + str(time.time() - tinit),
                        Print.ORANGE)

        # Find topological graph
        tinit = time.time()
        nodes = self.topo.topologicalNodes(ogm, skeleton, coverage, origin, \
                                           resolution, brush, ogm_limits)
        Print.art_print("Topo nodes time: " + str(time.time() - tinit),
                        Print.ORANGE)
        print len(nodes)

        # Visualization of topological nodes
        vis_nodes = []
        for n in nodes:
            vis_nodes.append([
                n[0] * resolution + origin['x'],
                n[1] * resolution + origin['y']
            ])
        RvizHandler.printMarker( \
            vis_nodes, \
            1,  # Type: Arrow
            0,  # Action: Add
            "map",  # Frame
            "art_topological_nodes",  # Namespace
            [0.3, 0.4, 0.7, 0.5],  # Color RGBA
            0.1  # Scale
        )

        # Random point
        if self.method == 'random' or force_random == True:
            target = self.selectRandomTarget(ogm, coverage, brush, ogm_limits)

        # CTN
        if self.method == 'CTN':
            target = self.selectNearestTopologyNode(robot_pose=robot_pose,
                                                    resolution=resolution,
                                                    nodes=nodes)
            if target is None:
                target = self.selectRandomTarget(ogm, coverage, brush,
                                                 ogm_limits)
        # target = self.selectNearestTopologyNode(robot_pose=robot_pose, resolution=resolution, nodes=nodes, ogm=ogm,
        #                                         coverage=coverage, brushogm=brush)
        ########################################################################

        return target

    def selectRandomTarget(self, ogm, coverage, brushogm, ogm_limits):
        # The next target in pixels
        tinit = time.time()
        next_target = [0, 0]
        found = False
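        # Rejection sampling: keep drawing random cells until one is free
        # (ogm < 50), not yet covered (coverage < 50) and at least a few
        # cells away from the nearest obstacle (brushfire value > 5).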
        while not found:
            x_rand = random.randint(0, ogm.shape[0] - 1)
            y_rand = random.randint(0, ogm.shape[1] - 1)
            if ogm[x_rand][y_rand] < 50 and coverage[x_rand][y_rand] < 50 and \
                            brushogm[x_rand][y_rand] > 5:
                next_target = [x_rand, y_rand]
                found = True
        Print.art_print(
            "Select random target time: " + str(time.time() - tinit),
            Print.ORANGE)
        return next_target

    # Way too slow; maybe I didn't quite understand the weighted path finding?
    def selectNearestTopologyNode(self, robot_pose, resolution, nodes):
        # The next target in pixels
        tinit = time.time()
        next_target = [0, 0]
        w_dist = 10000
        x_g, y_g = 500, 500
        sigma = 100
        g_robot_pose = self.robot_perception.getGlobalCoordinates(
            [robot_pose['x_px'], robot_pose['y_px']])  # can I use that?
        for node in nodes:
            # print resolution
            self.path = self.path_planning.createPath(
                g_robot_pose, node, resolution)  # can I use that?
            if len(self.path) == 0:
                break
            x_n, y_n = node[0], node[1]
            exp = ((x_n - x_g)**2 + (y_n - y_g)**2) / (2 * (sigma**2))
            scale_factor = 1 / (1 - math.exp(-exp) + 0.01)
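            # scale_factor is largest for nodes close to (x_g, y_g)
            # (presumably the map centre, in cells): there exp -> 0, the
            # denominator approaches 0.01 and the factor approaches 100,
            # which strongly discounts the path length of such nodes below.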

            coords = zip(*self.path)  # coords is [[x1, x2, ...], [y1, y2, ...]] (path transposed)

            coord_shift = np.empty_like(coords)
            coord_shift[0] = coords[0][-1:] + coords[0][:-1]  # x shifted by one (previous point)
            coord_shift[1] = coords[1][-1:] + coords[1][:-1]  # y shifted by one (previous point)

            coords = np.asarray(coords)

            dist = [
                (a - b)**2 for a, b in zip(coords[:, 1:], coord_shift[:, 1:])
            ]  # (xpi - xpi+1)^2 , (ypi-ypi+1)^2
            dist = sum(np.sqrt(dist[0] + dist[1]))
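            # dist: total Euclidean length of the planned path, in cells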

            w = dist * (1 / scale_factor)
            if w < w_dist and len(self.path) != 0:
                if self.prev_target == node:
                    break
                w_dist = w
                next_target = node

        self.prev_target = next_target  # don't select the same target if we already failed with it

        Print.art_print(
            "Select nearest node target time: " + str(time.time() - tinit),
            Print.ORANGE)
        return next_target