Example #1
0
    def __init__(self):
        super(Parameter, self).__init__()
        super(Utils, self).__init__()
        super(FieldView, self).__init__()
        super(Draw, self).__init__()
        self.capture = cv2.VideoCapture(camera)
        self.capture.set(cv2.CAP_PROP_FRAME_WIDTH,
                         self.width)  # set the camera image width to 1280
        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT,
                         self.height)  # set the camera image height to 720
        self.table_set = Tables()
        self.planner = PathPlanning(send=self.send)
        self.table_detection = True
        self.detection_success = False
        self.bottle_result = [None, None, None]
        self.standing_result_image = None
        self.quit = False
        self.yukari = Yukari()
        self.remove_separator_middle = False
        self.use_remove_separator = True
        self.points = None
        self.flip_points = None

        self.set_track_bar_pos(self.settings)

        self.ptlist = PointList(NPOINTS)
        self.click_mode = True
        logging.info('START DETECTION')
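    # Usage note (a sketch, not part of the original class): once the capture is
    # configured as above, frames are typically read once per loop iteration, e.g.
    #
    #     ret, frame = self.capture.read()   # ret is False if no frame was grabbed
    #     if ret:
    #         cv2.imshow('main', frame)      # 'main' is a hypothetical window name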
Example #2
0
    def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.target_selection = TargetSelection()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.inner_target_exists = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)

        # ROS Publisher for the path
        self.path_publisher = rospy.Publisher(rospy.get_param('path_pub_topic'), \
            Path, queue_size = 10)
        # ROS Publisher for the subtargets
        self.subtargets_publisher = rospy.Publisher(rospy.get_param('subgoals_pub_topic'),\
            MarkerArray, queue_size = 10)
        # ROS Publisher for the current target
        self.current_target_publisher = rospy.Publisher(rospy.get_param('curr_target_pub_topic'),\
            Marker, queue_size = 10)
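# A standalone sketch of the rospy.Timer pattern used above (hypothetical node
# name; the callback receives a rospy TimerEvent and fires every 0.10 s):
#
#     import rospy
#
#     def check_target(event):
#         rospy.loginfo("timer fired at %s", event.current_real)
#
#     rospy.init_node('timer_sketch')
#     rospy.Timer(rospy.Duration(0.10), check_target)
#     rospy.spin()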
Example #3
0
    def __init__(self, selection_method):
        self.goals_position = []
        self.goals_value = []
        self.omega = 0.0
        self.radius = 0
        self.method = selection_method

        self.brush = Brushfires()
        self.topo = Topology()
        self.path_planning = PathPlanning()
Example #4
0
    def __init__(self, selection_method):
        self.goals_position = []
        self.goals_value = []
        self.omega = 0.0
        self.radius = 0
        self.method = selection_method

        self.brush = Brushfires()
        self.topo = Topology()
        self.path_planning = PathPlanning()

        # Initialize previous target
        self.previous_target = [-1, -1]
Example #5
0
    def __init__(self, selection_method):
        self.goals_position = []
        self.goals_value = []
        self.path = []
        self.prev_target = [0, 0]
        self.omega = 0.0
        self.radius = 0
        self.method = selection_method

        self.brush = Brushfires()
        self.topo = Topology()
        self.path_planning = PathPlanning()
        self.robot_perception = RobotPerception()  # can i use that?
Example #6
0
    def __init__(self, selection_method):
        self.goals_position = []
        self.goals_value = []
        self.omega = 0.0
        self.radius = 0
        self.method = selection_method
        if self.method_is_cost_based():
            from robot_perception import RobotPerception
            self.robot_perception = RobotPerception()
            self.cost_based_properties = rospy.get_param("cost_based_properties")
            numpy.set_printoptions(precision=3, threshold=numpy.nan, suppress=True)

        self.brush = Brushfires()
        self.path_planning = PathPlanning()
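    # Note: method_is_cost_based() is called above but is not part of this
    # snippet. A plausible helper (an assumption, not confirmed by the source)
    # would be:
    #
    #     def method_is_cost_based(self):
    #         return self.method == 'cost_based'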
Example #7
0
    def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.select_another_target = 0
        self.inner_target_exists = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        self.count_limit = 200  # 20 sec

        self.counter_to_next_sub = self.count_limit

        self.laser_aggregation = LaserDataAggregator()
        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)

        # Read the target function
        self.target_selector = rospy.get_param("target_selector")
        print "The selected target function is " + self.target_selector
        self.target_selection = TargetSelection(self.target_selector)

        # ROS Publisher for the path
        self.path_publisher = \
            rospy.Publisher(rospy.get_param('path_pub_topic'), \
            Path, queue_size = 10)

        # ROS Publisher for the subtargets
        self.subtargets_publisher = \
            rospy.Publisher(rospy.get_param('subgoals_pub_topic'),\
            MarkerArray, queue_size = 10)

        # ROS Publisher for the current target
        self.current_target_publisher = \
            rospy.Publisher(rospy.get_param('curr_target_pub_topic'),\
            Marker, queue_size = 10)
Example #8
0
    def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.target_selection = TargetSelection()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.inner_target_exists = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)

        # ROS Publisher for the path
        self.path_publisher = rospy.Publisher(rospy.get_param('path_pub_topic'), \
            Path, queue_size = 10)
        # ROS Publisher for the subtargets
        self.subtargets_publisher = rospy.Publisher(rospy.get_param('subgoals_pub_topic'),\
            MarkerArray, queue_size = 10)
        # ROS Publisher for the current target
        self.current_target_publisher = rospy.Publisher(rospy.get_param('curr_target_pub_topic'),\
            Marker, queue_size = 10)
Example #9
0
    def __init__(self, selection_method):

        self.initial_time = time()
        self.method = selection_method
        self.initialize_gains = False

        self.brush = Brushfires()
        self.topology = Topology()
        self.path_planning = PathPlanning()
        self.droneConnector = DroneCommunication()

        # Parameters from YAML File
        self.debug = True  #rospy.get_param('debug')
        self.map_discovery_purpose = rospy.get_param('map_discovery_purpose')
        self.color_evaluation_flag = rospy.get_param('color_rating')
        self.drone_color_evaluation_topic = rospy.get_param(
            'drone_pub_color_rating')
        self.evaluate_potential_targets_srv_name = rospy.get_param(
            'rate_potential_targets_srv')

        # Explore Gains
        self.g_color = 0.0
        self.g_brush = 0.0
        self.g_corner = 0.0
        self.g_distance = 0.0
        self.set_gain()

        if self.color_evaluation_flag:

            # Color Evaluation Service
            self.color_evaluation_service = rospy.ServiceProxy(
                self.evaluate_potential_targets_srv_name, EvaluateTargets)
            # Subscribe to Color Evaluation Topic to Get Results from Color Evaluation
            self.drone_color_evaluation_sub = rospy.Subscriber(
                self.drone_color_evaluation_topic, ColorEvaluationArray,
                self.color_evaluation_cb)
            # Parameters
            self.targets_color_evaluated = False  # Set True Once Color Evaluation of Targets Completed
            self.color_evaluation = []  # Holds the Color Evaluation of Targets
            self.corner_evaluation = []  # Holds the Number of Corners Near Each Target
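        # Note: the subscriber above expects a color_evaluation_cb method that is
        # not included in this snippet. Based on the attributes initialized here,
        # a minimal sketch (with assumed ColorEvaluationArray field names) is:
        #
        #     def color_evaluation_cb(self, msg):
        #         self.color_evaluation = list(msg.color_evaluation)    # assumed field
        #         self.corner_evaluation = list(msg.corner_evaluation)  # assumed field
        #         self.targets_color_evaluated = True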
Example #10
0
    def __init__(self, selection_method):
        self.goals_position = []
        self.goals_value = []
        self.omega = 0.0
        self.radius = 0
        self.method = selection_method
        self.previous_target = []
        self.brush = Brushfires()
        self.topo = Topology()
        self.path_planning = PathPlanning()
        self.previous_target.append(50)
        self.previous_target.append(50)
        self.node2_index_x = 0
        self.node2_index_y = 0
        self.sonar = SonarDataAggregator()
        self.timeout_happened = 0
Example #11
0
import cv2
import numpy as np
from realsense.view import FieldView
from path_planning import PathPlanning

def f(x):
    # Offset so the 1250-3750 mm position range maps to a 0-based trackbar range
    return x - 1250

planner = PathPlanning(send=False)
size = 720, 1280, 3
v = FieldView()
window_name = 'main'
cv2.namedWindow(window_name)
cv2.createTrackbar('table_1', window_name, f(1250), f(3750), int)
cv2.createTrackbar('table_2', window_name, f(1250), f(3750), int)
cv2.createTrackbar('table_3', window_name, f(1250), f(3750), int)
cv2.createTrackbar('table1_result', window_name, 0, 1, int)
cv2.createTrackbar('table2_result', window_name, 0, 1, int)
cv2.createTrackbar('table3_result', window_name, 0, 1, int)
cv2.createTrackbar('zone', window_name, 0, 1, int)
cv2.setTrackbarPos('table_1', window_name, f(2500))
cv2.setTrackbarPos('table_2', window_name, f(2500))
cv2.setTrackbarPos('table_3', window_name, f(2500))


while True:
    pos1 = cv2.getTrackbarPos('table_1', window_name) + 1250
    pos2 = cv2.getTrackbarPos('table_2', window_name) + 1250
    pos3 = cv2.getTrackbarPos('table_3', window_name) + 1250
    results = [
        cv2.getTrackbarPos('table1_result', window_name),
Example #12
0
class Navigation:

    # Constructor
    def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.select_another_target = 0
        self.inner_target_exists = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        self.time_limit = 20 # seconds

        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)

        # Read the target function
        self.target_selector = rospy.get_param("target_selector")
        print "The selected target function is " + self.target_selector
        self.target_selection = TargetSelection(self.target_selector)

        # ROS Publisher for the path
        self.path_publisher = \
            rospy.Publisher(rospy.get_param('path_pub_topic'), \
            Path, queue_size = 10)

        # ROS Publisher for the subtargets
        self.subtargets_publisher = \
            rospy.Publisher(rospy.get_param('subgoals_pub_topic'),\
            MarkerArray, queue_size = 10)

        # ROS Publisher for the current target
        self.current_target_publisher = \
            rospy.Publisher(rospy.get_param('curr_target_pub_topic'),\
            Marker, queue_size = 10)

        # Timer Thread (initialization)
        self.timerThread = TimerThread(self.time_limit)

    def checkTarget(self, event):
        # Check if we have a target or if the robot just wanders
        if self.inner_target_exists == False or self.move_with_target == False or\
                self.next_subtarget == len(self.subtargets):
          return

        # Check if timer has expired
        if self.timerThread.expired == True:
          Print.art_print('\n~~~~ Time reset ~~~~',Print.RED)
          self.inner_target_exists = False
          self.target_exists = False
          return

        # Get the robot pose in pixels
        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]

        theta_robot = self.robot_perception.robot_pose['th']

        # Clear achieved targets
        self.subtargets = self.subtargets[self.next_subtarget:]
        self.next_subtarget = 0

        # Find the distance between the robot pose and all remaining subtargets
        dx = [rx - st[0] for st in self.subtargets]
        dy = [ry - st[1] for st in self.subtargets]
        dist = [math.hypot(v[0], v[1]) for v in zip(dx, dy)]

        # Try to optimize path only when there is more than one target available
        if len(self.subtargets) > 1:
          ogm = self.robot_perception.getMap()

          theta_goals = np.arctan2(-np.array(dy),-np.array(dx))
          dtheta = theta_goals - theta_robot
          dtheta[dtheta > np.pi] -= 2*np.pi
          dtheta[dtheta < -np.pi] += 2*np.pi

          # Check if there are obstacles between robot and subtarget
          obstacles_subtarget = []
          for st,i in zip(self.subtargets, range(len(self.subtargets))):
            # Find line in pixels between robot and goal
            line_pxls = list(bresenham(int(round(st[0])), int(round(st[1])),\
                                       int(round(rx)), int(round(ry))))
            # Find if there are any obstacles in line
            ogm_line = list(map(lambda pxl: ogm[pxl[0], pxl[1]], line_pxls))
            N_occupied = len(list(filter(lambda x: x > 80, ogm_line)))
            obstacles_subtarget.append(N_occupied)

          # Check if one of next goals is aligned with the current robot theta
          # In this case plan immediately for this goal in order to improve motion behaviour
          for st, i in zip(self.subtargets, range(len(self.subtargets))):
            if np.fabs(dtheta[i]) < np.pi/10 and obstacles_subtarget[i] == 0:
              self.next_subtarget = i
              # Keep resetting the timer while moving towards the target (without any obstacles in between)
              self.timerThread.reset()

        # Check if any target is in distance
        min_dist, min_idx = min(zip(dist, range(len(dist))))

        if min_dist < 5:
          self.next_subtarget = min_idx + 1
          # Reset timer if a goal has been reached
          self.timerThread.reset()
          # Check if the final subtarget has been approached
          if self.next_subtarget >= len(self.subtargets):
            self.target_exists = False

        # Publish the current target
        if self.next_subtarget >= len(self.subtargets):
            return

        subtarget = [\
            self.subtargets[self.next_subtarget][0]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['x'],
            self.subtargets[self.next_subtarget][1]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['y']\
            ]

        RvizHandler.printMarker(\
            [subtarget],\
            1, # Type: Arrow
            0, # Action: Add
            "map", # Frame
            "art_next_subtarget", # Namespace
            [0, 0, 0.8, 0.8], # Color RGBA
            0.2 # Scale
        )

    # Function that selects the next target, produces the path and updates
    # the coverage field. This is called from the speeds assignment code, since
    # it contains timer callbacks
    def selectTarget(self):

        # Cancel previous goal timer
        self.timerThread.stop()

        # IMPORTANT: The robot must be stopped if you call this function until
        # it is over
        # Check if we have a map
        while self.robot_perception.have_map == False:
          Print.art_print("Navigation: No map yet", Print.RED)
          return

        print "\nClearing all markers"
        RvizHandler.printMarker(\
            [[0, 0]],\
            1, # Type: Arrow
            3, # Action: delete all
            "map", # Frame
            "null", # Namespace
            [0,0,0,0], # Color RGBA
            0.1 # Scale
        )

        print '\n\n----------------------------------------------------------'
        print "Navigation: Producing new target"
        # We are good to continue the exploration
        # Make this true in order not to call it again from the speeds assignment
        self.target_exists = True

        # Gets copies of the map and coverage
        local_ogm = self.robot_perception.getMap()
        local_ros_ogm = self.robot_perception.getRosMap()
        local_coverage = self.robot_perception.getCoverage()
        print "Got the map and Coverage"
        self.path_planning.setMap(local_ros_ogm)

        # Once the target has been found, find the path to it
        # Get the global robot pose
        g_robot_pose = self.robot_perception.getGlobalCoordinates(\
              [self.robot_perception.robot_pose['x_px'],\
              self.robot_perception.robot_pose['y_px']])

        # Call the target selection function to select the next best goal
        # Choose target function
        self.path = []
        force_random = False

        # Potential Target
        path_altered = False
        previous_target = None

        while len(self.path) == 0:
          start = time.time()
          target = self.target_selection.selectTarget(\
                    local_ogm,\
                    local_coverage,\
                    self.robot_perception.robot_pose,
                    self.robot_perception.origin,
                    self.robot_perception.resolution,
                    force_random)

          # ipdb.set_trace()
          # Check if there was any path alteration
          if target[0] == True:
              previous_target = target[2]
              target = target[1]
              path_altered = True
          else:
              target = target[1]

          self.path = self.path_planning.createPath(\
              g_robot_pose,\
              target,
              self.robot_perception.resolution)
          print "Navigation: Path for target found with " + str(len(self.path)) +\
              " points"
          if len(self.path) == 0:
            Print.art_print(\
                "Path planning failed. Fallback to random target selection", \
                Print.RED)
            force_random = True

        # Reverse the path to start from the robot
        self.path = self.path[::-1]

        # Break the path into subgoals every `step` pixels (1 m = 20 px)
        step = 1
        n_subgoals = int(len(self.path) / step)
        self.subtargets = []
        for i in range(0, n_subgoals):
          self.subtargets.append(self.path[i * step])
        self.subtargets.append(self.path[-1])
        self.next_subtarget = 0
        print "The path produced " + str(len(self.subtargets)) + " subgoals"

        ######################### NOTE: QUESTION  ##############################
        # The path is produced by an A* algorithm. This means that it is
        # optimal in length but 1) not smooth and 2) length optimality
        # may not be desired for coverage-based exploration
        ########################################################################

        # Start timer thread
        self.timerThread.start()

        # Publish the path for visualization purposes
        ros_path = Path()
        ros_path.header.frame_id = "map"
        for p in self.path:
          ps = PoseStamped()
          ps.header.frame_id = "map"
          ps.pose.position.x = 0
          ps.pose.position.y = 0

          ######################### NOTE: QUESTION  ##############################
          # Fill the ps.pose.position values to show the path in RViz
          # You must understand what self.robot_perception.resolution
          # and self.robot_perception.origin are.
          # ipdb.set_trace()
          ps.pose.position.x = p[0] * self.robot_perception.resolution \
              + self.robot_perception.origin['x']
          ps.pose.position.y = p[1] * self.robot_perception.resolution \
              + self.robot_perception.origin['y']
          ########################################################################

          ros_path.poses.append(ps)
        self.path_publisher.publish(ros_path)

        # Publish the subtargets for visualization purposes
        subtargets_mark = []
        for s in self.subtargets:
          subt = [
            s[0] * self.robot_perception.resolution + \
                    self.robot_perception.origin['x'],
            s[1] * self.robot_perception.resolution + \
                    self.robot_perception.origin['y']
            ]
          subtargets_mark.append(subt)

        RvizHandler.printMarker(\
            subtargets_mark,\
            2, # Type: Sphere
            0, # Action: Add
            "map", # Frame
            "art_subtargets", # Namespace
            [0, 0.8, 0.0, 0.8], # Color RGBA
            0.2 # Scale
        )

        # Print Old and Current Final Target
        if path_altered == True:
            subt = [
            previous_target[0] * self.robot_perception.resolution + \
                    self.robot_perception.origin['x'],
            previous_target[1] * self.robot_perception.resolution + \
                    self.robot_perception.origin['y']
            ]
            # ipdb.set_trace()

            RvizHandler.printMarker(\
                [subt],\
                3, # Type: Sphere
                0, # Action: Add
                "map", # Frame
                "art_subtargets", # Namespace
                [1.0, 0.0, 0.0, 0.8], # Color RGBA
                0.2 # Scale
            )

        # ipdb.set_trace()
        self.inner_target_exists = True

    def velocitiesToNextSubtarget(self):

        [linear, angular] = [0, 0]

        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]
        theta = self.robot_perception.robot_pose['th']

        ######################### NOTE: QUESTION  ##############################
        # The velocities of the robot regarding the next subtarget should be
        # computed. The known parameters are the robot pose [x,y,th] from
        # robot_perception and the next_subtarget [x,y]. From these, you can
        # compute the robot velocities for the vehicle to approach the target.
        # Hint: Trigonometry is required
        l_max = 0.3
        a_max = 0.3

        if self.subtargets and self.next_subtarget <= len(self.subtargets) - 1:
          st_x = self.subtargets[self.next_subtarget][0]
          st_y = self.subtargets[self.next_subtarget][1]

          theta_rg = np.arctan2(st_y - ry, st_x - rx)
          dtheta = (theta_rg - theta)
          if dtheta > np.pi:
            omega = (dtheta - 2*np.pi)/np.pi
          elif dtheta < -np.pi:
            omega = (dtheta + 2*np.pi)/np.pi
          else:
            omega = (theta_rg - theta)/np.pi

          # Nonlinear relations derived from experimentation
          linear = l_max * ((1 - np.abs(omega)) ** 5)
          angular = a_max * np.sign(omega) * (abs(omega) ** (1/5))
        ######################### NOTE: QUESTION  ##############################

        return [linear, angular]
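# How the [linear, angular] pair returned above is consumed is outside this file;
# a common ROS pattern (a sketch with a hypothetical /cmd_vel publisher) is:
#
#     from geometry_msgs.msg import Twist
#
#     pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
#     twist = Twist()
#     twist.linear.x = linear    # forward speed in m/s
#     twist.angular.z = angular  # yaw rate in rad/s
#     pub.publish(twist)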
Example #13
0
class Navigation:

    # Constructor
    def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.select_another_target = 0
        self.inner_target_exists = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        self.count_limit = 200  # 20 sec

        self.counter_to_next_sub = self.count_limit

        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)

        # Read the target function
        self.target_selector = rospy.get_param("target_selector")
        print "The selected target function is " + self.target_selector
        self.target_selection = TargetSelection(self.target_selector)

        # ROS Publisher for the path
        self.path_publisher = \
            rospy.Publisher(rospy.get_param('path_pub_topic'), \
            Path, queue_size = 10)

        # ROS Publisher for the subtargets
        self.subtargets_publisher = \
            rospy.Publisher(rospy.get_param('subgoals_pub_topic'),\
            MarkerArray, queue_size = 10)

        # ROS Publisher for the current target
        self.current_target_publisher = \
            rospy.Publisher(rospy.get_param('curr_target_pub_topic'),\
            Marker, queue_size = 10)

    def checkTarget(self, event):
        # Check if we have a target or if the robot just wanders
        if self.inner_target_exists == False or self.move_with_target == False or\
                self.next_subtarget == len(self.subtargets):
            return

        self.counter_to_next_sub -= 1

        if self.counter_to_next_sub == 0:
            Print.art_print('\n~~~~ Time reset ~~~~', Print.RED)
            self.inner_target_exists = False
            self.target_exists = False
            return

        # Get the robot pose in pixels
        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]

        # Find the distance between the robot pose and the next subtarget
        dist = math.hypot(\
            rx - self.subtargets[self.next_subtarget][0], \
            ry - self.subtargets[self.next_subtarget][1])

        ######################### NOTE: QUESTION  ##############################
        # What if a later subtarget or the end has been reached before the
        # next subtarget? Alter the code accordingly.

        # If the robot is at the starting point, immediately advance to the next subtarget
        if self.next_subtarget == 0:
            self.next_subtarget += 1
            self.counter_to_next_sub = self.count_limit
        else:
            # Check if there is a later subtarget, closer than the next one
            # If one is found, make it the next subtarget and update the time
            for i in range(self.next_subtarget + 1, len(self.subtargets) - 1):
                # Find the distance between the robot pose and the later subtarget
                dist_from_later = math.hypot(\
                    rx - self.subtargets[i][0], \
                    ry - self.subtargets[i][1])
                if dist_from_later < 15:
                    self.next_subtarget = i
                    self.counter_to_next_sub = self.count_limit + 100
                    dist = dist_from_later
                    break

            # If distance from subtarget is less than 5, go to the next one
            if dist < 5:
                self.next_subtarget += 1
                self.counter_to_next_sub = self.count_limit
                # Check if the final subtarget has been approached
                if self.next_subtarget == len(self.subtargets):
                    self.target_exists = False
        ########################################################################

        # Publish the current target
        if self.next_subtarget == len(self.subtargets):
            return

        subtarget = [\
            self.subtargets[self.next_subtarget][0]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['x'],
            self.subtargets[self.next_subtarget][1]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['y']\
            ]

        RvizHandler.printMarker(\
            [subtarget],\
            1, # Type: Arrow
            0, # Action: Add
            "map", # Frame
            "art_next_subtarget", # Namespace
            [0, 0, 0.8, 0.8], # Color RGBA
            0.2 # Scale
        )

    # Function that selects the next target, produces the path and updates
    # the coverage field. This is called from the speeds assignment code, since
    # it contains timer callbacks
    def selectTarget(self):
        # IMPORTANT: The robot must be stopped if you call this function until
        # it is over
        # Check if we have a map
        while self.robot_perception.have_map == False:
            Print.art_print("Navigation: No map yet", Print.RED)
            return

        print "\nClearing all markers"
        RvizHandler.printMarker(\
            [[0, 0]],\
            1, # Type: Arrow
            3, # Action: delete all
            "map", # Frame
            "null", # Namespace
            [0,0,0,0], # Color RGBA
            0.1 # Scale
        )

        print '\n\n----------------------------------------------------------'
        print "Navigation: Producing new target"
        # We are good to continue the exploration
        # Make this true in order not to call it again from the speeds assignment
        self.target_exists = True

        # Gets copies of the map and coverage
        local_ogm = self.robot_perception.getMap()
        local_ros_ogm = self.robot_perception.getRosMap()
        local_coverage = self.robot_perception.getCoverage()
        print "Got the map and Coverage"
        self.path_planning.setMap(local_ros_ogm)

        # Once the target has been found, find the path to it
        # Get the global robot pose
        g_robot_pose = self.robot_perception.getGlobalCoordinates(\
              [self.robot_perception.robot_pose['x_px'],\
              self.robot_perception.robot_pose['y_px']])

        # Call the target selection function to select the next best goal
        # Choose target function
        self.path = []
        force_random = False
        while len(self.path) == 0:
            start = time.time()
            target = self.target_selection.selectTarget(\
                      local_ogm,\
                      local_coverage,\
                      self.robot_perception.robot_pose,
                      self.robot_perception.origin,
                      self.robot_perception.resolution,
                      force_random)

            self.path = self.path_planning.createPath(\
                g_robot_pose,\
                target,
                self.robot_perception.resolution)
            print "Navigation: Path for target found with " + str(len(self.path)) +\
                " points"
            if len(self.path) == 0:
                Print.art_print(\
                    "Path planning failed. Fallback to random target selection", \
                    Print.RED)
                force_random = True

        # Reverse the path to start from the robot
        self.path = self.path[::-1]

        ######################### NOTE: QUESTION  ##############################
        # The path is produced by an A* algorithm. This means that it is
        # optimal in length but 1) not smooth and 2) length optimality
        # may not be desired for coverage-based exploration

        weight_data = 0.5  # How much weight to update the data (a)
        weight_smooth = 0.1  # How much weight to smooth the coordinates (b)
        min_change = 0.0001  # Minimum change per iteration to keep iterating
        new_path = np.copy(np.array(self.path))
        path_length = len(self.path[0])  # coordinates per waypoint (x, y)
        change = min_change

        while change >= min_change:
            change = 0.0
            for i in range(1, len(new_path) - 1):
                for j in range(path_length):
                    # Initialize x, y
                    x_i = self.path[i][j]
                    y_i = new_path[i][j]
                    y_prev = new_path[i - 1][j]
                    y_next = new_path[i + 1][j]

                    y_i_saved = y_i

                    # Minimize the distance between the coordinates of the
                    # original path (x) and the smoothed path (y). Also minimize
                    # the difference between the coordinates of the smoothed path
                    # at time step i and its neighboring time steps. Both
                    # minimizations use an iterative gradient-descent update.
                    y_i += weight_data * (x_i - y_i) + weight_smooth * (
                        y_next + y_prev - (2 * y_i))
                    new_path[i][j] = y_i

                    change += abs(y_i - y_i_saved)

        self.path = new_path
        ########################################################################

        # Break the path into subgoals every `step` pixels (1 m = 20 px)
        step = 1
        n_subgoals = int(len(self.path) / step)
        self.subtargets = []
        for i in range(0, n_subgoals):
            self.subtargets.append(self.path[i * step])
        self.subtargets.append(self.path[-1])
        self.next_subtarget = 0
        print "The path produced " + str(len(self.subtargets)) + " subgoals"

        self.counter_to_next_sub = self.count_limit

        # Publish the path for visualization purposes
        ros_path = Path()
        ros_path.header.frame_id = "map"
        for p in self.path:
            ps = PoseStamped()
            ps.header.frame_id = "map"
            ps.pose.position.x = 0
            ps.pose.position.y = 0
            ######################### NOTE: QUESTION  ##############################
            # Fill the ps.pose.position values to show the path in RViz
            # You must understand what self.robot_perception.resolution
            # and self.robot_perception.origin are.

            # Multiply subgoal's coordinates with resolution and
            # add robot.origin in order to translate between the (0,0) of the robot
            # pose and (0,0) of the map
            ps.pose.position.x = p[0] * self.robot_perception.resolution + \
                     self.robot_perception.origin['x']
            ps.pose.position.y = p[1] * self.robot_perception.resolution + \
                     self.robot_perception.origin['y']
            ########################################################################
            ros_path.poses.append(ps)
        self.path_publisher.publish(ros_path)

        # Publish the subtargets for visualization purposes
        subtargets_mark = []
        for s in self.subtargets:
            subt = [
              s[0] * self.robot_perception.resolution + \
                      self.robot_perception.origin['x'],
              s[1] * self.robot_perception.resolution + \
                      self.robot_perception.origin['y']
              ]
            subtargets_mark.append(subt)

        RvizHandler.printMarker(\
            subtargets_mark,\
            2, # Type: Sphere
            0, # Action: Add
            "map", # Frame
            "art_subtargets", # Namespace
            [0, 0.8, 0.0, 0.8], # Color RGBA
            0.2 # Scale
        )

        self.inner_target_exists = True

    def velocitiesToNextSubtarget(self):

        [linear, angular] = [0, 0]

        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]
        theta = self.robot_perception.robot_pose['th']

        ######################### NOTE: QUESTION  ##############################
        # The velocities of the robot regarding the next subtarget should be
        # computed. The known parameters are the robot pose [x,y,th] from
        # robot_perception and the next_subtarget [x,y]. From these, you can
        # compute the robot velocities for the vehicle to approach the target.
        # Hint: Trigonometry is required

        if self.subtargets and self.next_subtarget <= len(self.subtargets) - 1:
            st_x = self.subtargets[self.next_subtarget][0]
            st_y = self.subtargets[self.next_subtarget][1]
            ######################### NOTE: QUESTION  ##############################
            # angular and linear velocities are calculated upon the
            # math types presented at 9.exploration_target_selection.pdf
            dtheta = math.degrees(math.atan2(st_y - ry,
                                             st_x - rx)) - math.degrees(theta)

            if 0 <= dtheta < 180:
                angular = dtheta / 180
            elif dtheta >= 180:
                angular = (dtheta - 2 * 180) / 180
            elif -180 < dtheta < 0:
                angular = dtheta / 180
            else:  # dtheta <= -180
                angular = (dtheta + 2 * 180) / 180
            # This should be angular * 0.3, but then the robot moves very slowly
            # and cannot reach the targets within the time set by the timer
            if angular > 0.3:
                angular = 0.3
            elif angular < -0.3:
                angular = -0.3

            # **20 is used to avoid overshoot problem
            linear = ((1 - abs(angular))**20) * 0.3

        return [linear, angular]
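# The dtheta branches above wrap the heading error into (-180, 180] degrees and
# then scale it to [-1, 1]. An equivalent compact form (a sketch, not from the
# original file) does the wrap with atan2:
#
#     import math
#
#     def normalized_heading_error(theta_robot, theta_goal):
#         dtheta = theta_goal - theta_robot
#         # atan2(sin, cos) wraps any angle into (-pi, pi]
#         return math.atan2(math.sin(dtheta), math.cos(dtheta)) / math.pi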
Example #14
0
class TargetSelection:

    # Constructor
    def __init__(self, selection_method):
        self.goals_position = []
        self.goals_value = []
        self.omega = 0.0
        self.radius = 0
        self.method = selection_method

        self.brush = Brushfires()
        self.topo = Topology()
        self.path_planning = PathPlanning()


    def selectTarget(self, init_ogm, coverage, robot_pose, origin, \
        resolution, force_random = False):

        target = [-1, -1]

        ######################### NOTE: QUESTION  ##############################
        # Implement a smart way to select the next target. You have the
        # following tools: ogm_limits, Brushfire field, OGM skeleton,
        # topological nodes.

        # Find only the useful boundaries of OGM. Only there calculations
        # have meaning
        ogm_limits = OgmOperations.findUsefulBoundaries(
            init_ogm, origin, resolution)

        # Blur the OGM to erase discontinuities due to laser rays
        ogm = OgmOperations.blurUnoccupiedOgm(init_ogm, ogm_limits)

        # Calculate Brushfire field
        tinit = time.time()
        brush = self.brush.obstaclesBrushfireCffi(ogm, ogm_limits)
        Print.art_print("Brush time: " + str(time.time() - tinit),
                        Print.ORANGE)

        # Calculate skeletonization
        tinit = time.time()
        skeleton = self.topo.skeletonizationCffi(ogm, \
                   origin, resolution, ogm_limits)
        Print.art_print("Skeletonization time: " + str(time.time() - tinit),
                        Print.ORANGE)

        # Find topological graph
        tinit = time.time()
        nodes = self.topo.topologicalNodes(ogm, skeleton, coverage, origin, \
                resolution, brush, ogm_limits)
        Print.art_print("Topo nodes time: " + str(time.time() - tinit),
                        Print.ORANGE)

        # Visualization of topological nodes
        vis_nodes = []
        for n in nodes:
            vis_nodes.append([
                n[0] * resolution + origin['x'],
                n[1] * resolution + origin['y']
            ])
        RvizHandler.printMarker(\
            vis_nodes,\
            1, # Type: Arrow
            0, # Action: Add
            "map", # Frame
            "art_topological_nodes", # Namespace
            [0.3, 0.4, 0.7, 0.5], # Color RGBA
            0.1 # Scale
        )

        # Check at autonomous_expl.yaml if target_selector = 'random' or 'smart'
        if self.method == 'random' or force_random == True:
            target = self.selectRandomTarget(ogm, coverage, brush, ogm_limits)
        elif self.method == 'smart' and force_random == False:
            tinit = time.time()
            # Get the robot pose in pixels
            [rx, ry] = [\
                robot_pose['x_px'] - \
                        origin['x'] / resolution,\
                robot_pose['y_px'] - \
                        origin['y'] / resolution\
                        ]
            g_robot_pose = [rx, ry]
            # If there are more than 25 nodes the computation cost becomes very
            # high. To avoid a timer reset we sample the nodes list and keep
            # half of it.
            for i in range(0, len(nodes)):
                nodes[i].append(i)
            if (len(nodes) > 25):
                nodes = random.sample(nodes, int(len(nodes) / 2))

            # Initialize weights
            w_dist = [0] * len(nodes)
            w_rot = [robot_pose['th']] * len(nodes)
            w_cov = [0] * len(nodes)
            w_topo = [0] * len(nodes)
            # Calculate weights for every node in the topological graph
            for i in range(0, len(nodes)):
                # If path planning fails then give extreme values to weights
                path = self.path_planning.createPath(g_robot_pose, nodes[i],
                                                     resolution)
                if not path:
                    w_dist[i] = sys.maxsize
                    w_rot[i] = sys.maxsize
                    w_cov[i] = sys.maxsize
                    w_topo[i] = sys.maxsize
                else:
                    for j in range(1, len(path)):
                        # Distance cost
                        w_dist[i] += math.hypot(path[j][0] - path[j - 1][0],
                                                path[j][1] - path[j - 1][1])
                        # Rotational cost
                        w_rot[i] += abs(
                            math.atan2(path[j][0] - path[j - 1][0],
                                       path[j][1] - path[j - 1][1]))
                        # Coverage cost
                        w_cov[i] += coverage[int(path[j][0])][int(
                            path[j][1])] / (len(path))
                    # Add the coverage cost of 0-th node of the path
                    w_cov[i] += coverage[int(path[0][0])][int(
                        path[0][1])] / (len(path))
                    # Topological cost
                    # This metric depicts whether the target node lies in open
                    # space or near an obstacle. To make it more reliable we
                    # also check the node's neighbouring cells.
                    w_topo[i] += brush[nodes[i][0]][nodes[i][1]]
                    w_topo[i] += brush[nodes[i][0] - 1][nodes[i][1]]
                    w_topo[i] += brush[nodes[i][0] + 1][nodes[i][1]]
                    w_topo[i] += brush[nodes[i][0]][nodes[i][1] - 1]
                    w_topo[i] += brush[nodes[i][0]][nodes[i][1] + 1]
                    w_topo[i] += brush[nodes[i][0] - 1][nodes[i][1] - 1]
                    w_topo[i] += brush[nodes[i][0] + 1][nodes[i][1] + 1]
                    w_topo[i] = w_topo[i] / 7

            # Normalize weights between 0-1
            for i in range(0, len(nodes)):
                w_dist[i] = 1 - (w_dist[i] - min(w_dist)) / (max(w_dist) -
                                                             min(w_dist))
                w_rot[i] = 1 - (w_rot[i] - min(w_rot)) / (max(w_rot) -
                                                          min(w_rot))
                w_cov[i] = 1 - (w_cov[i] - min(w_cov)) / (max(w_cov) -
                                                          min(w_cov))
                w_topo[i] = 1 - (w_topo[i] - min(w_topo)) / (max(w_topo) -
                                                             min(w_topo))

            # Set cost values
            # We set each cost's priority based on experimental results
            # from "Cost-Based Target Selection Techniques Towards Full Space
            # Exploration and Coverage for USAR Applications
            # in a Priori Unknown Environments" publication
            C1 = w_topo
            C2 = w_dist
            C3 = [1] * len(nodes)
            for i in range(0, len(nodes)):
                C3[i] -= w_cov[i]
            C4 = w_rot
            # Priority Weight
            C_PW = [0] * len(nodes)
            # Smoothing Factor
            C_SF = [0] * len(nodes)
            # Target's Final Priority
            C_FP = [0] * len(nodes)
            for i in range(0, len(nodes)):
                C_PW[i] = 2**3 * (1 - C1[i]) / .5 + 2**2 * (
                    1 - C2[i]) / .5 + 2**1 * (1 - C3[i]) / .5 + 2**0 * (
                        1 - C4[i]) / .5
                C_SF[i] = (2**3 * (1 - C1[i]) + 2**2 * (1 - C2[i]) + 2**1 *
                           (1 - C3[i]) + 2**0 * (1 - C4[i])) / (2**4 - 1)
                C_FP[i] = C_PW[i] * C_SF[i]

            # Select the node with the smallest C_FP value
            val, idx = min((val, idx) for (idx, val) in enumerate(C_FP))
            Print.art_print(
                "Select smart target time: " + str(time.time() - tinit),
                Print.BLUE)
            Print.art_print("Selected Target - Node: " + str(nodes[idx][2]),
                            Print.BLUE)
            target = nodes[idx]
        else:
            Print.art_print(
                "[ERROR] target_selector at autonomous_expl.yaml must be either 'random' or 'smart'",
                Print.RED)
        ########################################################################

        return target

    def selectRandomTarget(self, ogm, coverage, brushogm, ogm_limits):
        # The next target in pixels
        tinit = time.time()
        next_target = [0, 0]
        found = False
        while not found:
            x_rand = random.randint(0, ogm.shape[0] - 1)
            y_rand = random.randint(0, ogm.shape[1] - 1)
            if ogm[x_rand][y_rand] < 50 and coverage[x_rand][y_rand] < 50 and \
                brushogm[x_rand][y_rand] > 5:
                next_target = [x_rand, y_rand]
                found = True
        Print.art_print("Select random target time: " + str(time.time() - tinit), \
            Print.ORANGE)
        return next_target
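# The weight normalization above divides by (max - min), which breaks when every
# node shares the same weight for a criterion. A guarded helper (a sketch, not
# part of the original file) that keeps the 1 - (w - min)/(max - min) mapping:
#
#     def normalize_inverted(weights):
#         w_min, w_max = min(weights), max(weights)
#         if w_max == w_min:
#             return [0.0] * len(weights)  # all candidates tie on this criterion
#         return [1 - (w - w_min) / float(w_max - w_min) for w in weights]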
Example #15
0
class Navigation:

    # Constructor
    def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.target_selection = TargetSelection()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.inner_target_exists = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)

        # ROS Publisher for the path
        self.path_publisher = rospy.Publisher(rospy.get_param('path_pub_topic'), \
            Path, queue_size = 10)
        # ROS Publisher for the subtargets
        self.subtargets_publisher = rospy.Publisher(rospy.get_param('subgoals_pub_topic'),\
            MarkerArray, queue_size = 10)
        # ROS Publisher for the current target
        self.current_target_publisher = rospy.Publisher(rospy.get_param('curr_target_pub_topic'),\
            Marker, queue_size = 10)


    def checkTarget(self, event):
        # Check if we have a target or if the robot just wanders
        if self.inner_target_exists == False or self.move_with_target == False or\
                self.next_subtarget == len(self.subtargets):
          return

        # Get the robot pose in pixels
        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]

        # YOUR CODE HERE ------------------------------------------------------
        # Here the next subtarget is checked for proximity. If the robot is too
        # close to the subtarget it is considered approached and the next
        # subtarget is assigned as next. However there are cases where the
        # robot misses one subtarget due to obstacle detection. Enhance the below
        # functionality by checking all the subgoals (and the final goal) for
        # proximity and assign the proper next subgoal

        # Find the distance between the robot pose and the next subtarget
        dist = math.hypot(\
            rx - self.subtargets[self.next_subtarget][0], \
            ry - self.subtargets[self.next_subtarget][1])

        # Check if distance is less than 7 px (14 cm)
        if dist < 7:
          print "Sub target reached!"
          self.next_subtarget += 1

          # Check if the final subtarget has been approached
          if self.next_subtarget == len(self.subtargets):
            print "Final goal reached!"
            self.target_exists = False

        # ---------------------------------------------------------------------
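        # A sketch of the requested enhancement (adapted from the counter-based
        # variant earlier in this file; not part of this template): also check
        # whether a later subtarget is already within range and skip ahead to it.
        #
        #     for i in range(self.next_subtarget + 1, len(self.subtargets)):
        #         if math.hypot(rx - self.subtargets[i][0],
        #                       ry - self.subtargets[i][1]) < 7:
        #             self.next_subtarget = i + 1
        #             break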

        # Publish the current target
        if self.next_subtarget == len(self.subtargets):
            return
        st = Marker()
        st.header.frame_id = "map"
        st.ns = 'as_curr_namespace'
        st.id = 0
        st.header.stamp = rospy.Time(0)
        st.type = 1 # arrow
        st.action = 0 # add
        st.pose.position.x = self.subtargets[self.next_subtarget][0]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['x']
        st.pose.position.y = self.subtargets[self.next_subtarget][1]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['y']

        st.color.r = 0
        st.color.g = 0
        st.color.b = 0.8
        st.color.a = 0.8
        st.scale.x = 0.2
        st.scale.y = 0.2
        st.scale.z = 0.2

        self.current_target_publisher.publish(st)

    # Function that selects the next target, produces the path and updates
    # the coverage field. This is called from the speeds assignment code, since
    # it contains timer callbacks
    def selectTarget(self):
        # IMPORTANT: The robot must be stopped if you call this function until
        # it is over
        # Check if we have a map
        while self.robot_perception.have_map == False:
          return

        print "Navigation: Producing new target"
        # We are good to continue the exploration
        # Make this true in order not to call it again from the speeds assignment
        self.target_exists = True

        # Manually update the coverage field
        self.robot_perception.updateCoverage()
        print "Navigation: Coverage updated"
      
        # Gets copies of the map and coverage
        local_ogm = self.robot_perception.getMap()
        local_coverage = self.robot_perception.getCoverage()
        print "Got the map and Coverage"
  
        # Call the target selection function to select the next best goal
        target = self.target_selection.selectTarget(\
            local_ogm,\
            local_coverage,\
            self.robot_perception.robot_pose)
        print "Navigation: New target: " + str(target)

        # Once the target has been found, find the path to it
        # Get the global robot pose
        g_robot_pose = self.robot_perception.getGlobalCoordinates(\
            [self.robot_perception.robot_pose['x_px'],\
            self.robot_perception.robot_pose['y_px']])

        # Need to reverse the path??
        self.path = self.path_planning.createPath(\
            local_ogm,\
            g_robot_pose,\
            target)
        print "Navigation: Path for target found with " + str(len(self.path)) +\
            " points"
        # Reverse the path to start from the robot
        self.path = self.path[::-1]

        # Break the path to subgoals every 10 pixels (0.5m)
        step = 10
        n_subgoals = int(len(self.path) / step)
        self.subtargets = []
        for i in range(0, n_subgoals):
          self.subtargets.append(self.path[i * step])
        self.subtargets.append(self.path[-1])
        self.next_subtarget = 0
        print "The path produced " + str(len(self.subtargets)) + " subgoals"

        # Publish the path for visualization purposes
        ros_path = Path()
        ros_path.header.frame_id = "map"
        for p in self.path:
          ps = PoseStamped()
          ps.header.frame_id = "map"
          ps.pose.position.x = p[0] * self.robot_perception.resolution + \
                  self.robot_perception.origin['x']
          ps.pose.position.y = p[1] * self.robot_perception.resolution + \
                  self.robot_perception.origin['y']
          ros_path.poses.append(ps)

        self.path_publisher.publish(ros_path)

        # Publish the subtargets for visualization purposes
        ros_subgoals = MarkerArray()
        count = 0
        for s in self.subtargets:
            st = Marker()
            st.header.frame_id = "map"
            st.ns = 'as_namespace'
            st.id = count
            st.header.stamp = rospy.Time(0)
            st.type = 2 # sphere
            st.action = 0 # add
            st.pose.position.x = s[0] * self.robot_perception.resolution + \
                    self.robot_perception.origin['x']
            st.pose.position.y = s[1] * self.robot_perception.resolution + \
                    self.robot_perception.origin['y']

            st.color.r = 0
            st.color.g = 0.8
            st.color.b = 0
            st.color.a = 0.8
            st.scale.x = 0.2
            st.scale.y = 0.2
            st.scale.z = 0.2
            ros_subgoals.markers.append(st)
            count += 1

        self.subtargets_publisher.publish(ros_subgoals)

        self.inner_target_exists = True

    def velocitiesToNextSubtarget(self):
        
        [linear, angular] = [0, 0]

        # YOUR CODE HERE ------------------------------------------------------
        # The velocities of the robot regarding the next subtarget should be 
        # computed. The known parameters are the robot pose [x,y,th] from 
        # robot_perception and the next_subtarget [x,y]. From these, you can 
        # compute the robot velocities for the vehicle to approach the target.
        # Hint: Trigonometry is required

        # ---------------------------------------------------------------------

        return [linear, angular]
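# The velocity computation above is left as a placeholder in this variant. A
# condensed sketch of the approach used by the completed examples earlier in this
# file (pose in pixels, theta in radians, 0.3 speed caps assumed):
#
#     import math
#
#     def velocities_to_subtarget(rx, ry, theta, st_x, st_y, l_max=0.3, a_max=0.3):
#         dtheta = math.atan2(st_y - ry, st_x - rx) - theta
#         omega = math.atan2(math.sin(dtheta), math.cos(dtheta)) / math.pi  # [-1, 1]
#         angular = a_max * omega
#         linear = l_max * (1 - abs(omega)) ** 5  # slow down while turning
#         return [linear, angular]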
Example #16
0
import cv2
import numpy as np
from realsense.view import FieldView
from path_planning import PathPlanning


def f(x):
    # Offset so the 1250-3750 mm position range maps to a 0-based trackbar range
    return x - 1250


planner = PathPlanning(send=False)
size = 720, 1280, 3
v = FieldView()
window_name = 'main'
cv2.namedWindow(window_name)
cv2.createTrackbar('table_1', window_name, f(1250), f(3750), int)
cv2.createTrackbar('table_2', window_name, f(1250), f(3750), int)
cv2.createTrackbar('table_3', window_name, f(1250), f(3750), int)
cv2.createTrackbar('zone', window_name, 0, 1, int)
cv2.setTrackbarPos('table_1', window_name, f(2500))
cv2.setTrackbarPos('table_2', window_name, f(2500))
cv2.setTrackbarPos('table_3', window_name, f(2500))

while True:
    pos1 = cv2.getTrackbarPos('table_1', window_name) + 1250
    pos2 = cv2.getTrackbarPos('table_2', window_name) + 1250
    pos3 = cv2.getTrackbarPos('table_3', window_name) + 1250
    zone = cv2.getTrackbarPos('zone', window_name)
    move_table_pos = (pos1, pos2, pos3)
    v.zone = zone
    points, flip_points = planner.main((pos1, pos2, pos3, zone))
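    # The rest of the original loop body is truncated here. Without an event
    # pump the HighGUI window never refreshes, so a minimal completion (a
    # sketch, not the original code) would at least service the GUI and allow
    # quitting with 'q':
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break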
class Navigation:

    # Constructor
    def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.select_another_target = 0
        self.inner_target_exists = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        self.count_limit = 200 # 20 sec

        self.counter_to_next_sub = self.count_limit

        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)

        # Read the target function
        self.target_selector = rospy.get_param("target_selector")
        print "The selected target function is " + self.target_selector
        self.target_selection = TargetSelection(self.target_selector)

        # ROS Publisher for the path
        self.path_publisher = \
            rospy.Publisher(rospy.get_param('path_pub_topic'), \
            Path, queue_size = 10)

        # ROS Publisher for the subtargets
        self.subtargets_publisher = \
            rospy.Publisher(rospy.get_param('subgoals_pub_topic'),\
            MarkerArray, queue_size = 10)

        # ROS Publisher for the current target
        self.current_target_publisher = \
            rospy.Publisher(rospy.get_param('curr_target_pub_topic'),\
            Marker, queue_size = 10)

    def checkTarget(self, event):
        # Check if we have a target or if the robot just wanders
        if self.inner_target_exists == False or self.move_with_target == False or\
                self.next_subtarget == len(self.subtargets):
          return

        self.counter_to_next_sub -= 1

        if self.counter_to_next_sub == 0:
          Print.art_print('\n~~~~ Time reset ~~~~',Print.RED)
          self.inner_target_exists = False
          self.target_exists = False
          return

        # Get the robot pose in pixels
        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]

        ######################### NOTE: QUESTION  ##############################
        # What if a later subtarget or the end has been reached before the
        # next subtarget? Alter the code accordingly.

        # Challenge 5 

        # Instead of checking only the distance to the next subtarget,
        # check the distances to all remaining subtargets, so a later
        # subtarget can be reached while bypassing the current one.

        for i in range(self.next_subtarget, len(self.subtargets)):
            # Find the distance between the robot pose and the next subtarget
            dist = math.hypot( rx - self.subtargets[i][0], ry - self.subtargets[i][1])

            # check distance with the i_th target
            if i != (len(self.subtargets)-1):
                if dist < 10:  # if the distance to subtarget i is small, set subtarget i + 1 as the next one
                    self.next_subtarget = i + 1
                    self.counter_to_next_sub = self.count_limit
            else:
                if dist < 5:  # if the distance to the final subtarget is small, advance past it
                    self.next_subtarget = i + 1
                    self.counter_to_next_sub = self.count_limit

            # Check if the final subtarget has been approached
            if self.next_subtarget == len(self.subtargets):
                self.target_exists = False
        ########################################################################

        # Publish the current target
        if self.next_subtarget == len(self.subtargets):
            return

        subtarget = [\
            self.subtargets[self.next_subtarget][0]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['x'],
            self.subtargets[self.next_subtarget][1]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['y']\
            ]

        RvizHandler.printMarker(\
            [subtarget],\
            1, # Type: Arrow
            0, # Action: Add
            "map", # Frame
            "art_next_subtarget", # Namespace
            [0, 0, 0.8, 0.8], # Color RGBA
            0.2 # Scale
        )

    # Function that selects the next target, produces the path and updates
    # the coverage field. This is called from the speeds assignment code, since
    # it contains timer callbacks
    def selectTarget(self):
        # IMPORTANT: The robot must be stopped if you call this function until
        # it is over
        # Check if we have a map
        while self.robot_perception.have_map == False:
          Print.art_print("Navigation: No map yet", Print.RED)
          return

        print "\nClearing all markers"
        RvizHandler.printMarker(\
            [[0, 0]],\
            1, # Type: Arrow
            3, # Action: delete all
            "map", # Frame
            "null", # Namespace
            [0,0,0,0], # Color RGBA
            0.1 # Scale
        )

        print '\n\n----------------------------------------------------------'
        print "Navigation: Producing new target"
        # We are good to continue the exploration
        # Make this true in order not to call it again from the speeds assignment
        self.target_exists = True

        # Gets copies of the map and coverage
        local_ogm = self.robot_perception.getMap()
        local_ros_ogm = self.robot_perception.getRosMap()
        local_coverage = self.robot_perception.getCoverage()
        print "Got the map and Coverage"
        self.path_planning.setMap(local_ros_ogm)

        # Once the target has been found, find the path to it
        # Get the global robot pose
        g_robot_pose = self.robot_perception.getGlobalCoordinates(\
              [self.robot_perception.robot_pose['x_px'],\
              self.robot_perception.robot_pose['y_px']])

        # Call the target selection function to select the next best goal
        # Choose target function
        self.path = []
        force_random = False
        while len(self.path) == 0:
          start = time.time()
          target = self.target_selection.selectTarget(\
                    local_ogm,\
                    local_coverage,\
                    self.robot_perception.robot_pose,
                    self.robot_perception.origin,
                    self.robot_perception.resolution,
                    force_random)

          self.path = self.path_planning.createPath(\
              g_robot_pose,\
              target,
              self.robot_perception.resolution)
          print "Navigation: Path for target found with " + str(len(self.path)) +\
              " points"
          if len(self.path) == 0:
            Print.art_print(\
                "Path planning failed. Fallback to random target selection", \
                Print.RED)
            force_random = True

        # Reverse the path to start from the robot
        self.path = self.path[::-1]

        ###################################################
        # Extra Challenge 1
        # Smooth path
        if len(self.path) > 3:
            x = np.array(self.path)
            y = np.copy(x)
            a = 0.5
            b = 0.1

            epsilon = np.sum(np.abs(a * (x[1:-1, :] - y[1:-1, :]) + b * (y[2:, :] - 2*y[1:-1, :] + y[:-2, :])))

            while epsilon > 1e-3:
                y[1:-1, :] += a * (x[1:-1, :] - y[1:-1, :]) + b * (y[2:, :] - 2*y[1:-1, :] + y[:-2, :])

                epsilon = np.sum(np.abs(a * (x[1:-1, :] - y[1:-1, :]) + b * (y[2:, :] - 2*y[1:-1, :] + y[:-2, :])))

            # Copy the smoother path
            self.path = y.tolist()
        ####################################################

        # Break the path into subgoals every pixel (1 px = 5 cm, since 1 m = 20 px)
        step = 1
        n_subgoals = int(len(self.path) / step)
        self.subtargets = []
        for i in range(0, n_subgoals):
          self.subtargets.append(self.path[i * step])
        self.subtargets.append(self.path[-1])
        self.next_subtarget = 0
        print "The path produced " + str(len(self.subtargets)) + " subgoals"

        ######################### NOTE: QUESTION  ##############################
        # The path is produced by an A* algorithm. This means that it is
        # optimal in length but 1) not smooth and 2) length optimality
        # may not be desired for coverage-based exploration
        ########################################################################

        self.counter_to_next_sub = self.count_limit

        # Publish the path for visualization purposes
        ros_path = Path()
        ros_path.header.frame_id = "map"
        for p in self.path:
          ps = PoseStamped()
          ps.header.frame_id = "map"
          ps.pose.position.x = 0
          ps.pose.position.y = 0
          ######################### NOTE: QUESTION  ##############################
          # Fill the ps.pose.position values to show the path in RViz
          # You must understand what self.robot_perception.resolution
          # and self.robot_perception.origin are.

          # Challenge 2:

          ps.pose.position.x = p[0] * self.robot_perception.resolution + self.robot_perception.origin['x']
          ps.pose.position.y = p[1] * self.robot_perception.resolution + self.robot_perception.origin['y']

          ########################################################################
          ros_path.poses.append(ps)
        self.path_publisher.publish(ros_path)

        # Publish the subtargets for visualization purposes
        subtargets_mark = []
        for s in self.subtargets:
          subt = [
            s[0] * self.robot_perception.resolution + \
                    self.robot_perception.origin['x'],
            s[1] * self.robot_perception.resolution + \
                    self.robot_perception.origin['y']
            ]
          subtargets_mark.append(subt)

        RvizHandler.printMarker(\
            subtargets_mark,\
            2, # Type: Sphere
            0, # Action: Add
            "map", # Frame
            "art_subtargets", # Namespace
            [0, 0.8, 0.0, 0.8], # Color RGBA
            0.2 # Scale
        )

        self.inner_target_exists = True

    def velocitiesToNextSubtarget(self):

        [linear, angular] = [0, 0]

        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]
        theta = self.robot_perception.robot_pose['th']
        ######################### NOTE: QUESTION  ##############################
        # The velocities of the robot regarding the next subtarget should be
        # computed. The known parameters are the robot pose [x,y,th] from
        # robot_perception and the next_subtarget [x,y]. From these, you can
        # compute the robot velocities for the vehicle to approach the target.
        # Hint: Trigonometry is required

        # Challenge 3

        if self.subtargets and self.next_subtarget <= len(self.subtargets) - 1:
            st_x = self.subtargets[self.next_subtarget][0]
            st_y = self.subtargets[self.next_subtarget][1]

            # We know goals position (x', y') and robot's position (x,y)
            # It is tan(Theta_RG) = (y'-y)/(x'-x) = lamda
            # Theta_RG = atan(lamda)
            # Theta_RG is the angle between vector RobotGoal and Ox axis
            Theta_RG = math.atan2(st_y - ry, st_x - rx)     # atan2 return the result in rads

            # We can calculate the angle between the direction of robot's movement
            # and the RG vector. The angle will be the difference of RGangle - theta(direction angle of robot's movement)
            D_Theta = Theta_RG - theta

            # Based on presentation 9, D_Theta
            # has to be readjusted in [-pi, pi]
            if (D_Theta < math.pi) and (D_Theta > -math.pi):
                omega = D_Theta / math.pi
            elif D_Theta >= math.pi:
                omega = (D_Theta - 2 * math.pi) / math.pi
            elif D_Theta <= -math.pi:
                omega = (D_Theta + 2 * math.pi) / math.pi

            # Linear Speed Calculation
            # presentation 9: u = umax(1- |omega|)^n
            # larger n -> lower speed
            # max speed = 0.3
            u = ((1 - abs(omega))**(6.00))
            linear = 0.3 * u

            # The robot was turning too slowly, so the angular command is
            # boosted with a sub-linear (1/6 power) mapping
            # max speed = 0.3
            omega = (math.copysign(abs(omega)**(1.0/6), omega))
            angular = 0.3 * omega
        ######################### NOTE: QUESTION  ##############################

        return [linear, angular]
Example #18
0
class TargetSelection:

    # Constructor
    def __init__(self, selection_method):
        self.goals_position = []
        self.goals_value = []
        self.omega = 0.0
        self.radius = 0
        self.method = selection_method

        self.brush = Brushfires()
        self.topo = Topology()
        self.path_planning = PathPlanning()

        # Initialize previous target
        self.previous_target = [-1, -1]

    def selectTarget(self, init_ogm, ros_ogm, coverage, robot_pose, origin, \
        resolution, force_random = False):

        target = [-1, -1]

        ######################### NOTE: QUESTION  ##############################
        # Implement a smart way to select the next target. You have the
        # following tools: ogm_limits, Brushfire field, OGM skeleton,
        # topological nodes.

        # Find only the useful boundaries of OGM. Only there calculations
        # have meaning
        ogm_limits = OgmOperations.findUsefulBoundaries(
            init_ogm, origin, resolution)

        # Blur the OGM to erase discontinuities due to laser rays
        ogm = OgmOperations.blurUnoccupiedOgm(init_ogm, ogm_limits)

        # Calculate Brushfire field
        tinit = time.time()
        brush = self.brush.obstaclesBrushfireCffi(ogm, ogm_limits)
        Print.art_print("Brush time: " + str(time.time() - tinit),
                        Print.ORANGE)

        # Calculate skeletonization
        tinit = time.time()
        skeleton = self.topo.skeletonizationCffi(ogm, \
                   origin, resolution, ogm_limits)
        Print.art_print("Skeletonization time: " + str(time.time() - tinit),
                        Print.ORANGE)

        # Find topological graph
        tinit = time.time()
        nodes = self.topo.topologicalNodes(ogm, skeleton, coverage, origin, \
                resolution, brush, ogm_limits)
        Print.art_print("Topo nodes time: " + str(time.time() - tinit),
                        Print.ORANGE)

        # Visualization of topological nodes
        vis_nodes = []
        for n in nodes:
            vis_nodes.append([
                n[0] * resolution + origin['x'],
                n[1] * resolution + origin['y']
            ])
        RvizHandler.printMarker(\
            vis_nodes,\
            1, # Type: Arrow
            0, # Action: Add
            "map", # Frame
            "art_topological_nodes", # Namespace
            [0.3, 0.4, 0.7, 0.5], # Color RGBA
            0.1 # Scale
        )
        # Random point
        if self.method == 'random' or force_random == True:
            target = self.selectRandomTarget(ogm, coverage, brush, ogm_limits)
        # Custom point
        elif self.method == 'cost_based':
            self.path_planning.setMap(ros_ogm)
            g_robot_pose = [robot_pose['x_px'] - int(origin['x'] / resolution),\
                            robot_pose['y_px'] - int(origin['y'] / resolution)]

            # Remove all covered nodes from the candidate list
            nodes = np.array(nodes)
            uncovered_idxs = np.array(
                [coverage[n[0], n[1]] == 0 for n in nodes])
            goals = nodes[uncovered_idxs]

            # Initialize costs
            w_dist = np.full(len(goals), np.inf)
            w_turn = np.full(len(goals), np.inf)
            w_topo = np.full(len(goals), np.inf)
            w_cove = np.full(len(goals), np.inf)

            for idx, node in zip(range(len(goals)), goals):
                subgoals = np.array(
                    self.path_planning.createPath(g_robot_pose, node,
                                                  resolution))

                # If path is empty then skip to the next iteration
                if subgoals.size == 0:
                    continue

                # subgoals already starts at the robot pose, so np.diff directly gives the motion vectors along the path
                subgoal_vectors = np.diff(subgoals, axis=0)

                # Distance cost
                dists = [math.hypot(v[0], v[1]) for v in subgoal_vectors]
                w_dist[idx] = np.sum(dists)

                # Turning cost
                w_turn[idx] = 0
                theta = robot_pose['th']

                for v in subgoal_vectors[:-1]:
                    st_x, st_y = v
                    theta2 = math.atan2(st_y - g_robot_pose[1],
                                        st_x - g_robot_pose[0])
                    w_turn[idx] += abs(theta2 - theta)
                    theta = theta2

                # Coverage cost: sample the coverage field along the path
                # points themselves (not along the difference vectors)
                w_cove[idx] = sum(coverage[int(x)][int(y)] for x, y in subgoals)

                # Topology cost
                w_topo[idx] = brush[node[0], node[1]]

            # Normalize weights
            w_dist = (w_dist - min(w_dist)) / (max(w_dist) - min(w_dist))
            w_turn = (w_turn - min(w_turn)) / (max(w_turn) - min(w_turn))
            w_cove = (w_cove - min(w_cove)) / (max(w_cove) - min(w_cove))
            w_topo = (w_topo - min(w_topo)) / (max(w_topo) - min(w_topo))
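            # Note (not in the original code): if every candidate shares the same
            # value for one of these weights, max(w) == min(w) and the normalisation
            # above divides by zero. A guarded variant could be, e.g.:
            #   span = np.ptp(w_dist)
            #   w_dist = (w_dist - np.min(w_dist)) / span if span > 0 else np.zeros_like(w_dist)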

            # Cost weights
            c_topo = 4
            c_dist = 3
            c_cove = 2
            c_turn = 1

            # Calculate combination cost (final)
            cost = c_dist * w_dist + c_turn * w_turn + c_cove * w_cove + c_topo * w_topo
            min_dist, min_idx = min(zip(cost, range(len(cost))))
            target = goals[min_idx]  # cost is indexed over the uncovered goals, not all nodes

            # Check if the next target exists and, if so, whether it is close to the previous one
            if target is None:
                target = self.selectRandomTarget(ogm, coverage, brush,
                                                 ogm_limits)
            else:
                target_dist = math.hypot(target[0] - self.previous_target[0],
                                         target[1] - self.previous_target[1])
                if target_dist <= 5:
                    target = self.selectRandomTarget(ogm, coverage, brush,
                                                     ogm_limits)
        ########################################################################
        self.previous_target = target
        return target

    def selectRandomTarget(self, ogm, coverage, brushogm, ogm_limits):
        # The next target in pixels
        tinit = time.time()
        next_target = [0, 0]
        found = False
        while not found:
            x_rand = random.randint(0, ogm.shape[0] - 1)
            y_rand = random.randint(0, ogm.shape[1] - 1)
            if ogm[x_rand][y_rand] < 50 and coverage[x_rand][y_rand] < 50 and \
                brushogm[x_rand][y_rand] > 5:
                next_target = [x_rand, y_rand]
                found = True
        Print.art_print("Select random target time: " + str(time.time() - tinit), \
            Print.ORANGE)
        return next_target
class Navigation:

    # Constructor
    def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.select_another_target = 0
        self.inner_target_exists = False
        
        self.TimeOut = False
        self.Reset = False
        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        self.count_limit = 230 # 23 sec

        self.counter_to_next_sub = self.count_limit

        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.1), self.checkTarget)
        
        # Read the target function
        self.target_selector = rospy.get_param("target_selector")
        print "The selected target function is " + self.target_selector
        self.target_selection = TargetSelection(self.target_selector)

        # ROS Publisher for the path
        self.path_publisher = \
            rospy.Publisher(rospy.get_param('path_pub_topic'), \
            Path, queue_size = 10)
        
        # ROS Publisher for the subtargets
        self.subtargets_publisher = \
            rospy.Publisher(rospy.get_param('subgoals_pub_topic'),\
            MarkerArray, queue_size = 10)
        
        # ROS Publisher for the current target
        self.current_target_publisher = \
            rospy.Publisher(rospy.get_param('curr_target_pub_topic'),\
            Marker, queue_size = 10)
        
    def checkTarget(self, event):
        # Check if we have a target or if the robot just wanders
        if self.inner_target_exists == False or self.move_with_target == False or\
                self.next_subtarget == len(self.subtargets):
          return

        self.counter_to_next_sub -= 1
        
        if self.counter_to_next_sub == 0:
          Print.art_print('\n~~~~ Time reset ~~~~',Print.RED) 
          self.inner_target_exists = False
          self.target_exists = False
          if self.Reset == False:
            self.TimeOut = True
          else:
            self.Reset = False
          return
        # Get the robot pose in pixels
        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]
        
        
        # remove visited sub targets
        self.subtargets = self.subtargets[self.next_subtarget:]
        
        # set the next sub target as the first one that should be visited
        self.next_subtarget = 0
        
        dis_x = [rx - target[0] for target in self.subtargets]
        dis_y = [ry - target[1] for target in self.subtargets]
        
        # for i in range(0, len(dis_x)):
        #     print " x dis " + str(dis_x[i])
        #     print " y dis " + str(dis_y[i])
        
        # target distances
        dist_t = [math.hypot(dis[0],dis[1]) for dis in zip(dis_x,dis_y) ]
        
        # find the closest target
        min_target = 0
        min_dis = dist_t[0]
        for i in range(0, len(dist_t)):
            if min_dis > dist_t[i]:
                min_dis = dist_t[i]
                min_target = i

        ######################### NOTE: QUESTION  ##############################
        # What if a later subtarget or the end has been reached before the 
        # next subtarget? Alter the code accordingly.
        # print " min target " + str(min_target) +  " min dis " + str(min_dis)
        # Check if the distance to the closest subtarget is less than 5 px (25 cm)
        if min_dis < 5:
          self.next_subtarget = min_target + 1
          self.counter_to_next_sub = self.count_limit
          # print "REACHED"
          # print " next target " + str(self.next_subtarget) + " len " +str(len(self.subtargets))
          
          # Check if the final subtarget has been approached
          if self.next_subtarget == len(self.subtargets):
            self.target_exists = False
        ########################################################################
        
        # Publish the current target
        if self.next_subtarget == len(self.subtargets):
            return

        subtarget = [\
            self.subtargets[self.next_subtarget][0]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['x'],
            self.subtargets[self.next_subtarget][1]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['y']\
            ]

        RvizHandler.printMarker(\
            [subtarget],\
            1, # Type: Arrow
            0, # Action: Add
            "map", # Frame
            "art_next_subtarget", # Namespace
            [0, 0, 0.8, 0.8], # Color RGBA
            0.2 # Scale
        )

    # Function that selects the next target, produces the path and updates
    # the coverage field. This is called from the speeds assignment code, since
    # it contains timer callbacks
    def selectTarget(self):
        # IMPORTANT: The robot must be stopped if you call this function until
        # it is over
        # Check if we have a map
        
        
        while self.robot_perception.have_map == False:
          Print.art_print("Navigation: No map yet", Print.RED)
          return

        print "\nClearing all markers"
        RvizHandler.printMarker(\
            [[0, 0]],\
            1, # Type: Arrow
            3, # Action: delete all
            "map", # Frame
            "null", # Namespace
            [0,0,0,0], # Color RGBA
            0.1 # Scale
        )

        print '\n\n----------------------------------------------------------'
        print "Navigation: Producing new target"
        # We are good to continue the exploration
        # Make this true in order not to call it again from the speeds assignment
        self.target_exists = True            
        # Gets copies of the map and coverage
        local_ogm = self.robot_perception.getMap()
        local_ros_ogm = self.robot_perception.getRosMap()
        local_coverage = self.robot_perception.getCoverage()
        print "Got the map and Coverage"
        self.path_planning.setMap(local_ros_ogm) 

        # Once the target has been found, find the path to it
        # Get the global robot pose
        g_robot_pose = self.robot_perception.getGlobalCoordinates(\
              [self.robot_perception.robot_pose['x_px'],\
              self.robot_perception.robot_pose['y_px']])
        

        # Call the target selection function to select the next best goal
        # Choose target function
        
        print "TimeOut Flag " +str(self.TimeOut)
        self.path = []
        force_random = False
        
        while len(self.path) == 0:
          start = time.time()
          target = self.target_selection.selectTarget(\
                    local_ogm,\
                    local_coverage,\
                    self.robot_perception.robot_pose,
                    self.robot_perception.origin,
                    self.robot_perception.resolution,
                    force_random)
          
          self.path = self.path_planning.createPath(\
              g_robot_pose,\
              target,
              self.robot_perception.resolution)
          print "Navigation: Path for target found with " + str(len(self.path)) +\
              " points"
          if len(self.path) == 0:
            Print.art_print(\
                "Path planning failed. Fallback to random target selection", \
                Print.RED)
            force_random = True
        # Reverse the path to start from the robot
        self.path = self.path[::-1]
        
        tolerance = 0.000001
        weight_data = 0.5
        weight_smooth = 0.1
        new = deepcopy(self.path)
        dims = 2
        change = tolerance 
        
        # Path smoothing using gradient descent
        if len(self.path) > 3:
            print "PATH SMOOTHING"
            while change >= tolerance:
                change = 0.0
                for i in range(1, len(new) - 1):
                    for j in range(dims):
                        x_i = self.path[i][j]
                        y_i, y_prev, y_next = new[i][j], new[i - 1][j], new[i + 1][j]
                        y_i_saved = y_i
                        y_i += weight_data * (x_i - y_i) + weight_smooth * (y_next + y_prev - (2 * y_i))
                        new[i][j] = y_i
                        change += abs(y_i - y_i_saved)
            self.path = new

        # Break the path into subgoals every pixel (1 px = 5 cm, since 1 m = 20 px)
        step = 1
        n_subgoals = int(len(self.path) / step)
        self.subtargets = []
        for i in range(0, n_subgoals):
          self.subtargets.append(self.path[i * step])
        self.subtargets.append(self.path[-1])
        self.next_subtarget = 0
        print "The path produced " + str(len(self.subtargets)) + " subgoals"
        
        ######################### NOTE: QUESTION  ##############################
        # The path is produced by an A* algorithm. This means that it is
        # optimal in length but 1) not smooth and 2) length optimality
        # may not be desired for coverage-based exploration
        ########################################################################

        self.counter_to_next_sub = self.count_limit

        # Publish the path for visualization purposes
        ros_path = Path()
        ros_path.header.frame_id = "map"
        for p in self.path:
          ps = PoseStamped()
          ps.header.frame_id = "map"
          ######################### NOTE: QUESTION  ##############################
          # Fill the ps.pose.position values to show the path in RViz
          # You must understand what self.robot_perception.resolution
          # and self.robot_perception.origin are.
          
          # p[0] * resolution gives the point's x-coordinate in the map frame (meters):
          # the path points are pixel coordinates, so multiplying by the resolution
          # (meters/pixel) converts them to meters, and adding the map origin expresses
          # them in the map frame, starting from the robot's position.
          ps.pose.position.x = p[0] * self.robot_perception.resolution + self.robot_perception.origin['x']
          ps.pose.position.y = p[1] * self.robot_perception.resolution + self.robot_perception.origin['y']
          
        #  print " x val " + str(ps.pose.position.x)
        #  print " y  val " + str(ps.pose.position.y )
        #  print " origin x " + str(self.robot_perception.origin['x'])
        #  print " origin y " + str(self.robot_perception.origin['y'])
        #  print " p0 " +str(p[0])
        #  print " p1 " +str(p[1])
          ########################################################################
          ros_path.poses.append(ps)
        self.path_publisher.publish(ros_path)

        # Publish the subtargets for visualization purposes
        subtargets_mark = []
        for s in self.subtargets:
          subt = [
            s[0] * self.robot_perception.resolution + \
                    self.robot_perception.origin['x'],
            s[1] * self.robot_perception.resolution + \
                    self.robot_perception.origin['y']
            ]
          subtargets_mark.append(subt)

        RvizHandler.printMarker(\
            subtargets_mark,\
            2, # Type: Sphere
            0, # Action: Add
            "map", # Frame
            "art_subtargets", # Namespace
            [0, 0.8, 0.0, 0.8], # Color RGBA
            0.2 # Scale
        )

        self.inner_target_exists = True

    def velocitiesToNextSubtarget(self):
        
        [linear, angular] = [0, 0]
        
        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]
        theta = self.robot_perception.robot_pose['th']
        ######################### NOTE: QUESTION  ##############################
        # The velocities of the robot regarding the next subtarget should be 
        # computed. The known parameters are the robot pose [x,y,th] from 
        # robot_perception and the next_subtarget [x,y]. From these, you can 
        # compute the robot velocities for the vehicle to approach the target.
        # Hint: Trigonometry is required
        # If there is another subtarget, acquire its position
        if self.subtargets and self.next_subtarget <= len(self.subtargets) - 1:
            st_x = self.subtargets[self.next_subtarget][0]
            st_y = self.subtargets[self.next_subtarget][1]

            # align robot with the target
            theta_target = np.arctan2(st_y - ry, st_x - rx)
            delta_theta = (theta_target - theta)
            # find omega
            if delta_theta > np.pi:
                omega = (delta_theta - 2*np.pi)/np.pi
            elif delta_theta < -np.pi:
                omega = (delta_theta + 2*np.pi)/np.pi
            else:
                omega = delta_theta/np.pi
            # maximum magnitude for both velocities is 0.3
            linear = 0.3 * ((1 - np.abs(omega)) ** 5)
            angular = 0.3 * np.sign(omega)

        ######################### NOTE: QUESTION  ##############################

        return [linear, angular]
class Navigation:

    # Constructor
    def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.select_another_target = 0
        self.inner_target_exists = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        self.count_limit = 200 # 20 sec

        self.counter_to_next_sub = self.count_limit

        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)

        # Read the target function
        self.target_selector = rospy.get_param("target_selector")
        print "The selected target function is " + self.target_selector
        self.target_selection = TargetSelection(self.target_selector)

        # ROS Publisher for the path
        self.path_publisher = \
            rospy.Publisher(rospy.get_param('path_pub_topic'), \
            Path, queue_size = 10)

        # ROS Publisher for the subtargets
        self.subtargets_publisher = \
            rospy.Publisher(rospy.get_param('subgoals_pub_topic'),\
            MarkerArray, queue_size = 10)

        # ROS Publisher for the current target
        self.current_target_publisher = \
            rospy.Publisher(rospy.get_param('curr_target_pub_topic'),\
            Marker, queue_size = 10)

    def checkTarget(self, event):
        # Check if we have a target or if the robot just wanders
        if self.inner_target_exists == False or self.move_with_target == False or\
                self.next_subtarget == len(self.subtargets):
          return

        self.counter_to_next_sub -= 1

        if self.counter_to_next_sub == 0:
          Print.art_print('\n~~~~ Time reset ~~~~',Print.RED)
          self.inner_target_exists = False
          self.target_exists = False
          return

        # Get the robot pose in pixels
        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]

        # Find the distance between the robot pose and the next subtarget
        dist = math.hypot(\
            rx - self.subtargets[self.next_subtarget][0], \
            ry - self.subtargets[self.next_subtarget][1])

        ######################### NOTE: QUESTION  ##############################
        # What if a later subtarget or the end has been reached before the
        # next subtarget? Alter the code accordingly.
        # Check if the distance is less than 5 px (25 cm)
        if dist < 5:
          self.next_subtarget += 1
          self.counter_to_next_sub = self.count_limit
          # Check if the final subtarget has been approached
          if self.next_subtarget == len(self.subtargets):
            self.target_exists = False
        else:
          # Find the next subtarget with the minimum distance
          min_dist = dist # minimum distance from a next subtarget
          jump_to_next_subtarget = self.next_subtarget # the number of the next subtarget to jump to
          next_check = self.next_subtarget + 1 # next subtarget to be checked
          for i in range(self.next_subtarget+1, len(self.subtargets)):
              temp_dist = math.hypot(\
                  rx - self.subtargets[next_check][0], \
                  ry - self.subtargets[next_check][1])
              if temp_dist < min_dist:
                  min_dist = temp_dist
                  jump_to_next_subtarget = i
              next_check += 1
          self.next_subtarget = jump_to_next_subtarget
          self.counter_to_next_sub = self.count_limit
        ########################################################################

        # Publish the current target
        if self.next_subtarget == len(self.subtargets):
            return

        subtarget = [\
            self.subtargets[self.next_subtarget][0]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['x'],
            self.subtargets[self.next_subtarget][1]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['y']\
            ]

        RvizHandler.printMarker(\
            [subtarget],\
            1, # Type: Arrow
            0, # Action: Add
            "map", # Frame
            "art_next_subtarget", # Namespace
            [0, 0, 0.8, 0.8], # Color RGBA
            0.2 # Scale
        )

    # Function that selects the next target, produces the path and updates
    # the coverage field. This is called from the speeds assignment code, since
    # it contains timer callbacks
    def selectTarget(self):
        # IMPORTANT: The robot must be stopped if you call this function until
        # it is over
        # Check if we have a map
        while self.robot_perception.have_map == False:
          Print.art_print("Navigation: No map yet", Print.RED)
          return

        print "\nClearing all markers"
        RvizHandler.printMarker(\
            [[0, 0]],\
            1, # Type: Arrow
            3, # Action: delete all
            "map", # Frame
            "null", # Namespace
            [0,0,0,0], # Color RGBA
            0.1 # Scale
        )

        print '\n\n----------------------------------------------------------'
        print "Navigation: Producing new target"
        # We are good to continue the exploration
        # Make this true in order not to call it again from the speeds assignment
        self.target_exists = True

        # Gets copies of the map and coverage
        local_ogm = self.robot_perception.getMap()
        local_ros_ogm = self.robot_perception.getRosMap()
        local_coverage = self.robot_perception.getCoverage()
        print "Got the map and Coverage"
        self.path_planning.setMap(local_ros_ogm)

        # Once the target has been found, find the path to it
        # Get the global robot pose
        g_robot_pose = self.robot_perception.getGlobalCoordinates(\
              [self.robot_perception.robot_pose['x_px'],\
              self.robot_perception.robot_pose['y_px']])

        # Call the target selection function to select the next best goal
        # Choose target function
        self.path = []
        force_random = False
        while len(self.path) == 0:
          start = time.time()
          target = self.target_selection.selectTarget(\
                    local_ogm,\
                    local_coverage,\
                    self.robot_perception.robot_pose,
                    self.robot_perception.origin,
                    self.robot_perception.resolution,
                    force_random)

          self.path = self.path_planning.createPath(\
              g_robot_pose,\
              target,
              self.robot_perception.resolution)
          print "Navigation: Path for target found with " + str(len(self.path)) +\
              " points"
          if len(self.path) == 0:
            Print.art_print(\
                "Path planning failed. Fallback to random target selection", \
                Print.RED)
            force_random = True

        # Reverse the path to start from the robot
        self.path = self.path[::-1]

        #########################################
        # Extra challenge #1
        # A. Smooth path planner
        if len(self.path) > 3:
            x = np.array(self.path)
            y = np.copy(x)
            a = 0.5
            b = 0.1

            # FORMULA: y_i = y_i + a * (x_i - y_i) + b * (y_{i+1} - 2 * y_i + y_{i-1})

            epsilon = np.sum(np.abs(a * (x[1:-1, :] - y[1:-1, :]) + b * (y[2:, :] - 2*y[1:-1, :] + y[:-2, :])))
            while epsilon > 1e-3:
                y[1:-1, :] += a * (x[1:-1, :] - y[1:-1, :]) + b * (y[2:, :] - 2*y[1:-1, :] + y[:-2, :])
                epsilon = np.sum(np.abs(a * (x[1:-1, :] - y[1:-1, :]) + b * (y[2:, :] - 2*y[1:-1, :] + y[:-2, :])))

            # Copy the smoother path
            self.path = y.tolist()

        # Break the path into subgoals every pixel (1 px = 5 cm, since 1 m = 20 px)
        step = 1
        n_subgoals = int(len(self.path) / step)
        self.subtargets = []
        for i in range(0, n_subgoals):
          self.subtargets.append(self.path[i * step])
        self.subtargets.append(self.path[-1])
        self.next_subtarget = 0
        print "The path produced " + str(len(self.subtargets)) + " subgoals"

        ######################### NOTE: QUESTION  ##############################
        # The path is produced by an A* algorithm. This means that it is
        # optimal in length but 1) not smooth and 2) length optimality
        # may not be desired for coverage-based exploration
        ########################################################################


        self.counter_to_next_sub = self.count_limit

        # Publish the path for visualization purposes
        ros_path = Path()
        ros_path.header.frame_id = "map"
        for p in self.path:
          ps = PoseStamped()
          ps.header.frame_id = "map"
          ps.pose.position.x = 0
          ps.pose.position.y = 0
          ######################### NOTE: QUESTION  ##############################
          # Fill the ps.pose.position values to show the path in RViz
          # You must understand what self.robot_perception.resolution
          # and self.robot_perception.origin are.
          ps.pose.position.x = p[0] * self.robot_perception.resolution + self.robot_perception.origin['x']
          ps.pose.position.y = p[1] * self.robot_perception.resolution + self.robot_perception.origin['y']
          ########################################################################
          ros_path.poses.append(ps)
        self.path_publisher.publish(ros_path)

        # Publish the subtargets for visualization purposes
        subtargets_mark = []
        for s in self.subtargets:
          subt = [
            s[0] * self.robot_perception.resolution + \
                    self.robot_perception.origin['x'],
            s[1] * self.robot_perception.resolution + \
                    self.robot_perception.origin['y']
            ]
          subtargets_mark.append(subt)

        RvizHandler.printMarker(\
            subtargets_mark,\
            2, # Type: Sphere
            0, # Action: Add
            "map", # Frame
            "art_subtargets", # Namespace
            [0, 0.8, 0.0, 0.8], # Color RGBA
            0.2 # Scale
        )

        self.inner_target_exists = True

    def velocitiesToNextSubtarget(self):

        [linear, angular] = [0, 0]

        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]
        theta = self.robot_perception.robot_pose['th'] # robot orientation
        ######################### NOTE: QUESTION  ##############################
        # The velocities of the robot regarding the next subtarget should be
        # computed. The known parameters are the robot pose [x,y,th] from
        # robot_perception and the next_subtarget [x,y]. From these, you can
        # compute the robot velocities for the vehicle to approach the target.
        # Hint: Trigonometry is required

        if self.subtargets and self.next_subtarget <= len(self.subtargets) - 1:
            st_x = self.subtargets[self.next_subtarget][0]
            st_y = self.subtargets[self.next_subtarget][1]

            PI = math.pi
            MAX_SPEED = 0.3

            st_x_rel = st_x - rx
            st_y_rel = st_y - ry
            st_th = math.atan2(st_y_rel,  st_x_rel)

            delta = st_th - theta

            if delta >= 0 and delta < PI:
                angular = delta / PI
            elif delta > 0 and delta >= PI:
                angular = (delta - 2 * PI) / PI
            elif delta <= 0 and delta > -PI:
                angular = delta / PI
            elif delta < 0 and delta < -PI:
                angular = (delta + 2 * PI) / PI

            angular *= 5    # increase turning sensitivity
            angular *= MAX_SPEED
            if angular > 0.3:
                angular = 0.3
            elif angular < -0.3:
                angular = -0.3

            linear = ((1 - abs(angular)) ** 10 ) * MAX_SPEED
        ######################### NOTE: QUESTION  ##############################


        return [linear, angular]
Example #21
0
class Navigation:

    # Constructor
    def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.select_another_target = 0
        self.inner_target_exists = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        self.count_limit = 200  # 20 sec

        self.counter_to_next_sub = self.count_limit

        self.laser_aggregation = LaserDataAggregator()
        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)

        # Read the target function
        self.target_selector = rospy.get_param("target_selector")
        print "The selected target function is" + self.target_selector
        self.target_selection = TargetSelection(self.target_selector)

        # ROS Publisher for the path
        self.path_publisher = \
            rospy.Publisher(rospy.get_param('path_pub_topic'), \
            Path, queue_size = 10)

        # ROS Publisher for the subtargets
        self.subtargets_publisher = \
            rospy.Publisher(rospy.get_param('subgoals_pub_topic'),\
            MarkerArray, queue_size = 10)

        # ROS Publisher for the current target
        self.current_target_publisher = \
            rospy.Publisher(rospy.get_param('curr_target_pub_topic'),\
            Marker, queue_size = 10)

    def checkTarget(self, event):
        # Check if we have a target or if the robot just wanders
        if self.inner_target_exists == False or self.move_with_target == False or\
                self.next_subtarget == len(self.subtargets):
            return

        self.counter_to_next_sub -= 1

        if self.counter_to_next_sub == 0:
            Print.art_print('\n~~~~ Time reset ~~~~', Print.RED)
            self.inner_target_exists = False
            self.target_exists = False
            return

        # Get the robot pose in pixels
        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]

        # Find the distance between the robot pose and the next subtarget
        dist = math.hypot(\
            rx - self.subtargets[self.next_subtarget][0], \
            ry - self.subtargets[self.next_subtarget][1])

        ######################### NOTE: QUESTION  ##############################
        # What if a later subtarget or the end has been reached before the
        # next subtarget? Alter the code accordingly.
        # The distance check (dist < 5 px) is applied below, after grouping collinear subtargets
        count = 1
        for i in range(
                self.next_subtarget,
                len(self.subtargets) - 2
        ):  # compare the slopes of consecutive segments to check whether several subtargets lie on the same line
            try:  # use try/except to avoid a ZeroDivisionError
                slope1 = (self.subtargets[i + 1][1] -
                          self.subtargets[i][1]) / (self.subtargets[i + 1][0] -
                                                    self.subtargets[i][0])
            except ZeroDivisionError:
                slope1 = float('Inf')
            try:
                slope2 = (self.subtargets[i + 2][1] - self.subtargets[i + 1][1]
                          ) / (self.subtargets[i + 2][0] -
                               self.subtargets[i + 1][0])
            except ZeroDivisionError:
                slope2 = float('Inf')

            if abs(slope1 - slope2) < 0.3 or (
                    slope1 == float('inf') and slope2 == float('inf')
            ):  # if the difference in slopes is less than 0.3, assume the points lie on the same line
                count += 1
            else:
                break

        scan = self.laser_aggregation.laser_scan  # take scan measurements
        count_scan = 0
        for i in range(
                250, 450
        ):  # count laser rays with a range greater than 2; the index window [250, 450) was chosen experimentally
            if scan[i] > 2:
                count_scan += 1

        if dist < 5:
            if count != 1:
                self.next_subtarget += count  # if several subtargets lie on the same line, jump directly to the last one on that line
                self.counter_to_next_sub = self.count_limit + count * 100  # increase time limit proportionally to the count in order to avoid time reset
            elif count_scan > 10:  # if enough forward rays are longer than 2, assume no nearby obstacle and
                self.next_subtarget += 2  # skip ahead by 2 subtargets; this accelerates progress when no collinear subtargets were found
                self.counter_to_next_sub = self.count_limit + 300  # increase time limit in order to avoid time reset
                if self.next_subtarget >= len(
                        self.subtargets
                ):  # check if the final subtarget has been approached
                    self.next_subtarget = len(self.subtargets)
            else:
                self.next_subtarget += 1  # otherwise (an obstacle may be nearby) advance by a single subtarget
                self.counter_to_next_sub = self.count_limit + 300  # increase time limit in order to avoid time reset
                if self.next_subtarget >= len(
                        self.subtargets
                ):  # check if the final subtarget has been approached
                    self.next_subtarget = len(self.subtargets)

            if self.next_subtarget == len(self.subtargets):
                self.target_exists = False
        # Publish the current target
        if self.next_subtarget == len(self.subtargets):
            return

        subtarget = [\
            self.subtargets[self.next_subtarget][0]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['x'],
            self.subtargets[self.next_subtarget][1]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['y']\
            ]

        RvizHandler.printMarker(\
            [subtarget],\
            1, # Type: Arrow
            0, # Action: Add
            "map", # Frame
            "art_next_subtarget", # Namespace
            [0, 0, 0.8, 0.8], # Color RGBA
            0.2 # Scale
        )

    # Function that selects the next target, produces the path and updates
    # the coverage field. This is called from the speeds assignment code, since
    # it contains timer callbacks
    def selectTarget(self):
        # IMPORTANT: The robot must be stopped if you call this function until
        # it is over
        # Check if we have a map
        while self.robot_perception.have_map == False:
            Print.art_print("Navigation: No map yet", Print.RED)
            return

        print "\nClearing all markers"
        RvizHandler.printMarker(\
            [[0, 0]],\
            1, # Type: Arrow
            3, # Action: delete all
            "map", # Frame
            "null", # Namespace
            [0,0,0,0], # Color RGBA
            0.1 # Scale
        )

        print '\n\n----------------------------------------------------------'
        print "Navigation: Producing new target"
        # We are good to continue the exploration
        # Make this true in order not to call it again from the speeds assignment
        self.target_exists = True

        # Gets copies of the map and coverage
        local_ogm = self.robot_perception.getMap()
        local_ros_ogm = self.robot_perception.getRosMap()
        local_coverage = self.robot_perception.getCoverage()
        print "Got the map and Coverage"
        self.path_planning.setMap(local_ros_ogm)

        # Once the target has been found, find the path to it
        # Get the global robot pose
        g_robot_pose = self.robot_perception.getGlobalCoordinates(\
              [self.robot_perception.robot_pose['x_px'],\
              self.robot_perception.robot_pose['y_px']])

        # Call the target selection function to select the next best goal
        # Choose target function
        self.path = []
        force_random = False
        while len(self.path) == 0:
            start = time.time()
            target = self.target_selection.selectTarget(\
                      local_ogm,\
                      local_coverage,\
                      self.robot_perception.robot_pose,
                      self.robot_perception.origin,
                      self.robot_perception.resolution,
                      force_random)

            self.path = self.path_planning.createPath(\
                g_robot_pose,\
                target,
                self.robot_perception.resolution)
            print "Navigation: Path for target found with " + str(len(self.path)) +\
                " points"
            if len(self.path) == 0:
                Print.art_print(\
                    "Path planning failed. Fallback to random target selection", \
                    Print.RED)
                force_random = True

        # Reverse the path to start from the robot
        self.path = self.path[::-1]

        # Break the path into subgoals every 'step' pixels (1 m = 20 px)
        step = 1
        n_subgoals = (int)(len(self.path) / step)
        self.subtargets = []
        for i in range(0, n_subgoals):
            self.subtargets.append(self.path[i * step])
        self.subtargets.append(self.path[-1])
        self.next_subtarget = 0
        print "The path produced " + str(len(self.subtargets)) + " subgoals"

        ######################### NOTE: QUESTION  ##############################
        # The path is produced by an A* algorithm. This means that it is
        # optimal in length but 1) not smooth and 2) length optimality
        # may not be desired for coverage-based exploration
        ########################################################################

        self.counter_to_next_sub = self.count_limit

        # Publish the path for visualization purposes
        ros_path = Path()
        ros_path.header.frame_id = "map"
        for p in self.path:
            ps = PoseStamped()
            ps.header.frame_id = "map"
            ps.pose.position.x = 0
            ps.pose.position.y = 0
            ######################### NOTE: QUESTION  ##############################
            # Fill the ps.pose.position values to show the path in RViz
            # You must understand what self.robot_perception.resolution
            # and self.robot_perception.origin are.

            # p[0] and p[1] are pixel (grid-cell) coordinates, so we multiply them
            # by the map resolution and add the origin to get map-frame coordinates
            ps.pose.position.x = p[
                0] * self.robot_perception.resolution + self.robot_perception.origin[
                    'x']
            ps.pose.position.y = p[
                1] * self.robot_perception.resolution + self.robot_perception.origin[
                    'y']
            ########################################################################
            ros_path.poses.append(ps)
        self.path_publisher.publish(ros_path)

        # Publish the subtargets for visualization purposes
        subtargets_mark = []
        for s in self.subtargets:
            subt = [
              s[0] * self.robot_perception.resolution + \
                      self.robot_perception.origin['x'],
              s[1] * self.robot_perception.resolution + \
                      self.robot_perception.origin['y']
              ]
            subtargets_mark.append(subt)

        RvizHandler.printMarker(\
            subtargets_mark,\
            2, # Type: Sphere
            0, # Action: Add
            "map", # Frame
            "art_subtargets", # Namespace
            [0, 0.8, 0.0, 0.8], # Color RGBA
            0.2 # Scale
        )

        self.inner_target_exists = True

    def velocitiesToNextSubtarget(self):

        [linear, angular] = [0, 0]

        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]
        theta = self.robot_perception.robot_pose['th']
        ######################### NOTE: QUESTION  ##############################
        # The velocities of the robot regarding the next subtarget should be
        # computed. The known parameters are the robot pose [x,y,th] from
        # robot_perception and the next_subtarget [x,y]. From these, you can
        # compute the robot velocities for the vehicle to approach the target.
        # Hint: Trigonometry is required
        if self.subtargets and self.next_subtarget <= len(self.subtargets) - 1:
            st_x = self.subtargets[self.next_subtarget][0]
            st_y = self.subtargets[self.next_subtarget][1]

            ######################### NOTE: QUESTION  ##############################
            # theta is the robot's yaw; atan2(sin, cos) normalizes it to (-180, 180] degrees
            deg = math.degrees(math.atan2(math.sin(theta), math.cos(theta)))
            delta_theta = math.degrees(math.atan2(st_y - ry, st_x - rx)) - deg

            # Wrap the heading error to [-180, 180] degrees and scale it to [-1, 1]
            if delta_theta >= 180:
                angular = (delta_theta - 360) / 180
            elif delta_theta <= -180:
                angular = (delta_theta + 360) / 180
            else:
                angular = delta_theta / 180

            # The exponent was tuned experimentally so that the linear command
            # drops sharply while the robot is still turning (avoids overshoot)
            linear = (1 - abs(angular))**25
            linear = linear * 0.3  # scale by the maximum linear velocity

            # Clip the angular command to the maximum angular velocity (0.3).
            # Scaling it by 0.3 instead made the robot much slower, so the
            # command is clipped rather than scaled.
            if angular > 0.3:
                angular = 0.3
            elif angular < -0.3:
                angular = -0.3
        ######################### NOTE: QUESTION  ##############################

        return [linear, angular]
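# A minimal standalone sketch of the velocity law implemented above, kept here
# for reference. The function name and its default max_linear / max_angular / n
# arguments are hypothetical and not part of the original example; only the
# idea is taken from the code: wrap the heading error, saturate the angular
# command, and attenuate the linear command while the error is large.
import math


def velocities_to_point(rx, ry, theta, tx, ty,
                        max_linear=0.3, max_angular=0.3, n=25):
    """Return (linear, angular) commands steering pose (rx, ry, theta) towards
    the point (tx, ty). All angles are in radians."""
    # Heading error, wrapped to [-pi, pi] via atan2 of its sine and cosine
    error = math.atan2(ty - ry, tx - rx) - theta
    error = math.atan2(math.sin(error), math.cos(error))

    # Angular command proportional to the error, clipped to +/- max_angular
    angular = max(-max_angular, min(max_angular, max_angular * error / math.pi))

    # Linear command shrinks quickly while the robot is still turning
    linear = max_linear * (1.0 - abs(error) / math.pi) ** n
    return linear, angular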
class Navigation:
    # Constructor
    def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.select_another_target = 0
        self.inner_target_exists = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        self.count_limit = 200  # 20 sec

        self.counter_to_next_sub = self.count_limit

        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)

        # Read the target function
        self.target_selector = rospy.get_param("target_selector")
        print("The selected target function is " + self.target_selector)
        self.target_selection = TargetSelection(self.target_selector)

        # ROS Publisher for the path
        self.path_publisher = \
            rospy.Publisher(rospy.get_param('path_pub_topic'),
                            Path, queue_size=10)

        # ROS Publisher for the subtargets
        self.subtargets_publisher = \
            rospy.Publisher(rospy.get_param('subgoals_pub_topic'),
                            MarkerArray, queue_size=10)

        # ROS Publisher for the current target
        self.current_target_publisher = \
            rospy.Publisher(rospy.get_param('curr_target_pub_topic'),
                            Marker, queue_size=10)

    def checkTarget(self, event):
        # Check if we have a target or if the robot just wanders
        if self.inner_target_exists == False or self.move_with_target == False or \
                        self.next_subtarget == len(self.subtargets):
            return

        self.counter_to_next_sub -= 1

        if self.counter_to_next_sub == 0:
            Print.art_print('\n~~~~ Time reset ~~~~', Print.RED)
            self.inner_target_exists = False
            self.target_exists = False
            return

        # Get the robot pose in pixels
        [rx, ry] = [
            self.robot_perception.robot_pose['x_px'] -
            self.robot_perception.origin['x'] /
            self.robot_perception.resolution,
            self.robot_perception.robot_pose['y_px'] -
            self.robot_perception.origin['y'] /
            self.robot_perception.resolution
        ]

        # # Find the distance between the robot pose and the next subtarget
        # dist = math.hypot(
        #     rx - self.subtargets[self.next_subtarget][0],
        #     ry - self.subtargets[self.next_subtarget][1])

        ######################### NOTE: QUESTION  ##############################
        # What if a later subtarget or the end has been reached before the
        # next subtarget? Alter the code accordingly.
        # Check if distance is less than 7 px (14 cm)
        # if dist < 5:
        #     self.next_subtarget += 1
        #     self.counter_to_next_sub = self.count_limit
        #     # Check if the final subtarget has been approached
        #     if self.next_subtarget == len(self.subtargets):
        #         self.target_exists = False

        # find the distance between robot and ALL subtargets
        dists = (math.hypot(rx - self.subtargets[subtarget][0],
                            ry - self.subtargets[subtarget][1])
                 for subtarget, _ in enumerate(self.subtargets))

        # Check the distance of each subtarget from the robot and advance
        # next_subtarget past the farthest-along one that has already been
        # reached, so intermediate subtargets can be skipped
        for idx, dist in enumerate(dists):
            if dist < 5:
                self.next_subtarget = idx + 1
                self.counter_to_next_sub = self.count_limit
                # Check if the final subtarget has been approached
                if self.next_subtarget == len(self.subtargets):
                    self.target_exists = False
        ########################################################################

        # Publish the current target
        if self.next_subtarget == len(self.subtargets):
            return

        subtarget = [
            self.subtargets[self.next_subtarget][0] *
            self.robot_perception.resolution +
            self.robot_perception.origin['x'],
            self.subtargets[self.next_subtarget][1] *
            self.robot_perception.resolution +
            self.robot_perception.origin['y']
        ]

        RvizHandler.printMarker(
            [subtarget],
            1,  # Type: Arrow
            0,  # Action: Add
            "map",  # Frame
            "art_next_subtarget",  # Namespace
            [0, 0, 0.8, 0.8],  # Color RGBA
            0.2  # Scale
        )

    # Function that selects the next target, produces the path and updates
    # the coverage field. This is called from the speeds assignment code, since
    # it contains timer callbacks
    def selectTarget(self):
        # IMPORTANT: The robot must be stopped if you call this function until
        # it is over
        # Check if we have a map
        while not self.robot_perception.have_map:
            Print.art_print("Navigation: No map yet", Print.RED)
            return

        print("\nClearing all markers")
        RvizHandler.printMarker( \
            [[0, 0]], \
            1,  # Type: Arrow
            3,  # Action: delete all
            "map",  # Frame
            "null",  # Namespace
            [0, 0, 0, 0],  # Color RGBA
            0.1  # Scale
        )

        print('\n\n----------------------------------------------------------')
        print("Navigation: Producing new target")
        # We are good to continue the exploration
        # Make this true in order not to call it again from the speeds assignment
        self.target_exists = True

        # Gets copies of the map and coverage
        local_ogm = self.robot_perception.getMap()
        local_ros_ogm = self.robot_perception.getRosMap()
        local_coverage = self.robot_perception.getCoverage()
        print("Got the map and Coverage")
        self.path_planning.setMap(local_ros_ogm)

        # Once the target has been found, find the path to it
        # Get the global robot pose
        g_robot_pose = self.robot_perception.getGlobalCoordinates([
            self.robot_perception.robot_pose['x_px'],
            self.robot_perception.robot_pose['y_px']
        ])

        # Call the target selection function to select the next best goal
        # Choose target function
        self.path = []
        force_random = False
        while len(self.path) == 0:
            start = time.time()
            print(self.robot_perception.resolution)
            target = self.target_selection.selectTarget(
                local_ogm, local_coverage, self.robot_perception.robot_pose,
                self.robot_perception.origin, self.robot_perception.resolution,
                force_random)

            self.path = self.path_planning.createPath(
                g_robot_pose, target, self.robot_perception.resolution)
            print("Navigation: Path for target found with " + str(len(self.path)) + \
                  " points")
            if len(self.path) == 0:
                Print.art_print(
                    "Path planning failed. Fallback to random target selection",
                    Print.RED)
                force_random = True

        # Reverse the path to start from the robot
        self.path = self.path[::-1]

        # Break the path into subgoals every 'step' pixels (1 m = 20 px)
        step = 1
        n_subgoals = (int)(len(self.path) / step)
        self.subtargets = []
        for i in range(0, n_subgoals):
            self.subtargets.append(self.path[i * step])
        self.subtargets.append(self.path[-1])
        self.next_subtarget = 0
        print("The path produced " + str(len(self.subtargets)) + " subgoals")

        ######################### NOTE: QUESTION  ##############################
        # The path is produced by an A* algorithm. This means that it is
        # optimal in length but 1) not smooth and 2) length optimality
        # may not be desired for coverage-based exploration
        ########################################################################

        self.counter_to_next_sub = self.count_limit

        # Publish the path for visualization purposes
        ros_path = Path()
        ros_path.header.frame_id = "map"
        for p in self.path:
            ps = PoseStamped()
            ps.header.frame_id = "map"
            ps.pose.position.x = 0
            ps.pose.position.y = 0
            ######################### NOTE: QUESTION  ##############################
            # Fill the ps.pose.position values to show the path in RViz
            # You must understand what self.robot_perception.resolution
            # and self.robot_perception.origin are.

            ps.pose.position.x = p[
                0] * self.robot_perception.resolution + self.robot_perception.origin[
                    'x']
            ps.pose.position.y = p[
                1] * self.robot_perception.resolution + self.robot_perception.origin[
                    'y']

            ########################################################################
            ros_path.poses.append(ps)
        self.path_publisher.publish(ros_path)

        # Publish the subtargets for visualization purposes
        subtargets_mark = []
        for s in self.subtargets:
            subt = [
                s[0] * self.robot_perception.resolution + \
                self.robot_perception.origin['x'],
                s[1] * self.robot_perception.resolution + \
                self.robot_perception.origin['y']
            ]
            subtargets_mark.append(subt)

        RvizHandler.printMarker(
            subtargets_mark,
            2,  # Type: Sphere
            0,  # Action: Add
            "map",  # Frame
            "art_subtargets",  # Namespace
            [0, 0.8, 0.0, 0.8],  # Color RGBA
            0.2  # Scale
        )

        self.inner_target_exists = True

    def velocitiesToNextSubtarget(self):

        [linear, angular] = [0, 0]

        [rx, ry] = [
            self.robot_perception.robot_pose['x_px'] -
            self.robot_perception.origin['x'] /
            self.robot_perception.resolution,
            self.robot_perception.robot_pose['y_px'] -
            self.robot_perception.origin['y'] /
            self.robot_perception.resolution
        ]
        theta = self.robot_perception.robot_pose['th']

        ######################### NOTE: QUESTION  ##############################
        # The velocities of the robot regarding the next subtarget should be
        # computed. The known parameters are the robot pose [x,y,th] from
        # robot_perception and the next_subtarget [x,y]. From these, you can
        # compute the robot velocities for the vehicle to approach the target.
        # Hint: Trigonometry is required

        if self.subtargets and self.next_subtarget <= len(self.subtargets) - 1:
            st_x = self.subtargets[self.next_subtarget][0]
            st_y = self.subtargets[self.next_subtarget][1]

            # Find the distance between the robot and the next subtarget
            # dist = math.hypot(rx - st_x, ry - st_y)

            # phi is the angle between the robot and the next subtarget
            # we calculate that angle using atan2, which returns the angle
            # between a point [x,y] and the x'x axis, with origin on 0,0
            # So, we move the origin to the robots position by subtracting
            # its coordinates from a given point
            phi = math.atan2(st_y - ry, st_x - rx)

            # If phi is larger than theta by at least 0.1 rad but by less than
            # ~pi (3.2 rad), the subtarget lies to the left: turn left in place
            if 3.2 >= (phi - theta) >= 0.1:
                linear = 0
                angular = 1
            # If theta is larger than phi by the same margin, the subtarget
            # lies to the right: turn right in place
            elif 3.2 >= (theta - phi) >= 0.1:
                linear = 0
                angular = -1
            # Otherwise the robot is roughly aligned with the subtarget: move ahead
            else:
                linear = 1
                angular = 0

        ######################### NOTE: QUESTION  ##############################

        return [linear, angular]
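# A hedged sketch of the turn-or-drive rule above, not part of the original
# example. The raw differences phi - theta used there can misbehave when the
# two angles straddle the +/-pi discontinuity; the hypothetical helper below
# wraps the heading error first and then applies the same bang-bang decision
# with a dead-band.
import math


def bang_bang_command(rx, ry, theta, tx, ty, dead_band=0.1):
    """Return (linear, angular) as unit commands, to be scaled by the robot's
    maximum velocities."""
    phi = math.atan2(ty - ry, tx - rx)              # bearing to the subtarget
    error = math.atan2(math.sin(phi - theta),       # heading error in [-pi, pi]
                       math.cos(phi - theta))
    if error > dead_band:
        return 0, 1       # subtarget is to the left: turn left in place
    elif error < -dead_band:
        return 0, -1      # subtarget is to the right: turn right in place
    return 1, 0           # roughly aligned: move ahead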
Example #23
0
class Navigation:

    # Constructor
    def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.select_another_target = 0
        self.inner_target_exists = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        self.count_limit = 500  # 50 sec with the 0.1 s timer period

        self.counter_to_next_sub = self.count_limit

        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)

        # Read the target function
        self.target_selector = rospy.get_param("target_selector")
        print "The selected target function is " + self.target_selector
        self.target_selection = TargetSelection(self.target_selector)

        # ROS Publisher for the path
        self.path_publisher = rospy.Publisher(
            rospy.get_param('path_pub_topic'), Path, queue_size=10)

        # ROS Publisher for the subtargets
        self.subtargets_publisher = rospy.Publisher(
            rospy.get_param('subgoals_pub_topic'), MarkerArray, queue_size=10)

        # ROS Publisher for the current target
        self.current_target_publisher = rospy.Publisher(
            rospy.get_param('curr_target_pub_topic'), Marker, queue_size=10)

        self.previous_next_subtarget = None

    def checkTarget(self, event):
        """
        Check if target/sub-target reached.
        :param event:
        :return:
        """
        if self.previous_next_subtarget is None or self.previous_next_subtarget < self.next_subtarget:
            Print.art_print("next_subtarget = %d" % self.next_subtarget,
                            Print.ORANGE)
            self.previous_next_subtarget = self.next_subtarget

        # Check if we have a target or if the robot just wanders
        if self.inner_target_exists == False \
                or self.move_with_target == False \
                or self.next_subtarget == len(self.subtargets):
            return

        self.counter_to_next_sub -= 1

        if self.counter_to_next_sub == 0:
            Print.art_print('\n~~~~ Time reset ~~~~', Print.RED)
            self.inner_target_exists = False
            self.target_exists = False
            return

        # Get the robot pose in: pixel units & global (map's) coordinate system
        rp_l_px = [
            self.robot_perception.robot_pose['x_px'],
            self.robot_perception.robot_pose['y_px']
        ]
        # rp_g_px = self.robot_perception.toGlobalCoordinates(rp_l_px)
        [rx, ry] = [
            self.robot_perception.robot_pose['x_px'] -
            self.robot_perception.origin['x'] /
            self.robot_perception.resolution,
            self.robot_perception.robot_pose['y_px'] -
            self.robot_perception.origin['y'] /
            self.robot_perception.resolution
        ]
        rp_g_px = [rx, ry]

        # Find the distance between the robot pose and the next subtarget
        v_g_px = map(operator.sub, rp_g_px,
                     self.subtargets[self.next_subtarget])
        dist = math.hypot(v_g_px[0], v_g_px[1])

        ######################### NOTE: QUESTION  ##############################
        # What if a later subtarget or the end has been reached before the
        # next subtarget? Alter the code accordingly.
        # Check if distance is less than 7 px (14 cm)

        # if dist < 5:
        #     self.next_subtarget += 1
        #     self.counter_to_next_sub = self.count_limit
        #     # Check if the final subtarget has been approached
        #     if self.next_subtarget == len(self.subtargets):
        #         self.target_exists = False

        # Instead of checking only next sub-target check all remaining sub-targets (starting from end up to next
        # sub-target) and decide if robot is closer to another sub-target at later point in path
        for st_i in range(
                len(self.subtargets) - 1, self.next_subtarget - 1, -1):
            # @v_st_g_px: difference (in pixels, global map coordinates) between
            # the robot's position and the i-th sub-target's position
            v_st_g_px = map(operator.sub, rp_g_px, self.subtargets[st_i])
            if math.hypot(v_st_g_px[0], v_st_g_px[1]) < 5:
                # Reached @st_i: set the one after it as the robot's next sub-target
                self.next_subtarget = st_i + 1
                self.counter_to_next_sub = self.count_limit
                if self.next_subtarget == len(self.subtargets):
                    self.target_exists = False
                # Iterating from the end, the first hit is the farthest-along
                # reached sub-target, so stop here
                break

        ########################################################################

        # Publish the current target
        if self.next_subtarget == len(self.subtargets):
            return

        subtarget = [
            self.subtargets[self.next_subtarget][0] *
            self.robot_perception.resolution +
            self.robot_perception.origin['x'],
            self.subtargets[self.next_subtarget][1] *
            self.robot_perception.resolution +
            self.robot_perception.origin['y']
        ]

        RvizHandler.printMarker(
            [subtarget],
            1,  # Type: Arrow
            0,  # Action: Add
            "map",  # Frame
            "art_next_subtarget",  # Namespace
            [0, 0, 0.8, 0.8],  # Color RGBA
            0.2  # Scale
        )

    # Function that selects the next target, produces the path and updates
    # the coverage field. This is called from the speeds assignment code, since
    # it contains timer callbacks
    def selectTarget(self):
        # IMPORTANT: The robot must be stopped if you call this function until
        # it is over
        # Check if we have a map
        while not self.robot_perception.have_map:
            Print.art_print("Navigation: No map yet", Print.RED)
            return

        print "\nClearing all markers"
        RvizHandler.printMarker(
            [[0, 0]],
            1,  # Type: Arrow
            3,  # Action: delete all
            "map",  # Frame
            "null",  # Namespace
            [0, 0, 0, 0],  # Color RGBA
            0.1  # Scale
        )

        print '\n\n----------------------------------------------------------'
        print "Navigation: Producing new target"
        # We are good to continue the exploration
        # Make this true in order not to call it again from the speeds assignment
        self.target_exists = True

        # Gets copies of the map and coverage
        local_ogm = self.robot_perception.getMap()
        local_ros_ogm = self.robot_perception.getRosMap()
        local_coverage = self.robot_perception.getCoverage()
        print "Got the map and Coverage"
        self.path_planning.setMap(local_ros_ogm)

        # Once the target has been found, find the path to it
        # Get the global robot pose
        # Get robot pose in global (map's) coordinates
        rp_l_px = [
            self.robot_perception.robot_pose['x_px'],
            self.robot_perception.robot_pose['y_px']
        ]
        rp_g_px = self.robot_perception.toGlobalCoordinates(
            rp_l_px, with_resolution=True)
        g_robot_pose = rp_g_px

        # Call the target selection function to select the next best goal
        # Choose target function
        self.path = []
        force_random = False
        while len(self.path) == 0:
            start = time.time()
            target = self.target_selection.selectTarget(
                local_ogm, local_coverage, self.robot_perception.robot_pose,
                self.robot_perception.origin, self.robot_perception.resolution,
                force_random)

            self.path = self.path_planning.createPath(
                g_robot_pose, target, self.robot_perception.resolution)
            print "Navigation: Path for target found with " + str(len(self.path)) + \
                  " points"
            if len(self.path) == 0:
                Print.art_print(
                    "Path planning failed. Fallback to random target selection",
                    Print.RED)
                force_random = True

        # Reverse the path to start from the robot
        self.path = self.path[::-1]

        # Break the path into sub-goals every 'step' pixels (1 m = 20 px)
        step = 1
        n_subgoals = int(len(self.path) / step)
        self.subtargets = []
        for i in range(0, n_subgoals):
            self.subtargets.append(self.path[i * step])
        self.subtargets.append(self.path[-1])
        self.next_subtarget = 0
        print "The path produced " + str(len(self.subtargets)) + " subgoals"

        ######################### NOTE: QUESTION  ##############################
        # The path is produced by an A* algorithm. This means that it is
        # optimal in length but 1) not smooth and 2) length optimality
        # may not be desired for coverage-based exploration
        ########################################################################

        self.counter_to_next_sub = self.count_limit

        # Publish the path for visualization purposes
        ros_path = Path()
        ros_path.header.frame_id = "map"
        for p in self.path:
            ps = PoseStamped()
            ps.header.frame_id = "map"
            ######################### NOTE: QUESTION  ##############################
            # Fill the ps.pose.position values to show the path in RViz
            # You must understand what self.robot_perception.resolution
            # and self.robot_perception.origin are.

            # Convert p from pixel-units to meter
            p_m = self.robot_perception.toMeterUnits(p, False)

            # Convert p_m from global (map's) coordinates to relative (image's) coordinates (using resolution = 1)
            p_m_r = self.robot_perception.toRelativeCoordinates(p_m, False)

            # Fill in $ps object
            ps.pose.position.x = p_m_r[0]
            ps.pose.position.y = p_m_r[1]
            ########################################################################
            ros_path.poses.append(ps)
        self.path_publisher.publish(ros_path)

        # Publish the subtargets for visualization purposes
        subtargets_mark = []
        for s in self.subtargets:
            st_m = self.robot_perception.toMeterUnits(s, False)
            st_m_r = self.robot_perception.toRelativeCoordinates(st_m, False)
            subtargets_mark.append(st_m_r)

        RvizHandler.printMarker(
            subtargets_mark,
            2,  # Type: Sphere
            0,  # Action: Add
            "map",  # Frame
            "art_subtargets",  # Namespace
            [0, 0.8, 0.0, 0.8],  # Color RGBA
            0.2  # Scale
        )

        self.inner_target_exists = True

    def velocitiesToNextSubtarget(self):

        [linear, angular] = [0, 0]

        # Get global robot coordinates
        rp_l_px = [
            self.robot_perception.robot_pose['x_px'],
            self.robot_perception.robot_pose['y_px']
        ]
        rp_g_px = self.robot_perception.toGlobalCoordinates(rp_l_px)
        theta_r = self.robot_perception.robot_pose['th']
        theta_r_deg = math.degrees(theta_r)
        if theta_r_deg < 0:
            theta_r_deg += 360
        ######################### NOTE: QUESTION  ##############################
        # The velocities of the robot regarding the next subtarget should be
        # computed. The known parameters are the robot pose [x,y,th] from
        # robot_perception and the next_subtarget [x,y]. From these, you can
        # compute the robot velocities for the vehicle to approach the target.
        # Hint: Trigonometry is required

        if self.subtargets and self.next_subtarget <= len(self.subtargets) - 1:
            # st: in pixel units & on global (map's) coordinate system
            st_g_px = self.subtargets[self.next_subtarget]

            # Both, st and rp are in the same coordinate system (global - map's) and in the same units (pixels)
            # So, the vector difference of these two vectors (st - rp) yields the vector that joins these two points
            v_g_px = map(operator.sub, st_g_px, rp_g_px)

            # Robot's current orientations forms $theta_r angle from global coordinate system's x-axis, whereas the
            # vector calculated above forms $theta_rg angle. The latter is defined next:
            theta_rg = math.atan2(v_g_px[1], v_g_px[0])
            # FIX: use degrees to correctly address negative angles
            theta_rg_deg = math.degrees(theta_rg)
            if theta_rg_deg < 0:
                theta_rg_deg += 360

            # Find angle difference, $delta_theta
            delta_theta_deg = theta_rg_deg - theta_r_deg

            # Calculate new angular velocity based on delta_theta
            # (slide #93 - 9th presentation, IRS Course ECE AUTH, [email protected])
            # Wrap the angle difference to [-180, 180] degrees and scale to [-1, 1]
            if delta_theta_deg >= 180:
                angular = (delta_theta_deg - 360) / 180
            elif delta_theta_deg <= -180:
                angular = (delta_theta_deg + 360) / 180
            else:
                angular = delta_theta_deg / 180

            # Calculate new linear velocity based on new angular velocity and relation in slide #97 (for n = 6)
            linear = math.pow(1 - abs(angular), 6)

        ######################### NOTE: QUESTION  ##############################
        return [linear, angular]
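# toMeterUnits / toRelativeCoordinates above are helpers of this example's
# RobotPerception and are not shown here. The conversions used throughout these
# Navigation variants reduce to the two hypothetical functions below:
# occupancy-grid pixels to map-frame meters (multiply by the map resolution and
# add the map origin) and the inverse.

def pixels_to_world(point_px, resolution, origin):
    """Convert an [x, y] occupancy-grid cell to map-frame coordinates in meters."""
    return [point_px[0] * resolution + origin['x'],
            point_px[1] * resolution + origin['y']]


def world_to_pixels(point_m, resolution, origin):
    """Convert map-frame coordinates in meters back to occupancy-grid cells."""
    return [(point_m[0] - origin['x']) / resolution,
            (point_m[1] - origin['y']) / resolution]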
Example #24
0
class Navigation(object):
    MAX_LINEAR_VELOCITY = rospy.get_param('max_linear_velocity')
    MAX_ANGULAR_VELOCITY = rospy.get_param('max_angular_velocity')

    # Constructor
    def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.select_another_target = 0
        self.inner_target_exists = False
        self.force_random = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        self.count_limit = 200  # 20 sec

        self.counter_to_next_sub = self.count_limit

        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)

        # Read the target function
        self.target_selector = rospy.get_param("target_selector")
        print "The selected target function is " + self.target_selector
        self.target_selection = TargetSelection(self.target_selector)

        # ROS Publisher for the path
        self.path_publisher = rospy.Publisher(
            rospy.get_param('path_pub_topic'), Path, queue_size=10)

        # ROS Publisher for the subtargets
        self.subtargets_publisher = rospy.Publisher(
            rospy.get_param('subgoals_pub_topic'), MarkerArray, queue_size=10)

        # ROS Publisher for the current target
        self.current_target_publisher = rospy.Publisher(
            rospy.get_param('curr_target_pub_topic'), Marker, queue_size=10)

    def checkTarget(self, event):
        # Check if we have a target or if the robot just wanders
        if not self.inner_target_exists or not self.move_with_target or self.next_subtarget == len(
                self.subtargets):
            return

        self.counter_to_next_sub -= 1
        if self.counter_to_next_sub == 0:
            Print.art_print('\n~~~~ Time reset ~~~~', Print.RED)
            self.inner_target_exists = False
            self.target_exists = False
            self.force_random = True
            return

        # Get the robot pose in pixels
        [rx, ry] = [
            self.robot_perception.robot_pose['x_px'] -
            self.robot_perception.origin['x'] /
            self.robot_perception.resolution,
            self.robot_perception.robot_pose['y_px'] -
            self.robot_perception.origin['y'] /
            self.robot_perception.resolution
        ]
        ######################### NOTE: QUESTION  ##############################
        # What if a later subtarget or the end has been reached before the next subtarget? Alter the code
        # accordingly. Check if distance is less than 7 px (14 cm). Slice the list of subtargets and then reverse it.
        for idx, st in enumerate(self.subtargets[self.next_subtarget::][::-1]):
            # Right now, idx refers to the sliced & reversed array, fix it.
            idx = len(self.subtargets) - 1 - idx
            assert idx >= self.next_subtarget
            dist = math.hypot(rx - st[0], ry - st[1])
            if dist < 7:
                self.next_subtarget = idx + 1
                self.counter_to_next_sub = self.count_limit
                if self.next_subtarget == len(self.subtargets):
                    self.target_exists = False
                break
        ########################################################################
        # Publish the current target
        if self.next_subtarget == len(self.subtargets):
            return

        subtarget = [
            self.subtargets[self.next_subtarget][0] *
            self.robot_perception.resolution +
            self.robot_perception.origin['x'],
            self.subtargets[self.next_subtarget][1] *
            self.robot_perception.resolution +
            self.robot_perception.origin['y']
        ]
        RvizHandler.printMarker(
            [subtarget],
            1,  # Type: Arrow
            0,  # Action: Add
            "map",  # Frame
            "art_next_subtarget",  # Namespace
            [0, 0, 0.8, 0.8],  # Color RGBA
            0.2  # Scale
        )

    # Function that selects the next target, produces the path and updates
    # the coverage field. This is called from the speeds assignment code, since
    # it contains timer callbacks
    def selectTarget(self):
        # IMPORTANT: The robot must be stopped if you call this function until
        # it is over
        # Check if we have a map
        while not self.robot_perception.have_map:
            Print.art_print("Navigation: No map yet", Print.RED)
            return

        print "\nClearing all markers"
        RvizHandler.printMarker(
            [[0, 0]],
            1,  # Type: Arrow
            3,  # Action: delete all
            "map",  # Frame
            "null",  # Namespace
            [0, 0, 0, 0],  # Color RGBA
            0.1  # Scale
        )

        print '\n\n----------------------------------------------------------'
        print "Navigation: Producing new target"
        # We are good to continue the exploration
        # Make this true in order not to call it again from the speeds assignment
        self.target_exists = True

        # Gets copies of the map and coverage
        local_ogm = self.robot_perception.getMap()
        local_ros_ogm = self.robot_perception.getRosMap()
        local_coverage = self.robot_perception.getCoverage()
        print "Got the map and Coverage"
        self.path_planning.setMap(local_ros_ogm)

        # Once the target has been found, find the path to it
        # Get the global robot pose
        g_robot_pose = self.robot_perception.getGlobalCoordinates([
            self.robot_perception.robot_pose['x_px'],
            self.robot_perception.robot_pose['y_px']
        ])

        # Call the target selection function to select the next best goal
        # Choose target function
        self.path = []
        while len(self.path) == 0:
            start = time.time()
            target = self.target_selection.selectTarget(
                local_ogm, local_coverage, self.robot_perception.robot_pose,
                self.robot_perception.origin, self.robot_perception.resolution,
                self.force_random)
            self.force_random = False

            self.path = self.path_planning.createPath(
                g_robot_pose, target, self.robot_perception.resolution)
            print "Navigation: Path for target found with " + str(
                len(self.path)) + " points"
            if len(self.path) == 0:
                Print.art_print(
                    "Path planning failed. Fallback to random target selection",
                    Print.RED)
                self.force_random = True

        # Reverse the path to start from the robot
        self.path = self.path[::-1]

        # Break the path into subgoals every 'step' pixels (1 m = 20 px)
        step = 1
        n_subgoals = int(len(self.path) / step)
        self.subtargets = []
        for i in range(0, n_subgoals):
            self.subtargets.append(self.path[i * step])
        self.subtargets.append(self.path[-1])
        self.next_subtarget = 0
        print "The path produced " + str(len(self.subtargets)) + " subgoals"

        ######################### NOTE: QUESTION  ##############################
        # The path is produced by an A* algorithm. This means that it is
        # optimal in length but 1) not smooth and 2) length optimality
        # may not be desired for coverage-based exploration
        ########################################################################

        self.counter_to_next_sub = self.count_limit

        # Publish the path for visualization purposes
        ros_path = Path()
        ros_path.header.frame_id = "map"
        for p in self.path:
            ps = PoseStamped()
            ps.header.frame_id = "map"
            ######################### NOTE: QUESTION  ##############################
            # Fill the ps.pose.position values to show the path in RViz
            # You must understand what self.robot_perception.resolution
            # and self.robot_perception.origin are.
            ps.pose.position.x = p[
                0] * self.robot_perception.resolution + self.robot_perception.origin[
                    'x']
            ps.pose.position.y = p[
                1] * self.robot_perception.resolution + self.robot_perception.origin[
                    'y']
            ########################################################################
            ros_path.poses.append(ps)
        self.path_publisher.publish(ros_path)

        # Publish the subtargets for visualization purposes
        subtargets_mark = []
        for s in self.subtargets:
            subt = [
                s[0] * self.robot_perception.resolution +
                self.robot_perception.origin['x'],
                s[1] * self.robot_perception.resolution +
                self.robot_perception.origin['y']
            ]
            subtargets_mark.append(subt)

        RvizHandler.printMarker(
            subtargets_mark,
            2,  # Type: Sphere
            0,  # Action: Add
            "map",  # Frame
            "art_subtargets",  # Namespace
            [0, 0.8, 0.0, 0.8],  # Color RGBA
            0.2  # Scale
        )

        self.inner_target_exists = True

    def velocitiesToNextSubtarget(self):
        rx = self.robot_perception.robot_pose['x_px'] - \
             self.robot_perception.origin['x'] / self.robot_perception.resolution
        ry = self.robot_perception.robot_pose['y_px'] - \
             self.robot_perception.origin['y'] / self.robot_perception.resolution
        theta = self.robot_perception.robot_pose['th']
        ######################### NOTE: QUESTION  ##############################
        # The velocities of the robot regarding the next subtarget should be
        # computed. The known parameters are the robot pose [x,y,th] from
        # robot_perception and the next_subtarget [x,y]. From these, you can
        # compute the robot velocities for the vehicle to approach the target.
        # Hint: Trigonometry is required
        if self.subtargets and self.next_subtarget <= len(self.subtargets) - 1:
            st_x = self.subtargets[self.next_subtarget][0]
            st_y = self.subtargets[self.next_subtarget][1]

            theta_g = math.atan2(st_y - ry, st_x - rx)
            delta_theta = theta_g - theta  # All thetas in radians.
            angular_coeff = delta_theta / math.pi + 2 * (
                delta_theta < -math.pi) - 2 * (delta_theta >= math.pi)
            linear_coeff = (1 - abs(angular_coeff))**6
            # Experimental: Recalculate angular speed according to linear.
            angular_coeff = (1 - linear_coeff) * np.sign(angular_coeff)

            linear = self.MAX_LINEAR_VELOCITY * linear_coeff
            angular = self.MAX_ANGULAR_VELOCITY * angular_coeff
            assert abs(angular_coeff
                       ) <= 1, "Angular coefficient outside of range [-1, 1]."
            assert 0 <= linear_coeff <= 1, "Linear coefficient outside of range [0, 1]."
            return linear, angular
        else:
            return 0, 0
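# A hedged sketch, not part of the original example, isolating the
# subtarget-skipping logic used in checkTarget above. The helper name is
# hypothetical; it returns the index just past the farthest-along subtarget
# that lies within `radius` pixels of the robot, or the current index if none
# has been reached yet.
import math


def advance_subtarget(subtargets, next_subtarget, rx, ry, radius=7):
    for idx in range(len(subtargets) - 1, next_subtarget - 1, -1):
        sx, sy = subtargets[idx]
        if math.hypot(rx - sx, ry - sy) < radius:
            return idx + 1   # farthest reached subtarget: skip past it
    return next_subtarget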
Example #25
0
class TargetSelection(object):
    # Constructor
    def __init__(self, selection_method):
        self.goals_position = []
        self.goals_value = []
        self.omega = 0.0
        self.radius = 0
        self.method = selection_method
        if self.method_is_cost_based():
            from robot_perception import RobotPerception
            self.robot_perception = RobotPerception()
            self.cost_based_properties = rospy.get_param("cost_based_properties")
            numpy.set_printoptions(precision=3, threshold=numpy.nan, suppress=True)

        self.brush = Brushfires()
        self.path_planning = PathPlanning()

    def method_is_cost_based(self):
        return self.method in ['cost_based', 'cost_based_fallback']

    def selectTarget(self, ogm, coverage, robot_pose, origin, resolution, force_random=False):
        ######################### NOTE: QUESTION  ##############################
        # Implement a smart way to select the next target. You have the following tools: ogm_limits, Brushfire field,
        # OGM skeleton, topological nodes.

        # Find only the useful boundaries of OGM. Only there calculations have meaning.
        ogm_limits = OgmOperations.findUsefulBoundaries(ogm, origin, resolution)

        # Blur the OGM to erase discontinuities due to laser rays
        ogm = OgmOperations.blurUnoccupiedOgm(ogm, ogm_limits)

        # Calculate Brushfire field
        tinit = time.time()
        brush = self.brush.obstaclesBrushfireCffi(ogm, ogm_limits)
        Print.art_print("Brush time: " + str(time.time() - tinit), Print.ORANGE)

        # Calculate skeletonization
        tinit = time.time()
        skeleton = topology.skeletonizationCffi(ogm, origin, resolution, ogm_limits)
        Print.art_print("Skeletonization time: " + str(time.time() - tinit), Print.ORANGE)

        # Find topological graph
        tinit = time.time()
        nodes = topology.topologicalNodes(ogm, skeleton, coverage, brush)
        Print.art_print("Topo nodes time: " + str(time.time() - tinit), Print.ORANGE)

        # Visualization of topological nodes
        vis_nodes = [[n[0] * resolution + origin['x'], n[1] * resolution + origin['y']] for n in nodes]
        RvizHandler.printMarker(
            vis_nodes,
            1,  # Type: Arrow
            0,  # Action: Add
            "map",  # Frame
            "art_topological_nodes",  # Namespace
            [0.3, 0.4, 0.7, 0.5],  # Color RGBA
            0.1  # Scale
        )

        try:
            tinit = time.time()
        except:
            tinit = None
        if self.method == 'random' or force_random:
            target = self.selectRandomTarget(ogm, coverage, brush)
        elif self.method_is_cost_based():
            g_robot_pose = self.robot_perception.getGlobalCoordinates([robot_pose['x_px'], robot_pose['y_px']])
            self.path_planning.setMap(self.robot_perception.getRosMap())

            class MapContainer:
                def __init__(self):
                    self.skeleton = skeleton
                    self.coverage = coverage
                    self.ogm = ogm
                    self.brush = brush
                    self.nodes = [node for node in nodes if TargetSelection.is_good(brush, ogm, coverage, node)]
                    self.robot_px = [robot_pose['x_px'] - origin['x'] / resolution,
                                     robot_pose['y_px'] - origin['y'] / resolution]
                    self.theta = robot_pose['th']

                @staticmethod
                def create_path(path_target):
                    return self.path_planning.createPath(g_robot_pose, path_target, resolution)

            target = self.select_by_cost(MapContainer())
        else:
            assert False, "Invalid target_selector method."
        if tinit is not None:
            Print.art_print("Target method {} time: {}".format(self.method, time.time() - tinit), Print.ORANGE)
        return target

    @staticmethod
    def selectRandomTarget(ogm, coverage, brushogm):
        # The next target in pixels
        while True:
            x_rand = random.randint(0, ogm.shape[0] - 1)
            y_rand = random.randint(0, ogm.shape[1] - 1)
            if ogm[x_rand][y_rand] < 50 and coverage[x_rand][y_rand] < 50 and brushogm[x_rand][y_rand] > 5:
                return x_rand, y_rand

    @staticmethod
    def is_good(brush, ogm, coverage, target=None):
        """

        :rtype: numpy.ndarray
        """
        if target is not None:
            x, y = target
            return ogm[x][y] < 50 and coverage[x][y] < 50 and brush[x][y] > 5
        else:
            return numpy.logical_and(numpy.logical_and(ogm < 50, coverage < 50), brush > 5)

    def select_by_cost(self, map_info):
        nodes, paths, topo_costs = zip(*self.choose_best_nodes(map_info))
        if nodes[0] is None:  # choose_best_node's yield when no path is found will make nodes = (None,)
            return -1, -1
        elif len(nodes) == 1:
            return nodes[0]

        best_path_idx = self.weight_costs(
            topo_costs,
            [self.distance_cost(path, map_info.robot_px) for path in paths],  # Distance
            [self.coverage_cost(path, map_info.coverage) for path in paths],  # Coverage
            [self.rotation_cost(path, map_info.robot_px, map_info.theta) for path in paths]  # Rotational
        ).argmax()
        assert paths[best_path_idx]
        target = nodes[best_path_idx]

        Print.art_print("Best: {}".format(best_path_idx), Print.BLUE)
        return target

    def choose_best_nodes(self, map_info):
        # Since path planning takes a lot of time for many nodes we should reduce the possible result to the nodes
        # with the best distance and topological costs.
        if self.method == 'cost_based_fallback':
            yield self.closer_node(map_info), None, None
            return

        nodes = list(self.cluster_nodes(map_info.nodes, map_info.robot_px, self.cost_based_properties['node_clusters']))
        topo_costs = [self._topological_cost(node, map_info.ogm) for node in nodes]
        best_nodes_idx = self.weight_costs(
            topo_costs,
            [euclidean(node, map_info.robot_px) for node in nodes]
        ).argsort()[::-1]  # We need descending order since we now want to maximize.

        count = 0
        for idx in best_nodes_idx:
            node = nodes[idx]
            path = map_info.create_path(node)
            if path:
                count += 1
                yield node, path, topo_costs[idx]
            if count == self.cost_based_properties['max_paths']:
                break
        if count == 0:
            Print.art_print("Failed to create any path. Falling back to closer unoccupied.", Print.RED)
            self.method = 'cost_based_fallback'
            yield self.closer_node(map_info), None, None

    @staticmethod
    def cluster_nodes(nodes_original, robot_px, n_clusters):
        Print.art_print("Trying to cluster:" + str(nodes_original), Print.BLUE)
        whitened = whiten(nodes_original)
        _, cluster_idx = kmeans2(whitened, n_clusters)
        Print.art_print("Ended with clusters:" + str(cluster_idx), Print.BLUE)

        keyfun = lambda x: cluster_idx[x[0]]
        nodes = sorted(enumerate(nodes_original), key=keyfun)
        for _, group in itertools.groupby(nodes, key=keyfun):
            # For each cluster pick the farthest one.
            max_idx = max(group, key=lambda x: euclidean(robot_px, x[1]))[0]
            yield nodes_original[max_idx]

    def weight_costs(self, *cost_vectors, **kwargs):
        costs = self.normalize_costs(numpy.array(tuple(vector for vector in cost_vectors)))
        if 'weights' in kwargs:
            weights = kwargs['weights']
        else:
            weights = 2 ** numpy.arange(costs.shape[0] - 1, -1, -1)
        return numpy.average(costs, axis=0, weights=weights)

    @staticmethod
    def closer_node(map_info):
        robot_px = numpy.array(map_info.robot_px)
        nodes = numpy.array(numpy.where(TargetSelection.is_good(
            numpy.array(map_info.brush),
            numpy.array(map_info.ogm),
            numpy.array(map_info.coverage),
            target=None
        ))).transpose()
        closer_idx = numpy.linalg.norm(nodes - robot_px, axis=1).argmin()
        return nodes[closer_idx]

    def _topological_cost(self, node, ogm):
        threshold = self.cost_based_properties['topo_threshold']
        return self.topological_cost(node, ogm, threshold)

    @staticmethod
    def topological_cost(node, ogm, threshold):
        result = 0
        for dir_x, dir_y in [(0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1)]:
            cost = 0
            idx = 0
            x, y = node
            while cost < threshold:
                idx += 1
                x_c = x + idx * dir_x
                y_c = y + idx * dir_y
                cost = (x - x_c) ** 2 + (y - y_c) ** 2
                if ogm[x_c][y_c] > 50:
                    break
                elif ogm[x_c][y_c] in [50, -1]:
                    Print.art_print("{},{} is unknown!".format(x_c, y_c), Print.RED)
                    cost = threshold
                    break
            result += min(threshold, cost)
        return result

    @staticmethod
    def distance_cost(path, robot_px):
        weighted_distances = (TargetSelection.distance(node1, node2) * TargetSelection.distance_coeff(node1, robot_px)
                              for node1, node2 in zip(path[:-1], path[1:]))
        return sum(weighted_distances)

    @staticmethod
    def distance_coeff(node, robot_px, s=30, epsilon=0.0001):
        x_n, y_n = node
        x_g, y_g = robot_px
        coeff = 1 - math.exp(-((x_n - x_g) ** 2 / (2 * s ** 2) + (y_n - y_g) ** 2 / (2 * s ** 2))) + epsilon
        return 1 / coeff

    @staticmethod
    def distance(node_a, node_b):
        x_1, y_1 = node_a
        x_2, y_2 = node_b
        return math.sqrt((x_1 - x_2) ** 2 + (y_1 - y_2) ** 2)

    @staticmethod
    def rotation_cost(path, robot_px, theta):
        rotation = 0
        rx, ry = robot_px
        theta_old = theta
        for node in path:
            st_x, st_y = node
            theta_new = math.atan2(st_y - ry, st_x - rx)
            rotation += abs(theta_new - theta_old)
            theta_old = theta_new
        return rotation

    @staticmethod
    def coverage_cost(path, coverage):
        coverage_sum = sum(coverage[x][y] for x, y in path)
        return coverage_sum

    @staticmethod
    def normalize_costs(costs):
        """
        :rtype: numpy.ndarray
        """
        Print.art_print("Costs before normalization:\n" + str(costs), Print.BLUE)
        assert (costs >= 0).all()
        return 1 - (costs.transpose() / numpy.abs(costs).max(axis=1)).transpose()
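# A tiny worked example (with made-up numbers) of the cost combination performed
# by normalize_costs / weight_costs above. Each cost vector is rescaled to
# [0, 1] and inverted so that a lower cost gives a higher score, then the score
# rows are averaged with power-of-two weights and the candidate with the
# highest combined score is picked.
import numpy

topo_costs = numpy.array([10.0, 40.0, 20.0])   # one entry per candidate node
dist_costs = numpy.array([5.0, 2.0, 8.0])

costs = numpy.array([topo_costs, dist_costs])
scores = 1 - costs / numpy.abs(costs).max(axis=1, keepdims=True)
weights = 2 ** numpy.arange(costs.shape[0] - 1, -1, -1)      # [2, 1]
best_idx = numpy.average(scores, axis=0, weights=weights).argmax()
# For these numbers best_idx == 0: node 0 has a low topological cost and a
# moderate distance cost, which the 2:1 weighting favours.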
Example #26
0
class App(
        Parameter,
        Utils,
        FieldView,
        Draw,
        Event,
):
    def __init__(self):
        super(Parameter, self).__init__()
        super(Utils, self).__init__()
        super(FieldView, self).__init__()
        super(Draw, self).__init__()
        self.capture = cv2.VideoCapture(camera)
        self.capture.set(cv2.CAP_PROP_FRAME_WIDTH,
                         self.width)  # Set the camera image width to 1280
        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT,
                         self.height)  # Set the camera image height to 720
        self.table_set = Tables()
        self.planner = PathPlanning(send=self.send)
        self.table_detection = True
        self.detection_success = False
        self.bottle_result = [None, None, None]
        self.standing_result_image = None
        self.quit = False
        self.yukari = Yukari()
        self.remove_separator_middle = False
        self.use_remove_separator = True
        self.points = None
        self.flip_points = None

        self.set_track_bar_pos(self.settings)

        self.ptlist = PointList(NPOINTS)
        self.click_mode = True
        logging.info('START DETECTION')

    def get_param(self):
        # Get the trackbar (slider) values
        self.h = cv2.getTrackbarPos('H', self.bar_window_name)
        self.s = cv2.getTrackbarPos('S', self.bar_window_name)
        self.v = cv2.getTrackbarPos('V', self.bar_window_name)
        self.lv = cv2.getTrackbarPos('LV', self.bar_window_name)
        self.th = cv2.getTrackbarPos('threshold', self.bar_window_name)
        self.kn = cv2.getTrackbarPos('kernel', self.bar_window_name)
        self.remove_side = cv2.getTrackbarPos('remove_side',
                                              self.bar_window_name)
        self.remove_side_e = cv2.getTrackbarPos('remove_side_e',
                                                self.bar_window_name)
        self.zone = cv2.getTrackbarPos('zone', self.bar_window_name)

    def get_data_from_webcam(self):
        # Grab an image frame from the webcam; returns (frame, None)
        ret, frame = self.capture.read()
        # frame = cv2.flip(frame, -1)
        return frame, None

    def get_data(self):
        return self.get_data_from_webcam()

    def remove_separator(self, color_image):
        if self.click_mode:
            return
        if not self.use_remove_separator:
            return
        # Mask out the field separator
        if self.zone:
            if self.remove_separator_middle:
                x = self.height
                y = -(self.remove_side * 20)
                y_x = y / x
                f = lambda a: int(y_x * a - y)

                pts = np.array([[0, 0], [self.remove_side * 20, 0],
                                [f(self.remove_side_e), self.remove_side_e],
                                [0, self.remove_side_e]])
                cv2.fillPoly(color_image, pts=[pts], color=Color.red)

                pts = np.array(
                    [[0, self.remove_side_e + 150],
                     [f(self.remove_side_e + 150), self.remove_side_e + 150],
                     [35, self.height], [0, self.height]])
                cv2.fillPoly(color_image, pts=[pts], color=Color.red)
            else:
                pts = np.array([[0, 0], [self.remove_side * 20, 0],
                                [35, self.height], [0, self.height]])
                cv2.fillPoly(color_image, pts=[pts], color=Color.red)
        else:
            if self.remove_separator_middle:
                x = self.height
                y = -(self.width - self.remove_side * 50)
                y_x = y / x
                f = lambda a: self.width - int(y_x * a - y)

                pts = np.array([[self.remove_side * 50, 0],
                                [f(self.remove_side_e), self.remove_side_e],
                                [self.width, self.remove_side_e],
                                [self.width, 0]])
                cv2.fillPoly(color_image, pts=[pts], color=Color.blue)

                pts = np.array(
                    [[f(self.remove_side_e + 150), self.remove_side_e + 150],
                     [self.width, self.remove_side_e + 150],
                     [self.width, self.height]])
                cv2.fillPoly(color_image, pts=[pts], color=Color.blue)
            else:
                pts = np.array([[self.remove_side * 50, 0], [self.width, 0],
                                [self.width, self.height]])
                cv2.fillPoly(color_image, pts=[pts], color=Color.blue)

    def draw(self, color_image_for_show, thresh):
        # Draw the display image
        # Screen border
        if self.zone:
            cv2.rectangle(color_image_for_show, (0, 0),
                          (self.width, self.height), Color.red, 20)
        else:
            cv2.rectangle(color_image_for_show, (0, 0),
                          (self.width, self.height), Color.blue, 20)

        if self.detection_success:
            # Draw the under table
            color_image_for_show = self.put_info(color_image_for_show,
                                                 self.table_set.under)
            # Draw the middle table
            color_image_for_show = self.put_info(color_image_for_show,
                                                 self.table_set.middle)
            # Draw the up table
            color_image_for_show = self.put_info(color_image_for_show,
                                                 self.table_set.up)

        # Convert the binary image to a color map
        thresh = cv2.applyColorMap(cv2.convertScaleAbs(thresh),
                                   cv2.COLORMAP_BONE)
        # Show thresh only in the thresh window
        images_for_thresh = np.hstack((color_image_for_show, thresh))

        if not self.table_detection:
            # Draw the standing-detection info
            self.put_info_by_set(color_image_for_show, self.table_set,
                                 Color.black)
            self.standing_result_image = self.put_standing_detection_result(
                color_image_for_show, self.table_set, self.bottle_result)

        # Shrink the image so the window does not get too large
        images_for_thresh = cv2.resize(images_for_thresh,
                                       (int(1280 * 0.65), int(480 * 0.65)))

        # Show the windows
        cv2.imshow(self.window_name, color_image_for_show)
        cv2.imshow(self.bar_window_name, images_for_thresh)

        cv2.setMouseCallback(
            self.window_name, self.onMouse,
            [self.window_name, color_image_for_show, self.ptlist])

    def analyze(self):
        # Get the trackbar values
        self.get_param()

        # Get the image data
        color_image, np_depth = self.get_data()

        # Copy color_image for on-screen drawing
        color_image_for_show = color_image.copy()

        # Copy color_image for saving to disk
        self.color_image_for_save = color_image.copy()

        # Copy for the standing check
        for_check = color_image.copy()

        # Remove the white separator that splits the field
        self.remove_separator(color_image)
        self.remove_separator(color_image_for_show)

        # Apply a median blur
        color_image = cv2.medianBlur(color_image, 5)
        # Convert to HSV color space
        hsv = cv2.cvtColor(color_image, cv2.COLOR_BGR2HSV)

        # Set the upper and lower bounds for white from the trackbar values
        upper_white = np.array([self.h, self.s, self.v])
        lower_white = np.array([0, 0, self.lv])

        # Mask the white regions
        mask_white = cv2.inRange(hsv, lower_white, upper_white)
        # Keep only the masked region
        res_white = cv2.bitwise_and(color_image, color_image, mask=mask_white)
        # Convert to grayscale
        gray = cv2.cvtColor(res_white, cv2.COLOR_RGB2GRAY)
        # Binarize
        ret, thresh = cv2.threshold(gray, self.th, 255, cv2.THRESH_BINARY)

        # Erode then dilate (opening)
        kernel = np.ones((self.kn, self.kn), np.uint8)
        erode = cv2.erode(thresh, kernel)
        thresh = cv2.dilate(erode, kernel)

        # Table detection
        if self.table_detection:
            # Reset to False in case we returned from the bottle-standing detection
            self.processing_standing_detection = False

            self.points, self.flip_points = None, None

            tables = []  # Contours that might be tables go here
            if self.click_mode:
                self.draw_click(color_image_for_show, self.ptlist.get_points())
                if self.ptlist.is_full():
                    for i, point in enumerate(self.ptlist.get_points()):
                        table = Table(point, (lambda x: 60 if x == 0 else
                                              (70 if x == 1 else 100))(i), 0,
                                      point)
                        tables.append(table)

            else:
                # Extract contours
                imgEdge, contours, hierarchy = cv2.findContours(
                    thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

                # Put the found contours into the list
                contours.sort(key=cv2.contourArea, reverse=True)
                for cnt in contours:
                    t = self.calc_circle_level(cnt, cv2.contourArea(cnt))
                    if t > 0.8:
                        (x, y), radius = cv2.minEnclosingCircle(cnt)
                        center = (int(x), int(y))
                        radius = int(radius)
                        color_image_for_show = cv2.circle(
                            color_image_for_show, center, radius, (0, 255, 0),
                            2)
                        table = Table(center, radius, 0, (x, y))
                        if table.is_table():  # Check whether it really is a table
                            tables.append(table)  # Add it to the list if so

            # Sort by radius in descending order
            tables = sorted(tables, reverse=True)

            # Keep only the three largest
            tables = tables[:3]

            # Sort by Y coordinate in ascending order
            tables = sorted(tables, key=attrgetter('y'))

            # If all three tables were found
            self.detection_success = (len(tables) == 3)  # True means success
            if self.detection_success:
                self.table_set.update(tables[0], tables[1], tables[2])

        # If the table detection phase has finished
        else:
            # Detect whether the bottles are standing
            if self.use_standing_detection:
                self.put_info_by_set(color_image_for_show, self.table_set,
                                     Color.black)
                self.put_standing_detection_result(color_image_for_show,
                                                   self.table_set,
                                                   self.bottle_result)
                if not self.processing_standing_detection:
                    self.check_standing(for_check, self.table_set)
                    # self.table_set.reset_standing_result()

                if not np.all(self.table_set.result is None):
                    play_result = []
                    if self.planner.shot[UNDER_NUM]:
                        if self.bottle_result[
                                UNDER_NUM] != self.table_set.result[
                                    UNDER_NUM] and not (
                                        self.bottle_result[UNDER_NUM] and
                                        not self.table_set.result[UNDER_NUM]):
                            play_result.append(
                                [UNDER_NUM, self.table_set.result[UNDER_NUM]])
                    else:
                        self.table_set.under.standing = None
                    if self.planner.shot[MIDDLE_NUM]:
                        if self.bottle_result[
                                MIDDLE_NUM] != self.table_set.result[
                                    MIDDLE_NUM] and not (
                                        self.bottle_result[MIDDLE_NUM] and
                                        not self.table_set.result[MIDDLE_NUM]):
                            play_result.append([
                                MIDDLE_NUM, self.table_set.result[MIDDLE_NUM]
                            ])
                    else:
                        self.table_set.middle.standing = None
                    if self.planner.shot[UP_NUM]:
                        if self.bottle_result[UP_NUM] != self.table_set.result[
                                UP_NUM] and not (
                                    self.bottle_result[UP_NUM]
                                    and not self.table_set.result[UP_NUM]):
                            play_result.append(
                                [UP_NUM, self.table_set.result[UP_NUM]])
                    else:
                        self.table_set.up.standing = None
                    if play_result:
                        self.yukari.play_results(play_result)
                    self.bottle_result = self.table_set.result
                    # True if standing, False otherwise
                    self.planner.set_result_by_list(self.bottle_result)

        if self.detection_success and not self.table_detection:
            global timer
            if time.time() - timer > 0.6:  # Run roughly every 0.6 seconds
                # Convert on-screen coordinates to the coordinates to send
                ret = self.make_distance_to_send(self.table_set)
                # Path planning
                if self.points is None:
                    self.points, self.flip_points = self.planner.main(
                        [ret.under, ret.middle, ret.up, self.zone])

                if not self.planner.retry_start:
                    self.planner.send(self.points, self.flip_points, False)
                    # Draw the field
                    field_view = self.draw_field(
                        (ret.under, ret.middle, ret.up), self.points)
                    cv2.imshow(self.field_window_name, field_view)
                else:
                    retry_points, retry_flip_points = self.planner.retry(
                        (ret.under, ret.middle, ret.up, self.zone),
                        self.bottle_result)
                    self.planner.send(retry_points, retry_flip_points, True)
                    field_view = self.draw_retry(
                        (ret.under, ret.middle, ret.up), self.points,
                        retry_points, self.bottle_result)
                    cv2.imshow(self.field_window_name, field_view)
                self.yukari.play_finish_path_planning()
                timer = time.time()

        # Draw
        self.draw(color_image_for_show, thresh)

    def key_check(self):
        # Read key input
        key = cv2.waitKey(1)

        # Switch to the bottle-standing detection sequence
        if key == ord('n') and self.table_detection and self.detection_success:
            # self.yukari.play_move_to_check_standing_sequence()
            self.table_detection = False
            logging.info('END DETECTION')

        # Switch to the table detection sequence
        if key == ord('b') and not self.table_detection:
            self.yukari.play_detecting_table()
            self.table_detection = True
            self.planner.retry_start = False

        # Collect (save) images
        if key == ord('r') and not self.table_detection:
            global sc
            logging.info(f'STORED:{sc}')
            self.save_table_images(image=self.color_image_for_save,
                                   table_set=self.table_set,
                                   x_offset=20,
                                   y_offset=20)
            sc += 1

        # Quit
        if key == ord('q'):
            logging.info('QUIT DETECTION')
            self.quit = True

        # Save the parameters
        if key == ord('s'):
            logging.info('SAVED PARAMETER')
            self.save_param(self.h, self.s, self.v, self.lv, self.th, self.kn,
                            self.remove_side)

        if key == ord('e'):
            # Toggle whether to remove the middle part of the separator
            self.remove_separator_middle = not self.remove_separator_middle

        if key == ord('c'):
            # Toggle click mode
            self.click_mode = not self.click_mode

        if key == ord('z'):
            # Reset the clicked points
            self.ptlist.reset_points()

        if key == ord('i'):
            # Force the second round (retry)
            self.planner.retry_start = True

        if key == ord('f'):
            # Toggle separator removal
            self.use_remove_separator = not self.use_remove_separator

    def run(self):
        try:
            while True:
                try:
                    # Image recognition
                    self.analyze()

                    # Keyboard input
                    self.key_check()

                    if self.quit:
                        break

                except Exception as error:
                    logging.error(error)
        except:
            # Catch everything else (e.g. KeyboardInterrupt) so run() exits quietly
            pass
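# A minimal usage sketch (an assumption, not part of the original example):
#   app = App()
#   app.run()
#   cv2.destroyAllWindows()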
Example #27
0
class TargetSelection:

    # Constructor
    def __init__(self, selection_method):
        self.goals_position = []
        self.goals_value = []
        self.omega = 0.0
        self.radius = 0
        self.method = selection_method
        self.previous_target = [-1, -1]

        self.brush = Brushfires()
        self.topo = Topology()
        self.path_planning = PathPlanning()


    def selectTarget(self, init_ogm, coverage, robot_pose, origin, \
        resolution, force_random = False):

        target = [-1, -1]

        ######################### NOTE: QUESTION  ##############################
        # Implement a smart way to select the next target. You have the
        # following tools: ogm_limits, Brushfire field, OGM skeleton,
        # topological nodes.

        # Find only the useful boundaries of OGM. Only there calculations
        # have meaning
        ogm_limits = OgmOperations.findUsefulBoundaries(init_ogm, origin, resolution)

        # Blur the OGM to erase discontinuities due to laser rays
        ogm = OgmOperations.blurUnoccupiedOgm(init_ogm, ogm_limits)

        # Calculate Brushfire field
        tinit = time.time()
        brush = self.brush.obstaclesBrushfireCffi(ogm, ogm_limits)
        Print.art_print("Brush time: " + str(time.time() - tinit), Print.ORANGE)

        # Calculate skeletonization
        tinit = time.time()
        skeleton = self.topo.skeletonizationCffi(ogm, \
                   origin, resolution, ogm_limits)
        Print.art_print("Skeletonization time: " + str(time.time() - tinit), Print.ORANGE)

        # Find topological graph
        tinit = time.time()
        nodes = self.topo.topologicalNodes(ogm, skeleton, coverage, origin, \
                resolution, brush, ogm_limits)
        Print.art_print("Topo nodes time: " + str(time.time() - tinit), Print.ORANGE)

        # Visualization of topological nodes
        vis_nodes = []
        for n in nodes:
            vis_nodes.append([
                n[0] * resolution + origin['x'],
                n[1] * resolution + origin['y']
            ])
        RvizHandler.printMarker(\
            vis_nodes,\
            1, # Type: Arrow
            0, # Action: Add
            "map", # Frame
            "art_topological_nodes", # Namespace
            [0.3, 0.4, 0.7, 0.5], # Color RGBA
            0.1 # Scale
        )

        # Random point
        if self.method == 'random' or force_random == True:
            target = self.selectRandomTarget(ogm, coverage, brush, ogm_limits)
        ########################################################################

        # Challenge 6. Smart point selection demands autonomous_expl.yaml->target_selector: 'smart'
        # Smart point selection
        if self.method == 'smart' and force_random == False:
            nextTarget = self.selectSmartTarget(coverage, brush, robot_pose, resolution, origin, nodes)

            # Check if selectSmartTarget found a target
            if nextTarget is not None:
                # Check if the next target is the same as the previous
                dist = math.hypot( nextTarget[0] - self.previous_target[0], nextTarget[1] - self.previous_target[1])
                if dist > 5:
                    target = nextTarget
                else:
                    target = self.selectRandomTarget(ogm, coverage, brush, ogm_limits)
            else:
                # No target found. Choose a random
                target = self.selectRandomTarget(ogm, coverage, brush, ogm_limits)


        self.previous_target = target
        return target

    #############################################################################
    # Challenge 6. select Smart Target Function
    # this function follows the methodology presented
    # on lecture 9.
    def selectSmartTarget(self, coverage, brush, robot_pose, resolution, origin, nodes):
        tinit = time.time()

        # Get the robot pose in pixels
        [rx, ry] = [int(round(robot_pose['x_px'] - origin['x'] / resolution)), int(round(robot_pose['y_px'] - origin['y'] / resolution))]

        # Initialize weights matrix
        weights = []

        # Do procedure described in presentation 9
        # for each node.
        for i, node in enumerate(nodes):

            # Calculate the path
            path = np.flipud(self.path_planning.createPath([rx, ry], node, resolution))

            # Check if it found a path
            if path.shape[0] > 2:
                # Vectors of the path
                vectors = path[1:, :] - path[:-1, :]

                # Calculate paths weighted distance
                vectorsMean = vectors.mean(axis=0)
                vectorsVar = vectors.var(axis=0)
                dists = np.sqrt(np.einsum('ij,ij->i', vectors, vectors))
                weightCoeff = 1 / (1 - np.exp(-np.sum((vectors - vectorsMean)**2 / (2 * vectorsVar), axis=1)) + 1e-4)
                weightDists = np.sum(weightCoeff + dists)

                # Topological weight
                weightTopo = brush[node[0], node[1]]

                # Cosine of the angles
                c = np.sum(vectors[1:, :] * vectors[:-1, :], axis=1) / np.linalg.norm(vectors[1:, :], axis=1) / np.linalg.norm(vectors[:-1, :], axis=1)

                # Sum of all angles
                weightTurn = np.sum(abs(np.arccos(np.clip(c, -1, 1))))

                # Calculate the coverage weight
                pathIndex = np.rint(path).astype(int)
                weightCove = 1 - np.sum(coverage[pathIndex[:, 0], pathIndex[:, 1]]) / (path.shape[0] * 255)

                weights.append([i, weightDists, weightTopo, weightTurn, weightCove])


        if len(weights) > 0:
            weight = np.array(weights)

            # Min-max normalize each weight column to [0, 1] and invert it
            weight[:,1:] = 1 - ((weight[:,1:] - np.min(weight[:,1:], axis=0)) / (np.max(weight[:,1:], axis=0) - np.min(weight[:,1:], axis=0)))

            # Calculate the final weights (priority: topology > path distance > coverage > turning)
            finalWeights = 8 * weight[:, 2] + 4 * weight[:, 1] + 2 * weight[:, 4] + weight[:, 3]

            # Find the best path
            index = int(weight[max(xrange(len(finalWeights)), key=finalWeights.__getitem__)][0])

            target = nodes[index]

            Print.art_print("Smart target selection time: " + str(time.time() - tinit), Print.ORANGE)

            return target
        else:
            Print.art_print("Smart target selection failed!!! Time: " + str(time.time() - tinit), Print.ORANGE)

            return None

    ############################################################################

    def selectRandomTarget(self, ogm, coverage, brushogm, ogm_limits):
        # The next target in pixels
        tinit = time.time()
        next_target = [0, 0]
        found = False
        while not found:
            x_rand = random.randint(0, ogm.shape[0] - 1)
            y_rand = random.randint(0, ogm.shape[1] - 1)
            if ogm[x_rand][y_rand] < 50 and coverage[x_rand][y_rand] < 50 and \
                    brushogm[x_rand][y_rand] > 5:
                next_target = [x_rand, y_rand]
                found = True
        Print.art_print("Select random target time: " + str(time.time() - tinit), \
            Print.ORANGE)
        return next_target
Example #28
0
class Navigation:

    # Constructor
    def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.select_another_target = 0
        self.inner_target_exists = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        self.count_limit = 200  # 20 sec

        self.counter_to_next_sub = self.count_limit

        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)

        # Read the target function
        self.target_selector = rospy.get_param("target_selector")
        print "The selected target function is " + self.target_selector
        self.target_selection = TargetSelection(self.target_selector)

        # ROS Publisher for the path
        self.path_publisher = \
            rospy.Publisher(rospy.get_param('path_pub_topic'), \
            Path, queue_size = 10)

        # ROS Publisher for the subtargets
        self.subtargets_publisher = \
            rospy.Publisher(rospy.get_param('subgoals_pub_topic'),\
            MarkerArray, queue_size = 10)

        # ROS Publisher for the current target
        self.current_target_publisher = \
            rospy.Publisher(rospy.get_param('curr_target_pub_topic'),\
            Marker, queue_size = 10)

    def checkTarget(self, event):
        # Check if we have a target or if the robot just wanders
        if self.inner_target_exists == False or self.move_with_target == False or\
                self.next_subtarget == len(self.subtargets):
            return

        self.counter_to_next_sub -= 1

        if self.counter_to_next_sub == 0:
            Print.art_print('\n~~~~ Time reset ~~~~', Print.RED)
            self.inner_target_exists = False
            self.target_exists = False
            return

        # Get the robot pose in pixels
        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]

        # Clear visited subtargets
        self.subtargets = self.subtargets[self.next_subtarget:]
        self.next_subtarget = 0

        # Calculate distance between the robot pose and all remaining subtargets
        dx = [rx - st[0] for st in self.subtargets]
        dy = [ry - st[1] for st in self.subtargets]
        dist = [math.hypot(v[0], v[1]) for v in zip(dx, dy)]

        # Check if any target is in distance
        min_dist, min_idx = min(zip(dist, range(len(dist))))

        ######################### NOTE: QUESTION  ##############################
        # What if a later subtarget or the end has been reached before the
        # next subtarget? Alter the code accordingly.
        # Check if the distance is less than 5 px
        if min_dist < 5:
            self.next_subtarget = min_idx + 1
            self.counter_to_next_sub = self.count_limit
            # Check if the final subtarget has been approached
            if self.next_subtarget == len(self.subtargets):
                self.target_exists = False
        ########################################################################

        # Publish the current target
        if self.next_subtarget >= len(self.subtargets):
            return

        subtarget = [\
            self.subtargets[self.next_subtarget][0]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['x'],
            self.subtargets[self.next_subtarget][1]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['y']\
            ]

        RvizHandler.printMarker(\
            [subtarget],\
            1, # Type: Arrow
            0, # Action: Add
            "map", # Frame
            "art_next_subtarget", # Namespace
            [0, 0, 0.8, 0.8], # Color RGBA
            0.2 # Scale
        )

    # Function that selects the next target, produces the path and updates
    # the coverage field. This is called from the speeds assignment code, since
    # it contains timer callbacks
    def selectTarget(self):
        # IMPORTANT: The robot must be stopped if you call this function until
        # it is over
        # Check if we have a map
        while self.robot_perception.have_map == False:
            Print.art_print("Navigation: No map yet", Print.RED)
            return

        print "\nClearing all markers"
        RvizHandler.printMarker(\
            [[0, 0]],\
            1, # Type: Arrow
            3, # Action: delete all
            "map", # Frame
            "null", # Namespace
            [0,0,0,0], # Color RGBA
            0.1 # Scale
        )

        print '\n\n----------------------------------------------------------'
        print "Navigation: Producing new target"
        # We are good to continue the exploration
        # Make this true in order not to call it again from the speeds assignment
        self.target_exists = True

        # Gets copies of the map and coverage
        local_ogm = self.robot_perception.getMap()
        local_ros_ogm = self.robot_perception.getRosMap()
        local_coverage = self.robot_perception.getCoverage()
        print "Got the map and Coverage"
        self.path_planning.setMap(local_ros_ogm)

        # Once the target has been found, find the path to it
        # Get the global robot pose
        g_robot_pose = self.robot_perception.getGlobalCoordinates(\
              [self.robot_perception.robot_pose['x_px'],\
              self.robot_perception.robot_pose['y_px']])

        # Call the target selection function to select the next best goal
        # Choose target function
        self.path = []
        force_random = False
        while len(self.path) == 0:
            start = time.time()
            target = self.target_selection.selectTarget(\
                      local_ogm,\
                      local_ros_ogm,\
                      local_coverage,\
                      self.robot_perception.robot_pose,
                      self.robot_perception.origin,
                      self.robot_perception.resolution,
                      force_random)

            self.path = self.path_planning.createPath(\
                g_robot_pose,\
                target,
                self.robot_perception.resolution)
            print "Navigation: Path for target found with " + str(len(self.path)) +\
                " points"
            if len(self.path) == 0:
                Print.art_print(\
                    "Path planning failed. Fallback to random target selection", \
                    Print.RED)
                force_random = True

        # Reverse the path to start from the robot
        self.path = self.path[::-1]

        # Smooth path
        if len(self.path) > 3:
            x = np.array(self.path)
            y = np.copy(x)
            a = 0.5
            b = 0.2
            e = np.sum(
                np.abs(a * (x[1:-1, :] - y[1:-1, :]) + b *
                       (y[2:, :] - 2 * y[1:-1, :] + y[:-2, :])))
            while e > 1e-3:
                y[1:-1, :] += a * (x[1:-1, :] - y[1:-1, :]) + b * (
                    y[2:, :] - 2 * y[1:-1, :] + y[:-2, :])
                e = np.sum(
                    np.abs(a * (x[1:-1, :] - y[1:-1, :]) + b *
                           (y[2:, :] - 2 * y[1:-1, :] + y[:-2, :])))
            self.path = y.tolist()
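            # The loop above is a simple relaxation smoother:
            #   y_i += a * (x_i - y_i) + b * (y_{i+1} - 2 * y_i + y_{i-1})
            # where x is the original A* path and y the smoothed copy, iterated
            # until the total per-iteration change drops below 1e-3.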

        # Break the path into subgoals at every path point (step = 1)
        step = 1
        n_subgoals = (int)(len(self.path) / step)
        self.subtargets = []
        for i in range(0, n_subgoals):
            self.subtargets.append(self.path[i * step])
        self.subtargets.append(self.path[-1])
        self.next_subtarget = 0
        print "The path produced " + str(len(self.subtargets)) + " subgoals"

        ######################### NOTE: QUESTION  ##############################
        # The path is produced by an A* algorithm. This means that it is
        # optimal in length, but 1) it is not smooth (addressed by the
        # smoothing step above) and 2) length optimality may not be
        # desired for coverage-based exploration
        ########################################################################

        self.counter_to_next_sub = self.count_limit

        # Publish the path for visualization purposes
        ros_path = Path()
        ros_path.header.frame_id = "map"
        for p in self.path:
            ps = PoseStamped()
            ps.header.frame_id = "map"

            ######################### NOTE: QUESTION  ##############################
            # Fill the ps.pose.position values to show the path in RViz
            # You must understand what self.robot_perception.resolution
            # and self.robot_perception.origin are.

            ps.pose.position.x = p[
                0] * self.robot_perception.resolution + self.robot_perception.origin[
                    'x']
            ps.pose.position.y = p[
                1] * self.robot_perception.resolution + self.robot_perception.origin[
                    'y']

            ########################################################################
            ros_path.poses.append(ps)
        self.path_publisher.publish(ros_path)

        # Publish the subtargets for visualization purposes
        subtargets_mark = []
        for s in self.subtargets:
            subt = [
              s[0] * self.robot_perception.resolution + \
                      self.robot_perception.origin['x'],
              s[1] * self.robot_perception.resolution + \
                      self.robot_perception.origin['y']
              ]
            subtargets_mark.append(subt)

        RvizHandler.printMarker(\
            subtargets_mark,\
            2, # Type: Sphere
            0, # Action: Add
            "map", # Frame
            "art_subtargets", # Namespace
            [0, 0.8, 0.0, 0.8], # Color RGBA
            0.2 # Scale
        )

        self.inner_target_exists = True

    def velocitiesToNextSubtarget(self):

        [linear, angular] = [0, 0]

        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]
        theta = self.robot_perception.robot_pose['th']
        ######################### NOTE: QUESTION  ##############################
        # The velocities of the robot regarding the next subtarget should be
        # computed. The known parameters are the robot pose [x,y,th] from
        # robot_perception and the next_subtarget [x,y]. From these, you can
        # compute the robot velocities for the vehicle to approach the target.
        # Hint: Trigonometry is required

        l_max = 0.3
        a_max = 0.3

        if self.subtargets and self.next_subtarget <= len(self.subtargets) - 1:
            st_x = self.subtargets[self.next_subtarget][0]
            st_y = self.subtargets[self.next_subtarget][1]

            # Fix robot turning problem
            theta_r = np.arctan2(st_y - ry, st_x - rx)
            dtheta = (theta_r - theta)
            if dtheta > np.pi:
                omega = (dtheta - 2 * np.pi) / np.pi
            elif dtheta < -np.pi:
                omega = (dtheta + 2 * np.pi) / np.pi
            else:
                omega = (theta_r - theta) / np.pi
            # Velocities in [-0.3, 0.3] range
            linear = l_max * ((1 - np.abs(omega))**4)
            angular = a_max * np.sign(omega) * (abs(omega)**(1 / 3))
            # omega = (theta_r - theta)/np.pi
            # angular = omega * 0.3 # max angular = 0.3 rad/s
            # linear_r = 1 - np.abs(omega) #may need square
            # linear = linear_r * 0.3 # max linear = 0.3 m/s

        ######################### NOTE: QUESTION  ##############################

        return [linear, angular]
class Navigation:

    # Constructor
    def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.target_selection = TargetSelection()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.inner_target_exists = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)

        # ROS Publisher for the path
        self.path_publisher = rospy.Publisher(rospy.get_param('path_pub_topic'), \
            Path, queue_size = 10)
        # ROS Publisher for the subtargets
        self.subtargets_publisher = rospy.Publisher(rospy.get_param('subgoals_pub_topic'),\
            MarkerArray, queue_size = 10)
        # ROS Publisher for the current target
        self.current_target_publisher = rospy.Publisher(rospy.get_param('curr_target_pub_topic'),\
            Marker, queue_size = 10)

    def checkTarget(self, event):
        # Check if we have a target or if the robot just wanders
        if self.inner_target_exists == False or self.move_with_target == False or\
                self.next_subtarget == len(self.subtargets):
            return

        # Get the robot pose in pixels
        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]

        # YOUR CODE HERE ------------------------------------------------------
        # Here the next subtarget is checked for proximity. If the robot is too
        # close to the subtarget it is considered approached and the next
        # subtarget is assigned as next. However there are cases where the
        # robot misses one subtarget due to obstacle detection. Enhance the below
        # functionality by checking all the subgoals (and the final goal) for
        # proximity and assign the proper next subgoal

        # Find the distance between the robot pose and the next subtarget
        dist = math.hypot(\
            rx - self.subtargets[self.next_subtarget][0], \
            ry - self.subtargets[self.next_subtarget][1])

        # Check if distance is less than 7 px (14 cm)
        if dist < 7:
            print "Sub target reached!"
            self.next_subtarget += 1

            # Check if the final subtarget has been approached
            if self.next_subtarget == len(self.subtargets):
                print "Final goal reached!"
                self.target_exists = False

        # ---------------------------------------------------------------------
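        # Sketch of the requested enhancement (an assumption, mirroring the
        # approach used in a later example in this collection): also check the
        # remaining subtargets, so a subgoal skipped while avoiding an obstacle
        # is not revisited.
        for i in range(self.next_subtarget, len(self.subtargets)):
            if math.hypot(rx - self.subtargets[i][0],
                          ry - self.subtargets[i][1]) < 7:
                self.next_subtarget = i + 1
                if self.next_subtarget == len(self.subtargets):
                    print "Final goal reached!"
                    self.target_exists = False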

        # Publish the current target
        if self.next_subtarget == len(self.subtargets):
            return
        st = Marker()
        st.header.frame_id = "map"
        st.ns = 'as_curr_namespace'
        st.id = 0
        st.header.stamp = rospy.Time(0)
        st.type = 1  # arrow
        st.action = 0  # add
        st.pose.position.x = self.subtargets[self.next_subtarget][0]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['x']
        st.pose.position.y = self.subtargets[self.next_subtarget][1]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['y']

        st.color.r = 0
        st.color.g = 0
        st.color.b = 0.8
        st.color.a = 0.8
        st.scale.x = 0.2
        st.scale.y = 0.2
        st.scale.z = 0.2

        self.current_target_publisher.publish(st)

    # Function that selects the next target, produces the path and updates
    # the coverage field. This is called from the speeds assignment code, since
    # it contains timer callbacks
    def selectTarget(self):
        # IMPORTANT: The robot must be stopped if you call this function until
        # it is over
        # Check if we have a map
        while self.robot_perception.have_map == False:
            return

        print "Navigation: Producing new target"
        # We are good to continue the exploration
        # Make this true in order not to call it again from the speeds assignment
        self.target_exists = True

        # Manually update the coverage field
        self.robot_perception.updateCoverage()
        print "Navigation: Coverage updated"

        # Gets copies of the map and coverage
        local_ogm = self.robot_perception.getMap()
        local_coverage = self.robot_perception.getCoverage()
        print "Got the map and Coverage"

        # Call the target selection function to select the next best goal
        target = self.target_selection.selectTarget(\
            local_ogm,\
            local_coverage,\
            self.robot_perception.robot_pose)
        print "Navigation: New target: " + str(target)

        # Once the target has been found, find the path to it
        # Get the global robot pose
        g_robot_pose = self.robot_perception.getGlobalCoordinates(\
            [self.robot_perception.robot_pose['x_px'],\
            self.robot_perception.robot_pose['y_px']])

        # Need to reverse the path??
        self.path = self.path_planning.createPath(\
            local_ogm,\
            g_robot_pose,\
            target)
        print "Navigation: Path for target found with " + str(len(self.path)) +\
            " points"
        # Reverse the path to start from the robot
        self.path = self.path[::-1]

        # Break the path to subgoals every 10 pixels (0.5m)
        step = 10
        n_subgoals = (int)(len(self.path) / step)
        self.subtargets = []
        for i in range(0, n_subgoals):
            self.subtargets.append(self.path[i * step])
        self.subtargets.append(self.path[-1])
        self.next_subtarget = 0
        print "The path produced " + str(len(self.subtargets)) + " subgoals"

        # Publish the path for visualization purposes
        ros_path = Path()
        ros_path.header.frame_id = "map"
        for p in self.path:
            ps = PoseStamped()
            ps.header.frame_id = "map"
            ps.pose.position.x = p[0] * self.robot_perception.resolution + \
                    self.robot_perception.origin['x']
            ps.pose.position.y = p[1] * self.robot_perception.resolution + \
                    self.robot_perception.origin['y']
            ros_path.poses.append(ps)

        self.path_publisher.publish(ros_path)

        # Publish the subtargets for visualization purposes
        ros_subgoals = MarkerArray()
        count = 0
        for s in self.subtargets:
            st = Marker()
            st.header.frame_id = "map"
            st.ns = 'as_namespace'
            st.id = count
            st.header.stamp = rospy.Time(0)
            st.type = 2  # sphere
            st.action = 0  # add
            st.pose.position.x = s[0] * self.robot_perception.resolution + \
                    self.robot_perception.origin['x']
            st.pose.position.y = s[1] * self.robot_perception.resolution + \
                    self.robot_perception.origin['y']

            st.color.r = 0
            st.color.g = 0.8
            st.color.b = 0
            st.color.a = 0.8
            st.scale.x = 0.2
            st.scale.y = 0.2
            st.scale.z = 0.2
            ros_subgoals.markers.append(st)
            count += 1

        self.subtargets_publisher.publish(ros_subgoals)

        self.inner_target_exists = True

    def velocitiesToNextSubtarget(self):

        [linear, angular] = [0, 0]

        # YOUR CODE HERE ------------------------------------------------------
        # The velocities of the robot regarding the next subtarget should be
        # computed. The known parameters are the robot pose [x,y,th] from
        # robot_perception and the next_subtarget [x,y]. From these, you can
        # compute the robot velocities for the vehicle to approach the target.
        # Hint: Trigonometry is required

        # ---------------------------------------------------------------------
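        # Sketch of one possible implementation (an assumption, modelled on the
        # other navigation examples in this collection): rotate towards the
        # next subtarget and move forward as the heading error shrinks.
        if self.subtargets and self.next_subtarget < len(self.subtargets):
            rx = self.robot_perception.robot_pose['x_px'] - \
                self.robot_perception.origin['x'] / self.robot_perception.resolution
            ry = self.robot_perception.robot_pose['y_px'] - \
                self.robot_perception.origin['y'] / self.robot_perception.resolution
            theta = self.robot_perception.robot_pose['th']
            st_x, st_y = self.subtargets[self.next_subtarget]
            dtheta = math.atan2(st_y - ry, st_x - rx) - theta
            # Wrap the heading error into [-pi, pi]
            dtheta = math.atan2(math.sin(dtheta), math.cos(dtheta))
            angular = 0.3 * dtheta / math.pi  # max angular speed 0.3 rad/s
            linear = 0.3 * (1 - abs(dtheta) / math.pi)  # slow down while turning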

        return [linear, angular]
Example #30
0
    def __init__(self):

        # Initializations of Objects Needed
        self.robot_perception = RobotPerception()
        self.path_planning = PathPlanning()

        # Parameters from YAML File
        self.move_with_target = rospy.get_param("calculate_target")
        self.target_selector = rospy.get_param("target_selector")
        self.turtlebot_path_topic = rospy.get_param('turtlebot_path_topic')
        self.turtlebot_subgoals_topic = rospy.get_param(
            'turtlebot_subgoals_topic')
        self.turtlebot_curr_target_topic = rospy.get_param(
            'turtlebot_curr_target_topic')
        self.map_frame = rospy.get_param('map_frame')
        self.debug = rospy.get_param('debug')
        self.turtlebot_save_progress_images = rospy.get_param(
            'turtlebot_save_progress_images')
        self.limit_exploration_flag = rospy.get_param(
            'limit_exploration')  # Flag to Enable/ Disable OGM Enhancement

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.inner_target_exists = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0
        self.count_limit = 100  # 10 sec (timer fires every 0.1 s)
        self.counter_to_next_sub = self.count_limit

        # Timer Thread (initialization)
        self.time_limit = 150  # in seconds
        self.timerThread = TimerThread(self.time_limit)

        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)

        # Read the target function
        self.target_selection = TargetSelection(self.target_selector)

        # ROS Publisher for the path
        self.path_publisher = rospy.Publisher(self.turtlebot_path_topic,
                                              Path,
                                              queue_size=10)
        # ROS Publisher for the subtargets
        self.subtargets_publisher = rospy.Publisher(
            self.turtlebot_subgoals_topic, MarkerArray, queue_size=10)

        # ROS Publisher for the current target
        self.current_target_publisher = rospy.Publisher(
            self.turtlebot_curr_target_topic, Marker, queue_size=10)

        # For Testing Purpose
        self.timer_flag = True  # True to Enable Timer for resetting Navigation

        # Speed Parameter
        self.max_linear_velocity = rospy.get_param('max_linear_speed')
        self.max_angular_velocity = rospy.get_param('max_angular_speed')

        # Parameters of OGM Enhancement
        self.first_run_flag = True  # Flag to Control first Enhancement in OGM Map

        # Map Container Visualize Message
        self.map_size = np.array(
            [[rospy.get_param('x_min_size'),
              rospy.get_param('y_min_size')],
             [rospy.get_param('x_min_size'),
              rospy.get_param('y_max_size')],
             [rospy.get_param('x_max_size'),
              rospy.get_param('y_min_size')],
             [rospy.get_param('x_max_size'),
              rospy.get_param('y_max_size')]],
            dtype=np.float64)

        if self.limit_exploration_flag:

            # Create CV Bridge Object
            self.bridge = CvBridge()
            # Parameters
            self.drone_image = []  # Hold the Drone OGM Image converted in CV!
            self.ogm_in_cv = []  # Hold the OGM in CV Compatible Version
            self.drone_yaw = 0.0  # Holds the current Drone Yaw
            self.drone_map_pose = []  # Holds the current Drone Position in Map
            # Service
            self.drone_explore_limit_srv_name = rospy.get_param(
                'drone_explore_limit_srv')
            self.limits_exploration_service = rospy.ServiceProxy(
                self.drone_explore_limit_srv_name, OgmLimits)
            # Subscriber
            self.drone_pub_image_topic = rospy.get_param(
                'drone_pub_image_topic')
            self.drone_image_sub = rospy.Subscriber(self.drone_pub_image_topic,
                                                    Image, self.drone_image_cb)
            # Flags
            self.done_enhancement = False  # Flag for Controlling OGM Enhancement Loop
            self.drone_take_image = False  # Flag for Controlling When to Read Image From Drone Camera
            # FIXME: Testing
            self.match_with_limit_pose = True  # True/False to Match with Known Location/Template Matching
            self.match_with_drone_pose = False  # True if you match with Drone Real Pose
class Navigation:

    # Constructor
    def __init__(self):

        # Initializations
        self.robot_perception = RobotPerception()
        self.path_planning = PathPlanning()

        # Check if the robot moves with target or just wanders
        self.move_with_target = rospy.get_param("calculate_target")

        # Flag to check if the vehicle has a target or not
        self.target_exists = False
        self.select_another_target = 0
        self.inner_target_exists = False

        # Container for the current path
        self.path = []
        # Container for the subgoals in the path
        self.subtargets = []

        # Container for the next subtarget. Holds the index of the next subtarget
        self.next_subtarget = 0

        self.count_limit = 400  # 40 sec (timer fires every 0.1 s)

        self.counter_to_next_sub = self.count_limit

        # Check if subgoal is reached via a timer callback
        rospy.Timer(rospy.Duration(0.10), self.checkTarget)

        # Read the target function
        self.target_selector = rospy.get_param("target_selector")
        print "The selected target function is " + self.target_selector
        self.target_selection = TargetSelection(self.target_selector)

        # ROS Publisher for the path
        self.path_publisher = \
            rospy.Publisher(rospy.get_param('path_pub_topic'), \
            Path, queue_size = 10)

        # ROS Publisher for the subtargets
        self.subtargets_publisher = \
            rospy.Publisher(rospy.get_param('subgoals_pub_topic'),\
            MarkerArray, queue_size = 10)

        # ROS Publisher for the current target
        self.current_target_publisher = \
            rospy.Publisher(rospy.get_param('curr_target_pub_topic'),\
            Marker, queue_size = 10)

    def checkTarget(self, event):
        # Check if we have a target or if the robot just wanders
        if self.inner_target_exists == False or self.move_with_target == False or\
                self.next_subtarget == len(self.subtargets):
            return

        self.counter_to_next_sub -= 1

        if self.counter_to_next_sub == 0:
            Print.art_print('\n~~~~ Time reset ~~~~', Print.RED)
            self.inner_target_exists = False
            self.target_exists = False
            return

        # Get the robot pose in pixels
        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]
        for i in range(self.next_subtarget, len(self.subtargets)):
            # Find the distance between the robot pose and the next subtarget
            dist = math.hypot(\
                rx - self.subtargets[i][0], \
                ry - self.subtargets[i][1])

            ######################### NOTE: QUESTION  ##############################
            # What if a later subtarget or the end has been reached before the
            # next subtarget? Alter the code accordingly.
            # Check if distance is less than 7 px (14 cm)
            if dist < 7:
                self.next_subtarget = 1 + i
                self.counter_to_next_sub = self.count_limit
                # Check if the final subtarget has been approached
                if self.next_subtarget == len(self.subtargets):
                    self.target_exists = False

        ########################################################################

        # Publish the current target
        if self.next_subtarget == len(self.subtargets):
            return

        subtarget = [\
            self.subtargets[self.next_subtarget][0]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['x'],
            self.subtargets[self.next_subtarget][1]\
                * self.robot_perception.resolution + \
                self.robot_perception.origin['y']\
            ]

        RvizHandler.printMarker(\
            [subtarget],\
            1, # Type: Arrow
            0, # Action: Add
            "map", # Frame
            "art_next_subtarget", # Namespace
            [0, 0, 0.8, 0.8], # Color RGBA
            0.2 # Scale
        )

    # Function that selects the next target, produces the path and updates
    # the coverage field. This is called from the speeds assignment code, since
    # it contains timer callbacks
    def selectTarget(self):
        # IMPORTANT: The robot must be stopped if you call this function until
        # it is over
        # Check if we have a map
        while self.robot_perception.have_map == False:
            Print.art_print("Navigation: No map yet", Print.RED)
            return

        print "\nClearing all markers"
        RvizHandler.printMarker(\
            [[0, 0]],\
            1, # Type: Arrow
            3, # Action: delete all
            "map", # Frame
            "null", # Namespace
            [0,0,0,0], # Color RGBA
            0.1 # Scale
        )

        print '\n\n----------------------------------------------------------'
        print "Navigation: Producing new target"
        # We are good to continue the exploration
        # Make this true in order not to call it again from the speeds assignment
        self.target_exists = True

        # Gets copies of the map and coverage
        local_ogm = self.robot_perception.getMap()
        local_ros_ogm = self.robot_perception.getRosMap()
        local_coverage = self.robot_perception.getCoverage()
        print "Got the map and Coverage"
        self.path_planning.setMap(local_ros_ogm)

        # Once the target has been found, find the path to it
        # Get the global robot pose
        g_robot_pose = self.robot_perception.getGlobalCoordinates(\
              [self.robot_perception.robot_pose['x_px'],\
              self.robot_perception.robot_pose['y_px']])

        # Call the target selection function to select the next best goal
        # Choose target function
        self.path = []
        force_random = False
        while len(self.path) == 0:
            start = time.time()
            target = self.target_selection.selectTarget(\
                      local_ogm,\
                      local_coverage,\
                      self.robot_perception.robot_pose,
                      self.robot_perception.origin,
                      self.robot_perception.resolution,
                      force_random)

            self.path = self.path_planning.createPath(\
                g_robot_pose,\
                target,
                self.robot_perception.resolution)
            print "Navigation: Path for target found with " + str(len(self.path)) +\
                " points"
            if len(self.path) == 0:
                Print.art_print(\
                    "Path planning failed. Fallback to random target selection", \
                    Print.RED)
                force_random = True

        # Reverse the path to start from the robot
        self.path = self.path[::-1]

        # Break the path into subgoals at every path point (step = 1)
        step = 1
        n_subgoals = (int)(len(self.path) / step)
        self.subtargets = []
        for i in range(0, n_subgoals):
            self.subtargets.append(self.path[i * step])
        self.subtargets.append(self.path[-1])
        self.next_subtarget = 0
        print "The path produced " + str(len(self.subtargets)) + " subgoals"

        ######################### NOTE: QUESTION  ##############################
        # The path is produced by an A* algorithm. This means that it is
        # optimal in length but 1) not smooth and 2) length optimality
        # may not be desired for coverage-based exploration
        ########################################################################

        self.counter_to_next_sub = self.count_limit

        # Publish the path for visualization purposes
        ros_path = Path()
        ros_path.header.frame_id = "map"
        for p in self.path:
            ps = PoseStamped()
            ps.header.frame_id = "map"
            ######################### NOTE: QUESTION  ##############################
            # Fill the ps.pose.position values to show the path in RViz
            # You must understand what self.robot_perception.resolution
            # and self.robot_perception.origin are
            ps.pose.position.x = p[0] * self.robot_perception.resolution + \
                self.robot_perception.origin['x']
            ps.pose.position.y = p[1] * self.robot_perception.resolution + \
                self.robot_perception.origin['y']
            ros_path.poses.append(ps)
        self.path_publisher.publish(ros_path)

        # Publish the subtargets for visualization purposes
        subtargets_mark = []
        for s in self.subtargets:
            subt = [
              s[0] * self.robot_perception.resolution + \
                      self.robot_perception.origin['x'],
              s[1] * self.robot_perception.resolution + \
                      self.robot_perception.origin['y']
              ]
            subtargets_mark.append(subt)

        RvizHandler.printMarker(\
            subtargets_mark,\
            2, # Type: Sphere
            0, # Action: Add
            "map", # Frame
            "art_subtargets", # Namespace
            [0, 0.8, 0.0, 0.8], # Color RGBA
            0.2 # Scale
        )

        self.inner_target_exists = True

    def velocitiesToNextSubtarget(self):

        [linear, angular] = [0, 0]

        [rx, ry] = [\
            self.robot_perception.robot_pose['x_px'] - \
                    self.robot_perception.origin['x'] / self.robot_perception.resolution,\
            self.robot_perception.robot_pose['y_px'] - \
                    self.robot_perception.origin['y'] / self.robot_perception.resolution\
                    ]
        theta = self.robot_perception.robot_pose['th']
        ######################### NOTE: QUESTION  ##############################
        # The velocities of the robot regarding the next subtarget should be
        # computed. The known parameters are the robot pose [x,y,th] from
        # robot_perception and the next_subtarget [x,y]. From these, you can
        # compute the robot velocities for the vehicle to approach the target.
        # Hint: Trigonometry is required (an isolated sketch of the same control
        # law follows this method)

        if self.subtargets and self.next_subtarget <= len(self.subtargets) - 1:
            st_x = self.subtargets[self.next_subtarget][0]
            st_y = self.subtargets[self.next_subtarget][1]

            # Heading error towards the subtarget, wrapped into [-pi, pi]
            omega = math.atan2(st_y - ry, st_x - rx) - theta
            omega = math.atan2(math.sin(omega), math.cos(omega))

            if abs(omega) >= 0.1:
                # Large heading error: rotate in place towards the subtarget
                angular = np.sign(omega) * 0.3
                linear = 0
            else:
                # Nearly aligned: drive forward with the speed saturated by tanh
                # of the remaining distance, plus a small heading correction
                angular = (omega / math.pi) * 0.3
                linear = math.tanh(
                    math.sqrt(pow(st_y - ry, 2) + pow(st_x - rx, 2))) * 0.3

        ######################### NOTE: QUESTION  ##############################
        return [linear, angular]
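
    # A minimal illustrative sketch of the same control law in isolation (an
    # assumption added for clarity, not part of the original code). The 0.1 rad
    # dead-band and the 0.3 velocity caps mirror the constants used above.
    def velocityTowards(self, rx, ry, theta, tx, ty):
        # Heading error towards the target point, wrapped into [-pi, pi]
        d_theta = math.atan2(ty - ry, tx - rx) - theta
        d_theta = math.atan2(math.sin(d_theta), math.cos(d_theta))
        if abs(d_theta) >= 0.1:
            # Rotate in place until roughly facing the target
            return [0, np.sign(d_theta) * 0.3]
        # Drive forward with the speed saturated by tanh of the remaining
        # distance, plus a small proportional correction on the heading
        return [math.tanh(math.hypot(tx - rx, ty - ry)) * 0.3,
                (d_theta / math.pi) * 0.3]
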
class TargetSelection:

    # Constructor
    def __init__(self, selection_method):

        self.initial_time = time()
        self.method = selection_method
        self.initialize_gains = False

        self.brush = Brushfires()
        self.topology = Topology()
        self.path_planning = PathPlanning()
        self.droneConnector = DroneCommunication()

        # Parameters from YAML File
        self.debug = True  #rospy.get_param('debug')
        self.map_discovery_purpose = rospy.get_param('map_discovery_purpose')
        self.color_evaluation_flag = rospy.get_param('color_rating')
        self.drone_color_evaluation_topic = rospy.get_param(
            'drone_pub_color_rating')
        self.evaluate_potential_targets_srv_name = rospy.get_param(
            'rate_potential_targets_srv')

        # Explore Gains
        self.g_color = 0.0
        self.g_brush = 0.0
        self.g_corner = 0.0
        self.g_distance = 0.0
        self.set_gain()

        if self.color_evaluation_flag:

            # Color Evaluation Service
            self.color_evaluation_service = rospy.ServiceProxy(
                self.evaluate_potential_targets_srv_name, EvaluateTargets)
            # Subscribe to Color Evaluation Topic to Get Results from Color Evaluation
            self.drone_color_evaluation_sub = rospy.Subscriber(
                self.drone_color_evaluation_topic, ColorEvaluationArray,
                self.color_evaluation_cb)
            # Parameters
            self.targets_color_evaluated = False  # Set True Once Color Evaluation of Targets Completed
            self.color_evaluation = []  # Holds the Color Evaluation of Targets
            self.corner_evaluation = []  # Holds the Number of Corners Near Each Target

    # Target Selection Function
    def selectTarget(self,
                     init_ogm,
                     coverage,
                     robot_pose,
                     origin,
                     resolution,
                     g_robot_pose,
                     previous_limits=[],
                     force_random=False):

        # Initialize Target
        target = [-1, -1]
        if self.running_time() > 15:
            print('\x1b[37;1m' + str('15 Minutes Constraint Passed!!!') +
                  '\x1b[0m')

        # Find only the useful boundaries of OGM
        start = time()
        ogm_limits = OgmOperations.findUsefulBoundaries(init_ogm,
                                                        origin,
                                                        resolution,
                                                        print_result=True,
                                                        step=20)
        if self.debug:
            print('\x1b[34;1m' + str('Target Selection: OGM Boundaries ') +
                  str(ogm_limits) + str(' in ') + str(time() - start) +
                  str(' seconds.') + '\x1b[0m')

        # Blur the OGM to erase discontinuities due to laser rays
        start = time()
        ogm = OgmOperations.blurUnoccupiedOgm(init_ogm, ogm_limits)
        if self.debug:
            print('\x1b[34;1m' + str('Target Selection: OGM Blurred in ') +
                  str(time() - start) + str(' seconds.') + '\x1b[0m')

        # Calculate Brushfire field
        start = time()
        brush = self.brush.obstaclesBrushfireCffi(ogm, ogm_limits)
        if self.debug:
            print('\x1b[34;1m' + str('Target Selection: Brush in ') +
                  str(time() - start) + str(' seconds.') + '\x1b[0m')

        # Calculate Robot Position
        [rx, ry] = [
            robot_pose['x_px'] - origin['x'] / resolution,
            robot_pose['y_px'] - origin['y'] / resolution
        ]

        # Calculate Skeletonization
        start = time()
        skeleton = self.topology.skeletonizationCffi(ogm, origin, resolution,
                                                     ogm_limits)
        if self.debug:
            print('\x1b[34;1m' + str('Target Selection: Skeletonization in ') +
                  str(time() - start) + str(' seconds.') + '\x1b[0m')

        # Find Topological Graph
        start = time()
        potential_targets = self.topology.topologicalNodes(
            ogm,
            skeleton,
            coverage,
            brush,
            final_num_of_nodes=25,
            erase_distance=100,
            step=15)
        if self.debug:
            print('\x1b[34;1m' +
                  str('Target Selection: Topological Graph in ') +
                  str(time() - start) + str(' seconds.') + '\x1b[0m')
            print('\x1b[34;1m' +
                  str("The Potential Targets to be Checked are ") +
                  str(len(potential_targets)) + '\x1b[0m')

        if len(potential_targets) == 0:
            print('\x1b[32;1m' +
                  str('\n------------------------------------------') +
                  str("Finished Space Exploration!!! ") +
                  str('------------------------------------------\n') +
                  '\x1b[0m')
            sleep(10000)

        # Visualization of Topological Graph
        vis_potential_targets = []
        for n in potential_targets:
            vis_potential_targets.append([
                n[0] * resolution + origin['x'],
                n[1] * resolution + origin['y']
            ])
        RvizHandler.printMarker(vis_potential_targets, 1, 0, "map",
                                "art_topological_nodes", [0.3, 0.4, 0.7, 0.5],
                                0.1)

        # Check if we have given values to Gains
        if not self.initialize_gains:
            self.set_gain()

        # Random Point Selection if Needed
        if self.method == 'random' or force_random:

            # Get Distance from Potential Targets
            distance = np.zeros((len(potential_targets), 1), dtype=np.float32)
            for idx, target in enumerate(potential_targets):
                distance[idx] = hypot(rx - target[0], ry - target[1])
            distance *= 255.0 / distance.max()

            path = self.selectRandomTarget(ogm, coverage, brush, ogm_limits,
                                           potential_targets, distance,
                                           resolution, g_robot_pose)

            if path is not None:
                return path
            else:
                return []

        # Send Potential Targets for Color Evaluation (if the Flag is Enabled)
        if self.color_evaluation_flag:
            start_color = time()
            while not self.sent_potential_targets_for_color_evaluation(
                    potential_targets, resolution, origin):
                pass

        # Initialize Arrays for Target Selection
        ids = np.arange(len(potential_targets)).reshape(-1, 1)
        brushfire = np.zeros((len(potential_targets), 1), dtype=np.float32)
        distance = np.zeros((len(potential_targets), 1), dtype=np.float32)
        color = np.zeros((len(potential_targets), 1), dtype=np.float32)
        corners = np.zeros((len(potential_targets), 1), dtype=np.float32)
        score = np.zeros((len(potential_targets), 1), dtype=np.float32)

        # Calculate Distance and Brush Evaluation
        start = time()
        for idx, target in enumerate(potential_targets):
            distance[idx] = hypot(rx - target[0], ry - target[1])
            brushfire[idx] = brush[target[0], target[1]]

        if self.debug:
            print('\x1b[35;1m' +
                  str('Distance and Brush Evaluation Calculated in ') +
                  str(time() - start) + str(' seconds.') + '\x1b[0m')

        # Wait for Color Evaluation to be Completed
        if self.color_evaluation_flag:
            while not self.targets_color_evaluated:
                pass
            color = np.array(self.color_evaluation).reshape(-1, 1)
            corners = np.array(self.corner_evaluation,
                               dtype=np.float64).reshape(-1, 1)
            # Reset Flag for Next Run
            self.targets_color_evaluated = False
            if self.debug:
                print('\x1b[35;1m' + str('Color Evaluation Calculated in ') +
                      str(time() - start_color) + str(' seconds.') + '\x1b[0m')

        # Normalize Evaluation Arrays to [0, 255]
        distance *= 255.0 / distance.max()
        brushfire *= 255.0 / brushfire.max()
        if self.color_evaluation_flag:
            # color max is 640*320 = 204800
            color *= 255.0 / color.max()
            color = 255.0 - color
            corners *= 255.0 / corners.max()

        # Calculate Score to Choose Best Target
        # Final Array = [ Id. | [X] | [Y] | Color | Brush | Dist. | Num. of Corners | Score ]
        #                  0     1     2     3       4       5            6             7
        # Max is: 255 + 255 -  0  -  0  = +510
        # Min is:  0  +  0  - 255 - 255 = -510
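        # Illustrative worked example (assuming unit gains for intuition only;
        # the real gains come from set_gain()): a target with color = 200,
        # brush = 180, distance = 50 and corners = 10 scores
        # 200 + 180 - 50 - 10 = 320, which rescale() then maps from [-510, 510]
        # onto [0, 255] as (320 + 510) * 255 / 1020 = 207.5.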
        evaluation = np.concatenate(
            (ids, potential_targets, color, brushfire, distance, corners, score),
            axis=1)
        for e in evaluation:
            # The gains are chosen in set_gain() according to the type of
            # exploration; for 'combined' they change over time, so refresh them
            if self.map_discovery_purpose == 'combined':
                self.set_gain()
            # Weighted score: reward color and brushfire, penalize distance and corners
            e[7] = self.g_color * e[3] + self.g_brush * e[4] \
                - self.g_distance * e[5] - self.g_corner * e[6]

        # Normalize the Score to [0, 255] and Sort by Score in Increasing Order
        # (a sketch of the assumed rescale() mapping follows this method)
        evaluation[:, 7] = self.rescale(
            evaluation[:, 7],
            -self.g_distance * 255.0 - self.g_corner * 255.0,
            self.g_color * 255.0 + self.g_brush * 255.0,
            0.0, 255.0)
        evaluation = evaluation[evaluation[:, 7].argsort()]

        # The Best Target is now in the Last Row of the Evaluation Table
        target = [evaluation[-1, 1], evaluation[-1, 2]]

        # Warn if the Selected Target Lies Outside the Previously Explored Limits
        if len(previous_limits) != 0:
            if not previous_limits['min_x'] < target[0] < previous_limits['max_x'] and not \
                    previous_limits['min_y'] < target[1] < previous_limits['max_y']:
                print('\x1b[38;1m' +
                      str('Selected Target lies outside the previously explored area.') +
                      '\x1b[0m')

        print('\x1b[35;1m' + str('Selected Target was ') +
              str(int(evaluation.item(
                  (len(potential_targets) - 1), 0))) + str(' with score of ') +
              str(evaluation.item(
                  (len(potential_targets) - 1), 7)) + str('.') + '\x1b[0m')

        return self.path_planning.createPath(g_robot_pose, target, resolution)
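
    # rescale() is defined elsewhere in the original class. A minimal sketch of
    # the linear min-max mapping that the rescale() call above implies (an
    # assumption, with a hypothetical name, not the original implementation):
    def rescale_sketch(self, values, old_min, old_max, new_min, new_max):
        # Map values linearly from [old_min, old_max] onto [new_min, new_max]
        return (values - old_min) * (new_max - new_min) / \
               (old_max - old_min) + new_min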

    # Ask the Drone (via the Evaluation Service) to Color-Evaluate the Potential Targets
    def sent_potential_targets_for_color_evaluation(self, targets, resolution,
                                                    origin):

        # Calculate Position of Targets in Map
        targets = np.asarray(targets, dtype=np.float64)
        for target in targets:
            target[0] = target[0] * resolution + origin['x']
            target[1] = target[1] * resolution + origin['y']
        # Create Color Evaluation Request Message
        color_evaluation_request_msg = EvaluateTargetsRequest()
        color_evaluation_request_msg.pos_x = np.array(targets[:, 0])
        color_evaluation_request_msg.pos_y = np.array(targets[:, 1])
        # Call the Service; on Failure Return False so the Caller Can Retry
        rospy.wait_for_service(self.evaluate_potential_targets_srv_name)
        try:
            response = self.color_evaluation_service(
                color_evaluation_request_msg)
        except rospy.ServiceException, e:
            print "Service call failed: %s" % e
            return False

        return response
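
    # The actual color_evaluation_cb wired up in __init__ is implemented elsewhere
    # in the original code. A minimal sketch (with a hypothetical name) of what it
    # presumably does, based only on how selectTarget() consumes the results above;
    # the ColorEvaluationArray field names used here are assumptions:
    def color_evaluation_cb_sketch(self, msg):
        # Store one color score and one corner count per potential target,
        # then raise the flag that selectTarget() busy-waits on
        self.color_evaluation = list(msg.color_evaluation)
        self.corner_evaluation = list(msg.corners)
        self.targets_color_evaluated = True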