Code Example #1
File: roaming_agent.py  Project: lardemua/ros_bridge
class RoamingAgent(Agent):
    """
    RoamingAgent implements a basic agent that navigates scenes making random
    choices when facing an intersection.
    This agent respects traffic lights and other vehicles.
    """
    def __init__(self, vehicle):
        """
        Constructor for the RoamingAgent class
        :param vehicle: actor to apply the local planner logic onto
        """
        super(RoamingAgent, self).__init__(vehicle)
        self._proximity_threshold = 10.0  # meters
        self._state = AgentState.NAVIGATING
        self._local_planner = LocalPlanner(self._vehicle)

    def run_step(self, debug=False):
        """
        Function used to execute one step of navigation.
        :return: carla.VehicleControl
        """
        # is there an obstacle in front of us?
        hazard_detected = False
        # retrieve relevant elements for safe navigation, i.e.: traffic lights
        # and other vehicles
        actor_list = self._world.get_actors()
        vehicle_list = actor_list.filter("*vehicle*")
        lights_list = actor_list.filter("*traffic_light*")
        # check possible obstacles
        vehicle_state, vehicle = self._is_vehicle_hazard(vehicle_list)
        if vehicle_state:
            if debug:
                print('!!! VEHICLE BLOCKING AHEAD [{}]'.format(vehicle.id))
            self._state = AgentState.BLOCKED_BY_VEHICLE
            hazard_detected = True
        # check for the state of the traffic lights
        light_state, traffic_light = self._is_light_red(lights_list)
        if light_state:
            if debug:
                print('=== RED LIGHT AHEAD [{}]'.format(traffic_light.id))
            self._state = AgentState.BLOCKED_RED_LIGHT
            hazard_detected = True
        if hazard_detected:
            control = self.emergency_stop()
        else:
            self._state = AgentState.NAVIGATING
            # standard local planner behavior
            control = self._local_planner.run_step()
        return control
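
The example above only defines the agent; a minimal usage sketch follows, assuming a CARLA server running on localhost:2000, the standard carla Python API, and that RoamingAgent is importable from the project's navigation package (the import path shown is an assumption).

import carla
# NOTE: import path is an assumption; adjust it to wherever RoamingAgent lives in your project
from agents.navigation.roaming_agent import RoamingAgent

client = carla.Client('localhost', 2000)   # assumes a CARLA server on localhost:2000
client.set_timeout(10.0)
world = client.get_world()

# spawn an ego vehicle at the first spawn point
blueprint = world.get_blueprint_library().filter('vehicle.*')[0]
spawn_point = world.get_map().get_spawn_points()[0]
vehicle = world.spawn_actor(blueprint, spawn_point)

agent = RoamingAgent(vehicle)
try:
    while True:
        world.wait_for_tick()               # wait for the next simulation frame
        control = agent.run_step(debug=True)
        vehicle.apply_control(control)
finally:
    vehicle.destroy()
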
Code Example #2
File: basic_agent.py  Project: haerba/carla_era
class BasicAgent(Agent):
    """
    BasicAgent implements a basic agent that navigates scenes to reach a given
    target destination. This agent respects traffic lights and other vehicles.
    """

    def __init__(self, role_name, ego_vehicle_id, avoid_risk=True):
        """
        """
        super(BasicAgent, self).__init__(role_name, ego_vehicle_id, avoid_risk)

        self._avoid_risk = avoid_risk
        self._current_speed = 0.0  # km/h
        self._current_pose = Pose()
        self._proximity_threshold = 10.0  # meters
        self._state = AgentState.NAVIGATING
        args_lateral_dict = {
            'K_P': 0.9,
            'K_D': 0.0,
            'K_I': 0.1}
        self._local_planner = LocalPlanner(opt_dict={'lateral_control_dict': args_lateral_dict})

        if self._avoid_risk:
            self._vehicle_id_list = []
            self._lights_id_list = []
            self._actors_subscriber = rospy.Subscriber(
                "/carla/actor_list", CarlaActorList, self.actors_updated)
            self._objects = []
            self._objects_subscriber = rospy.Subscriber(
                "/carla/{}/objects".format(role_name), ObjectArray, self.objects_updated)
            self._get_actor_waypoint_client = rospy.ServiceProxy(
                '/carla_waypoint_publisher/{}/get_actor_waypoint'.format(role_name),
                GetActorWaypoint)

        self._odometry_subscriber = rospy.Subscriber(
            "/carla/{}/odometry".format(role_name), Odometry, self.odometry_updated)

    def get_actor_waypoint(self, actor_id):
        """
        helper method to get waypoint for actor via ros service
        Only used if risk should be avoided.
        """
        try:
            response = self._get_actor_waypoint_client(actor_id)
            return response.waypoint
        except (rospy.ServiceException, rospy.ROSInterruptException) as e:
            if not rospy.is_shutdown():
                rospy.logwarn("Service call failed: {}".format(e))

    def odometry_updated(self, odo):
        """
        callback on new odometry
        """
        self._current_speed = math.sqrt(odo.twist.twist.linear.x ** 2 +
                                        odo.twist.twist.linear.y ** 2 +
                                        odo.twist.twist.linear.z ** 2) * 3.6
        self._current_pose = odo.pose.pose
        super(BasicAgent, self).odometry_updated(odo)

    def actors_updated(self, actors):
        """
        callback on new actor list
        Only used if risk should be avoided.
        """
        # retrieve relevant elements for safe navigation, i.e.: traffic lights
        # and other vehicles
        self._vehicle_id_list = []
        self._lights_id_list = []
        for actor in actors.actors:
            if "vehicle" in actor.type:
                self._vehicle_id_list.append(actor.id)
            elif "traffic_light" in actor.type:
                self._lights_id_list.append(
                    (actor.id, self.get_actor_waypoint(actor.id)))

    def objects_updated(self, objects):
        """
        callback on new objects
        Only used if risk should be avoided.
        """
        self._objects = objects.objects

    def run_step(self, target_speed):
        """
        Execute one step of navigation.
        :param target_speed: target speed in km/h
        :return: (control, finished) tuple
        """
        finished = False

        # is there an obstacle in front of us?
        hazard_detected = False

        if self._avoid_risk:
            # check possible obstacles
            vehicle_state, vehicle = self._is_vehicle_hazard(  # pylint: disable=unused-variable
                self._vehicle_id_list, self._objects)
            if vehicle_state:
                #rospy.loginfo('=== Vehicle blocking ahead [{}])'.format(vehicle))
                self._state = AgentState.BLOCKED_BY_VEHICLE
                hazard_detected = True

            # check for the state of the traffic lights
            light_state, traffic_light = self._is_light_red(  # pylint: disable=unused-variable
                self._lights_id_list)
            if light_state:
                #rospy.loginfo('=== Red Light ahead [{}])'.format(traffic_light))

                self._state = AgentState.BLOCKED_RED_LIGHT
                hazard_detected = True

        if hazard_detected:
            control = self.emergency_stop()
        else:
            self._state = AgentState.NAVIGATING
            # standard local planner behavior
            control, finished = self._local_planner.run_step(
                target_speed, self._current_speed, self._current_pose)

        return control, finished
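
Since this variant is ROS-based, the control it returns is meant to be published rather than applied directly. A hedged sketch of a driving loop follows (assumptions: carla_ros_bridge is running, run_step returns a CarlaEgoVehicleControl message as in the bridge's ad-agent code, and the role name, actor id, and topic name below are placeholders).

import rospy
from carla_msgs.msg import CarlaEgoVehicleControl  # message type from carla_ros_bridge (assumed available)

rospy.init_node('basic_agent_example')
role_name = 'ego_vehicle'            # placeholder role name
ego_vehicle_id = 123                 # placeholder actor id of the ego vehicle
agent = BasicAgent(role_name, ego_vehicle_id, avoid_risk=True)

control_publisher = rospy.Publisher(
    '/carla/{}/vehicle_control_cmd'.format(role_name),   # topic name is an assumption
    CarlaEgoVehicleControl, queue_size=1)

rate = rospy.Rate(20)                # control loop at 20 Hz
target_speed = 30.0                  # km/h
while not rospy.is_shutdown():
    control, finished = agent.run_step(target_speed)
    if control:
        control_publisher.publish(control)
    if finished:
        break
    rate.sleep()
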
Code Example #3
File: basic_agent.py  Project: vicky139222/AutoPilot
class BasicAgent(Agent):
    """
    BasicAgent implements a basic agent that navigates scenes to reach a given
    target destination. This agent respects traffic lights and other vehicles.
    """
    def __init__(self, vehicle, target_speed=20):
        """

        :param vehicle: actor to apply to local planner logic onto
        """
        super(BasicAgent, self).__init__(vehicle)

        self._proximity_threshold = 10.0  # meters
        self._state = AgentState.NAVIGATING
        args_lateral_dict = {'K_P': 1, 'K_D': 0.02, 'K_I': 0, 'dt': 1.0 / 20.0}
        self._local_planner = LocalPlanner(self._vehicle,
                                           opt_dict={
                                               'target_speed':
                                               target_speed,
                                               'lateral_control_dict':
                                               args_lateral_dict
                                           })
        self._hop_resolution = 2.0
        self._path_seperation_hop = 2
        self._path_seperation_threshold = 0.5
        self._target_speed = target_speed
        self._grp = None

    def set_destination(self, location):
        """
        This method creates a list of waypoints from agent's position to destination location
        based on the route returned by the global router
        """

        print('set_destination..')

        start_waypoint = self._map.get_waypoint(self._vehicle.get_location())
        end_waypoint = self._map.get_waypoint(
            carla.Location(location[0], location[1], location[2]))

        route_trace = self._trace_route(start_waypoint, end_waypoint)
        assert route_trace

        self._local_planner.set_global_plan(route_trace)

    def _trace_route(self, start_waypoint, end_waypoint):
        """
        This method sets up a global router and returns the optimal route
        from start_waypoint to end_waypoint
        """

        # Setting up global router
        if self._grp is None:
            dao = GlobalRoutePlannerDAO(self._vehicle.get_world().get_map(),
                                        self._hop_resolution)
            grp = GlobalRoutePlanner(dao)
            grp.setup()
            self._grp = grp

        # Obtain route plan
        route = self._grp.trace_route(start_waypoint.transform.location,
                                      end_waypoint.transform.location)

        return route

    def run_step(self, debug=False):
        """
        Execute one step of navigation.
        :return: carla.VehicleControl
        """
        #print('basic: run step')
        # is there an obstacle in front of us?
        hazard_detected = False

        # retrieve relevant elements for safe navigation, i.e.: traffic lights
        # and other vehicles
        actor_list = self._world.get_actors()
        vehicle_list = actor_list.filter("*vehicle*")
        lights_list = actor_list.filter("*traffic_light*")

        # check possible obstacles
        vehicle_state, vehicle = self._is_vehicle_hazard(vehicle_list)
        if vehicle_state:
            if debug:
                print('!!! VEHICLE BLOCKING AHEAD [{}]'.format(vehicle.id))

            self._state = AgentState.BLOCKED_BY_VEHICLE
            hazard_detected = True

        # check for the state of the traffic lights
        light_state, traffic_light = self._is_light_red(lights_list)
        if light_state:
            if debug:
                print('=== RED LIGHT AHEAD [{}]'.format(traffic_light.id))

            self._state = AgentState.BLOCKED_RED_LIGHT
            hazard_detected = True

        ## ignore traffic: deliberately override the hazard checks above
        hazard_detected = False

        if hazard_detected:
            control = self.emergency_stop()
        else:
            self._state = AgentState.NAVIGATING
            # standard local planner behavior
            control = self._local_planner.run_step()

        return control
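
A hedged usage sketch for this goal-directed variant, assuming a world and a spawned ego vehicle set up as in the RoamingAgent sketch above; the chosen spawn point is an arbitrary placeholder destination.

agent = BasicAgent(vehicle, target_speed=30)

# pick an arbitrary reachable location as the goal (placeholder choice)
destination = world.get_map().get_spawn_points()[10].location
agent.set_destination((destination.x, destination.y, destination.z))

while True:
    world.wait_for_tick()
    control = agent.run_step(debug=False)
    vehicle.apply_control(control)
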
Code Example #4
class AffordancesAgent(object):
    def __init__(self, path_to_config_file):
        # params are not used for now, but we might want to use this for configuration later

        self.setup(path_to_config_file)
        self.save_attentions = False

    def setup(self, path_to_config_file):
        self._agent = None
        self.route_assigned = False
        self.count = 0

        exp_dir = os.path.join(
            '/', os.path.join(*path_to_config_file.split('/')[:-1]))

        yaml_conf, checkpoint_number, agent_name, encoder_params = checkpoint_parse_configuration_file(
            path_to_config_file)

        if encoder_params == "None":
            encoder_params = None

        g_conf.immutable(False)
        merge_with_yaml(
            os.path.join('/',
                         os.path.join(*path_to_config_file.split('/')[:-4]),
                         yaml_conf), encoder_params)

        if g_conf.MODEL_TYPE in ['one-step-affordances']:
            # one-step training: no need to retrain the FC layers, we just use the encoder model's output as the prediction
            self._model = EncoderModel(g_conf.ENCODER_MODEL_TYPE,
                                       g_conf.ENCODER_MODEL_CONFIGURATION)
            self.checkpoint = torch.load(
                os.path.join(exp_dir, 'checkpoints',
                             str(checkpoint_number) + '.pth'))
            print("Affordances Model ",
                  str(checkpoint_number) + '.pth', "loaded from ",
                  os.path.join(exp_dir, 'checkpoints'))
            self._model.load_state_dict(self.checkpoint['state_dict'])
            self._model.cuda()
            self._model.eval()

        elif g_conf.MODEL_TYPE in ['separate-affordances']:
            if encoder_params is not None:
                self.encoder_model = EncoderModel(
                    g_conf.ENCODER_MODEL_TYPE,
                    g_conf.ENCODER_MODEL_CONFIGURATION)
                self.encoder_model.cuda()
                # Here we load the pre-trained encoder (not fine-tuned)
                if g_conf.FREEZE_ENCODER:
                    encoder_checkpoint = torch.load(
                        os.path.join(
                            os.path.join(
                                '/',
                                os.path.join(
                                    *path_to_config_file.split('/')[:-4])),
                            '_logs', encoder_params['encoder_folder'],
                            encoder_params['encoder_exp'], 'checkpoints',
                            str(encoder_params['encoder_checkpoint']) +
                            '.pth'))
                    print(
                        "Encoder model ",
                        str(encoder_params['encoder_checkpoint']),
                        "loaded from ",
                        os.path.join('_logs', encoder_params['encoder_folder'],
                                     encoder_params['encoder_exp'],
                                     'checkpoints'))
                    self.encoder_model.load_state_dict(
                        encoder_checkpoint['state_dict'])
                    self.encoder_model.eval()
                    for param_ in self.encoder_model.parameters():
                        param_.requires_grad = False

                else:
                    encoder_checkpoint = torch.load(
                        os.path.join(exp_dir, 'checkpoints',
                                     str(checkpoint_number) + '_encoder.pth'))
                    print("FINE TUNNED encoder model ",
                          str(checkpoint_number) + '_encoder.pth',
                          "loaded from ", os.path.join(exp_dir, 'checkpoints'))
                    self.encoder_model.load_state_dict(
                        encoder_checkpoint['state_dict'])
                    self.encoder_model.eval()
                    for param_ in self.encoder_model.parameters():
                        param_.requires_grad = False
            else:
                raise RuntimeError(
                    'encoder_params can not be None in MODEL_TYPE --> separate-affordances'
                )

            self._model = CoILModel(g_conf.MODEL_TYPE,
                                    g_conf.MODEL_CONFIGURATION,
                                    g_conf.ENCODER_MODEL_CONFIGURATION)
            self.checkpoint = torch.load(
                os.path.join(exp_dir, 'checkpoints',
                             str(checkpoint_number) + '.pth'))
            print("Affordances Model ",
                  str(checkpoint_number) + '.pth', "loaded from ",
                  os.path.join(exp_dir, 'checkpoints'))
            self._model.load_state_dict(self.checkpoint['state_dict'])
            self._model.cuda()
            self._model.eval()

    def get_sensors_dict(self):
        """
        The agent defines the sensors it is going to use. These have to be
        registered with the environment for it to produce this data.
        """
        sensors_dict = [{
            'type': 'sensor.camera.rgb',
            'x': 2.0,
            'y': 0.0,
            'z': 1.40,
            'roll': 0.0,
            'pitch': -15.0,
            'yaw': 0.0,
            'width': 800,
            'height': 600,
            'fov': 100,
            'id': 'rgb_central'
        }]

        return sensors_dict

    # TODO we set the sensors here directly.
    def sensors(self):
        return self._sensors_dict

    def get_state(self, exp_list, target_speed=20.0):
        """
        Based on the exp object, compute all the affordances.
        :param exp_list: list of experiment objects; only the first entry is used
        :return: dict of affordances
        """
        exp = exp_list[0]
        self._vehicle = exp._ego_actor

        if self._agent is None:
            self._agent = True
            self._state = AgentState.NAVIGATING
            args_lateral_dict = {
                'K_P': 1,
                'K_D': 0.02,
                'K_I': 0,
                'dt': 1.0 / 20.0
            }
            self._local_planner = LocalPlanner(self._vehicle,
                                               opt_dict={
                                                   'target_speed':
                                                   target_speed,
                                                   'lateral_control_dict':
                                                   args_lateral_dict
                                               })
            self._hop_resolution = 2.0
            self._path_seperation_hop = 2
            self._path_seperation_threshold = 0.5
            self._grp = None

        if not self.route_assigned:
            plan = []
            for transform, road_option in exp._route:
                wp = exp._ego_actor.get_world().get_map().get_waypoint(
                    transform.location)
                plan.append((wp, road_option))

            self._local_planner.set_global_plan(plan)
            self.route_assigned = True

        input_data = exp._sensor_interface.get_data()
        input_data = self._process_sensors(
            input_data['rgb_central'][1])  # torch.Size([1, 3, 88, 200])

        if g_conf.MODEL_TYPE in ['one-step-affordances']:
            c_output, r_output, layers = self._model.forward_outputs(
                input_data.cuda(),
                torch.cuda.FloatTensor(
                    [exp._forward_speed / g_conf.SPEED_FACTOR]).unsqueeze(0),
                torch.cuda.FloatTensor(encode_directions(
                    exp._directions)).unsqueeze(0))
        elif g_conf.MODEL_TYPE in ['separate-affordances']:
            if g_conf.ENCODER_MODEL_TYPE in [
                    'action_prediction', 'stdim', 'ETEDIM', 'FIMBC',
                    'one-step-affordances'
            ]:
                e, layers = self.encoder_model.forward_encoder(
                    input_data.cuda(),
                    torch.cuda.FloatTensor([
                        exp._forward_speed / g_conf.SPEED_FACTOR
                    ]).unsqueeze(0),
                    torch.cuda.FloatTensor(encode_directions(
                        exp._directions)).unsqueeze(0))
                c_output, r_output = self._model.forward_test(e)
            elif g_conf.ENCODER_MODEL_TYPE in [
                    'ETE', 'ETE_inverse_model', 'forward', 'ETE_stdim'
            ]:
                e, layers = self.encoder_model.forward_encoder(
                    input_data.cuda(),
                    torch.cuda.FloatTensor([
                        exp._forward_speed / g_conf.SPEED_FACTOR
                    ]).unsqueeze(0),
                    torch.cuda.FloatTensor(encode_directions(
                        exp._directions)).unsqueeze(0))
                c_output, r_output = self._model.forward_test(e)

        if self.save_attentions:
            exp_params = exp._exp_params
            attentions_full_path = os.path.join(
                os.environ["SRL_DATASET_PATH"], exp_params['package_name'],
                exp_params['env_name'],
                str(exp_params['env_number']) + '_' + exp._agent_name,
                str(exp_params['exp_number']))
            save_attentions(input_data.cuda(),
                            layers,
                            self.count,
                            attentions_full_path,
                            save_input=False,
                            big_size=False)

        self.count += 1

        affordances = {}

        output_relative_angle = torch.squeeze(
            r_output[0]).cpu().detach().numpy() * 1.0

        is_pedestrian_hazard = False
        if c_output[0][0, 0] < c_output[0][0, 1]:
            is_pedestrian_hazard = True

        is_red_tl_hazard = False
        if c_output[1][0, 0] < c_output[1][0, 1]:
            is_red_tl_hazard = True

        is_vehicle_hazard = False
        if (c_output[2][0, 0] < c_output[2][0, 1]):
            is_vehicle_hazard = True

        affordances.update({'is_pedestrian_hazard': is_pedestrian_hazard})
        affordances.update({'is_red_tl_hazard': is_red_tl_hazard})
        affordances.update({'is_vehicle_hazard': is_vehicle_hazard})
        affordances.update({'relative_angle': output_relative_angle})
        # For now we take the target speed to always be 20.0
        affordances.update({'target_speed': target_speed})

        #affordances.update({'GT_is_pedestrian_hazard': })
        #affordances.update({'GT_is_red_tl_hazard': })
        #affordances.update({'GT_is_vehicle_hazard': })
        gt_relative_angle = compute_relative_angle(
            self._vehicle, self._local_planner.get_target_waypoint())
        affordances.update({'GT_relative_angle': gt_relative_angle})
        affordances.update({
            'ERROR_relative_angle':
            output_relative_angle - gt_relative_angle
        })

        return affordances

    def make_reward(self, exp):
        # Just return None since the reward is not used for a non-learning agent

        return None

    def step(self, affordances):
        hazard_detected = False
        is_vehicle_hazard = affordances['is_vehicle_hazard']
        is_red_tl_hazard = affordances['is_red_tl_hazard']
        is_pedestrian_hazard = affordances['is_pedestrian_hazard']
        relative_angle = affordances['relative_angle']
        target_speed = affordances['target_speed']
        # once we meet a speed limit sign, the target speed changes

        #if target_speed != self._local_planner._target_speed:
        #    self._local_planner.set_speed(target_speed)
        #forward_speed = affordances['forward_speed']

        if is_vehicle_hazard:
            self._state = AgentState.BLOCKED_BY_VEHICLE
            hazard_detected = True

        if is_red_tl_hazard:
            self._state = AgentState.BLOCKED_RED_LIGHT
            hazard_detected = True

        if is_pedestrian_hazard:
            self._state = AgentState.BLOCKED_BY_PEDESTRIAN
            hazard_detected = True

        if hazard_detected:
            control = self.emergency_stop()

        else:
            self._state = AgentState.NAVIGATING
            control = self._local_planner.run_step(relative_angle,
                                                   target_speed)

        logging.debug("Output %f %f %f " %
                      (control.steer, control.throttle, control.brake))

        return control

    def reinforce(self, rewards):
        """
        This agent cannot learn so there is no reinforce
        """
        pass

    def reset(self):
        print(" Correctly reseted the agent")
        self.route_assigned = False
        self._agent = None
        self.count = 0

    def emergency_stop(self):
        """
        Send an emergency stop command to the vehicle
        :return:
        """
        control = carla.VehicleControl()
        control.steer = 0.0
        control.throttle = 0.0
        control.brake = 1.0
        control.hand_brake = False

        return control

    def _process_sensors(self, sensor):
        sensor = sensor[:, :, 0:3]  # BGRA -> BGR, drop the alpha channel
        sensor = sensor[g_conf.IMAGE_CUT[0]:g_conf.IMAGE_CUT[1], :, :]  # crop
        sensor = scipy.misc.imresize(sensor,
                                     (g_conf.SENSORS['rgb_central'][1],
                                      g_conf.SENSORS['rgb_central'][2]))
        self.latest_image = sensor

        sensor = np.swapaxes(sensor, 0, 1)
        sensor = np.transpose(sensor, (2, 1, 0))
        sensor = torch.from_numpy(sensor / 255.0).type(
            torch.FloatTensor).cuda()
        image_input = sensor.unsqueeze(0)
        self.latest_image_tensor = image_input

        return image_input
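
A hedged sketch of one evaluation cycle with the affordances agent; the config path is a placeholder, and exp_list is assumed to be provided by the surrounding experiment framework with the attributes get_state expects (_ego_actor, _sensor_interface, _route, and so on).

agent = AffordancesAgent('/path/to/exp_config.yaml')        # placeholder config path

affordances = agent.get_state(exp_list, target_speed=20.0)  # exp_list comes from the framework
control = agent.step(affordances)
exp_list[0]._ego_actor.apply_control(control)
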
Code Example #5
 # ---  PID control  --- #
 #########################
 clock = pygame.time.Clock()           # track elapsed time and cap the frame rate
 world.on_tick(hud.on_world_tick)      # calculate the server fps
 while True:
     clock.tick(15)
     current_timestamp += 1
     # client each timestamp: show the information
     hud.tick(world, vehicle, clock)
     # show the screen
     camera_manager.render(display)
     hud.render(display)
     # update the screen
     pygame.display.flip()
     # --- control PID --- #
     control = local_planner.run_step()
     if control:
         vehicle.apply_control(control)
         # --- Update live plotter with new feedback --- #
         current_pos = vehicle.get_location()
         current_speed = np.sqrt(np.square(vehicle.get_velocity().x) + np.square(vehicle.get_velocity().y))
         cmd_throttle, cmd_steer = control.throttle, control.steer
         # --- show the vehicle information --- #
         x_history.append(current_pos.x)
         y_history.append(current_pos.y)
         speed_history.append(current_speed)
         time_history.append(current_timestamp)
         # trajectory
         trajectory_fig.roll("trajectory", current_pos.x, current_pos.y)
         trajectory_fig.roll("car", current_pos.x, current_pos.y)
         # speed, throttle, steer