Code example #1
File: test_metrics.py  Project: cyy1991/carla
    def test_compute(self):

        # This is the last test: generate many cases, including corner cases, to be tested.

        metrics_obj = Metrics(self._metrics_parameters, [3])

        # Let's start by testing a generated file, not one from a real run.
        # The case is basically an empty one.
        poses_to_test = [[24, 32], [34, 36], [54, 67]]
        path = self._generate_test_case(poses_to_test)

        summary_dict = metrics_obj.compute(path)

        number_of_colisions_vehicles = sum_matrix(
            summary_dict['collision_vehicles'][1.0])
        number_of_colisions_general = sum_matrix(
            summary_dict['collision_other'][1.0])
        number_of_colisions_pedestrians = sum_matrix(
            summary_dict['collision_pedestrians'][1.0])
        number_of_intersection_offroad = sum_matrix(
            summary_dict['intersection_offroad'][1.0])
        number_of_intersection_otherlane = sum_matrix(
            summary_dict['intersection_otherlane'][1.0])

        self.assertEqual(number_of_colisions_vehicles, 0)
        self.assertEqual(number_of_colisions_general, 0)
        self.assertEqual(number_of_colisions_pedestrians, 0)
        self.assertEqual(number_of_intersection_offroad, 0)
        self.assertEqual(number_of_intersection_otherlane, 0)

        # Now let's run a collision test on a premade file.

        path = 'test/unit_tests/test_data/testfile_collisions'

        summary_dict = metrics_obj.compute(path)

        number_of_colisions_vehicles = sum_matrix(
            summary_dict['collision_vehicles'][3.0])
        number_of_colisions_general = sum_matrix(
            summary_dict['collision_other'][3.0])
        number_of_colisions_pedestrians = sum_matrix(
            summary_dict['collision_pedestrians'][3.0])
        number_of_intersection_offroad = sum_matrix(
            summary_dict['intersection_offroad'][3.0])
        number_of_intersection_otherlane = sum_matrix(
            summary_dict['intersection_otherlane'][3.0])

        self.assertEqual(number_of_colisions_vehicles, 2)
        self.assertEqual(number_of_colisions_general, 9)
        self.assertEqual(number_of_colisions_pedestrians, 0)
        self.assertEqual(number_of_intersection_offroad, 1)
        self.assertEqual(number_of_intersection_otherlane, 3)
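
Both halves of this test lean on a sum_matrix helper that the snippet does not show. A minimal sketch, assuming it simply totals every entry of a per-pose metric matrix (the real helper in the test file may differ):

import numpy as np

def sum_matrix(matrix):
    # Total every entry of a (possibly nested) matrix of per-pose counts.
    return float(np.sum(matrix))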
Code example #2
File: test_metrics.py  Project: cyy1991/carla
    def test_divide_by_episodes(self):

        metrics_obj = Metrics(self._metrics_parameters, [3])

        poses_to_test = [[24, 32], [34, 36], [54, 67]]
        path = self._generate_test_case(poses_to_test)

        # We start by reading the headers of the summary file and the measurements file.
        with open(os.path.join(path, 'summary.csv'), "r") as f:
            header = f.readline()
            header = header.split(',')
            # Trim the trailing newline characters from the last column name.
            header[-1] = header[-1][:-2]

        with open(os.path.join(path, 'measurements.csv'), "r") as f:
            header_metrics = f.readline()
            header_metrics = header_metrics.split(',')
            header_metrics[-1] = header_metrics[-1][:-2]

        result_matrix = np.loadtxt(os.path.join(path, 'summary.csv'), delimiter=",", skiprows=1)

        # Corner case: the generated test had just one episode.
        if result_matrix.ndim == 1:
            result_matrix = np.expand_dims(result_matrix, axis=0)

        tasks = np.unique(result_matrix[:, header.index('exp_id')])
        all_weathers = np.unique(result_matrix[:, header.index('weather')])

        measurements_matrix = np.loadtxt(os.path.join(path, 'measurements.csv'),
                                         delimiter=",", skiprows=1)

        episodes = metrics_obj._divide_by_episodes(measurements_matrix, header_metrics)

        self.assertEqual(len(episodes), 3)
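
For orientation, a hypothetical sketch of how a _divide_by_episodes helper could split the flat measurements matrix into per-episode sub-matrices; the column it keys on ('exp_id' here) is an assumption, and the real implementation inside CARLA's Metrics class may differ:

import numpy as np

def divide_by_episodes(measurements_matrix, header):
    # Split on changes of the per-episode identifier column.
    ids = measurements_matrix[:, header.index('exp_id')]
    boundaries = np.where(np.diff(ids) != 0)[0] + 1
    return np.split(measurements_matrix, boundaries)

With three poses generating three episodes, this yields the len(episodes) == 3 asserted above.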
Code example #3
    def benchmark_agent(self, experiment_suite, agent, client):
        """
        Function to benchmark the agent.
        It first check the log file for this benchmark.
        if it exist it continues from the experiment where it stopped.


        Args:
            experiment_suite
            agent: an agent object with the run step class implemented.
            client:


        Return:
            A dictionary with all the metrics computed from the
            agent running the set of experiments.
        """

        # Instantiate a metric object that will be used to compute the metrics for
        # the benchmark afterwards.
        metrics_object = Metrics(experiment_suite.metrics_parameters,
                                 experiment_suite.dynamic_tasks)

        # Get the pose and experiment at which this benchmark should resume.
        start_pose, start_experiment = self._recording.get_pose_and_experiment(
            experiment_suite.get_number_of_poses_task())

        logging.info('START')

        for experiment in experiment_suite.get_experiments()[int(start_experiment):]:

            positions = client.load_settings(
                experiment.conditions).player_start_spots

            self._recording.log_start(experiment.task)

            for pose in experiment.poses[start_pose:]:
                for rep in range(experiment.repetitions):

                    start_index = pose[0]
                    end_index = pose[1]

                    client.start_episode(start_index)
                    self._episode_number += 1
                    # Print information on the episode being run.
                    logging.info('======== !!!! ==========')
                    logging.info('Episode Number: %d', self._episode_number)
                    logging.info(' Start Position %d End Position %d ',
                                 start_index, end_index)

                    self._recording.log_poses(start_index, end_index,
                                              experiment.Conditions.WeatherId)

                    # Calculate the initial distance for this episode
                    initial_distance = \
                        sldist(
                            [positions[start_index].location.x, positions[start_index].location.y],
                            [positions[end_index].location.x, positions[end_index].location.y])

                    time_out = experiment_suite.calculate_time_out(
                        self._get_shortest_path(positions[start_index],
                                                positions[end_index]))

                    # running the agent
                    (result, reward_vec, control_vec, final_time, remaining_distance) = \
                        self._run_navigation_episode(
                            agent, client, time_out, positions[end_index],
                            str(experiment.Conditions.WeatherId) + '_'
                            + str(experiment.task) + '_' + str(start_index)
                            + '.' + str(end_index))

                    # Write the general status of the episode that just ran.
                    self._recording.write_summary_results(
                        experiment, pose, rep, initial_distance,
                        remaining_distance, final_time, time_out, result)

                    # Write the details of this episode.
                    self._recording.write_measurements_results(
                        experiment, rep, pose, reward_vec, control_vec)
                    if result > 0:
                        logging.info(
                            '+++++ Target achieved in %f seconds! +++++',
                            final_time)
                    else:
                        logging.info('----- Timeout! -----')

            start_pose = 0

        self._recording.log_end()

        return metrics_object.compute(self._recording.path)
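
The initial-distance computation above uses an sldist helper; a minimal sketch, assuming it is the plain Euclidean distance between two [x, y] points, which is how the call sites use it:

import math

def sldist(c1, c2):
    # Straight-line (Euclidean) distance between two [x, y] points.
    return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2)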
Code example #4
    def benchmark_agent(self, experiment_suite, agent, client):
        """
        Function to benchmark the agent.
        It first check the log file for this benchmark.
        if it exist it continues from the experiment where it stopped.


        Args:
            experiment_suite
            agent: an agent object with the run step class implemented.
            client:


        Return:
            A dictionary with all the metrics computed from the
            agent running the set of experiments.
        """

        # Instantiate a metric object that will be used to compute the metrics for
        # the benchmark afterwards.
        data = OrderedDict()
        metrics_object = Metrics(experiment_suite.metrics_parameters,
                                 experiment_suite.dynamic_tasks)

        # Get the pose and experiment at which this benchmark should resume.
        start_pose, start_experiment = self._recording.get_pose_and_experiment(
            experiment_suite.get_number_of_poses_task())

        logging.info('START')
        cols = [
            'Start_position', 'End_position', 'Total-Distance', 'Time',
            "Distance-Travelled", "Follow_lane", "Straight", "Left", "Right",
            "Avg-Speed", "Collision-Pedestrian", "Collision-Vehicle",
            "Collision-Other", "Intersection-Lane", "Intersection-Offroad",
            "Traffic_Light_Infraction", "Success"
        ]
        df = pd.DataFrame(columns=cols)
        ts = str(int(time.time()))

        for experiment in experiment_suite.get_experiments()[int(start_experiment):]:

            positions = client.load_settings(
                experiment.conditions).player_start_spots

            self._recording.log_start(experiment.task)

            for pose in experiment.poses[start_pose:]:
                data['Start_position'] = pose[0]
                data['End_position'] = pose[1]

                for rep in range(experiment.repetitions):

                    start_index = pose[0]
                    end_index = pose[1]

                    client.start_episode(start_index)
                    # Print information on the episode being run.
                    logging.info('======== !!!! ==========')
                    logging.info(' Start Position %d End Position %d ',
                                 start_index, end_index)

                    self._recording.log_poses(start_index, end_index,
                                              experiment.Conditions.WeatherId)

                    # Calculate the initial distance for this episode
                    initial_distance = \
                        sldist(
                            [positions[start_index].location.x, positions[start_index].location.y],
                            [positions[end_index].location.x, positions[end_index].location.y])
                    data["Total-Distance"] = initial_distance
                    time_out = experiment_suite.calculate_time_out(
                        self._get_shortest_path(positions[start_index],
                                                positions[end_index]))

                    # running the agent
                    (result, reward_vec, control_vec, final_time, remaining_distance, data) = \
                        self._run_navigation_episode(
                            agent, client, time_out, positions[end_index],
                            str(experiment.Conditions.WeatherId) + '_'
                            + str(experiment.task) + '_' + str(start_index)
                            + '.' + str(end_index), data)
                    data["Time"] = final_time
                    # Write the general status of the episode that just ran.
                    self._recording.write_summary_results(
                        experiment, pose, rep, initial_distance,
                        remaining_distance, final_time, time_out, result)

                    # Write the details of this episode.
                    self._recording.write_measurements_results(
                        experiment, rep, pose, reward_vec, control_vec)
                    data['Success'] = result
                    if result > 0:
                        logging.info(
                            '+++++ Target achieved in %f seconds! +++++',
                            final_time)

                    else:
                        logging.info('----- Timeout! -----')

                data["Avg-Speed"] = 3.6 * (data['Total-Distance'] /
                                           data['Time'])
                # DataFrame.append was removed in pandas 2.0; concatenate a
                # one-row frame instead.
                df = pd.concat([df, pd.DataFrame([data])], ignore_index=True)
                df = df[cols]
                try:
                    df.to_csv("Test_result_" + ts + '.csv',
                              columns=cols,
                              index=True)
                except OSError:
                    # The file may be open elsewhere (e.g. in a spreadsheet);
                    # it will be saved again after the next pose.
                    print("File is in use; will save on the next pose")
            start_pose = 0

        self._recording.log_end()

        return metrics_object.compute(self._recording.path)
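
A design note on the CSV snapshotting above: growing a DataFrame row by row is quadratic in the number of poses. A common alternative, sketched here under the same assumptions about cols and the per-pose data dict, is to accumulate plain dicts and build the frame once per save:

import pandas as pd

def save_rows(rows, cols, path):
    # rows is a list of per-pose dicts; snapshot with dict(data) before
    # appending, since the episode dict is mutated in place.
    pd.DataFrame(rows, columns=cols).to_csv(path, index=True)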
Code example #5
                                config.entropy_coef,
                                lr=config.lr,
                                eps=config.eps,
                                max_grad_norm=config.max_grad_norm)

    else:
        raise NotImplementedError

    if checkpoint is not None:
        load_modules(agent.optimizer, agent.model, checkpoint)

    vec_norm = get_vec_normalize(envs)

    metrics_object = Metrics(experiment_suite.metrics_parameters,
                             experiment_suite.dynamic_tasks)
    recording = Recording(name_to_save=args.save_dir,
                          continue_experiment=False,
                          save_images=True)

    logging.info('START')
    running = True  # renamed from `iter`, which shadows the builtin
    obs = None
    while running:
        try:
            if obs is None:
                print('should be reset')
                obs = envs.reset()
            elif obs is False:
                print('end of the experiments')
                running = False
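
The fragment drives obs as a three-state sentinel: None before the first reset, False once the experiment suite is exhausted, and a real observation otherwise. A minimal self-contained sketch of that control pattern; envs and its step() call are stand-ins, not the repository's API:

def run_until_exhausted(envs):
    # obs: None before the first reset, False when no episodes remain.
    obs = None
    while obs is not False:
        if obs is None:
            obs = envs.reset()
        else:
            obs = envs.step()  # hypothetical step API for this sketch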
Code example #6
                           '--path',
                           metavar='P',
                           default='test',
                           help='Path to all log files')

    args = argparser.parse_args()

    log_level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)

    logging.info('starting the calculations %s',
                 "0")  # TODO: add the time instead of zero

    experiment_suite = CoRL2017("Town01")

    metrics_object = Metrics(experiment_suite.metrics_parameters,
                             experiment_suite.dynamic_tasks)

    # Improve readability by adding a weather dictionary
    weather_name_dict = {
        1: 'Clear Noon',
        3: 'After Rain Noon',
        6: 'Heavy Rain Noon',
        8: 'Clear Sunset',
        4: 'Cloudy After Rain',
        14: 'Soft Rain Sunset'
    }

    # names for all the test logs
    pathNames = {
        0: '_Test01_CoRL2017_Town01',
        1: '_Test02_CoRL2017_Town01',
Code example #7
    def test_init(self):

        # Metrics should instantiate with the given parameters.
        Metrics(self._metrics_parameters, [3])
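
These test methods sit inside a unittest.TestCase; a hypothetical scaffold showing how they could be wired up. The empty _metrics_parameters is a placeholder assumption (the real fixture in test_metrics.py defines per-metric parameters), and the import path follows CARLA 0.8's PythonClient layout:

import unittest

from carla.driving_benchmark.metrics import Metrics

class TestMetrics(unittest.TestCase):

    def setUp(self):
        # Placeholder; the real fixture defines per-metric parameters.
        self._metrics_parameters = {}

    def test_init(self):
        # Metrics should instantiate with the given parameters.
        Metrics(self._metrics_parameters, [3])

if __name__ == '__main__':
    unittest.main()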
Code example #8
File: driving_benchmark.py  Project: cyy1991/carla
    def benchmark_agent(self, experiment_suite, agent, client):
        """
        Function to benchmark the agent.
        It first check the log file for this benchmark.
        if it exist it continues from the experiment where it stopped.


        Args:
            experiment_suite
            agent: an agent object with the run step class implemented.
            client:


        Return:
            A dictionary with all the metrics computed from the
            agent running the set of experiments.
        """

        # Instantiate a metric object that will be used to compute the metrics for
        # the benchmark afterwards.
        metrics_object = Metrics(experiment_suite.metrics_parameters,
                                 experiment_suite.dynamic_tasks)

        # Get the pose and experiment at which this benchmark should resume.
        start_pose, start_experiment = self._recording.get_pose_and_experiment(
            experiment_suite.get_number_of_poses_task())

        logging.info('START')

        for experiment in experiment_suite.get_experiments()[int(start_experiment):]:

            positions = client.load_settings(
                experiment.conditions).player_start_spots

            self._recording.log_start(experiment.task)

            for pose in experiment.poses[start_pose:]:
                for rep in range(experiment.repetitions):

                    start_index = pose[0]
                    end_index = pose[1]

                    client.start_episode(start_index)
                    # Print information on the episode being run.
                    logging.info('======== !!!! ==========')
                    logging.info(' Start Position %d End Position %d ',
                                 start_index, end_index)

                    self._recording.log_poses(start_index, end_index,
                                              experiment.Conditions.WeatherId)

                    # Calculate the initial distance for this episode
                    initial_distance = \
                        sldist(
                            [positions[start_index].location.x, positions[start_index].location.y],
                            [positions[end_index].location.x, positions[end_index].location.y])

                    time_out = experiment_suite.calculate_time_out(
                        self._get_shortest_path(positions[start_index], positions[end_index]))

                    # running the agent
                    (result, reward_vec, control_vec, final_time, remaining_distance) = \
                        self._run_navigation_episode(
                            agent, client, time_out, positions[end_index],
                            str(experiment.Conditions.WeatherId) + '_'
                            + str(experiment.task) + '_' + str(start_index)
                            + '.' + str(end_index))

                    # Write the general status of the episode that just ran.
                    self._recording.write_summary_results(
                        experiment, pose, rep, initial_distance,
                        remaining_distance, final_time, time_out, result)

                    # Write the details of this episode.
                    self._recording.write_measurements_results(experiment, rep, pose, reward_vec,
                                                               control_vec)
                    if result > 0:
                        logging.info('+++++ Target achieved in %f seconds! +++++',
                                     final_time)
                    else:
                        logging.info('----- Timeout! -----')

            start_pose = 0

        self._recording.log_end()

        return metrics_object.compute(self._recording.path)
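
Finally, a hedged usage sketch for benchmark_agent. The host, port, and log name are assumptions, and the imports follow CARLA 0.8's PythonClient layout (make_carla_client, CoRL2017, DrivingBenchmark, ForwardAgent):

import logging

from carla.agent import ForwardAgent
from carla.client import make_carla_client
from carla.driving_benchmark.driving_benchmark import DrivingBenchmark
from carla.driving_benchmark.experiment_suites import CoRL2017

logging.basicConfig(level=logging.INFO)

agent = ForwardAgent()  # trivial agent shipped with the benchmark
experiment_suite = CoRL2017('Town01')

with make_carla_client('localhost', 2000) as client:
    benchmark = DrivingBenchmark(city_name='Town01', name_to_save='my_test')
    results = benchmark.benchmark_agent(experiment_suite, agent, client)
    print(results)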