Example #1
    def test_compute(self):

        # This is the last test; generate many cases, including corner cases, to be tested.

        metrics_obj = Metrics(self._metrics_parameters, [3])
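        # The second positional argument above corresponds to dynamic_tasks in the
        # Metrics constructor used by the benchmark_agent examples below.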

        # Let's start by testing a generated file, not one from a real run.
        # The case is basically an empty case.
        poses_to_test = [[24, 32], [34, 36], [54, 67]]
        path = self._generate_test_case(poses_to_test)

        summary_dict = metrics_obj.compute(path)

        number_of_collisions_vehicles = sum_matrix(
            summary_dict['collision_vehicles'][1.0])
        number_of_collisions_general = sum_matrix(
            summary_dict['collision_other'][1.0])
        number_of_collisions_pedestrians = sum_matrix(
            summary_dict['collision_pedestrians'][1.0])
        number_of_intersection_offroad = sum_matrix(
            summary_dict['intersection_offroad'][1.0])
        number_of_intersection_otherlane = sum_matrix(
            summary_dict['intersection_otherlane'][1.0])

        self.assertEqual(number_of_collisions_vehicles, 0)
        self.assertEqual(number_of_collisions_general, 0)
        self.assertEqual(number_of_collisions_pedestrians, 0)
        self.assertEqual(number_of_intersection_offroad, 0)
        self.assertEqual(number_of_intersection_otherlane, 0)

        # Now let's run a collision test on a pre-made file

        path = 'test/unit_tests/test_data/testfile_collisions'

        summary_dict = metrics_obj.compute(path)

        number_of_collisions_vehicles = sum_matrix(
            summary_dict['collision_vehicles'][3.0])
        number_of_collisions_general = sum_matrix(
            summary_dict['collision_other'][3.0])
        number_of_collisions_pedestrians = sum_matrix(
            summary_dict['collision_pedestrians'][3.0])
        number_of_intersection_offroad = sum_matrix(
            summary_dict['intersection_offroad'][3.0])
        number_of_intersection_otherlane = sum_matrix(
            summary_dict['intersection_otherlane'][3.0])

        self.assertEqual(number_of_collisions_vehicles, 2)
        self.assertEqual(number_of_collisions_general, 9)
        self.assertEqual(number_of_collisions_pedestrians, 0)
        self.assertEqual(number_of_intersection_offroad, 1)
        self.assertEqual(number_of_intersection_otherlane, 3)
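
The assertions above rely on a sum_matrix helper that totals every entry of the
per-weather matrix that Metrics.compute returns for each metric. A minimal sketch,
assuming the helper simply flattens and sums a nested list (the real helper may differ):

    def sum_matrix(matrix):
        # Sum every entry of a nested list of per-episode values.
        return sum(sum(row) for row in matrix)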
Example #2
    def test_compute(self):

        # This is the last test; generate many cases, including corner cases, to be tested.

        metrics_obj = Metrics(self._metrics_parameters, [3])


        # Let's start by testing a generated file, not one from a real run.
        # The case is basically an empty case.
        poses_to_test = [[24, 32], [34, 36], [54, 67]]
        path = self._generate_test_case(poses_to_test)



        summary_dict = metrics_obj.compute(path)


        number_of_collisions_vehicles = sum_matrix(summary_dict['collision_vehicles'][1.0])
        number_of_collisions_general = sum_matrix(summary_dict['collision_other'][1.0])
        number_of_collisions_pedestrians = sum_matrix(summary_dict['collision_pedestrians'][1.0])
        number_of_intersection_offroad = sum_matrix(summary_dict['intersection_offroad'][1.0])
        number_of_intersection_otherlane = sum_matrix(summary_dict['intersection_otherlane'][1.0])



        self.assertEqual(number_of_collisions_vehicles, 0)
        self.assertEqual(number_of_collisions_general, 0)
        self.assertEqual(number_of_collisions_pedestrians, 0)
        self.assertEqual(number_of_intersection_offroad, 0)
        self.assertEqual(number_of_intersection_otherlane, 0)


        # Now let's run a collision test on a pre-made file

        path = 'test/unit_tests/test_data/testfile_collisions'

        summary_dict = metrics_obj.compute(path)

        number_of_collisions_vehicles = sum_matrix(summary_dict['collision_vehicles'][3.0])
        number_of_collisions_general = sum_matrix(summary_dict['collision_other'][3.0])
        number_of_collisions_pedestrians = sum_matrix(summary_dict['collision_pedestrians'][3.0])
        number_of_intersection_offroad = sum_matrix(summary_dict['intersection_offroad'][3.0])
        number_of_intersection_otherlane = sum_matrix(summary_dict['intersection_otherlane'][3.0])



        self.assertEqual(number_of_collisions_vehicles, 2)
        self.assertEqual(number_of_collisions_general, 9)
        self.assertEqual(number_of_collisions_pedestrians, 0)
        self.assertEqual(number_of_intersection_offroad, 1)
        self.assertEqual(number_of_intersection_otherlane, 3)
Example #3
    def benchmark_agent(self, experiment_suite, agent, client):
        """
        Function to benchmark the agent.
        It first checks the log file for this benchmark;
        if it exists, it continues from the experiment where it stopped.


        Args:
            experiment_suite: the experiment suite describing the experiments to run.
            agent: an agent object with the run_step method implemented.
            client: the simulator client used to run the episodes.


        Return:
            A dictionary with all the metrics computed from the
            agent running the set of experiments.
        """

        # Instantiate a metric object that will be used to compute the metrics for
        # the benchmark afterwards.
        metrics_object = Metrics(experiment_suite.metrics_parameters,
                                 experiment_suite.dynamic_tasks)

        # Get the pose and the experiment from which this benchmark should resume.
        start_pose, start_experiment = self._recording.get_pose_and_experiment(
            experiment_suite.get_number_of_poses_task())

        logging.info('START')

        for experiment in experiment_suite.get_experiments(
        )[int(start_experiment):]:

            positions = client.load_settings(
                experiment.conditions).player_start_spots

            self._recording.log_start(experiment.task)

            for pose in experiment.poses[start_pose:]:
                for rep in range(experiment.repetitions):

                    start_index = pose[0]
                    end_index = pose[1]

                    client.start_episode(start_index)
                    self._episode_number += 1
                    # Print information about the episode being run.
                    logging.info('======== !!!! ==========')
                    logging.info('Episode Number: %d', self._episode_number)
                    logging.info(' Start Position %d End Position %d ',
                                 start_index, end_index)

                    self._recording.log_poses(start_index, end_index,
                                              experiment.Conditions.WeatherId)

                    # Calculate the initial distance for this episode
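                    # sldist is assumed to be the straight-line (Euclidean) distance
                    # between two 2-D points.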
                    initial_distance = \
                        sldist(
                            [positions[start_index].location.x, positions[start_index].location.y],
                            [positions[end_index].location.x, positions[end_index].location.y])

                    time_out = experiment_suite.calculate_time_out(
                        self._get_shortest_path(positions[start_index],
                                                positions[end_index]))

                    # running the agent
                    (result, reward_vec, control_vec, final_time, remaining_distance) = \
                        self._run_navigation_episode(
                            agent, client, time_out, positions[end_index],
                            str(experiment.Conditions.WeatherId) + '_'
                            + str(experiment.task) + '_' + str(start_index)
                            + '.' + str(end_index))

                    # Write the general status of the episode that just ran.
                    self._recording.write_summary_results(
                        experiment, pose, rep, initial_distance,
                        remaining_distance, final_time, time_out, result)

                    # Write the details of this episode.
                    self._recording.write_measurements_results(
                        experiment, rep, pose, reward_vec, control_vec)
                    if result > 0:
                        logging.info(
                            '+++++ Target achieved in %f seconds! +++++',
                            final_time)
                    else:
                        logging.info('----- Timeout! -----')

            start_pose = 0

        self._recording.log_end()

        return metrics_object.compute(self._recording.path)
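
For context, a minimal sketch of how this method might be driven; every name other
than benchmark_agent is a hypothetical placeholder, not confirmed by the code above:

    suite = MyExperimentSuite()           # hypothetical: exposes metrics_parameters, get_experiments(), ...
    agent = MyAgent()                     # hypothetical: implements run_step
    client = connect_to_simulator()       # hypothetical: an already-connected simulator client
    benchmark = DrivingBenchmarkRunner()  # hypothetical class that defines benchmark_agent
    summary_dict = benchmark.benchmark_agent(suite, agent, client)
    # summary_dict maps metric names to per-weather result matrices (see Example #1).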
Example #4
    def benchmark_agent(self, experiment_suite, agent, client):
        """
        Function to benchmark the agent.
        It first checks the log file for this benchmark;
        if it exists, it continues from the experiment where it stopped.


        Args:
            experiment_suite: the experiment suite describing the experiments to run.
            agent: an agent object with the run_step method implemented.
            client: the simulator client used to run the episodes.


        Return:
            A dictionary with all the metrics computed from the
            agent running the set of experiments.
        """

        # Instantiate a metric object that will be used to compute the metrics for
        # the benchmark afterwards.
        data = OrderedDict()
        metrics_object = Metrics(experiment_suite.metrics_parameters,
                                 experiment_suite.dynamic_tasks)

        # Get the pose and the experiment from which this benchmark should resume.
        start_pose, start_experiment = self._recording.get_pose_and_experiment(
            experiment_suite.get_number_of_poses_task())

        logging.info('START')
        cols = [
            'Start_position', 'End_position', 'Total-Distance', 'Time',
            "Distance-Travelled", "Follow_lane", "Straight", "Left", "Right",
            "Avg-Speed", "Collision-Pedestrian", "Collision-Vehicle",
            "Collision-Other", "Intersection-Lane", "Intersection-Offroad",
            "Traffic_Light_Infraction", "Success"
        ]
        df = pd.DataFrame(columns=cols)
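        # Unix timestamp used below to build a unique CSV filename for this run.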
        ts = str(int(time.time()))

        for experiment in experiment_suite.get_experiments(
        )[int(start_experiment):]:

            positions = client.load_settings(
                experiment.conditions).player_start_spots

            self._recording.log_start(experiment.task)

            for pose in experiment.poses[start_pose:]:
                data['Start_position'] = pose[0]
                data['End_position'] = pose[1]

                #print(df)

                for rep in range(experiment.repetitions):

                    start_index = pose[0]
                    end_index = pose[1]

                    client.start_episode(start_index)
                    # Print information about the episode being run.
                    logging.info('======== !!!! ==========')
                    logging.info(' Start Position %d End Position %d ',
                                 start_index, end_index)

                    self._recording.log_poses(start_index, end_index,
                                              experiment.Conditions.WeatherId)

                    # Calculate the initial distance for this episode
                    initial_distance = \
                        sldist(
                            [positions[start_index].location.x, positions[start_index].location.y],
                            [positions[end_index].location.x, positions[end_index].location.y])
                    data["Total-Distance"] = initial_distance
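                    # Derive the episode timeout from the shortest-path length between
                    # the start and goal positions (distance assumed to be in metres).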
                    time_out = experiment_suite.calculate_time_out(
                        self._get_shortest_path(positions[start_index],
                                                positions[end_index]))

                    # running the agent
                    (result, reward_vec, control_vec, final_time, remaining_distance, data) = \
                        self._run_navigation_episode(
                            agent, client, time_out, positions[end_index],
                            str(experiment.Conditions.WeatherId) + '_'
                            + str(experiment.task) + '_' + str(start_index)
                            + '.' + str(end_index), data)
                    data["Time"] = final_time
                    # Write the general status of the episode that just ran.
                    self._recording.write_summary_results(
                        experiment, pose, rep, initial_distance,
                        remaining_distance, final_time, time_out, result)

                    # Write the details of this episode.
                    self._recording.write_measurements_results(
                        experiment, rep, pose, reward_vec, control_vec)
                    data['Success'] = result
                    if result > 0:
                        logging.info(
                            '+++++ Target achieved in %f seconds! +++++',
                            final_time)

                    else:
                        logging.info('----- Timeout! -----')
                #data['Start_position'] = start_index
                #data['End_position'] = end_index
                #df = df.append(data , ignore_index=True)
                #print(df)
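                # The factor 3.6 converts m/s to km/h, assuming Total-Distance is in
                # metres and Time in seconds.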
                data["Avg-Speed"] = 3.6 * (data['Total-Distance'] /
                                           data['Time'])
                df = df.append(data, ignore_index=True)
                df = df[cols]
                try:
                    df.to_csv("Test_result_" + ts + '.csv',
                              columns=cols,
                              index=True)
                except OSError:
                    print("File is in use; results will be saved on the next iteration")
            start_pose = 0

        self._recording.log_end()

        return metrics_object.compute(self._recording.path)
Example #5
                else:
                    logging.info('----- Collision! -----')

            # Write the details of this episode.
            print('end of the while loop')
            recording.write_measurements_results(experiments[exp_idx], rep,
                                                 pose, reward_vec, control_vec)

            print('experiment ended')
            recording.log_end()
        except Exception:
            break
    recording.write_measurements_results(experiments[exp_idx], rep, pose,
                                         reward_vec, control_vec)

    benchmark_summary = metrics_object.compute(recording.path)

    print("")
    print("")
    print(
        "----- Printing results for training weathers (Seen in Training) -----"
    )
    print("")
    print("")
    print_summary(benchmark_summary, experiment_suite.train_weathers,
                  recording.path)
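
print_summary consumes the same nested structure that Example #1 indexes directly
(metric name, then weather id, then a matrix of per-episode values). A minimal
sketch of iterating that structure, assuming that layout:

    for metric_name, per_weather in benchmark_summary.items():
        for weather_id, matrix in per_weather.items():
            # Each matrix holds one value per episode (e.g. collision counts).
            print(metric_name, weather_id, sum_matrix(matrix))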

    print("")
    print("")
    print(
        "----- Printing results for test weathers (Unseen in Training) -----")
Example #6
        # These lists make it easier to calculate the standard deviation later.
        dataListTable1 = [[] for i in range(len(tasksSuccessRate))]
        dataListTable2 = [[[] for i in range(len(tasksSuccessRate))]
                          for i in range(len(tasksInfractions))]
        dataListTable3 = [[[] for i in range(len(tasksSuccessRate))]
                          for i in range(len(tasksInfractions))]
        #logging.debug("Data list table 2 init: %s",dataListTable2)

        # Calculate the metric: episodes_fully_completed
        for p in allPath:
            if addSlashFlag:
                path = args.path + '/' + extractedPath + p
            else:
                path = args.path + extractedPath + p
            metrics_summary = metrics_object.compute(path)
            number_of_tasks = len(
                list(metrics_summary[metrics_to_average[0]].items())[0][1])
            values = metrics_summary[
                metrics_to_average[0]]  # episodes_fully_completed

            if table1Flag:
                logging.debug("Working on table 1 ...")
                metric_sum_values = np.zeros(number_of_tasks)
                for w, tasks in values.items():
                    if w in set(weathers):
                        count = 0
                        for tIdx, t in enumerate(tasks):
                            #print(weathers[tIdx]) #float(sum(t)) / float(len(t)))
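                            # Add the completion rate for this task under weather w
                            # (mean of the per-episode values), scaled by 1/len(weathers)
                            # so the running sum averages over the selected weathers.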
                            metric_sum_values[count] += (float(sum(t)) / float(
                                len(t))) * 1.0 / float(len(weathers))
Example #7
    def benchmark_agent(self, experiment_suite, agent, client):
        """
        Function to benchmark the agent.
        It first checks the log file for this benchmark;
        if it exists, it continues from the experiment where it stopped.


        Args:
            experiment_suite: the experiment suite describing the experiments to run.
            agent: an agent object with the run_step method implemented.
            client: the simulator client used to run the episodes.


        Return:
            A dictionary with all the metrics computed from the
            agent running the set of experiments.
        """

        # Instantiate a metric object that will be used to compute the metrics for
        # the benchmark afterwards.
        metrics_object = Metrics(experiment_suite.metrics_parameters,
                                 experiment_suite.dynamic_tasks)

        # Get the pose and the experiment from which this benchmark should resume.
        start_pose, start_experiment = self._recording.get_pose_and_experiment(
            experiment_suite.get_number_of_poses_task())

        logging.info('START')

        for experiment in experiment_suite.get_experiments()[int(start_experiment):]:

            positions = client.load_settings(
                experiment.conditions).player_start_spots

            self._recording.log_start(experiment.task)

            for pose in experiment.poses[start_pose:]:
                for rep in range(experiment.repetitions):

                    start_index = pose[0]
                    end_index = pose[1]

                    client.start_episode(start_index)
                    # Print information about the episode being run.
                    logging.info('======== !!!! ==========')
                    logging.info(' Start Position %d End Position %d ',
                                 start_index, end_index)

                    self._recording.log_poses(start_index, end_index,
                                              experiment.Conditions.WeatherId)

                    # Calculate the initial distance for this episode
                    initial_distance = \
                        sldist(
                            [positions[start_index].location.x, positions[start_index].location.y],
                            [positions[end_index].location.x, positions[end_index].location.y])

                    time_out = experiment_suite.calculate_time_out(
                        self._get_shortest_path(positions[start_index], positions[end_index]))

                    # running the agent
                    (result, reward_vec, control_vec, final_time, remaining_distance) = \
                        self._run_navigation_episode(
                            agent, client, time_out, positions[end_index],
                            str(experiment.Conditions.WeatherId) + '_'
                            + str(experiment.task) + '_' + str(start_index)
                            + '.' + str(end_index))

                    # Write the general status of the episode that just ran.
                    self._recording.write_summary_results(
                        experiment, pose, rep, initial_distance,
                        remaining_distance, final_time, time_out, result)

                    # Write the details of this episode.
                    self._recording.write_measurements_results(experiment, rep, pose, reward_vec,
                                                               control_vec)
                    if result > 0:
                        logging.info('+++++ Target achieved in %f seconds! +++++',
                                     final_time)
                    else:
                        logging.info('----- Timeout! -----')

            start_pose = 0

        self._recording.log_end()

        return metrics_object.compute(self._recording.path)