Example 1
def scenarios_detector_comparison(output_dir):
    """Compare different scenarios."""
    data_parser = simulation_data_parser.SimulationDataParser()
    visualizer = map_visualizer.MapVisualizer()

    fig = pylab.figure(figsize=(8, 6))
    ax = fig.add_subplot(111)

    load_input = file_util.load_variable(
        'Paradise_reverse_roads/demands/demands_taz_tuple.pkl')
    load_input = sorted(load_input)
    demand_time_line, _, demand_car_count = list(zip(*load_input))
    cumulative_values = np.cumsum(demand_car_count)
    pylab.plt.plot(np.array(demand_time_line) / 3600, cumulative_values)
    # print(cumulative_values[-1])
    # print(np.sum(demand_car_count))
    # visualizer.add_pertentage_interception_lines(
    #     np.array(demand_time_line) / 3600, demand_car_count, [0.5, .9, .95])

    detector_trajectory_folder = 'Paradise_reverse_roads/output/detector/detector_trajectory/'
    (time_line,
     arrival_car_count) = file_util.load_variable(detector_trajectory_folder +
                                                  'all_arrival_flow.pkl')
    cumulative_values = np.cumsum(arrival_car_count)
    print(cumulative_values[-1])  # Total number of arrived cars.
    time_line = np.array(time_line) / 3600  # Convert seconds to hours.
    pylab.plt.plot(time_line, cumulative_values)
    visualizer.add_pertentage_interception_lines(time_line, arrival_car_count,
                                                 [0.5, .9, .95])

    detector_trajectory_folder = 'Paradise_auto_routing/output/detector/detector_trajectory/'
    (time_line,
     arrival_car_count) = file_util.load_variable(detector_trajectory_folder +
                                                  'all_arrival_flow.pkl')
    cumulative_values = np.cumsum(arrival_car_count)
    print(cumulative_values[-1])  # Total number of arrived cars.
    pylab.plt.plot(np.array(time_line) / 3600, cumulative_values)
    # visualizer.add_pertentage_interception_lines(
    #     time_line, arrival_car_count, [0.5, .9, .95])

    detector_trajectory_folder = 'Paradise_2s_baseline/output/detector/detector_trajectory/'
    (time_line,
     arrival_car_count) = file_util.load_variable(detector_trajectory_folder +
                                                  'all_arrival_flow.pkl')
    cumulative_values = np.cumsum(arrival_car_count)
    print(cumulative_values[-1])  # Total number of arrived cars.
    pylab.plt.plot(np.array(time_line) / 3600, cumulative_values)

    pylab.plt.xlabel('Time [h]')
    pylab.plt.ylabel('Cumulative vehicles')
    ax.autoscale_view(True, True, True)
    pylab.savefig(os.path.join(output_dir, 'scenarios_arrival_comparison.pdf'))
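
# The `add_pertentage_interception_lines` calls above mark where a cumulative
# curve crosses given fractions of its final value. A minimal sketch of that
# idea using np.searchsorted (the actual MapVisualizer method may differ):
import numpy as np
import matplotlib.pyplot as plt

def draw_percentage_lines(time_line, counts, fractions):
    """Draws guide lines where the cumulative count reaches each fraction."""
    cumulative = np.cumsum(counts)
    for fraction in fractions:
        target = fraction * cumulative[-1]
        # First index at which the cumulative curve reaches the target.
        index = min(np.searchsorted(cumulative, target), len(cumulative) - 1)
        plt.axvline(time_line[index], linestyle=':', color='gray')
        plt.axhline(target, linestyle=':', color='gray')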
Example 2
  def visualize_fcd_on_map(self):
    """Plot metric maps.

    Pay attention to the map.
    """
    net = sumolib.net.readNet(self._sumo_net_file)
    visualizer = map_visualizer.MapVisualizer(net)
    plot_edges = net.getEdges()

    trajectory_folder = os.path.join(self._output_dir, 'trajectory/')
    output_folder = os.path.join(trajectory_folder, 'trajectory_fig/')
    if not file_util.f_exists(output_folder):
      file_util.f_mkdir(output_folder)

    trajectory_file_list = os.listdir(trajectory_folder)
    # trajectory_file_list = [
    #     'edge_id_to_trajectory_9000_10800.pkl']

    for trajectory_file in trajectory_file_list:
      if not trajectory_file.endswith('.pkl'):
        continue
      trajectory_pkl_file = os.path.join(trajectory_folder, trajectory_file)
      print('Loading file: ', trajectory_pkl_file)
      edge_id_to_trajectory = file_util.load_variable(trajectory_pkl_file)
      print('Time range: ', edge_id_to_trajectory['time_interval'])
      output_figure_path = (output_folder + 'speed_map_%s_%s.pdf' %
                            (int(edge_id_to_trajectory['time_interval'][0]),
                             int(edge_id_to_trajectory['time_interval'][1])))

      visualizer.plot_edge_trajectory_histogram_on_map(
          plot_edges,
          edge_id_to_trajectory,
          output_figure_path=output_figure_path,
          plot_max_speed=13.4112)
Example 3
    def plot_detector_flow_density_by_group(cls,
                                            detector_pkl_files_by_group,
                                            figure_labels=None,
                                            ylim=None,
                                            output_figure_folder=None):
        """Plots detectors' data by group.

    Args:
      detector_pkl_files_by_group:
      figure_labels:
      ylim:
      output_figure_folder:
    """
        for group_id, pkl_files in enumerate(detector_pkl_files_by_group):
            vehicle_count_series = 0
            for pkl_file in pkl_files:
                detector_data = file_util.load_variable(pkl_file)
                vehicle_count_series += np.array(detector_data['nVehContrib'])
            # All detector files in a group are assumed to share the same time
            # bins, so the time line of the last file serves for the group.
            time_line = np.array(detector_data['begin']) / 3600
            figure_label = (figure_labels[group_id]
                            if figure_labels else 'Group_' + str(group_id))
            output_figure = (figure_labels[group_id] +
                             '_car_count.pdf' if figure_labels else 'Group_' +
                             str(group_id) + '.pdf')
            logging.info('Saving figure: %s.', output_figure)
            print('Saving figure: ', output_figure)
            cls.plot_detector_flow_density(
                time_line,
                vehicle_count_series,
                figure_label=figure_label,
                ylim=ylim,
                output_figure_path=os.path.join(output_figure_folder,
                                                output_figure))
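
# A hypothetical usage of the method above: each inner list holds the pkl
# files of one detector group, and the counts within a group are summed, so
# all files in a group must share the same time bins. The file names and the
# `map_visualizer.MapVisualizer` class name are assumptions for illustration.
detector_pkl_files_by_group = [
    ['detector/north_ramp_1.pkl', 'detector/north_ramp_2.pkl'],
    ['detector/south_ramp_1.pkl', 'detector/south_ramp_2.pkl'],
]
map_visualizer.MapVisualizer.plot_detector_flow_density_by_group(
    detector_pkl_files_by_group,
    figure_labels=['North ramp', 'South ramp'],
    output_figure_folder='output/figures/')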
Example 4
    def _analyze_summary_demands_vs_evacuation(self,
                                               demand_file,
                                               summary_file,
                                               output_dir=None):
        """Plot summary vs demands."""
        data_parser = simulation_data_parser.SimulationDataParser()
        visualizer = map_visualizer.MapVisualizer()

        demands = file_util.load_variable(demand_file)
        sorted_demands = sorted(demands, key=lambda x: x.time)
        demand_time_line = [x.time for x in sorted_demands]
        demand_time_line = np.array(demand_time_line) / 3600
        demand_car_count = [x.num_cars for x in sorted_demands]
        demand_cumulative_values = (np.cumsum(demand_car_count) /
                                    sum(demand_car_count))

        summary = data_parser.parse_summary_file(summary_file)
        summary_time_line = np.array(summary['time']) / 3600
        summary_cumulative_values = (np.array(summary['ended']) /
                                     sum(demand_car_count))

        # Calculate the gap between them.
        gap_area = visualizer.calculate_gap_area_between_cummulative_curves(
            demand_time_line, demand_cumulative_values, summary_time_line,
            summary_cumulative_values)

        if not output_dir:
            return (demand_time_line, demand_cumulative_values,
                    summary_time_line, summary_cumulative_values, gap_area)

        # Plot demands vs. evacuation.
        fig = pylab.figure(figsize=(8, 6))
        ax = fig.add_subplot(111)
        pylab.plt.plot(demand_time_line,
                       demand_cumulative_values,
                       label='Demands')
        pylab.plt.plot(summary_time_line,
                       summary_cumulative_values,
                       label='Evacuation')
        visualizer.add_pertentage_interception_lines(
            summary_time_line, summary_cumulative_values, [0.5, .9, .95])
        pylab.plt.xlabel('Time [h]')
        pylab.plt.ylabel('Cumulative percentage of total vehicles')
        pylab.plt.legend()
        ax.autoscale_view(True, True, True)
        output_figure_path = os.path.join(output_dir, 'evacuation_curve.pdf')
        pylab.savefig(output_figure_path)

        return (demand_time_line, demand_cumulative_values, summary_time_line,
                summary_cumulative_values, gap_area)
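
# `calculate_gap_area_between_cummulative_curves` above reduces the lag of the
# evacuation curve behind the demand curve to a single number. A minimal
# sketch of one way to compute such a gap (the actual MapVisualizer method may
# differ): interpolate both curves onto a common time grid and integrate the
# difference with the trapezoidal rule.
import numpy as np

def gap_area_between_curves(time_a, values_a, time_b, values_b):
    """Area between two cumulative curves on the union of their time grids."""
    grid = np.union1d(time_a, time_b)
    interp_a = np.interp(grid, time_a, values_a)
    interp_b = np.interp(grid, time_b, values_b)
    return np.trapz(np.abs(interp_a - interp_b), grid)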
Example 5
    def test_get_save_detector_data(self):
        detector_file = 'detector.xml'
        output_file = os.path.join(self._output_dir, 'detector.pkl')
        detector_file_path = _load_file(self._testdata_dir, detector_file)
        self._data_parser.get_and_save_detector_data(detector_file_path,
                                                     output_file)

        actual_dictionary = file_util.load_variable(output_file)
        print(actual_dictionary)
        self.assertListEqual(actual_dictionary['nVehEntered'],
                             [0.0, 0.0, 0.0, 12.0, 7.0, 14.0, 11.0])
        self.assertListEqual(actual_dictionary['speed'],
                             [-1.0, -1.0, -1.0, 22.2, 22.99, 19.39, 22.68])
        self.assertListEqual(actual_dictionary['id'],
                             ['e1Detector_10293408#4_0_2'] * 7)
Esempio n. 6
0
    def compare_demands_difference(self):
        """Compared the differences between demands and evacuations."""
        x = file_util.load_variable(
            'demands/demands_shortest_path_tuple_std_1.5.pkl')
        y = file_util.load_variable('demands/demands_taz_tuple_std_1.5.pkl')

        # x = [(a.origin, a.num_cars) for a in x]
        # y = [(a.origin, a.num_cars) for a in y]
        x_cars = [a.num_cars for a in x]
        y_cars = [a.num_cars for a in y]
        print(sum(x_cars), sum(y_cars))
        x = [(a.origin, a.num_cars, a.time) for a in x]
        y = [(a.origin, a.num_cars, a.time) for a in y]
        x = set(x)
        y = set(y)
        print(len(x), len(y))
        print(x.issubset(y))

        common = x.intersection(y)
        print(len(common))
        # Entries unique to each demand set.
        x_ = x.difference(common)
        y_ = y.difference(common)
        print(x_)
        print(y_)
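
# The attribute access above (a.origin, a.num_cars, a.time) implies the demand
# pkl files store a sequence of named tuples. A hypothetical sketch of that
# structure, for illustration only (the real tuple may carry more fields):
import collections

Demand = collections.namedtuple('Demand', ['origin', 'num_cars', 'time'])
demands = [Demand(origin='edge_12', num_cars=5, time=3600.0),
           Demand(origin='edge_34', num_cars=2, time=3720.0)]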
Example 7
def scenarios_summary_comparison(output_dir):
    """Compare different scenarios."""
    data_parser = simulation_data_parser.SimulationDataParser()
    visualizer = map_visualizer.MapVisualizer()

    fig = pylab.figure(figsize=(8, 6))
    ax = fig.add_subplot(111)

    demands = file_util.load_variable(
        'MillValley_template/demands/demands_taz_tuple_std_0.5_portion_1.pkl')
    sorted_demands = sorted(demands, key=lambda x: x.time)
    demand_time_line = [x.time for x in sorted_demands]
    demand_car_count = [x.num_cars for x in sorted_demands]

    cumulative_values = np.cumsum(demand_car_count) / sum(demand_car_count)
    pylab.plt.plot(np.array(demand_time_line) / 3600,
                   cumulative_values,
                   label='Demands')

    summary = data_parser.parse_summary_file(
        'MillValley_RevRd_noTFL/output_std_0.5_portion_1/summary.xml')
    time_line = np.array(summary['time']) / 3600
    cumulative_values = np.array(summary['ended']) / sum(demand_car_count)
    pylab.plt.plot(time_line, cumulative_values, label='New scenario')

    summary = data_parser.parse_summary_file(
        'MillValley_auto_routing_baseline/output_std_0.5_portion_1/summary.xml'
    )
    time_line = np.array(summary['time']) / 3600
    cumulative_values = np.array(summary['ended']) / sum(demand_car_count)
    pylab.plt.plot(time_line, cumulative_values, label='Baseline auto-routing')

    summary = data_parser.parse_summary_file(
        'MillValley_shortest_path_baseline/output_std_0.5_portion_1/summary.xml'
    )
    time_line = np.array(summary['time']) / 3600
    cumulative_values = np.array(summary['ended']) / sum(demand_car_count)
    pylab.plt.plot(time_line, cumulative_values, label='Baseline fixed path')
    visualizer.add_pertentage_interception_lines(time_line, cumulative_values,
                                                 [0.5, .9, .95])

    pylab.plt.xlabel('Time [h]')
    pylab.plt.ylabel('Cumulative fraction of total vehicles')
    ax.autoscale_view(True, True, True)
    # pylab.plt.xlim(0, 8)
    pylab.plt.legend(loc='lower right')
    pylab.savefig(
        os.path.join(output_dir, 'MV_evacuation_curve_std_0.5_comparison.pdf'))
Example 8
    def plot_individual_detector(cls,
                                 detector_trajectory_folder=None,
                                 output_figure_folder=None):
        """Plot individual detector flow density.

    It is highly recommanded to set all detectors with the same time bin width,
    so that the data can be processed with the same script.
    `detector_trajectory_folder` has the *.pkl files processed by function
    `get_save_detector_data` from
    `research/simulation/traffic:simulation_data_parser`. Since this function
    will process all *.pkl files in the folder, irrelavant files should not be
    put in the folder. There are two types of vehicle counts in the detector
    output xml file, `nVehContrib` and `nVehEntered`. `nVehContrib` is the
    number of vehicles that have completely passed the detector within the
    interval. `nVehEntered` All vehicles that have touched the detector.
    Includes vehicles which have not passed the detector completely (and which
    do not contribute to collected values). This function uses the first one to
    avoide repeated counts for a same cars.

    Args:
      detector_trajectory_folder:
      output_figure_folder:
    """
        detector_pkl_file_list = os.listdir(detector_trajectory_folder)
        for trajectory_file in detector_pkl_file_list:
            if not trajectory_file.endswith('.pkl'):
                continue
            print('Plotting file: ', trajectory_file)
            detector_data = file_util.load_variable(
                detector_trajectory_folder + trajectory_file)
            detector_name = os.path.splitext(trajectory_file)[0]
            output_figure = detector_name + '.pdf'
            fig = pylab.figure(figsize=(8, 6))
            fig.add_subplot(111)
            pylab.plt.plot(
                np.array(detector_data['begin']) / 3600,
                np.array(detector_data['nVehContrib']))
            pylab.plt.xlabel('Time [h]')
            pylab.plt.ylabel('Traffic flow [number of cars / min]')
            pylab.plt.title(detector_name)
            if output_figure_folder is not None:
                output_figure_path = os.path.join(output_figure_folder,
                                                  output_figure)
                logging.info('Save figure to %s.', output_figure_path)
                pylab.savefig(output_figure_path)
            pylab.plt.close()
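
# The detector pkl files consumed above come from SUMO induction-loop (e1
# detector) output, where each <interval> element carries attributes such as
# `begin`, `end`, `nVehContrib`, `nVehEntered`, and `speed`. A minimal sketch
# of turning such a file into the dict-of-lists shape used here; the actual
# parser lives in simulation_data_parser, so this helper is an assumption:
import collections
import xml.etree.ElementTree as ElementTree

def parse_detector_xml(detector_xml_file):
    """Collects selected attributes of every <interval> element into lists."""
    detector_data = collections.defaultdict(list)
    for _, element in ElementTree.iterparse(detector_xml_file):
        if element.tag != 'interval':
            continue
        detector_data['begin'].append(float(element.get('begin')))
        detector_data['nVehContrib'].append(float(element.get('nVehContrib')))
        element.clear()  # Free memory for already-processed elements.
    return detector_data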
Example 9
    def plot_detector_arrival_time_by_group(cls, detector_pkl_files,
                                            output_figure_folder):
        """Plots the data of all detectors."""
        vehicle_count_series = 0
        for pkl_file in detector_pkl_files:
            detector_data = file_util.load_variable(pkl_file)
            vehicle_count_series += np.array(detector_data['nVehContrib'])
        # All detector files are assumed to share the same time bins.
        time_line = np.array(detector_data['begin']) / 3600
        cls.plot_detector_arrival_time(
            time_line,
            vehicle_count_series,
            evacuation_density_figure_path=os.path.join(
                output_figure_folder, 'detector_evacuation_density.pdf'),
            evacuation_cumulative_figure_path=os.path.join(
                output_figure_folder, 'detector_evacuation_cumulative.pdf'))
        logging.info('Total number of cars: %s.',
                     np.sum(vehicle_count_series))
        print('Total number of cars: ', np.sum(vehicle_count_series))
Example 10
    def test_save_batch_edge_id_to_trajectory(self):
        # Case 1: Extract and save the data into batches.
        fcd_file = 'freeway_sparse.fcd.xml'
        fcd_file_path = _load_file(self._testdata_dir, fcd_file)
        time_segment_length = 3
        time_range = [0, 10]
        test_edges = ['27628577#0', '367132267#0', '700010432']
        self._data_parser.save_batch_edge_id_to_trajectory(
            fcd_file_path,
            test_edges,
            time_range=time_range,
            time_segment_length=time_segment_length,
            parse_time_step=1,
            output_folder=self._output_dir)

        # There are 3 output files.
        actual_output_1 = os.path.join(self._output_dir,
                                       'edge_id_to_trajectory_0_3.pkl')
        actual_output_2 = os.path.join(self._output_dir,
                                       'edge_id_to_trajectory_3_6.pkl')
        actual_output_3 = os.path.join(self._output_dir,
                                       'edge_id_to_trajectory_6_9.pkl')
        self.assertTrue(file_util.f_exists(actual_output_1))
        self.assertTrue(file_util.f_exists(actual_output_2))
        self.assertTrue(file_util.f_exists(actual_output_3))

        actual_dictionary = file_util.load_variable(actual_output_1)
        self.assertListEqual(
            actual_dictionary['27628577#0']['time'],
            [0.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0])
        actual_dictionary = file_util.load_variable(actual_output_2)
        self.assertListEqual(actual_dictionary['27628577#0']['time'], [
            4.0, 4.0, 4.0, 4.0, 4.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 6.0, 6.0,
            6.0, 6.0, 6.0, 6.0, 6.0
        ])
        actual_dictionary = file_util.load_variable(actual_output_3)
        self.assertListEqual(actual_dictionary['27628577#0']['time'], [
            7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 8.0, 8.0, 8.0, 8.0, 8.0,
            8.0, 8.0, 8.0, 8.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0,
            9.0
        ])

        # Case 2: Extract and save the file as a single batch.
        time_segment_length = None
        time_range = [0, 20]
        test_edges = ['27628577#0', '367132267#0', '700010432']
        self._data_parser.save_batch_edge_id_to_trajectory(
            fcd_file_path,
            test_edges,
            time_range=time_range,
            time_segment_length=time_segment_length,
            parse_time_step=1,
            output_folder=self._output_dir)

        # There is 1 output file.
        actual_output = os.path.join(self._output_dir,
                                     'edge_id_to_trajectory_0_9.pkl')
        self.assertTrue(file_util.f_exists(actual_output))
        actual_dictionary = file_util.load_variable(actual_output)
        self.assertListEqual(actual_dictionary['27628577#0']['time'], [
            0.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
            4.0, 4.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 6.0, 6.0,
            6.0, 6.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 8.0, 8.0, 8.0,
            8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0,
            9.0, 9.0, 9.0
        ])

        # Case 3: Extract and save the file as a single batch without specifying the
        # `time_range`. By default, it will use the smallest and largest time point
        # as its lower and upper bound.
        time_segment_length = None
        time_range = None
        test_edges = ['27628577#0', '367132267#0', '700010432']
        self._data_parser.save_batch_edge_id_to_trajectory(
            fcd_file_path,
            test_edges,
            time_range=time_range,
            time_segment_length=time_segment_length,
            parse_time_step=1,
            output_folder=self._output_dir)

        # There is 1 output file.
        actual_output = os.path.join(self._output_dir,
                                     'edge_id_to_trajectory_0_9.pkl')
        self.assertTrue(file_util.f_exists(actual_output))
        actual_dictionary = file_util.load_variable(actual_output)
        self.assertListEqual(actual_dictionary['27628577#0']['time'], [
            0.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
            4.0, 4.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 6.0, 6.0,
            6.0, 6.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 8.0, 8.0, 8.0,
            8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0,
            9.0, 9.0, 9.0
        ])

        # Case 4: Extract and save the file as a single batch without specifying the
        # `time_range` but with the `time_segment_length`. By default, it will use
        # the smallest and largest time point as its lower and upper bound
        # respectively.
        time_segment_length = 4
        time_range = None
        test_edges = ['27628577#0', '367132267#0', '700010432']
        self._data_parser.save_batch_edge_id_to_trajectory(
            fcd_file_path,
            test_edges,
            time_range=time_range,
            time_segment_length=time_segment_length,
            parse_time_step=1,
            output_folder=self._output_dir)

        # There are 3 output files.
        actual_output_1 = os.path.join(self._output_dir,
                                       'edge_id_to_trajectory_0_4.pkl')
        actual_output_2 = os.path.join(self._output_dir,
                                       'edge_id_to_trajectory_4_8.pkl')
        actual_output_3 = os.path.join(self._output_dir,
                                       'edge_id_to_trajectory_8_9.pkl')
        self.assertTrue(file_util.f_exists(actual_output_1))
        self.assertTrue(file_util.f_exists(actual_output_2))
        self.assertTrue(file_util.f_exists(actual_output_3))
        actual_dictionary = file_util.load_variable(actual_output_1)
        self.assertListEqual(actual_dictionary['27628577#0']['time'], [
            0.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
            4.0, 4.0
        ])
        actual_dictionary = file_util.load_variable(actual_output_2)
        self.assertListEqual(actual_dictionary['27628577#0']['time'], [
            5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0,
            7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 8.0, 8.0, 8.0, 8.0, 8.0,
            8.0, 8.0, 8.0, 8.0
        ])
        actual_dictionary = file_util.load_variable(actual_output_3)
        self.assertListEqual(
            actual_dictionary['27628577#0']['time'],
            [9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0])
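
# The four cases above pin down the batching rule: the lower bound is
# time_range[0] (or the smallest observed time point), the upper bound is the
# smaller of time_range[1] and the largest observed time point, and batches
# are cut every `time_segment_length` seconds. A hedged sketch reproducing the
# boundaries the expected file names encode (the real
# save_batch_edge_id_to_trajectory logic may differ):
def batch_boundaries(low, high, segment_length):
    """Yields the (start, end) pairs that name the output pkl files."""
    if segment_length is None:
        yield (low, high)
        return
    start = low
    while start < high:
        yield (start, min(start + segment_length, high))
        start += segment_length

# batch_boundaries(0, 9, 3)    -> (0, 3), (3, 6), (6, 9)   # Case 1
# batch_boundaries(0, 9, None) -> (0, 9)                   # Cases 2 and 3
# batch_boundaries(0, 9, 4)    -> (0, 4), (4, 8), (8, 9)   # Case 4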
Example 11
def scenarios_summary_comparison(output_dir):
    """Compare different scenarios."""
    data_parser = simulation_data_parser.SimulationDataParser()
    visualizer = map_visualizer.MapVisualizer()

    fig = pylab.figure(figsize=(8, 6))
    ax = fig.add_subplot(111)

    demands = file_util.load_variable(
        'Paradise_template/demands/demands_taz_tuple_std_0.7.pkl')
    sorted_demands = sorted(demands, key=lambda x: x.time)
    demand_time_line = [x.time for x in sorted_demands]
    demand_car_count = [x.num_cars for x in sorted_demands]

    cumulative_values = np.cumsum(demand_car_count) / sum(demand_car_count)
    pylab.plt.plot(np.array(demand_time_line) / 3600,
                   cumulative_values,
                   ':',
                   label='Demands',
                   color='black')

    summary = data_parser.parse_summary_file(
        'Paradise_RevRd_noTFL/output_std_0.7/summary.xml')
    time_line = np.array(summary['time']) / 3600
    cumulative_values = np.array(summary['ended']) / sum(demand_car_count)
    pylab.plt.plot(time_line, cumulative_values, '--', label='No block')

    summary = data_parser.parse_summary_file(
        'Paradise_RevRd_noTFL_road_blocker/output_std_0.7_road_block_21600/summary.xml'
    )
    time_line = np.array(summary['time']) / 3600
    cumulative_values = np.array(summary['ended']) / sum(demand_car_count)
    pylab.plt.plot(time_line,
                   cumulative_values,
                   label='t=5 h',
                   color=pylab.plt.cm.jet(1 / 6))

    summary = data_parser.parse_summary_file(
        'Paradise_RevRd_noTFL_road_blocker/output_std_0.7_road_block_18000/summary.xml'
    )
    time_line = np.array(summary['time']) / 3600
    cumulative_values = np.array(summary['ended']) / sum(demand_car_count)
    pylab.plt.plot(time_line,
                   cumulative_values,
                   label='t=4 h',
                   color=pylab.plt.cm.jet(2 / 6))

    summary = data_parser.parse_summary_file(
        'Paradise_RevRd_noTFL_road_blocker/output_std_0.7_road_block_10800/summary.xml'
    )
    time_line = np.array(summary['time']) / 3600
    cumulative_values = np.array(summary['ended']) / sum(demand_car_count)
    pylab.plt.plot(time_line,
                   cumulative_values,
                   label='t=3 h',
                   color=pylab.plt.cm.jet(3 / 6))

    summary = data_parser.parse_summary_file(
        'Paradise_RevRd_noTFL_road_blocker/output_std_0.7_road_block_7200/summary.xml'
    )
    time_line = np.array(summary['time']) / 3600
    cumulative_values = np.array(summary['ended']) / sum(demand_car_count)
    pylab.plt.plot(time_line,
                   cumulative_values,
                   label='t=2 h',
                   color=pylab.plt.cm.jet(4 / 6))

    summary = data_parser.parse_summary_file(
        'Paradise_RevRd_noTFL_road_blocker/output_std_0.7_road_block_3600/summary.xml'
    )
    time_line = np.array(summary['time']) / 3600
    cumulative_values = np.array(summary['ended']) / sum(demand_car_count)
    pylab.plt.plot(time_line,
                   cumulative_values,
                   label='t=1 h',
                   color=pylab.plt.cm.jet(5 / 6))

    summary = data_parser.parse_summary_file(
        'Paradise_RevRd_noTFL_road_blocker/output_std_0.7_road_block_0/summary.xml'
    )
    time_line = np.array(summary['time']) / 3600
    cumulative_values = np.array(summary['ended']) / sum(demand_car_count)
    pylab.plt.plot(time_line,
                   cumulative_values,
                   label='t=0 h',
                   color=pylab.plt.cm.jet(0.95))
    # visualizer.add_pertentage_interception_lines(
    #     time_line, cumulative_values, [0.5, .9, .95])

    pylab.plt.xlabel('Time [h]')
    pylab.plt.ylabel('Cumulative fraction of total vehicles')
    ax.autoscale_view(True, True, True)
    pylab.plt.xlim(1, 6)
    pylab.plt.legend(loc='lower right')
    pylab.savefig(
        os.path.join(output_dir,
                     'evacuation_curve_std_0.7_road_block_comparison.pdf'))
Example 12
    def test_save_load_variable(self):
        file_path = os.path.join(self._output_dir, 'test_output_data.pkl')

        # Case 1: Nested dictionary.
        data = {'zz': 1, 'b': 234, 123: 'asdfa', 'dict': {'a': 123, 't': 123}}
        file_util.save_variable(file_path, data)
        actual_variable = file_util.load_variable(file_path)
        self.assertEqual(data, actual_variable)
        self.assertIsInstance(actual_variable, dict)

        # Case 2: 2-level nested dictionary.
        data = collections.defaultdict(lambda: collections.defaultdict(list))
        data['first']['A'] = [1, 2, 3]
        data['first']['B'] = [1, 2, 3]
        data['second']['B'] = [1, 2, 3]
        data['second']['C'] = [1, 2, 3]
        data['third']['C'] = [1, 2, 3]
        data['third']['D'] = [1, 2, 3]
        data['path'] = 'asdfas/asdf/asdfasdf/'
        file_util.save_variable(file_path, data)
        actual_variable = file_util.load_variable(file_path)
        self.assertEqual(data, actual_variable)
        self.assertIsInstance(actual_variable, dict)

        # Case 3: Large array. If the size is too large, the test will timeout.
        data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] * 10000
        file_util.save_variable(file_path, data)
        actual_variable = file_util.load_variable(file_path)
        self.assertListEqual(data, actual_variable)
        self.assertIsInstance(actual_variable, list)

        # Case 4: numpy array.
        data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] * 10
        data = np.array(data)
        file_util.save_variable(file_path, data)
        actual_variable = file_util.load_variable(file_path)
        np.testing.assert_array_equal(data, actual_variable)
        self.assertIsInstance(actual_variable, np.ndarray)

        # Case 5: A list of tuples.
        x = [1, 2, 3]
        y = ['a', 'b', 'c']
        data = zip(x, y)
        # Saving the zip object does not consume the iterator.
        file_util.save_variable(file_path, data)
        actual_variable = file_util.load_variable(file_path)
        # Python 2 restores `actual_variable` as a list, whereas Python 3
        # restores it as an iterator.
        self.assertListEqual(list(actual_variable), list(data))

        # Case 6: In Python 2, an itertools.tee object cannot be saved by
        # cPickle; in Python 3, it can.
        x = [1, 2, 3]
        y = ['a', 'b', 'c']
        data = zip(x, y)
        data_tee, _ = itertools.tee(data)
        python_version = sys.version_info[0]
        try:
            file_util.save_variable(file_path, data_tee)
            pickle_save_correctly = True
        except cPickle.PicklingError:
            pickle_save_correctly = False
        self.assertTrue((pickle_save_correctly and python_version == 3)
                        or (not pickle_save_correctly and python_version == 2))
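
# These tests exercise a pickle round trip. A minimal sketch of the
# `file_util.save_variable` / `load_variable` pair they assume; note the real
# helpers must do more than plain pickle, since Case 2 saves a defaultdict
# whose default_factory is a lambda (unpicklable as-is), e.g. by converting it
# to a plain dict first.
import pickle

def save_variable(file_path, variable):
    """Pickles `variable` to `file_path`."""
    with open(file_path, 'wb') as f:
        pickle.dump(variable, f)

def load_variable(file_path):
    """Restores a variable saved by `save_variable`."""
    with open(file_path, 'rb') as f:
        return pickle.load(f)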