    def get_and_save_detector_data(cls, detector_file, output_file=None):
        """Gets and saves detector recordings."""
        detector_data = collections.defaultdict(list)
        for interval in sumolib.xml.parse_fast(detector_file, 'interval',
                                               _DEFAULT_DETECTOR_FILE_ENTRIES):
            for attribute in _DEFAULT_DETECTOR_FILE_ENTRIES:
                attribute_item = operator.attrgetter(attribute)(interval)
                # If the item parses as a number, convert it to a float;
                # otherwise keep it as a string.
                try:
                    attribute_item = float(attribute_item)
                except ValueError:
                    pass
                detector_data[attribute].append(attribute_item)
        if output_file is not None:
            file_util.save_variable(output_file, detector_data)
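
    # A minimal usage sketch (hedged): the enclosing class is not shown in this
    # snippet, so `DataExplorer` and the file names are placeholders, and
    # `_DEFAULT_DETECTOR_FILE_ENTRIES` is assumed to name the <interval>
    # attributes to extract (e.g. 'begin', 'end', 'id', 'flow', 'speed').
    #
    #   DataExplorer.get_and_save_detector_data(
    #       'detector.out.xml', output_file='detector_data.pkl')
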
    def save_batch_edge_id_to_trajectory(cls,
                                         fcd_file,
                                         edge_id_list,
                                         time_range=None,
                                         time_segment_length=None,
                                         parse_time_step=10,
                                         output_folder=None):
        """Extracts and saves the information from the FCD file by edges.

    Args:
      fcd_file: input simulation report fcd file.
      edge_id_list: if this is not specified, the function will collect
      information for all edges.
      time_range: A time interval. If `time_range` is None, it will be set as
          [None, inf]. If the lower bound is set as None, it will be set as the
          smallest time point. It can also be set as something like [None, 3600]
          with specific upper bound, but None lower bound. So it will
          automatically find the smallest time point. If the upper bound is set
          as inf, then the file will be scanned to the end.
      time_segment_length: The step size of the time segment. By default it is
          infinity.
      parse_time_step: The code only reads time slot which is multiple of the
          `parse_time_step`.
      output_folder:
    """
        if isinstance(edge_id_list, str):
            edge_id_list = [edge_id_list]
        for i, edge in enumerate(edge_id_list):
            if isinstance(edge, (str, six.text_type)):
                continue
            elif isinstance(edge, sumolib.net.edge.Edge):
                edge_id_list[i] = edge.getID()
            else:
                raise TypeError(
                    'Entries of the edge list must be either strings or '
                    'sumolib Edge objects.')

        if time_segment_length is None:
            time_segment_length = float('inf')
        # If `time_range` is None, the range is fitted automatically; the lower
        # bound is acquired from the first timestamp inside the loop below.
        if time_range is None:
            time_range = [None, float('inf')]
        # A None lower bound (e.g. `time_range=[None, 3600]`) is also resolved
        # inside the loop, which avoids computing `None + time_segment_length`.
        if time_range[0] is not None:
            current_interval = [
                time_range[0], time_range[0] + time_segment_length
            ]
        edge_id_to_trajectory = collections.defaultdict(
            lambda: collections.defaultdict(list))
        timestamp = 0.
        for timestep, vehicle in sumolib.xml.parse_fast_nested(
                fcd_file, 'timestep', ['time'], 'vehicle',
                _DEFAULT_FCD_FILE_ENTRIES):
            # Only read time points that are multiples of `parse_time_step`.
            # Skip time points before the lower bound of `time_range`.
            timestamp = float(timestep.time)
            if time_range[0] is None:  # Automatically adjust the lower bound.
                time_range[0] = timestamp
                current_interval = [timestamp, timestamp + time_segment_length]
            if (int(timestamp % parse_time_step) != 0
                    or timestamp < time_range[0]):
                continue
            # Quit after reaching the end of `time_range`.
            if timestamp > time_range[1]:
                break
            # When a time segment ends, save its data, then reset
            # `current_interval` and `edge_id_to_trajectory` for the new
            # segment.
            if timestamp > current_interval[1]:
                logging.info('Currently reading at time: %s', timestamp)
                # Save the current data.
                edge_id_to_trajectory['time_interval'] = current_interval
                file_name = (
                    'edge_id_to_trajectory_%s_%s.pkl' %
                    (int(current_interval[0]), int(current_interval[1])))
                file_path = os.path.join(output_folder, file_name)
                logging.info('Saving file: %s', file_path)
                file_util.save_variable(file_path, edge_id_to_trajectory)
                # Set the new interval.
                new_interval_left_end = (
                    current_interval[0] + time_segment_length)
                new_interval_right_end = min(
                    time_range[1],
                    current_interval[0] + time_segment_length * 2)
                current_interval = [
                    new_interval_left_end, new_interval_right_end
                ]
                logging.info('New interval: %s', current_interval)
                # Clear the `edge_id_to_trajectory` for the new time segment.
                edge_id_to_trajectory = collections.defaultdict(
                    lambda: collections.defaultdict(list))
            # Load each time point data.
            edge_id = vehicle.lane.split('_')[0]
            if edge_id not in edge_id_list:
                continue
            edge_id_to_trajectory[edge_id]['time'].append(timestamp)
            edge_id_to_trajectory[edge_id]['vehicle'].append(vehicle.id)
            edge_id_to_trajectory[edge_id]['speed'].append(float(
                vehicle.speed))
            edge_id_to_trajectory[edge_id]['pos'].append(float(vehicle.pos))
            edge_id_to_trajectory[edge_id]['x'].append(float(vehicle.x))
            edge_id_to_trajectory[edge_id]['y'].append(float(vehicle.y))
            edge_id_to_trajectory[edge_id]['angle'].append(float(
                vehicle.angle))

        # Save the last interval's data.
        if edge_id_to_trajectory:
            # The last time point is the upper bound of the final interval.
            current_interval[1] = timestamp
            edge_id_to_trajectory['time_interval'] = current_interval
            file_name = ('edge_id_to_trajectory_%s_%s.pkl' %
                         (int(current_interval[0]), int(current_interval[1])))
            file_path = os.path.join(output_folder, file_name)
            logging.info('Saving file: %s', file_path)
            file_util.save_variable(file_path, edge_id_to_trajectory)
        else:
            logging.warning('Empty output variable.')
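
    # A minimal usage sketch (hedged): `DataExplorer`, the edge IDs, and the
    # paths are placeholders. This reads FCD timesteps at a 10 s resolution,
    # splits them into one-hour segments starting at the first timestamp, and
    # writes one `edge_id_to_trajectory_<begin>_<end>.pkl` file per segment.
    #
    #   DataExplorer.save_batch_edge_id_to_trajectory(
    #       'fcd_output.xml', ['edge_a', 'edge_b'],
    #       time_range=[None, 7200], time_segment_length=3600,
    #       parse_time_step=10, output_folder='output/trajectories')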
    def generate_evacuation_shortest_path_demands(
            self, residential_car_density, serving_car_density,
            evacuation_edges, demand_mean_hours, demand_stddev_hours,
            population_portion):
        """Generates evacuation demands."""
        net = sumolib.net.readNet(self._sumo_net_file)
        traffic_generator = random_traffic_generator.RandomTrafficGenerator(
            net)
        visualizer = map_visualizer.MapVisualizer(net)

        print('Generating shortest path demands with STD: ',
              demand_stddev_hours, ' Portion: ', population_portion)

        # Calculate the distance to the evacuation exits.
        evacuation_path_trees = {}
        evacuation_path_length = {}
        for exit_edge in evacuation_edges:
            (evacuation_path_trees[exit_edge],
             evacuation_path_length[exit_edge]) = (
                 net.getRestrictedShortestPathsTreeToEdge(exit_edge))

        # Demands from residential roads.
        residential_edge_type = ['highway.residential']
        residential_edges = net.filterEdges(residential_edge_type)
        demand_mean_seconds = demand_mean_hours * 60 * 60
        demand_stddev_seconds = demand_stddev_hours * 60 * 60
        time_sampler_parameters = random_traffic_generator.TimeSamplerGammaMeanStd(
            demand_mean_seconds, demand_stddev_seconds)
        car_per_meter_residential = residential_car_density * population_portion

        np.random.seed(FLAGS.random_seed)
        residential = traffic_generator.create_evacuation_shortest_path_demands(
            residential_edges, time_sampler_parameters,
            car_per_meter_residential, evacuation_edges, evacuation_path_trees,
            evacuation_path_length)

        # Demands from parking roads.
        parking_edge_type = ['highway.service']
        parking_edges = net.filterEdges(parking_edge_type)
        time_sampler_parameters = random_traffic_generator.TimeSamplerGammaMeanStd(
            demand_mean_seconds, demand_stddev_seconds)
        car_per_meter_parking = serving_car_density * population_portion

        parking = traffic_generator.create_evacuation_shortest_path_demands(
            parking_edges, time_sampler_parameters, car_per_meter_parking,
            evacuation_edges, evacuation_path_trees, evacuation_path_length)

        all_demands = residential + parking
        departure_time_points = [x.time for x in all_demands]
        cars_per_time_point = [x.num_cars for x in all_demands]
        departure_time_points = np.array(departure_time_points) / 3600
        print('Shortest path demands. Total vehicles: ',
              sum(cars_per_time_point))

        # Output the demand xml file.
        demands_dir = os.path.join(self._output_dir, _DEMANDS)
        file_util.f_makedirs(demands_dir)
        output_hist_figure_path = os.path.join(
            demands_dir,
            'departure_time_histogram_shortest_path_std_%s_portion_%s.pdf' %
            (demand_stddev_hours, population_portion))
        output_cumulative_figure_path = os.path.join(
            demands_dir,
            'departure_time_cumulative_shortest_path_std_%s_portion_%s.pdf' %
            (demand_stddev_hours, population_portion))
        pkl_file = os.path.join(
            demands_dir, 'demands_shortest_path_tuple_std_%s_portion_%s.pkl' %
            (demand_stddev_hours, population_portion))
        routes_file = os.path.join(
            demands_dir, 'demands_shortest_path_std_%s_portion_%s.rou.xml' %
            (demand_stddev_hours, population_portion))

        visualizer.plot_demands_departure_time(
            departure_time_points,
            cars_per_time_point,
            output_hist_figure_path=output_hist_figure_path,
            output_cumulative_figure_path=output_cumulative_figure_path)
        file_util.save_variable(pkl_file, all_demands)
        traffic_generator.write_evacuation_vehicle_path_demands(
            all_demands, routes_file)
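
    # A minimal usage sketch (hedged): `scenario` stands for an instance of the
    # enclosing class (not shown in this snippet), and all argument values are
    # illustrative placeholders, not calibrated densities.
    #
    #   scenario.generate_evacuation_shortest_path_demands(
    #       residential_car_density=0.04,
    #       serving_car_density=0.09,
    #       evacuation_edges=['exit_edge_1', 'exit_edge_2'],
    #       demand_mean_hours=3.0,
    #       demand_stddev_hours=1.0,
    #       population_portion=1.0)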
    def generate_evacuation_taz_demands(self, residential_car_density,
                                        serving_car_density, demand_mean_hours,
                                        demand_stddev_hours,
                                        population_portion):
        """Generates evacuation TAZ demands."""

        # TODO(yusef): Fix map + total number of cars.
        # To make the demands consistent, use the default map,
        # paradise_type.net.xml, as the input map instead of the reversed one.
        # For the Paradise map, an easy way to check is that the total number
        # of cars is 11072.
        net = sumolib.net.readNet(self._sumo_net_file)
        traffic_generator = random_traffic_generator.RandomTrafficGenerator(
            net)
        visualizer = map_visualizer.MapVisualizer(net)

        print('Generating TAZ demands with STD: ', demand_stddev_hours,
              ' Portion: ', population_portion)

        # Demands from residential roads.
        residential_edge_type = ['highway.residential']
        residential_edges = net.filterEdges(residential_edge_type)
        demand_mean_seconds = demand_mean_hours * 60 * 60
        demand_stddev_seconds = demand_stddev_hours * 60 * 60
        time_sampler_parameters = random_traffic_generator.TimeSamplerGammaMeanStd(
            demand_mean_seconds, demand_stddev_seconds)
        car_per_meter_residential = residential_car_density * population_portion

        np.random.seed(FLAGS.random_seed)
        residential = traffic_generator.create_evacuation_auto_routing_demands(
            residential_edges, time_sampler_parameters,
            car_per_meter_residential)

        # Demands from parking roads.
        parking_edge_type = ['highway.service']
        parking_edges = net.filterEdges(parking_edge_type)
        time_sampler_parameters = random_traffic_generator.TimeSamplerGammaMeanStd(
            demand_mean_seconds, demand_stddev_seconds)
        car_per_meter_parking = serving_car_density * population_portion

        parking = traffic_generator.create_evacuation_auto_routing_demands(
            parking_edges, time_sampler_parameters, car_per_meter_parking)

        all_demands = residential + parking
        departure_time_points = [x.time for x in all_demands]
        cars_per_time_point = [x.num_cars for x in all_demands]
        departure_time_points = np.array(departure_time_points) / 3600
        print('TAZ demands. Total vehicles: ', sum(cars_per_time_point))

        # TODO(yusef): reconcile.
        demands_dir = os.path.join(self._output_dir, _DEMANDS)
        file_util.f_makedirs(demands_dir)
        output_hist_figure_path = os.path.join(
            demands_dir, 'departure_time_histogram_taz_std_%s_portion_%s.pdf' %
            (demand_stddev_hours, population_portion))
        output_cumulative_figure_path = os.path.join(
            demands_dir,
            'departure_time_cumulative_taz_std_%s_portion_%s.pdf' %
            (demand_stddev_hours, population_portion))
        pkl_file = os.path.join(
            demands_dir, 'demands_taz_tuple_std_%s_portion_%s.pkl' %
            (demand_stddev_hours, population_portion))
        routes_file = os.path.join(
            demands_dir, 'demands_taz_std_%s_portion_%s.rou.xml' %
            (demand_stddev_hours, population_portion))

        # Output the demand xml file.
        visualizer.plot_demands_departure_time(
            departure_time_points,
            cars_per_time_point,
            output_hist_figure_path=output_hist_figure_path,
            output_cumulative_figure_path=output_cumulative_figure_path)
        file_util.save_variable(pkl_file, all_demands)
        exit_taz = 'exit_taz'
        traffic_generator.write_evacuation_vehicle_auto_routing_demands(
            all_demands, exit_taz, routes_file)
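
    # A minimal usage sketch (hedged), mirroring the shortest-path variant
    # above; the values are illustrative placeholders. The generated routes are
    # written against the 'exit_taz' TAZ.
    #
    #   scenario.generate_evacuation_taz_demands(
    #       residential_car_density=0.04,
    #       serving_car_density=0.09,
    #       demand_mean_hours=3.0,
    #       demand_stddev_hours=1.0,
    #       population_portion=0.5)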
    def test_save_load_variable(self):
        file_path = os.path.join(self._output_dir, 'test_output_data.pkl')

        # Case 1: Nested dictionary.
        data = {'zz': 1, 'b': 234, 123: 'asdfa', 'dict': {'a': 123, 't': 123}}
        file_util.save_variable(file_path, data)
        actual_variable = file_util.load_variable(file_path)
        self.assertEqual(data, actual_variable)
        self.assertIsInstance(actual_variable, dict)

        # Case 2: 2-level nested dictionary.
        data = collections.defaultdict(lambda: collections.defaultdict(list))
        data['first']['A'] = [1, 2, 3]
        data['first']['B'] = [1, 2, 3]
        data['second']['B'] = [1, 2, 3]
        data['second']['C'] = [1, 2, 3]
        data['third']['C'] = [1, 2, 3]
        data['third']['D'] = [1, 2, 3]
        data['path'] = 'asdfas/asdf/asdfasdf/'
        file_util.save_variable(file_path, data)
        actual_variable = file_util.load_variable(file_path)
        self.assertEqual(data, actual_variable)
        self.assertIsInstance(actual_variable, dict)

        # Case 3: Large array. If the size is too large, the test will time
        # out.
        data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] * 10000
        file_util.save_variable(file_path, data)
        actual_variable = file_util.load_variable(file_path)
        self.assertListEqual(data, actual_variable)
        self.assertIsInstance(actual_variable, list)

        # Case 4: numpy array.
        data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] * 10
        data = np.array(data)
        file_util.save_variable(file_path, data)
        actual_variable = file_util.load_variable(file_path)
        np.testing.assert_array_equal(data, actual_variable)
        self.assertIsInstance(actual_variable, np.ndarray)

        # Case 5: A list of tuples.
        x = [1, 2, 3]
        y = ['a', 'b', 'c']
        data = zip(x, y)
        # Saving the zip object does not consume the underlying iterator.
        file_util.save_variable(file_path, data)
        actual_variable = file_util.load_variable(file_path)
        # Python 2 loads `actual_variable` as a list, whereas Python 3 loads
        # it as an iterator.
        self.assertListEqual(list(actual_variable), list(data))

        # Case 6: In Python 2, an itertools.tee object cannot be saved by
        # cPickle; in Python 3, it can.
        x = [1, 2, 3]
        y = ['a', 'b', 'c']
        data = zip(x, y)
        data_tee, _ = itertools.tee(data)
        python_version = sys.version_info[0]
        try:
            file_util.save_variable(file_path, data_tee)
            pickle_save_correctly = True
        except cPickle.PicklingError:
            pickle_save_correctly = False
        self.assertEqual(pickle_save_correctly, python_version == 3)