Example #1
  def visualize_fcd_on_map(self):
    """Plot metric maps.

    Pay attention to the map.
    """
    net = sumolib.net.readNet(self._sumo_net_file)
    visualizer = map_visualizer.MapVisualizer(net)
    plot_edges = net.getEdges()

    trajectory_folder = os.path.join(self._output_dir, 'trajectory/')
    output_folder = os.path.join(trajectory_folder, 'trajectory_fig/')
    if not file_util.f_exists(output_folder):
      file_util.f_mkdir(output_folder)

    trajectory_file_list = os.listdir(trajectory_folder)
    # trajectory_file_list = [
    #     'edge_id_to_trajectory_9000_10800.pkl']

    for trajectory_file in trajectory_file_list:
      if not trajectory_file.endswith('.pkl'):
        continue
      trajectory_pkl_file = os.path.join(trajectory_folder, trajectory_file)
      print('Loading file: ', trajectory_pkl_file)
      edge_id_to_trajectory = file_util.load_variable(trajectory_pkl_file)
      print('Time range: ', edge_id_to_trajectory['time_interval'])
      output_figure_path = os.path.join(
          output_folder, 'speed_map_%s_%s.pdf' %
          (int(edge_id_to_trajectory['time_interval'][0]),
           int(edge_id_to_trajectory['time_interval'][1])))

      visualizer.plot_edge_trajectory_histogram_on_map(
          plot_edges,
          edge_id_to_trajectory,
          output_figure_path=output_figure_path,
          plot_max_speed=13.4112)
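A note on the snippet above: `sumolib` ships with SUMO, and its Python tools must be importable, typically by adding $SUMO_HOME/tools to PYTHONPATH. A minimal standalone sketch of the network-loading step, with a hypothetical network file name:

import sumolib  # SUMO's bundled Python tools.

net = sumolib.net.readNet('map.net.xml')  # Hypothetical network file.
edges = net.getEdges()
print('Number of edges:', len(edges))
print('First edge id:', edges[0].getID())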
Example #2
    def _extract_detector_data(self):
        """Extracts detector data form xml files."""
        data_parser = simulation_data_parser.SimulationDataParser()
        visualizer = map_visualizer.MapVisualizer()

        detector_folder = os.path.join(self._output_dir, 'detector/')
        detector_trajectory_folder = os.path.join(detector_folder,
                                                  'detector_trajectory/')

        if not file_util.exists(detector_trajectory_folder):
            file_util.mkdir(detector_trajectory_folder)

        detector_files = os.listdir(detector_folder)
        for detector_file in detector_files:
            if not detector_file.endswith('.xml'):
                continue
            # print('Extract file: ', detector_file)
            output_file = os.path.splitext(detector_file)[0] + '.pkl'
            output_file = os.path.join(detector_trajectory_folder, output_file)
            detector_file = os.path.join(detector_folder, detector_file)
            print('Save file: ', output_file)
            data_parser.get_and_save_detector_data(detector_file, output_file)

        # Creates figures for individual detectors.
        output_figure_folder = os.path.join(detector_folder, 'detector_fig/')
        if not file_util.f_exists(output_figure_folder):
            file_util.f_mkdir(output_figure_folder)
        visualizer.plot_individual_detector(detector_trajectory_folder,
                                            output_figure_folder)
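`file_util` is a project-specific helper module that is not shown in these examples. A plausible pickle-based stand-in for its `load_variable`/`save_variable` pair, offered as an assumption rather than the project's actual implementation:

import pickle

def save_variable(file_path, variable):
    # Assumed behavior: serialize any picklable object to disk.
    with open(file_path, 'wb') as f:
        pickle.dump(variable, f)

def load_variable(file_path):
    # Assumed counterpart: restore the pickled object.
    with open(file_path, 'rb') as f:
        return pickle.load(f)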
Example #3
    def write_evacuation_vehicle_path_demands(cls, zipped_demands,
                                              routes_file):
        r"""Generates demands for residential vehicles.

    The output demand xml file is in the following format. Each entry has the
    information for each vehicle.

    <routes>
        <vType id="passenger" vClass="passenger"/>
        <vehicle id="0" type="passenger" depart="0.00" departLane="best" \
            departPos="base" departSpeed="max">
            <route edges="-8943413#1 -8943413#0  8936970#3 -8936970#3"/>
        </vehicle>
        <vehicle id="0" type="passenger" depart="0.00" departLane="best" \
            departPos="base" departSpeed="max">
            <route edges="-8943413#1 -8943413#0  8936970#3 -8936970#3"/>
        </vehicle>
    </routes>

    Args:
      zipped_demands: The zipped demands from function
          `create_evacuation_shortest_path_demands`.
      routes_file: Output file.
    """
        demands = sorted(zipped_demands, key=lambda x: x.time)

        # Write demands file.
        if file_util.f_exists(routes_file):
            raise ValueError('%s already exists.' % routes_file)
        token = '<routes>\n'
        file_util.append_line_to_file(routes_file, token)
        token = '    <vType id="passenger" vClass="passenger"/>\n'
        file_util.append_line_to_file(routes_file, token)
        for demand in demands:
            if demand.num_cars == 0:
                logging.info('Road edge %s is too short, no demands.',
                             demand.origin)
                continue
            if demand.destination is None:
                logging.warning('Road edge %s is not connected to any exit.',
                                demand.origin)
                continue
            for vehicle_id in range(demand.num_cars):
                token = '    <vehicle id="%s" type="passenger" ' % (
                    demand.origin + '_' + str(vehicle_id))
                token += 'depart="%s" ' % demand.time
                token += 'departLane="best" departPos="random" departSpeed="max" '
                token += 'arrivalPos="max">\n'
                # Remember to add the origin edge to the path list. The shortest path
                # acquired from `create_evacuation_shortest_path_demands` does not
                # include the origin edge. This is due to the algorithm in
                # `sumolib.net.getRestrictedShortestPathsTreeToEdge`.
                token += '        <route edges="%s"/>' % (
                    demand.origin + ' ' + ' '.join(demand.route))
                token += '\n    </vehicle>'
                file_util.append_line_to_file(routes_file, token)
        token = '\n</routes>'
        file_util.append_line_to_file(routes_file, token)
        logging.info('Save file to: %s', routes_file)
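For illustration, each record consumed above exposes `origin`, `time`, `num_cars`, `destination`, and `route` attributes. A hypothetical construction of such demands (the namedtuple and the edge ids are invented for this sketch; the real records come from `create_evacuation_shortest_path_demands`):

import collections

# Field names inferred from the attribute accesses in the method above.
Demand = collections.namedtuple(
    'Demand', ['origin', 'time', 'num_cars', 'destination', 'route'])

demands = [
    Demand(origin='-8943413#1', time=0.0, num_cars=2,
           destination='-8936970#3', route=['-8943413#0', '8936970#3']),
]
# Hypothetical call site; the enclosing class is not shown in this example.
# TrafficGenerator.write_evacuation_vehicle_path_demands(demands, 'demands.rou.xml')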
Example #4
  def write_evacuation_vehicle_auto_routing_demands(cls,
                                                    zipped_demands,
                                                    exit_taz,
                                                    routes_file):
    r"""Generates demands for residential vehicles.

    The output demand xml file is in the following format. Each entry has the
    information for one vehicle. The exits are grouped into a traffic analysis
    zone (TAZ). See
    https://sumo.dlr.de/docs/Definition_of_Vehicles,_Vehicle_Types,_and_Routes.html#traffic_assignement_zones_taz
    for details.

    <routes>
        <vType id="passenger" vClass="passenger"/>
        <trip id="veh_1" depart="11" from="gneE8" departLane="best" \
            departPos="random" departSpeed="max" toTaz="exit_taz"/>
        <trip id="veh_2" depart="13" from="gneE9" departLane="best" \
            departPos="random" departSpeed="max"toTaz="exit_taz"/>
    </routes>

    Args:
      zipped_demands: The zipped demands from function
          `create_evacuation_demands`.
      exit_taz: The name of the TAZ.
      routes_file: Output file.
    """
    sorted_demands = sorted(zipped_demands, key=lambda x: x.time)

    if file_util.f_exists(routes_file):
      raise ValueError('%s already exists.' % routes_file)
    token = '<routes>\n'
    file_util.append_line_to_file(routes_file, token)
    token = '    <vType id="passenger" vClass="passenger"/>\n'
    file_util.append_line_to_file(routes_file, token)
    for demand in sorted_demands:
      if demand.num_cars == 0:
        logging.info('Road edge %s is too short, no demands.', demand.origin)
        continue
      for vehicle in range(demand.num_cars):
        token = '    <trip id="%s" type="passenger" ' % (
            demand.origin + '_' + str(vehicle))
        token += 'depart="%s" ' % demand.time
        token += 'from="%s" ' % demand.origin
        token += 'departLane="best" departPos="random" departSpeed="max" '
        token += 'arrivalPos="max" '
        token += 'toTaz="%s"/>' % exit_taz
        file_util.append_line_to_file(routes_file, token)
    token = '\n</routes>'
    file_util.append_line_to_file(routes_file, token)
    logging.info('Saved file to: %s', routes_file)
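The `toTaz` attribute in the generated trips refers to a TAZ that must be defined in a separate SUMO additional file. A small sketch that writes such a file (the edge ids are hypothetical; the <taz> element itself is standard SUMO syntax):

# Minimal TAZ definition grouping two hypothetical exit edges.
taz_xml = (
    '<additional>\n'
    '    <taz id="exit_taz" edges="gneE10 gneE11"/>\n'
    '</additional>\n')
with open('exit_taz.taz.xml', 'w') as f:
    f.write(taz_xml)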
Example #5
  def parse_fcd_results_single_file(self, hours):
    """Extract the data then save to file."""
    net = sumolib.net.readNet(self._sumo_net_file)
    data_parser = simulation_data_parser.SimulationDataParser()
    plot_edges = net.getEdges()

    fcd_file = os.path.join(self._output_dir, 'traffic.fcd.xml')
    output_folder = os.path.join(self._output_dir, 'trajectory/')
    if not file_util.f_exists(output_folder):
      file_util.f_mkdir(output_folder)

    time_segment_length_seconds = hours * 3600
    time_range_seconds = [0, 3600 * 12]
    data_parser.save_batch_edge_id_to_trajectory(
        fcd_file, plot_edges,
        time_range=time_range_seconds,
        time_segment_length=time_segment_length_seconds,
        parse_time_step=10, output_folder=output_folder)
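For intuition, the call above cuts the 12-hour window into `hours`-long segments, which is where the `<start>_<end>` suffixes of the trajectory pickle files come from. A standalone sketch of that arithmetic, not the parser's internal code:

hours = 2  # Example value.
segment_length_seconds = hours * 3600
start, end = 0, 3600 * 12
segments = [(t, min(t + segment_length_seconds, end))
            for t in range(start, end, segment_length_seconds)]
print(segments)  # [(0, 7200), (7200, 14400), ..., (36000, 43200)]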
Example #6
    def test_append_line_to_file(self):
        r"""Tests the output file.

        The output file contains the following.
        hello world
        (hello) "world"
        (hello) !!!!!!!!!!! @~#$%^&*()_+"world"
        aaaaaaaa
        bbbbbbbbbb
        backslash\ backslash
        backslash\ backslash
        backslash\\ backslash
        backslash\\\ backslash
        backslash\\ backslash
        """
        input_lines = [
            'hello world', '(hello) "world"',
            '(hello) !!!!!!!!!!! @~#$%^&*()_+"world"', 'aaaaaaaa\nbbbbbbbbbb',
            r'backslash\ backslash', 'backslash\\ backslash',
            r'backslash\\ backslash', r'backslash\\\ backslash',
            'backslash\\\\ backslash'
        ]
        file_path = os.path.join(self._output_dir,
                                 'test_append_line_to_file.txt')
        for line in input_lines:
            file_util.append_line_to_file(file_path, line)
        self.assertTrue(file_util.f_exists(file_path))
        # Note the linebreak in input_lines[3].
        target_lines = [
            'hello world', '(hello) "world"',
            '(hello) !!!!!!!!!!! @~#$%^&*()_+"world"', 'aaaaaaaa',
            'bbbbbbbbbb', r'backslash\ backslash', 'backslash\\ backslash',
            r'backslash\\ backslash', r'backslash\\\ backslash',
            'backslash\\\\ backslash'
        ]
        with file_util.f_open(file_path, 'r') as actual_file:
            read_lines = actual_file.readlines()
        for line_counter, line in enumerate(read_lines):
            # A linebreak is appended to each target string.
            self.assertEqual(line, target_lines[line_counter] + '\n')
        self.assertEqual(len(target_lines), len(read_lines))
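The test above pins down the expected semantics of `file_util.append_line_to_file`: each call appends the given string plus a trailing newline, so a string with an embedded linebreak yields multiple lines. A plausible stand-in with that behavior, offered as an assumption rather than the project's actual code:

def append_line_to_file(file_path, line):
    # Assumed behavior, matching the test: append `line` and a newline.
    with open(file_path, 'a') as f:
        f.write(line + '\n')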
Example #7
    def test_save_batch_edge_id_to_trajectory(self):
        # Case 1: Extract and save the data into batches.
        fcd_file = 'freeway_sparse.fcd.xml'
        fcd_file_path = _load_file(self._testdata_dir, fcd_file)
        time_segment_length = 3
        time_range = [0, 10]
        test_edges = ['27628577#0', '367132267#0', '700010432']
        self._data_parser.save_batch_edge_id_to_trajectory(
            fcd_file_path,
            test_edges,
            time_range=time_range,
            time_segment_length=time_segment_length,
            parse_time_step=1,
            output_folder=self._output_dir)

        # There are 3 output files.
        actual_output_1 = os.path.join(self._output_dir,
                                       'edge_id_to_trajectory_0_3.pkl')
        actual_output_2 = os.path.join(self._output_dir,
                                       'edge_id_to_trajectory_3_6.pkl')
        actual_output_3 = os.path.join(self._output_dir,
                                       'edge_id_to_trajectory_6_9.pkl')
        self.assertTrue(file_util.f_exists(actual_output_1))
        self.assertTrue(file_util.f_exists(actual_output_2))
        self.assertTrue(file_util.f_exists(actual_output_3))

        actual_dictionary = file_util.load_variable(actual_output_1)
        self.assertListEqual(
            actual_dictionary['27628577#0']['time'],
            [0.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0])
        actual_dictionary = file_util.load_variable(actual_output_2)
        self.assertListEqual(actual_dictionary['27628577#0']['time'], [
            4.0, 4.0, 4.0, 4.0, 4.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 6.0, 6.0,
            6.0, 6.0, 6.0, 6.0, 6.0
        ])
        actual_dictionary = file_util.load_variable(actual_output_3)
        self.assertListEqual(actual_dictionary['27628577#0']['time'], [
            7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 8.0, 8.0, 8.0, 8.0, 8.0,
            8.0, 8.0, 8.0, 8.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0,
            9.0
        ])

        # Case 2: Extract and save the file as a single batch.
        time_segment_length = None
        time_range = [0, 20]
        test_edges = ['27628577#0', '367132267#0', '700010432']
        self._data_parser.save_batch_edge_id_to_trajectory(
            fcd_file_path,
            test_edges,
            time_range=time_range,
            time_segment_length=time_segment_length,
            parse_time_step=1,
            output_folder=self._output_dir)

        # There is 1 output file.
        actual_output = os.path.join(self._output_dir,
                                     'edge_id_to_trajectory_0_9.pkl')
        self.assertTrue(file_util.f_exists(actual_output))
        actual_dictionary = file_util.load_variable(actual_output)
        self.assertListEqual(actual_dictionary['27628577#0']['time'], [
            0.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
            4.0, 4.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 6.0, 6.0,
            6.0, 6.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 8.0, 8.0, 8.0,
            8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0,
            9.0, 9.0, 9.0
        ])

        # Case 3: Extract and save the file as a single batch without
        # specifying the `time_range`. By default, it uses the smallest and
        # largest time points as the lower and upper bounds.
        time_segment_length = None
        time_range = None
        test_edges = ['27628577#0', '367132267#0', '700010432']
        self._data_parser.save_batch_edge_id_to_trajectory(
            fcd_file_path,
            test_edges,
            time_range=time_range,
            time_segment_length=time_segment_length,
            parse_time_step=1,
            output_folder=self._output_dir)

        # There is 1 output file.
        actual_output = os.path.join(self._output_dir,
                                     'edge_id_to_trajectory_0_9.pkl')
        self.assertTrue(file_util.f_exists(actual_output))
        actual_dictionary = file_util.load_variable(actual_output)
        self.assertListEqual(actual_dictionary['27628577#0']['time'], [
            0.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
            4.0, 4.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 6.0, 6.0,
            6.0, 6.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 8.0, 8.0, 8.0,
            8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0,
            9.0, 9.0, 9.0
        ])

        # Case 4: Extract and save the data in batches without specifying the
        # `time_range` but with the `time_segment_length`. By default, it uses
        # the smallest and largest time points as the lower and upper bounds
        # respectively.
        time_segment_length = 4
        time_range = None
        test_edges = ['27628577#0', '367132267#0', '700010432']
        self._data_parser.save_batch_edge_id_to_trajectory(
            fcd_file_path,
            test_edges,
            time_range=time_range,
            time_segment_length=time_segment_length,
            parse_time_step=1,
            output_folder=self._output_dir)

        # There are 3 output files.
        actual_output_1 = os.path.join(self._output_dir,
                                       'edge_id_to_trajectory_0_4.pkl')
        actual_output_2 = os.path.join(self._output_dir,
                                       'edge_id_to_trajectory_4_8.pkl')
        actual_output_3 = os.path.join(self._output_dir,
                                       'edge_id_to_trajectory_8_9.pkl')
        self.assertTrue(file_util.f_exists(actual_output_1))
        self.assertTrue(file_util.f_exists(actual_output_2))
        self.assertTrue(file_util.f_exists(actual_output_3))
        actual_dictionary = file_util.load_variable(actual_output_1)
        self.assertListEqual(actual_dictionary['27628577#0']['time'], [
            0.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
            4.0, 4.0
        ])
        actual_dictionary = file_util.load_variable(actual_output_2)
        self.assertListEqual(actual_dictionary['27628577#0']['time'], [
            5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0,
            7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 8.0, 8.0, 8.0, 8.0, 8.0,
            8.0, 8.0, 8.0, 8.0
        ])
        actual_dictionary = file_util.load_variable(actual_output_3)
        self.assertListEqual(
            actual_dictionary['27628577#0']['time'],
            [9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0])
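The assertions above also pin down the output naming scheme: one pickle file per time segment, with the last segment truncated at the upper bound. A sketch reproducing the expected file names for case 4, derived from the test rather than from the parser's source:

def expected_output_names(start, end, segment_length):
    # Mirrors the `edge_id_to_trajectory_<start>_<end>.pkl` naming above.
    names = []
    lower = start
    while lower < end:
        upper = min(lower + segment_length, end)
        names.append('edge_id_to_trajectory_%d_%d.pkl' % (lower, upper))
        lower = upper
    return names

print(expected_output_names(0, 9, 4))
# ['edge_id_to_trajectory_0_4.pkl', 'edge_id_to_trajectory_4_8.pkl',
#  'edge_id_to_trajectory_8_9.pkl']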
Example #8
def Get(_):
    """Gets the map tile by tile from south to north, then west to east."""
    coordinate_list = [float(coordinate) for coordinate in FLAGS.bbox]
    south, west, north, east = coordinate_list
    logging.info("Boundaries: south %s, west %s, north %s, east %s.", south,
                 west, north, east)
    if south > north or west > east:
        raise flags.Error("""Invalid geocoordinates in bbox.
        Make sure the coordinates are in order: south, west, north, east.""")

    north_south_span = north - south
    east_west_span = east - west
    num_batches_north_south = max(
        int(round(north_south_span / FLAGS.batch_size)), 1)
    num_batches_east_west = max(
        int(round(east_west_span / FLAGS.batch_size)), 1)
    batch_step_size_north_south = north_south_span / num_batches_north_south
    batch_step_size_east_west = east_west_span / num_batches_east_west
    batch_margin_size_north_south = (batch_step_size_north_south *
                                     FLAGS.overlap_ratio)
    batch_margin_size_east_west = (batch_step_size_east_west *
                                   FLAGS.overlap_ratio)

    logging.info("Map span [degree]: NS(%s), EW(%s).", north_south_span,
                 east_west_span)
    logging.info("Number of steps: NS(%s) X EW(%s).", num_batches_north_south,
                 num_batches_east_west)
    logging.info("Map batch size: NS(%s) X EW(%s).",
                 batch_step_size_north_south, batch_step_size_east_west)
    logging.info("Overlap margin size [degree]: NS(%s) X EW(%s).",
                 batch_margin_size_north_south, batch_margin_size_east_west)

    bottom = south
    left = west
    for north_south_batch_index in range(num_batches_north_south):
        for east_west_batch_index in range(num_batches_east_west):
            top = bottom + batch_step_size_north_south
            right = left + batch_step_size_east_west

            query_info = '<bbox-query n="%s" s="%s" w="%s" e="%s"/>' % (
                top + batch_margin_size_north_south,
                bottom - batch_margin_size_north_south,
                left - batch_margin_size_east_west,
                right + batch_margin_size_east_west)
            filename = "%s%s-%s_%sx%s.osm.xml" % (
                FLAGS.prefix, north_south_bathc_index, east_west_batch_index,
                num_batches_north_south, num_batches_east_west)
            logging.info(query_info)
            if FLAGS.output_dir is None:
                output_dir = os.getcwd()
            else:
                output_dir = FLAGS.output_dir
            filepath = os.path.join(output_dir, filename)
            try:
                if file_util.f_exists(filepath):
                    logging.warning("File already downloaded.")
                    file_exists = True
                else:
                    file_exists = False
            except file_util.FileIOError:
                logging.warning("Error from file operation.")
                file_exists = False

            if (FLAGS.store_files and (not file_exists or FLAGS.force_download)
                    and ReadCompressed(query_info, filepath) == -1):
                return
            left += batch_step_size_east_west

        bottom += batch_step_size_north_south
        left = west
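As a standalone illustration of the tiling logic above: the bounding box is split into roughly batch_size-degree steps, and each Overpass bbox query is padded by the overlap margin. A sketch with hypothetical coordinates standing in for the flag values:

# Hypothetical inputs standing in for FLAGS.bbox, FLAGS.batch_size, and
# FLAGS.overlap_ratio.
south, west, north, east = 37.40, -122.10, 37.44, -122.02
batch_size, overlap_ratio = 0.02, 0.1

num_ns = max(int(round((north - south) / batch_size)), 1)
num_ew = max(int(round((east - west) / batch_size)), 1)
step_ns = (north - south) / num_ns
step_ew = (east - west) / num_ew
margin_ns = step_ns * overlap_ratio
margin_ew = step_ew * overlap_ratio

for i in range(num_ns):
    for j in range(num_ew):
        bottom = south + i * step_ns
        left = west + j * step_ew
        # Each query is expanded by the overlap margin on all sides.
        print('<bbox-query n="%s" s="%s" w="%s" e="%s"/>' % (
            bottom + step_ns + margin_ns, bottom - margin_ns,
            left - margin_ew, left + step_ew + margin_ew))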