def _write_city_notes(self):
    """Write a provenance notes file for this city's extract.

    The file records the download date, the GTFS source URL(s) for each
    feed, the extract's timezone, and any per-city free-form notes from
    CITY_ID_TO_NOTES_STR.
    """
    # Load the source catalogue once, outside the per-feed loop
    # (was re-opened and re-parsed for every feed, leaking file handles).
    # safe_load: the catalogue is plain data, no need for arbitrary-object YAML.
    with open("../gtfs-sources.yaml") as sources_f:
        data = yaml.safe_load(sources_f)
    with open(self.notes_fname, 'w') as f:
        to_write_str = "Original data downloaded on " + self.download_date + " from:\n"
        for feed in self.feeds:
            feed = str(feed).replace("_manually_expanded_gtfs", "")
            url = data['sites'][feed]['gtfs']
            if isinstance(url, dict):
                # A feed may be split into several sub-feeds, each with its own URL.
                for subfeed, path_to_gtfs in url.items():
                    to_write_str += " " + feed + "," + subfeed + ": " + path_to_gtfs + "\n"
            else:
                to_write_str += " " + feed + ": " + url + "\n"
        G = GTFS(self.week_db_path)
        timezone_name = G.get_timezone_name()
        timezone_str = G.get_timezone_string()
        to_write_str += "Extract timezone: " + timezone_name + " (" + timezone_str + ")\n"
        to_write_str += CITY_ID_TO_NOTES_STR[self.city_id]
        print(to_write_str)
        f.write(to_write_str)
class AllToAllRoutingPipeline:
    """Pipeline computing all-to-all public-transport journey profiles for one feed.

    Results are either pickled per-target to ``pickle_dir`` (when the
    module-level PICKLE switch is on) or imported directly into a journey
    database through a JourneyDataManager.
    """

    def __init__(self, feed_dict, routing_params):
        """
        Parameters
        ----------
        feed_dict : dict
            Feed configuration; must provide the keys read below
            (gtfs_dir, journey_dir, day/routing/analysis time bounds, pickle_dir).
        routing_params : dict
            Routing parameters forwarded to JourneyDataManager.
        """
        self.pickle = PICKLE  # module-level switch: pickle results vs. write to DB
        self.gtfs_dir = feed_dict["gtfs_dir"]
        self.G = GTFS(feed_dict["gtfs_dir"])
        self.tz = self.G.get_timezone_name()
        self.journey_dir = feed_dict["journey_dir"]
        self.day_start = feed_dict["day_start"]
        self.day_end = feed_dict["day_end"]
        self.routing_start_time = feed_dict["routing_start_time"]
        self.routing_end_time = feed_dict["routing_end_time"]
        self.analysis_start_time = feed_dict["analysis_start_time"]
        self.analysis_end_time = feed_dict["analysis_end_time"]
        self.pickle_dir = feed_dict["pickle_dir"]
        self.routing_params = routing_params
        self.jdm = None
        if not self.pickle:
            # Direct-to-DB mode: the manager targets the module-level work/results
            # paths, not this feed's own gtfs_dir/journey_dir.
            self.jdm = JourneyDataManager(os.path.join(GTFS_DB_WORK_DIR, GTFS_DB_FNAME),
                                          journey_db_path=os.path.join(RESULTS_DIR, JOURNEY_DB_FNAME),
                                          routing_params=self.routing_params,
                                          track_vehicle_legs=TRACK_VEHICLE_LEGS,
                                          track_route=TRACK_ROUTE)

    def get_all_events(self):
        """Return (walk_network, connections) for the routing time window.

        Connections are the feed's routable transit events; the walk network
        links stops within CUTOFF_DISTANCE of each other.
        """
        print("Retrieving transit events")
        connections = [
            Connection(int(e.from_stop_I), int(e.to_stop_I),
                       int(e.dep_time_ut), int(e.arr_time_ut),
                       int(e.trip_I), int(e.seq))
            for e in self.G.generate_routable_transit_events(
                start_time_ut=self.routing_start_time,
                end_time_ut=self.routing_end_time)
        ]
        # Sanity check: no duplicate scheduled events.
        assert (len(connections) == len(set(connections)))
        print("scheduled events:", len(connections))
        print("Retrieving walking network")
        net = walk_transfer_stop_to_stop_network(self.G, max_link_distance=CUTOFF_DISTANCE)
        print("net edges: ", len(net.edges()))
        return net, connections

    def _route_targets(self, targets, slurm_array_i, verbose_target=False):
        """Run the CSA profiler for each target and store/import its profiles.

        The profiler is constructed once and reset between targets, which is
        much cheaper than rebuilding it per target. `verbose_target` only
        changes the per-target print format.
        """
        net, connections = self.get_all_events()
        csp = None
        for target in targets:
            if verbose_target:
                print("target: ", target)
            else:
                print(target)
            if csp is None:
                csp = MultiObjectivePseudoCSAProfiler(connections, target,
                                                      walk_network=net,
                                                      end_time_ut=self.routing_end_time,
                                                      transfer_margin=TRANSFER_MARGIN,
                                                      start_time_ut=self.routing_start_time,
                                                      walk_speed=WALK_SPEED,
                                                      verbose=True,
                                                      track_vehicle_legs=TRACK_VEHICLE_LEGS,
                                                      track_time=TRACK_TIME,
                                                      track_route=TRACK_ROUTE)
            else:
                csp.reset([target])
            csp.run()
            profiles = dict(csp.stop_profiles)
            if self.pickle:
                self._pickle_results(profiles, slurm_array_i, target)
            else:
                self.jdm.import_journey_data_for_target_stop(target, profiles)
            # Release the (potentially huge) profile dict before the next target.
            profiles = None
            gc.collect()

    @timeit
    def loop_trough_targets_and_run_routing(self, targets, slurm_array_i):
        """Route each target stop and store the resulting profiles."""
        self._route_targets(targets, slurm_array_i, verbose_target=False)

    @timeit
    def loop_trough_targets_and_run_routing_with_route(self, targets, slurm_array_i):
        """Same as loop_trough_targets_and_run_routing, with labeled prints.

        NOTE(review): both variants pass the module-level TRACK_ROUTE flag to
        the profiler, so route tracking is configured globally, not by the
        choice of method — confirm this is intended.
        """
        self._route_targets(targets, slurm_array_i, verbose_target=True)

    @timeit
    def _pickle_results(self, profiles, pickle_subdir, target):
        """Pickle the final optimal labels of one target stop to pickle_dir/subdir."""
        # makedirs here is the project helper that creates the dir and returns its path.
        pickle_path = makedirs(os.path.join(self.pickle_dir, str(pickle_subdir)))
        pickle_path = os.path.join(pickle_path, str(target) + ".pickle")
        profiles = dict((key, value.get_final_optimal_labels())
                        for (key, value) in profiles.items())
        # was: pickle.dump(profiles, open(pickle_path, 'wb'), -1) — leaked the handle
        with open(pickle_path, 'wb') as f:
            pickle.dump(profiles, f, -1)
        profiles = None
        gc.collect()

    def get_list_of_stops(self, where=''):
        """Return a DataFrame of stop_I values, ordered, optionally filtered.

        `where` is spliced verbatim into the SQL: pass only trusted,
        internally-constructed clauses.
        """
        df = self.G.execute_custom_query_pandas(
            "SELECT stop_I FROM stops " + where + " ORDER BY stop_I")
        return df

    @timeit
    def store_pickle_in_db(self):
        """Import every pickled target profile found under pickle_dir into the journey DB."""
        self.jdm = JourneyDataManager(self.gtfs_dir,
                                      journey_db_path=self.journey_dir,
                                      routing_params=self.routing_params,
                                      track_vehicle_legs=TRACK_VEHICLE_LEGS,
                                      track_route=TRACK_ROUTE)
        for root, dirs, files in os.walk(self.pickle_dir):
            for target_file in files:
                target = target_file.replace(".pickle", "")
                # NOTE(review): `target` is a str here — confirm that
                # get_targets_having_journeys() yields strings, not ints.
                if target not in self.jdm.get_targets_having_journeys():
                    print("target: ", target)
                    # was: pickle.load(open(...)) — leaked the file handle
                    with open(os.path.join(root, target_file), 'rb') as f:
                        profiles = pickle.load(f)
                    self.jdm.import_journey_data_for_target_stop(int(target), profiles)
                else:
                    print("skipping: ", target, " already in db")
        self.jdm.create_indices()

    def _ensure_jdm(self):
        """Lazily create the JourneyDataManager for this feed's journey DB."""
        if not self.jdm:
            self.jdm = JourneyDataManager(self.gtfs_dir,
                                          journey_db_path=self.journey_dir,
                                          routing_params=self.routing_params,
                                          track_vehicle_legs=TRACK_VEHICLE_LEGS,
                                          track_route=TRACK_ROUTE)

    def calculate_additional_columns_for_journey(self):
        """Populate derived journey columns and store travel impedance measures."""
        self._ensure_jdm()
        self.jdm.populate_additional_journey_columns()
        self.jdm.compute_and_store_travel_impedance_measures(self.analysis_start_time,
                                                            self.analysis_end_time,
                                                            TRAVEL_IMPEDANCE_STORE_PATH)

    def calculate_comparison_measures(self):
        """Initialize before/after comparison tables from consecutive FEED_LIST pairs.

        NOTE(review): only the last consecutive pair in FEED_LIST survives into
        before_db_tuple/after_db_tuple — confirm FEED_LIST holds exactly one
        before/after pair when this is called.
        """
        self._ensure_jdm()
        prev_dict = None
        prev_key = None
        before_db_tuple = None
        after_db_tuple = None
        for (key, feed_dict) in FEED_LIST:
            if prev_dict:
                # Ordering by feed_seq: the lower feed_seq is treated as "after"
                # (preserved from the original logic — verify against callers).
                if feed_dict["feed_seq"] < prev_dict["feed_seq"]:
                    after_db_tuple = (feed_dict["journey_dir"], key)
                    before_db_tuple = (prev_dict["journey_dir"], prev_key)
                else:
                    before_db_tuple = (feed_dict["journey_dir"], key)
                    after_db_tuple = (prev_dict["journey_dir"], prev_key)
            prev_dict = feed_dict
            prev_key = key
        self.jdm.initialize_comparison_tables(DIFF_PATH, before_db_tuple, after_db_tuple)