def test_build_mega_stops(self):
    """Two stops grouped together should collapse into a single MegaStop
    carrying id "M0" and the coordinates asserted below."""
    stops = [Stop(1, 0, 90), Stop(2, 10, 45)]
    groups = {0: [0, 1]}
    factory = MegaStopFac(0)
    mega = factory.build_mega_stops(groups, stops)[0]
    self.assertEqual(mega.id, "M0")
    self.assertEqual(mega.lat, 0)
    self.assertEqual(mega.lon, 90)
def get_rail_stops(self, train_dict):
    """
    Merge every train route's stops into one set of rail mega-stops.

    :param train_dict: mapping of train route id -> stop collection;
        assumed non-empty (an empty dict raises IndexError below).
    :return: mega stops produced by MegaStopFac.get_train_mega_stops
    """
    train_routes = list(train_dict.keys())
    # NOTE(review): radius 700 is hard-coded here instead of reusing a
    # configured value from this object -- confirm that is intentional.
    mega_fac = MegaStopFac(700)
    curr = train_dict[train_routes[0]]
    # Fold each remaining route's stops into the running accumulation.
    for r in train_routes[1:]:
        curr = mega_fac.get_train_mega_stops(curr, train_dict[r])
    else:
        # `for ... else` with no `break` always executes; this is
        # equivalent to plain code placed after the loop.
        # Re-merge the accumulated stops against themselves a fixed
        # number of times so nearby stops keep collapsing.
        for i in range(4):
            inbound, outbound = self.duplicate(curr)
            curr = mega_fac.get_train_mega_stops(inbound, outbound)
    # NOTE(review): this re-runs the final merge instead of returning
    # `curr`; if get_train_mega_stops has side effects (e.g. an id
    # counter on the factory), the extra call is not a no-op -- confirm
    # whether `return curr` was intended.
    return mega_fac.get_train_mega_stops(inbound, outbound)
def preprocess_gtsf(self, day):
    """
    Build the daily schedule tables and derive mega-stops per route.

    :param day: datetime of the service day to build the tables for
    :return: dict mapping route id -> mega stops, plus a "RAIL" entry
    """
    self.MegaStopFactory = MegaStopFac(700)
    self.scheduler = schedule.ScheduleMaker(
        self.gtsf['trips'],
        self.gtsf['calendar'],
        self.gtsf['stop_times'],
        self.gtsf['stops'],
        self.gtsf['routes'],
    )
    self.scheduler.build_daily_table(day)
    routes, train_dict = self.scheduler.get_routes()
    # One mega-stop set per bus route, built from its two directions.
    route_ms = {
        name: self.MegaStopFactory.get_mega_stops(pair[0], pair[1])
        for name, pair in routes.items()
    }
    # Rail stops are numbered after the bus mega-stops, hence the count offset.
    rail_factory = RailStopFac(700, self.MegaStopFactory.count)
    route_ms["RAIL"] = rail_factory.get_rail_stops(train_dict)
    self.megas = route_ms
    return route_ms
def test_build_ball_tree(self):
    """Smoke test: loading and preprocessing the GTSF feed for a fixed
    service day should complete without raising."""
    odx = ODX(0, 1)
    odx.load_gtsf()
    service_day = dt.datetime.strptime("01/30/18 00:00", "%m/%d/%y %H:%M")
    odx.preprocess_gtsf(service_day)
    fac = MegaStopFac(700)
    # TODO(review): this test makes no assertions about the ball tree.
def test_process_query_results(self):
    """Distances should be replaced by the factory radius R while the
    match indices are flattened through unchanged."""
    factory = MegaStopFac(100)
    query_result = ([[1], [1], [1]], [[1], [2], [3]])
    dist, ind = factory.process_query_results(query_result)
    self.assertEqual(dist, [factory.R] * 3)
    self.assertEqual(ind, [1, 2, 3])
def test_union_find(self):
    """Mutual partners collapse to one root; an identity mapping is
    returned unchanged."""
    factory = MegaStopFac(0)
    self.assertListEqual(factory.union_find([0, 1, 3, 2]), [0, 1, 2, 2])
    identity = [0, 1, 2, 3]
    self.assertListEqual(factory.union_find(identity), identity)
def test_stop_2_tup(self):
    """A stop at (lat=0, lon=90) should convert to radians: (0, ~pi/2)."""
    factory = MegaStopFac(0)
    converted = factory.stop_2_tup([Stop(1, 0, 90)])[0]
    self.assertAlmostEqual(converted[0], 0)
    self.assertAlmostEqual(converted[1], 1.5707, places=3)
def test_get_groups(self):
    """get_groups should bucket stop indices by their union-find root."""
    factory = MegaStopFac(0)
    roots = factory.union_find([0, 1, 3, 2])
    expected = {0: [0], 1: [1], 2: [2, 3]}
    self.assertDictEqual(factory.get_groups(roots), expected)
def test_correct_outbound_matches(self):
    """Pairs beyond the radius (dist > R) receive fresh ids offset by the
    stop count; in-radius pairs keep their matched partner.

    NOTE(review): `correct_outboud_matches` is the (misspelled) name of
    the production API; renaming must happen there first.
    """
    factory = MegaStopFac(10)
    distances = [20, 20, 4, 4]
    matches = [1, 0, 3, 2]
    corrected = factory.correct_outboud_matches(distances, matches, 4)
    self.assertEqual([4, 5, 3, 2], corrected)
def test_correct_inbound_matches(self):
    """Exercise correct_inbound_matches with a mix of in-radius and
    out-of-radius pairs and pin the expected id assignment."""
    factory = MegaStopFac(10)
    distances = [20, 20, 4, 4]
    matches = [1, 0, 3, 2]
    corrected = factory.correct_inbound_matches(distances, matches)
    self.assertEqual([0, 1, 7, 6], corrected)
class ODX:
    """
    This is the odx class which has the main
    orchestration: it loads GTSF/APC/Breeze data and preprocesses it into
    mega-stops and a network for the configured analysis period.
    """

    def __init__(self, start, end, **kwargs):
        """
        :param start: string, with time of the period that starts
        :param end: string, with time of the period that starts
        :param kwargs:
        """
        # NOTE(review): `start` and `end` are ignored; the period is
        # hard-coded to 01/30/18 - 01/31/18. Existing callers pass
        # non-date values (e.g. ODX(0, 1)), so confirm before parsing them.
        self.start = dt.datetime.strptime("01/30/18 00:00", "%m/%d/%y %H:%M")
        self.end = dt.datetime.strptime("01/31/18 00:00", "%m/%d/%y %H:%M")
        # NOTE(review): realpath('__file__') resolves the *literal string*
        # '__file__' against the current working directory, not this
        # module's location -- probably meant os.path.realpath(__file__).
        fileDir = os.path.realpath('__file__').split('/code')[0]
        self.data_path = os.path.join(fileDir, 'Data')
        # Populated by preprocess_gtsf: route id -> mega stops.
        self.megas = None

    def load_gtsf(self):
        """
        build a documents search tree for this so we can get the correct
        days data

        function loads all of the gtsf tables Exclusively
        :param gtsf_path: path to the gtsf
        :return:
        """
        gtsf_path = os.path.join(self.data_path, 'gtsf')
        trips = pd.read_csv(os.path.join(gtsf_path, 'trips.txt'))
        stops = pd.read_csv(os.path.join(gtsf_path, 'stops.txt'))
        stop_times = pd.read_csv(os.path.join(gtsf_path, "stop_times.txt"))
        routes = pd.read_csv(os.path.join(gtsf_path, 'routes.txt'))
        cal = pd.read_csv(os.path.join(gtsf_path, 'calendar.txt'))
        self.gtsf = {
            "trips": trips,
            'stops': stops,
            "stop_times": stop_times,
            "routes": routes,
            'calendar': cal
        }

    def load_apc(self, apc_path):
        """
        Need to build a script to break these apart and store in seperate
        data buckets. Need to implement the search tree to find the given
        file containing the precompiled daily data
        @document apc function
        @test functionality
        :param apc_path: currently ignored -- the path is hard-coded to
            Data/apc.pick (NOTE(review): confirm the parameter should be used)
        :return:
        """
        self.apc = pd.read_pickle(os.path.join(self.data_path, 'apc.pick'))

    def load_breeze(self, breeze_path):
        """
        Need to build a script to break these apart and store in seperate
        data buckets.
        Need to implement the search tree to find the given file containing
        the precompiled daily data
        @document breeze function
        @test functionality
        :param breeze_path: currently ignored -- the path is hard-coded to
            Data/breeze.pick (NOTE(review): confirm the parameter should be used)
        :return:
        """
        self.breeze = pd.read_pickle(
            os.path.join(self.data_path, 'breeze.pick'))

    def preprocess_gtsf(self, day):
        """
        Build the daily schedule tables and derive mega-stops per route.
        @document
        :param day: datetime of the service day to build the tables for
        :return: dict mapping route id -> mega stops, plus a "RAIL" entry
        """
        self.MegaStopFactory = MegaStopFac(700)
        self.scheduler = schedule.ScheduleMaker(self.gtsf['trips'],
                                                self.gtsf['calendar'],
                                                self.gtsf['stop_times'],
                                                self.gtsf['stops'],
                                                self.gtsf['routes'])
        self.scheduler.build_daily_table(day)
        routes, train_dict = self.scheduler.get_routes()
        route_ms = {}
        # One mega-stop set per bus route, built from its two directions.
        for route in routes.keys():
            _ = self.MegaStopFactory.get_mega_stops(routes[route][0],
                                                    routes[route][1])
            route_ms[route] = _
        # Rail stops are numbered after the bus mega-stops (count offset).
        rsf = RailStopFac(700, self.MegaStopFactory.count)
        route_ms["RAIL"] = rsf.get_rail_stops(train_dict)
        self.megas = route_ms
        return route_ms

    def export_megas(self, path_out):
        """
        This file exports the megastops to a specific file for analysis
        :param path_out: destination CSV path (overwritten if it exists)
        :return:
        """
        with open(path_out, 'w') as fout:
            import csv
            writer = csv.writer(fout)
            writer.writerow(["ROUTE", 'MEGA_STOP_ID', "LAT", "LON"])
            # One row per mega stop, prefixed with its route id.
            for route, mega in self.megas.items():
                for stop in mega:
                    writer.writerow([route] + list(stop.to_csv()))

    def build_network(self, trans_limit, id=1):
        """
        Build the transit network from the mega stops (no-op until
        preprocess_gtsf has populated self.megas).
        :param trans_limit: NOTE(review): currently unused -- the radius
            700 is hard-coded below; confirm which was intended.
        :param id: network identifier (NOTE(review): shadows the builtin)
        :return:
        """
        if self.megas is not None:
            self.network = Network(self.megas, id, 700)

    def preprocess_apc(self, day):
        """
        @create function to preprocess apc data
        @document
        @test
        :param day:
        :return:
        """
        pass

    def preprocess_breeze(self, day):
        """
        @create function to preprocess apc data
        @document
        @test
        :param day:
        :return:
        """
        pass

    def trip_chain(self):
        """
        @create function to preprocess apc data
        @document
        @test
        :return:
        """
        pass

    def __call__(self):
        """
        This funciton is not yet implemented.
        :return:
        """
        pass