def _weather(depart_time, r):
    """
    :type depart_time: datetime.datetime
    :type r: pyticas.ttypes.Route
    :return: weather category for the hour before `depart_time`
        (one of WC_RAIN, WC_SNOW and WC_NORMAL)
    """
    from_time = depart_time - datetime.timedelta(hours=1)
    prd = period.Period(from_time, depart_time, interval=300)

    # use the RWIS site nearest to the middle station of the route
    stations = r.get_stations()
    n_stations = len(stations)
    center_station = stations[int(n_stations / 2)]
    sites = rwis.find_nearby_sites(center_station.lat, center_station.lon)
    nearby_site = sites[0]
    getLogger(__name__).debug(
        ' - RWIS station : site_id=%d, lat=%f, lon=%f, distance=%f'
        % (nearby_site.site_id, nearby_site.lat, nearby_site.lon,
           nearby_site.distance_to_target))

    wd = rwis.get_weather(nearby_site, prd)
    surface_status = wd.get_surface_statuses()
    if wd.is_rain(0.5) or (surface_status and wd._contains(
            ['rain', 'wet'], surface_status[-1], ['freezing'])):
        return WC_RAIN
    elif wd.is_snow(0.5) or (surface_status and wd._contains(
            ['snow', 'slush', 'ice', 'chemical wet', 'freezing'],
            surface_status[-1], ['freezing'])):
        return WC_SNOW
    else:
        return WC_NORMAL

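# Hypothetical usage sketch for _weather(): classify the hour leading up to a
# departure into WC_RAIN / WC_SNOW / WC_NORMAL using the RWIS site nearest the
# route's middle station. `r` is a pyticas.ttypes.Route obtained elsewhere;
# the timestamp is illustrative.
#
#     depart = datetime.datetime(2020, 1, 15, 8, 0)
#     weather_category = _weather(depart, r)
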
def _handler_ttroute(da, item, action_log):
    """
    :type da: pyticas_tetres.da.route.TTRouteDataAccess
    :type item: pyticas_tetres.ttypes.TTRouteInfo
    :type action_log: pyticas_tetres.ttypes.ActionLogInfo
    """
    # 1. calculate travel time
    # 2. categorize (all)
    try:
        from pyticas_tetres.util.traffic_file_checker import has_traffic_files
        start = datetime.date(cfg.DATA_ARCHIVE_START_YEAR, 1, 1)
        last_day = datetime.date.today() - datetime.timedelta(days=cfg.DAILY_JOB_OFFSET_DAYS)
        start_date_str, end_date_str = start.strftime('%Y-%m-%d'), last_day.strftime('%Y-%m-%d')
        if not has_traffic_files(start_date_str, end_date_str):
            return False, "Missing traffic files for the given time range from {} to {}.".format(
                start_date_str, end_date_str)
    except Exception as e:
        getLogger(__name__).warning(
            'Exception occurred while checking if traffic files exist '
            'during handling travel time routes. Error: {}'.format(e))

    daily_periods = _get_all_daily_periods()
    cnt = 0
    for prd in daily_periods:
        try:
            inserted_ids = traveltime.calculate_a_route(prd, item)
            if inserted_ids:
                categorization.categorize(item, prd)
                cnt += len(inserted_ids)
        except Exception as ex:
            getLogger(__name__).warning(
                'Exception occurred when handling route changes : %s'
                % tb.traceback(ex, f_print=False))
    return cnt > 0

def _handler_specialevent(da, item, action_log):
    """
    :type da: pyticas_tetres.da.specialevent.SpecialEventDataAccess
    :type item: pyticas_tetres.ttypes.SpecialEventInfo
    :type action_log: pyticas_tetres.ttypes.ActionLogInfo
    """
    sdt = item.str2datetime(item.start_time)
    edt = item.str2datetime(item.end_time)
    # widen the event window by the arrival/departure buffers
    sdt = sdt - datetime.timedelta(minutes=SE_ARRIVAL_WINDOW)
    edt = edt + datetime.timedelta(minutes=SE_DEPARTURE_WINDOW2)
    periods = _get_daily_periods(sdt, edt)
    try:
        for prd in periods:
            _categorize_for_a_day(prd, categorization.specialevent, specialevents=[item])
        return True
    except Exception as ex:
        getLogger(__name__).warning(
            'Exception occurred when handling specialevent changes : %s'
            % tb.traceback(ex, f_print=False))
        return False

def _handler_ttroute(da, item, action_log):
    """
    :type da: pyticas_tetres.da.route.TTRouteDataAccess
    :type item: pyticas_tetres.ttypes.TTRouteInfo
    :type action_log: pyticas_tetres.ttypes.ActionLogInfo
    """
    # 1. calculate travel time
    # 2. categorize (all)
    # faverolles 1/16/2020 NOTE: always starts at datetime.today
    daily_periods = _get_all_daily_periods()
    cnt = 0
    try:
        for prd in daily_periods:
            inserted_ids = traveltime.calculate_a_route(prd, item)
            if inserted_ids:
                categorization.categorize(item, prd)
            if inserted_ids is not False:
                cnt += len(inserted_ids)
        return cnt > 0
    except Exception as ex:
        getLogger(__name__).warning(
            'Exception occurred when handling route changes : %s'
            % tb.traceback(ex, f_print=False))
        return False

def categorize(ttri, prd, ttdata, **kwargs):
    """
    :type ttri: pyticas_tetres.ttypes.TTRouteInfo
    :type prd: pyticas.ttypes.Period
    :type ttdata: list[pyticas_tetres.ttypes.TravelTimeInfo]
    :return: number of inserted rows, or -1 on failure
    :rtype: int
    """
    lock = kwargs.get('lock', nonop_with())
    given_seis = kwargs.get('specialevents', None)
    seis = given_seis or se_helper.find_specialevents(
        prd, SE_ARRIVAL_WINDOW, SE_DEPARTURE_WINDOW1, SE_DEPARTURE_WINDOW2)

    # pair each special event with its minimum distance to the route
    specialevents = []
    for sei in seis:
        distance = loc.minimum_distance(ttri.route, float(sei.lat), float(sei.lon))
        specialevents.append((sei, distance))

    year = prd.start_date.year
    ttseDA = TTSpecialeventDataAccess(year)

    # delete first to avoid saving duplicated data
    with lock:
        is_deleted = ttseDA.delete_range(ttri.id, prd.start_date, prd.end_date,
                                         item_ids=[v.id for v in seis])
        if not is_deleted or not ttseDA.commit():
            ttseDA.rollback()
            ttseDA.close_session()
            getLogger(__name__).debug('! specialevent.categorize(): failed to delete existing data')
            return -1

    dict_data = []
    for idx, tti in enumerate(ttdata):
        seis = _find_ses(specialevents, tti.str2datetime(tti.time))
        for (sei, distance, event_type) in seis:
            dict_data.append({
                'tt_id': tti.id,
                'specialevent_id': sei.id,
                'distance': distance,
                'event_type': event_type
            })

    if dict_data:
        with lock:
            inserted_ids = ttseDA.bulk_insert(dict_data, print_exception=True)
            if not inserted_ids or not ttseDA.commit():
                ttseDA.rollback()
                ttseDA.close_session()
                getLogger(__name__).warning('! specialevent.categorize(): failed to insert categorized data')
                return -1

    ttseDA.close_session()
    return len(dict_data)

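# The `lock` keyword above defaults to nonop_with(), a no-op context manager,
# so the same code path runs with or without multi-process synchronization.
# A minimal sketch of such a no-op helper (the real one lives in the
# pyticas_tetres utilities; this is an assumption about its shape):
#
#     import contextlib
#
#     @contextlib.contextmanager
#     def nonop_with():
#         yield  # no locking; stands in for a real multiprocessing.Lock
#
# Multi-process callers pass a real lock instead, e.g.:
#
#     categorize(ttri, prd, tt_data_list, lock=multiprocessing.Lock())
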
def run(**kwargs):
    try:
        _01_check_tt_data.run()
    except Exception as ex:
        tb.traceback(ex)
        getLogger(__name__).warning('Exception occurred while performing monthly task')

def run(**kwargs):
    today = datetime.datetime.today()
    target_day = today - datetime.timedelta(days=cfg.DAILY_JOB_OFFSET_DAYS)
    try:
        _01_pre_calculation_of_tod_ttr.run(target_day)
    except Exception as ex:
        tb.traceback(ex)
        getLogger(__name__).warning('Exception occurred while performing weekly task')

def categorize(ttri, prd, ttdata, **kwargs):
    """
    :type ttri: pyticas_tetres.ttypes.TTRouteInfo
    :type prd: pyticas.ttypes.Period
    :type ttdata: list[pyticas_tetres.ttypes.TravelTimeInfo]
    :rtype: int
    """
    lock = kwargs.get('lock', nonop_with())
    given_incidents = kwargs.get('incidents', None)
    all_incidents = given_incidents or ihelper.find_incidents(ttri.corridors()[0], prd)
    route_length = ttri.route.length()

    # keep incidents within the distance limits around the route;
    # `off_distance` is how far an incident lies outside the route itself
    incd_locations = []
    for incd in all_incidents:
        distance = loc.location_by_coordinate(ttri.route, incd.lat, incd.lon)
        if distance is not False:
            if (distance < -INCIDENT_UPSTREAM_DISTANCE_LIMIT
                    or distance > route_length + INCIDENT_DOWNSTREAM_DISTANCE_LIMIT):
                continue
            off_distance = distance if distance < 0 else max(0, distance - route_length)
            incd_locations.append((distance, off_distance, incd))

    year = prd.start_date.year
    ttincident_da = TTIncidentDataAccess(year)

    # delete first to avoid saving duplicated data
    with lock:
        is_deleted = ttincident_da.delete_range(ttri.id, prd.start_date, prd.end_date,
                                                item_ids=[v.id for v in all_incidents])
        if not is_deleted or not ttincident_da.commit():
            ttincident_da.rollback()
            ttincident_da.close_session()
            getLogger(__name__).warning('! incident.categorize(): failed to delete existing data')
            return -1

    dict_data = []
    for idx, tti in enumerate(ttdata):
        incds = _find_incident(incd_locations, tti.str2datetime(tti.time))
        for (dist, off_dist, incd) in incds:
            dict_data.append({
                'tt_id': tti.id,
                'incident_id': incd.id,
                'distance': dist,
                'off_distance': off_dist
            })

    if dict_data:
        with lock:
            inserted_ids = ttincident_da.bulk_insert(dict_data)
            if not inserted_ids or not ttincident_da.commit():
                ttincident_da.rollback()
                ttincident_da.close_session()
                getLogger(__name__).warning('! incident.categorize(): failed to insert categorization data')
                return -1

    ttincident_da.close_session()
    return len(dict_data)

def categorize(ttri, prd, ttdata, **kwargs):
    """
    :type ttri: pyticas_tetres.ttypes.TTRouteInfo
    :type prd: pyticas.ttypes.Period
    :type ttdata: list[pyticas_tetres.ttypes.TravelTimeInfo]
    :return: number of inserted rows, or -1 on failure
    :rtype: int
    """
    lock = kwargs.get('lock', nonop_with())
    snmDA = SnowMgmtDataAccess()
    ttsnmDA = TTSnowManagementDataAccess(prd.start_date.year, session=snmDA.get_session())
    given_snowmgmts = kwargs.get('snowmgmts', None)
    snowmgmts = given_snowmgmts or snmDA.list_by_period(
        prd.start_date, prd.end_date, set_related_model_info=True)
    snmis = _decide_location(ttri, snowmgmts)

    # delete first to avoid saving duplicated data
    with lock:
        is_deleted = ttsnmDA.delete_range(ttri.id, prd.start_date, prd.end_date,
                                          item_ids=[v.id for v in snowmgmts])
        if not is_deleted or not ttsnmDA.commit():
            ttsnmDA.rollback()
            ttsnmDA.close_session()
            getLogger(__name__).warning('! snowmgmt.categorize(): failed to delete existing data')
            return -1

    dict_data = []
    for idx, tti in enumerate(ttdata):
        dt = tti.str2datetime(tti.time)
        _snmis = _find_snowmgmts(snmis, dt)
        for (loc_type, distance, off_distance, snmi, r) in _snmis:
            dict_data.append({
                'tt_id': tti.id,
                'snowmgmt_id': snmi.id,
                'loc_type': loc_type.value,
                'distance': distance,
                'off_distance': off_distance,
                'road_status': -1,
            })

    if dict_data:
        with lock:
            inserted_ids = ttsnmDA.bulk_insert(dict_data, print_exception=True)
            if not inserted_ids or not ttsnmDA.commit():
                ttsnmDA.rollback()
                ttsnmDA.close_session()
                getLogger(__name__).warning('! snowmgmt.categorize(): failed to insert categorized data')
                return -1

    ttsnmDA.close_session()
    return len(dict_data)

def write(uid, eparam, operating_conditions, whole, yearly, monthly, daily):
    """
    :type uid: str
    :type eparam: pyticas_tetres.ttypes.EstimationRequestInfo
    :type operating_conditions: list[pyticas_tetres.rengine.filter.ftypes.ExtFilterGroup]
    :type whole: list[dict]
    :type yearly: list[(list[dict], list[int])]
    :type monthly: list[(list[dict], list[[int, int]])]
    :type daily: list[(list[dict], list[datetime.date])]
    """
    output_dir = util.output_path(
        '%s/%s - %s' % (uid, eparam.travel_time_route.corridor, eparam.travel_time_route.name))

    # result file
    output_file = os.path.join(output_dir, 'reliabilities-by-indices (whole-time-period).xlsx')
    wb = xlsxwriter.Workbook(output_file)

    try:
        report_helper.write_operating_condition_info_sheet(eparam, wb)
    except Exception as ex:
        getLogger(__name__).warning('Exception occurred when writing data table : %s'
                                    % tb.traceback(ex, f_print=False))
    try:
        _write_whole_result_sheet(eparam, operating_conditions, wb, whole)
    except Exception as ex:
        getLogger(__name__).warning('Exception occurred when writing data table : %s'
                                    % tb.traceback(ex, f_print=False))
    if yearly:
        try:
            _write_yearly_result_sheet(eparam, operating_conditions, wb, yearly)
        except Exception as ex:
            getLogger(__name__).warning('Exception occurred when writing data table : %s'
                                        % tb.traceback(ex, f_print=False))
    if monthly:
        try:
            _write_monthly_result_sheet(eparam, operating_conditions, wb, monthly)
        except Exception as ex:
            getLogger(__name__).warning('Exception occurred when writing data table : %s'
                                        % tb.traceback(ex, f_print=False))
    if daily:
        try:
            _write_daily_result_sheet(eparam, operating_conditions, wb, daily)
        except Exception as ex:
            getLogger(__name__).warning('Exception occurred when writing data table : %s'
                                        % tb.traceback(ex, f_print=False))
    wb.close()

def _retrieve_data_from_db(route_id, operating_conditions, sdate, edate,
                           start_time, end_time, target_days, remove_holiday, **kwargs):
    """
    :type route_id: int
    :type operating_conditions: list[pyticas_tetres.rengine.filter.ExtFilterGroup]
    :type sdate: datetime.datetime
    :type edate: datetime.datetime
    :type start_time: datetime.time
    :type end_time: datetime.time
    :type target_days: list[int]
    :type remove_holiday: bool
    """
    prd = period.Period(sdate, edate, cfg.TT_DATA_INTERVAL)
    year = sdate.year
    da_tt = tt.TravelTimeDataAccess(year)

    # generator
    traveltimes = da_tt.list_by_period(route_id, prd,
                                       start_time=start_time, end_time=end_time,
                                       weekdays=target_days, as_model=True)
    """:type: list[pyticas_tetres.db.model.TravelTime] """

    for ttm in traveltimes:
        dt = str2datetime(ttm.time)
        if remove_holiday and period.is_holiday(dt.date()):
            continue
        _tt_weathers = list(ttm._tt_weathers)
        _tt_incidents = list(ttm._tt_incidents)
        _tt_workzones = list(ttm._tt_workzones)
        _tt_specialevents = list(ttm._tt_specialevents)
        _tt_snowmanagements = list(ttm._tt_snowmanagements)
        if not _tt_weathers:
            getLogger(__name__).warning('No weather data for route(%d) at %s'
                                        % (route_id, dt.strftime('%Y-%m-%d %H:%M')))
            continue
        extdata = ExtData(ttm, _tt_weathers[0], _tt_incidents, _tt_workzones,
                          _tt_specialevents, _tt_snowmanagements)
        # each operating condition checks the extended data independently
        for fidx, ef in enumerate(operating_conditions):
            try:
                ef.check(extdata)
            except Exception as ex:
                tb.traceback(ex)
                continue

def calculate_uvmt_dynamically(data, interval, critical_density, lane_capacity):
    try:
        vd = moe_helper.VIRTUAL_RNODE_DISTANCE
        seconds_per_hour = 3600
        density_data = data['density']
        flow_data = data['flow']
        lane_data = data['lanes']
        uvmt_data = []
        for flow, density, lanes in zip(flow_data, density_data, lane_data):
            if density <= critical_density:
                # unused capacity (veh/hour) converted to vehicle-miles
                # over this interval and virtual-node distance
                uvmt = max(lane_capacity * lanes - flow, 0)
                uvmt = (uvmt * interval / seconds_per_hour * vd)
            else:
                uvmt = 0
            uvmt_data.append(uvmt)
        return sum(uvmt_data)
    except Exception as e:
        from pyticas_tetres.logger import getLogger
        logger = getLogger(__name__)
        logger.warning('failed to calculate UVMT dynamically. Error: {}'.format(e))
        return 0

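# Hypothetical usage sketch for calculate_uvmt_dynamically(). The thresholds
# are illustrative only (45 veh/mile/lane critical density, 1800 veh/hour
# lane capacity); in practice they would come from route-wise MOE parameters.
def _example_uvmt():
    data = {
        'density': [25.0, 38.5, 52.0],  # veh/mile/lane per 5-minute step
        'flow': [1500, 1700, 1900],     # veh/hour per step
        'lanes': [2, 2, 2],             # lane count per step
    }
    # 300-second interval; the last sample is over the critical density,
    # so it contributes 0 to the unused-VMT sum
    return calculate_uvmt_dynamically(data, 300, 45, 1800)
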
def import_yearly_data(year):
    """
    :type year: int
    :rtype: list[dict]
    """
    logger = getLogger(__name__)
    isd_stations = isd.get_station_list(STATE)
    logger.debug('loading NOAA ISD data for %d' % year)
    res = []
    for (usaf, wban) in TARGET_ISD_STATIONS:
        st = _isd_station(usaf, wban, isd_stations)
        logger.debug(' : ISD station : %s-%s, %s (begin=%s, end=%s)'
                     % (st.usaf, st.wban, st.station_name, st.begin, st.end))
        stime = time.time()
        data_list = list(isd.get_year_data(st, year))
        logger.debug(' -> data loaded: elapsed time=%s'
                     % timeutil.human_time(seconds=(time.time() - stime)))
        stime = time.time()
        is_inserted = _insert_noaa_data(year, usaf, wban, data_list)
        logger.debug(' -> data inserted: elapsed time=%s, inserted=%s'
                     % (timeutil.human_time(seconds=(time.time() - stime)), len(data_list)))
        if is_inserted:
            res.append({'usaf': usaf, 'wban': wban, 'loaded': len(data_list)})
        else:
            res.append({'usaf': usaf, 'wban': wban, 'loaded': 0})
    return res

def run(target_date):
    """
    :type target_date: datetime.datetime
    """
    ttr_route_da = TTRouteDataAccess()
    route_list = ttr_route_da.list()
    ttr_route_da.close_session()
    for ttri in route_list:
        try:
            traveltime_info.calculate_TOD_reliabilities(ttri.id, target_date)
        except Exception as ex:
            tb.traceback(ex)
            getLogger(__name__).warning('Failed to calculate TOD reliabilities for route=%d' % ttri.id)

def _worker_process_to_calculate_tod_reliabilities(idx, queue, lck, data_path, db_info):
    import gc
    from pyticas.tool import tb
    from pyticas_tetres.db.tetres import conn
    from pyticas.infra import Infra

    logger = getLogger(__name__)

    # initialize
    logger.debug('[TOD Reliability Worker %d] starting...' % (idx))
    ticas.initialize(data_path)
    Infra.get_infra()
    conn.connect(db_info)
    logger.debug('[TOD Reliability Worker %d] is ready' % (idx))

    while True:
        ttr_id, target_date, num, total = queue.get()
        if target_date is None:
            # sentinel received: terminate the worker process
            exit(1)
        try:
            logger.debug('[TOD Reliability Worker %d] (%d/%d) calculating for route=%s at %s'
                         % (idx, num, total, ttr_id, target_date.strftime('%Y-%m-%d')))
            traveltime_info.calculate_TOD_reliabilities(ttr_id, target_date, lock=lck)
            gc.collect()
        except Exception as ex:
            tb.traceback(ex)
            continue

def register_service(self, app):
    logger = getLogger(__name__)
    logger.info(' - registering {}'.format(self.name))

    from pyticas_tetres.api import tetres
    from pyticas_tetres.api.admin import (snowmgmt, snowevent, wz_group, specialevent, wz,
                                          snowroute, ttroute, actionlog, route, systemconfig,
                                          route_wise_moe_parameters)
    from pyticas_tetres.api.user import traveltime_info
    from pyticas_tetres.api.user import route as user_route
    from pyticas_tetres.api.user import estimation as user_estimation
    from pyticas_tetres.api import data_api
    # faverolles 12/26/2019: Added "api_additions/api_endpoints" to modules to be registered as well
    from api_extension import api_endpoints

    modules = [ttroute, snowevent, snowmgmt, snowroute, specialevent, tetres, wz, wz_group,
               traveltime_info, user_route, user_estimation, data_api, actionlog, route,
               systemconfig, api_endpoints, route_wise_moe_parameters]
    for module in modules:
        module.register_api(app)

def _insert_noaa_data(year, usaf, wban, isd_data_list):
    """ bulk insertion

    **CAUTION**
    `isd_data_list` must be continuous data, because existing data will be deleted,
    and all of it must belong to the same year

    :type year: int
    :type usaf: str
    :type wban: str
    :type isd_data_list: list[pyticas_noaa.isd.isdtypes.ISDData]
    :rtype: bool
    """
    logger = getLogger(__name__)
    if not _delete_existing_noaa_data(usaf, wban, isd_data_list):
        return False
    da = noaaweather.NoaaWeatherDataAccess(year)
    dict_list = [_get_dict(usaf, wban, isd_data) for isd_data in isd_data_list]
    inserted_ids = da.bulk_insert(dict_list, print_exception=True)
    if not inserted_ids or not da.commit():
        logger.warning(' : Exception occurred when inserting NOAA ISD data')
        return False
    da.close_session()
    return True

def _checkup_tt_for_a_route(ttri):
    """
    :type ttri: pyticas_tetres.ttypes.TTRouteInfo
    :rtype: list[(pyticas_tetres.ttypes.TTRouteInfo, pyticas.ttypes.Period)]
    """
    R_TH = 0.9
    logger = getLogger(__name__)
    logger.debug('>>> checkup travel time data for a route : %s' % ttri)
    no_ttdata = []
    yearly_periods = _get_yearly_periods()
    for prd in yearly_periods:
        year = prd.start_date.year
        n = _get_count_of_tt_data(ttri, prd)
        expected = _get_expected_tt_data(prd)
        logger.debug(' - n of tt data for %s = %s/%d' % (year, n, expected))
        if expected - n >= DAILY_N:
            continue
        logger.debug(' -> check monthly data')
        monthly_periods = _get_monthly_periods_for_a_year(year)
        for mprd in monthly_periods:
            month = mprd.start_date.month
            n = _get_count_of_tt_data(ttri, mprd)
            expected = _get_expected_tt_data(mprd)
            logger.debug(' - n of tt data for %04d-%02d = %s/%d' % (year, month, n, expected))
            if expected - n >= DAILY_N:
                continue
            logger.debug(' -> check daily data')
            daily_periods = _get_daily_periods_for_a_month(year, month)
            for dprd in daily_periods:
                day = dprd.start_date.day
                n = _get_count_of_tt_data(ttri, dprd)
                expected = _get_expected_tt_data(dprd)
                rate = n / expected
                logger.debug(' - n of tt data for %04d-%02d-%02d = %s/%d (%.2f)'
                             % (year, month, day, n, expected, rate))
                if rate >= R_TH:
                    continue
                logger.debug(' -> it needs to be re-calculated')
                try:
                    from pyticas_tetres.util.traffic_file_checker import has_traffic_files
                    start_date_str, end_date_str = (prd.start_date.strftime('%Y-%m-%d'),
                                                    prd.end_date.strftime('%Y-%m-%d'))
                    if not has_traffic_files(start_date_str, end_date_str):
                        logger.warning('Missing traffic files for performing monthly check up '
                                       'for the time range starting from {} to {}'.format(
                                           start_date_str, end_date_str))
                        return
                except Exception as e:
                    logger.warning('Exception occurred while checking if traffic files exist '
                                   'during performing monthly task. Error: {}'.format(e))
                _perform_calculation_of_tt(ttri, prd)
    logger.debug('<<< end of checkup travel time data for a route : %s' % ttri)

def calculate_a_route(prd, ttri, **kwargs):
    """
    :type prd: pyticas.ttypes.Period
    :type ttri: pyticas_tetres.ttypes.TTRouteInfo
    """
    logger = getLogger(__name__)
    dbsession = kwargs.get('dbsession', None)

    if dbsession:
        da_tt = TravelTimeDataAccess(prd.start_date.year, session=dbsession)
    else:
        da_tt = TravelTimeDataAccess(prd.start_date.year)

    lock = kwargs.get('lock', nonop_with())

    # delete data to avoid duplicated data
    with lock:
        is_deleted = da_tt.delete_range(ttri.id, prd.start_date, prd.end_date, print_exception=True)
        if not is_deleted or not da_tt.commit():
            logger.warning('failed to delete the existing travel time data')
            if not dbsession:
                da_tt.close_session()
            return False

    # calculate tt and vmt
    (res_tt, res_speed, res_vmt) = _calculate_tt(ttri.route, prd)
    if res_tt is None:
        logger.warning('failed to calculate travel time')
        return False

    avg_speeds = _route_avgs(res_speed)
    total_vmts = _route_total(res_vmt)
    seg_tt = res_tt[-1].data
    timeline = prd.get_timeline(as_datetime=False, with_date=True)

    data = []
    for idx, dts in enumerate(timeline):
        data.append({
            'route_id': ttri.id,
            'time': dts,
            'tt': seg_tt[idx],
            'vmt': total_vmts[idx],
            'speed': avg_speeds[idx]
        })

    with lock:
        inserted_ids = da_tt.bulk_insert(data)
        if not inserted_ids or not da_tt.commit():
            logger.warning('failed to insert the calculated travel time into database')
            if not dbsession:
                da_tt.close_session()
            return False

    if not dbsession:
        da_tt.close_session()
    return inserted_ids

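# Hypothetical usage sketch for calculate_a_route(): compute and store one
# day's travel times for a stored route. The date is illustrative, and `ttri`
# is a pyticas_tetres.ttypes.TTRouteInfo fetched elsewhere (e.g. via
# TTRouteDataAccess).
#
#     prd = period.Period(datetime.datetime(2020, 1, 15, 0, 0),
#                         datetime.datetime(2020, 1, 15, 23, 55), 300)
#     inserted_ids = calculate_a_route(prd, ttri)
#     if inserted_ids is False:
#         ...  # deletion, calculation or insertion failed
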
def _handler_incident(da, item, action_log):
    """
    :type da: pyticas_tetres.da.incident.IncidentDataAccess
    :type item: pyticas_tetres.ttypes.IncidentInfo
    :type action_log: pyticas_tetres.ttypes.ActionLogInfo
    """
    target_date = item.str2datetime(item.cdts)
    prd = _get_period_for_a_day(target_date)
    try:
        _categorize_for_a_day(prd, categorization.incident, incidents=[item])
    except Exception as ex:
        getLogger(__name__).warning(
            'Exception occurred when handling incident changes : %s'
            % tb.traceback(ex, f_print=False))
        return False
    return True

def _handler_workzone(da, item, action_log):
    """
    :type da: pyticas_tetres.da.wz.WorkZoneDataAccess
    :type item: pyticas_tetres.ttypes.WorkZoneInfo
    :type action_log: pyticas_tetres.ttypes.ActionLogInfo
    """
    sdt = item.str2datetime(item.start_time)
    edt = item.str2datetime(item.end_time)
    periods = _get_daily_periods(sdt, edt)
    try:
        for prd in periods:
            _categorize_for_a_day(prd, categorization.workzone, workzones=[item])
        return True
    except Exception as ex:
        getLogger(__name__).warning(
            'Exception occurred when handling workzone changes : %s'
            % tb.traceback(ex, f_print=False))
        return False

def run(prd):
    """
    :type prd: pyticas.ttypes.Period
    """
    tlogger = task_logger.get_task_logger(TASK_LOGGER_NAME, capacity=365)
    n_inserted, has_error = incident_loader.import_all_corridor(prd.start_date, prd.end_date)
    getLogger(__name__).debug(' - %s incidents are loaded (has_error=%s)' % (n_inserted, has_error))
    tlogger.set_registry('last_executed', tlogger.now())
    tlogger.add_log({'time': tlogger.now(), 'target_period': prd, 'failed': has_error})
    tlogger.save()

def _estimation_process(id, queue, counters, lock, data_path, DB_INFO, CAD_DB_INFO, IRIS_DB_INFO):
    """
    :type id: int
    :type queue: Queue
    :type counters: dict
    :type lock: Lock
    :type data_path: str
    :type DB_INFO: dict
    :type CAD_DB_INFO: dict
    :type IRIS_DB_INFO: dict
    """
    from pyticas_tetres.db.tetres import conn
    from pyticas_tetres.db.iris import conn as iris_conn
    from pyticas_tetres.db.cad import conn as cad_conn

    logger = getLogger(__name__)

    # initialize
    logger.debug('[EST WORKER %d] starting...' % (id))
    ticas.initialize(data_path)
    infra = Infra.get_infra()
    conn.connect(DB_INFO)
    cad_conn.connect(CAD_DB_INFO)
    iris_conn.connect(IRIS_DB_INFO)

    # db session is created here
    ttr_da = TTRouteDataAccess()
    logger.debug('[EST WORKER %d] is ready' % (id))

    while True:
        (a_route_id, eparam, uid) = queue.get()
        try:
            logger.debug('[EST WORKER %d] >>>>> start estimation (uid=%s, route=%d)' % (id, uid, a_route_id))
            _eparam = eparam.clone()
            try:
                _eparam.add_start_time_offset(offset=5)
            except Exception as e:
                logger.debug('Could not add five minutes offset to the starting time. Error: {}'.format(e))
            _eparam.travel_time_route = ttr_da.get_by_id(a_route_id)
            estimation.estimate(_eparam, uid)
            logger.debug('[EST WORKER %d] <<<<< end of estimation (uid=%s, route=%d)' % (id, uid, a_route_id))
        except Exception as ex:
            tb.traceback(ex)
            logger.debug('[EST WORKER %d] <<<<< end of task (exception occurred) (uid=%s)' % (id, uid))

        # the worker that finishes the last route of a request packs the results
        should_pack = False
        with lock:
            counters[uid] = counters[uid] - 1
            if counters[uid] <= 0:
                del counters[uid]
                should_pack = True
        if should_pack:
            logger.debug('[EST WORKER %d] >>> make compressed file (uid=%s)' % (id, uid))
            _pack_result(uid)
            logger.debug('[EST WORKER %d] <<< end of making compressed file (uid=%s)' % (id, uid))

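# Hypothetical sketch of how estimation workers like the one above are fed
# (names and worker count are illustrative): one queue shared by all workers,
# plus a shared per-request counter so the worker that finishes the last
# route packs the results.
#
#     from multiprocessing import Manager, Lock, Process, Queue
#
#     manager = Manager()
#     queue, lock, counters = Queue(), Lock(), manager.dict()
#     for i in range(n_workers):
#         Process(target=_estimation_process,
#                 args=(i, queue, counters, lock, data_path,
#                       DB_INFO, CAD_DB_INFO, IRIS_DB_INFO),
#                 daemon=True).start()
#     counters[uid] = len(route_ids)
#     for rid in route_ids:
#         queue.put((rid, eparam, uid))
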
def _update_moe_values(rw_moe_param_json, db_info=None, *args, **kwargs):
    logger = getLogger(__name__)
    logger.debug('>> Updating MOE Values')
    _worker_process_to_update_moe_values(
        rw_moe_param_json['rw_moe_start_date'],
        rw_moe_param_json['rw_moe_end_date'],
        db_info, rw_moe_param_json=rw_moe_param_json)
    logger.debug('<< End of Updating MOE Values')

def initialize_database():
    logger = getLogger(__name__)
    logger.info(' - initialize database : add default data')
    from pyticas_tetres.db.tetres import conn
    conn.engine.execute(
        "INSERT INTO config (name, content) VALUES ('version', '{}')".format(__DB_VERSION__))

def _worker_process_to_specific_categorization(idx, queue, lck, data_path, db_info, **kwargs):
    import gc
    from pyticas_tetres.db.tetres import conn
    from pyticas.infra import Infra
    from pyticas.tool import tb
    from pyticas_tetres.rengine.cats import incident, snowmgmt, specialevent, weather, workzone

    logger = getLogger(__name__)

    # initialize
    logger.debug('[TT-Categorization Worker %d] starting...' % (idx))
    ticas.initialize(data_path)
    infra = Infra.get_infra()
    conn.connect(db_info)

    # resolve the requested categorizer modules by name
    categorizers = []
    categorizer_names = kwargs.get("categorizer_names")
    categorizer_map = {
        "incident": incident,
        "snowmgmt": snowmgmt,
        "specialevent": specialevent,
        "weather": weather,
        "workzone": workzone
    }
    for categorizer_name in categorizer_names:
        categorizers.append(categorizer_map.get(categorizer_name))

    da_route = TTRouteDataAccess()
    logger.debug('[TT-Categorization Worker %d] is ready' % (idx))

    while True:
        ttr_id, prd, num, total = queue.get()
        if prd is None:
            # sentinel received: terminate the worker process
            da_route.close_session()
            exit(1)
        try:
            ttri = da_route.get_by_id(ttr_id)
            if not ttri:
                logger.debug('[TT-Categorization Worker %d] route is not found (%s)' % (idx, ttr_id))
                continue
            logger.debug('[TT-Categorization Worker %d] (%d/%d) %s (id=%s) at %s'
                         % (idx, num, total, ttri.name, ttri.id, prd.get_date_string()))
            tt_da = TravelTimeDataAccess(prd.start_date.year)
            tt_data_list = tt_da.list_by_period(ttri.id, prd)
            tt_da.close_session()
            for cidx, categorizer in enumerate(categorizers):
                n_inserted = categorizer.categorize(ttri, prd, tt_data_list, lock=lck)
            gc.collect()
        except Exception as ex:
            tb.traceback(ex)
            continue

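# Hypothetical shutdown sketch: the categorization worker above exits when the
# period in the queued tuple is None, so a dispatcher can stop N workers by
# queuing N sentinel tuples (the shape matches the `queue.get()` unpacking):
#
#     for _ in range(n_workers):
#         queue.put((None, None, 0, 0))
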
def _handler_snowmanagement(da, item, action_log):
    """
    :type da: pyticas_tetres.da.snowmgmt.SnowMgmtDataAccess
    :type item: pyticas_tetres.ttypes.SnowManagementInfo
    :type action_log: pyticas_tetres.ttypes.ActionLogInfo
    """
    sdt = item.str2datetime(item.lane_lost_time)
    edt = item.str2datetime(item.lane_regain_time)
    periods = _get_daily_periods(sdt, edt)
    try:
        for prd in periods:
            _categorize_for_a_day(prd, categorization.snowmgmt, snowmgmts=[item])
        return True
    except Exception as ex:
        getLogger(__name__).warning(
            'Exception occurred when handling snow-management changes : %s'
            % tb.traceback(ex, f_print=False))
        return False

def run(target_date, db_info):
    """
    :type target_date: datetime.date
    :type db_info: dict
    """
    logger = getLogger(__name__)
    logger.debug('>> Calculating TOD Reliabilities for all routes')
    _run_multi_process(_worker_process_to_calculate_tod_reliabilities, target_date, db_info)
    logger.debug('<< End of calculating TOD Reliabilities for all routes')

def calculate_a_route(prd, ttri, **kwargs):
    """
    :type prd: pyticas.ttypes.Period
    :type ttri: pyticas_tetres.ttypes.TTRouteInfo
    """
    logger = getLogger(__name__)
    dbsession = kwargs.get('dbsession', None)

    if dbsession:
        da_tt = TravelTimeDataAccess(prd.start_date.year, session=dbsession)
    else:
        da_tt = TravelTimeDataAccess(prd.start_date.year)

    creatable_list = list()
    lock = kwargs.get('lock', nonop_with())

    # delete data to avoid duplicated data
    with lock:
        is_deleted = da_tt.delete_range(ttri.id, prd.start_date, prd.end_date, print_exception=True)
        if not is_deleted or not da_tt.commit():
            logger.warning('failed to delete the existing travel time data')
            if not dbsession:
                da_tt.close_session()
            return False

    print(f"{Fore.GREEN}CALCULATING TRAVEL-TIME FOR ROUTE[{ttri.name}]")
    res_dict = _calculate_tt(ttri.route, prd)
    if not res_dict or not res_dict['tt']:
        logger.warning('failed to calculate travel time')
        return False

    travel_time_results = res_dict['tt']
    travel_time = travel_time_results[-1].data
    avg_speeds = _route_avgs(res_dict['speed'])
    res_vmt = _route_total(res_dict['vmt'])
    timeline = prd.get_timeline(as_datetime=False, with_date=True)
    print(f"{Fore.CYAN}Start[{timeline[0]}] End[{timeline[-1]}] TimelineLength[{len(timeline)}]")

    for index, dateTimeStamp in enumerate(timeline):
        tt_data = {
            'route_id': ttri.id,
            'time': dateTimeStamp,
            'tt': travel_time[index],
            'speed': avg_speeds[index],
            'vmt': res_vmt[index],
        }
        creatable_list.append(tt_data)

    inserted_ids = list()
    if creatable_list:
        with lock:
            inserted_ids = da_tt.bulk_insert(creatable_list)
            if not inserted_ids or not da_tt.commit():
                logger.warning('failed to insert the calculated travel time into database')
                if not dbsession:
                    da_tt.close_session()
                return False

    if not dbsession:
        da_tt.close_session()
    return inserted_ids

def _start_and_pending():
    logger = getLogger(__name__)
    logger.info('>> starting task scheduler...')
    _schedule_job()
    # run pending jobs until the owning thread is flagged to stop
    t = threading.current_thread()
    while not getattr(t, 'to_be_killed', False):
        schedule.run_pending()
        time.sleep(1)
    logger.info('!! task scheduler has been ended')
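
# Hypothetical sketch of running and stopping the scheduler loop above: it
# polls a `to_be_killed` attribute on its own thread, so a caller can stop it
# by setting that flag and joining the thread.
#
#     t = threading.Thread(target=_start_and_pending)
#     t.start()
#     ...
#     t.to_be_killed = True
#     t.join()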