Example #1
def _worker_process_to_calculate_tod_reliabilities(idx, queue, lck, data_path, db_info):
    import gc
    from pyticas.tool import tb
    from pyticas_tetres.db.tetres import conn
    from pyticas.infra import Infra

    logger = getLogger(__name__)  # getLogger, ticas, and traveltime_info come from module-level imports in the source file
    # initialize
    logger.debug('[TOD Reliability Worker %d] starting...' % (idx))
    ticas.initialize(data_path)
    Infra.get_infra()
    conn.connect(db_info)

    logger.debug('[TOD Reliability Worker %d] is ready' % (idx))
    while True:
        ttr_id, target_date, num, total = queue.get()
        if target_date is None:
            exit(1)
        try:
            logger.debug('[TOD Reliability Worker %d] (%d/%d) calculating for route=%s at %s' % (
                idx, num, total, ttr_id, target_date.strftime('%Y-%m-%d')))
            traveltime_info.calculate_TOD_reliabilities(ttr_id, target_date, lock=lck)
            gc.collect()
        except Exception as ex:
            tb.traceback(ex)
            continue
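A minimal launch sketch (assumed usage, not from the source): workers share a task queue and a lock, and a None date acts as the stop sentinel, matching the check above. The helper name is illustrative.

import multiprocessing as mp

def _start_tod_workers(n_workers, data_path, db_info):
    # hypothetical helper: spawn workers sharing one queue and one lock
    task_queue = mp.Queue()
    lck = mp.Lock()
    procs = [mp.Process(target=_worker_process_to_calculate_tod_reliabilities,
                        args=(idx, task_queue, lck, data_path, db_info))
             for idx in range(n_workers)]
    for p in procs:
        p.start()
    return task_queue, procs

# enqueue work:   task_queue.put((ttr_id, target_date, num, total))
# stop a worker:  task_queue.put((None, None, 0, 0))  # a None date ends the loop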
Example #2
    def start(self, port=None, debug=True, ssl_path=None, **kwargs):
        """ start server

        :type port: int
        :type debug: bool
        :type ssl_path: str
        :rtype:
        """

        logger = getLogger(__name__)
        if not ticas.is_initialized():
            logger.info('initializing TICAS')
            ticas.initialize(self.data_path)
            Infra.get_infra('', download=True)  # download and load the most recent roadway network

        logger.info('starting PyTICAS Apps')

        # create key and crt for HTTPS
        if ssl_path and len(ssl_path) == 2:
            # ssl_path[0] : `crt` file path
            # ssl_path[1] : `key` file path
            logger.info('creating SSL context...')
            # make_ssl_devcert(os.path.join(ssl_path, 'ssl'), host='localhost') # make dummy ssl
            context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
            context.load_cert_chain(ssl_path[0], ssl_path[1])
        else:
            context = None

        # call init modules
        logger.info('loading init modules...')
        for app in self.apps:
            app.init(self.server)

        logger.info('registering service modules...')
        for app in self.apps:
            app.register_service(self.server)

        # run api web service
        if not port:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # faverolles 12/19/2019: socket change
            # sock.bind(('localhost', 0))
            sock.bind(('0.0.0.0', 0))
            port = sock.getsockname()[1]
            sock.close()

        self.server.run(debug=debug, port=port, ssl_context=context, **kwargs)

        logger.info('program terminated')
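A hedged usage sketch (the `server` instance name and the certificate paths are placeholders): with `ssl_path` given as a (crt, key) pair the server runs over HTTPS, and with `port=None` a free port is picked by binding a throwaway socket, as the code above shows.

# assumed usage; paths are placeholders
server.start(port=5000, debug=False,
             ssl_path=('/etc/ssl/ticas.crt', '/etc/ssl/ticas.key'))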
Example #3
def ramp_passage_volume(ent, prd, **kwargs):
    """ return total volume of passage detector

    if there is a merge detector, use the volume of the merge detector
    if there are passage and bypass detectors, use passage volume - bypass volume
    if there is no passage detector, use the volume of the bypass detector

    :type ent: pyticas.ttypes.RNodeObject
    :type prd: pyticas.ttypes.Period
    """
    infra = kwargs.get('infra', Infra.get_infra())
    ddr = infra.ddr
    merges = ent.get_merge_detectors()
    passages = ent.get_passage_detectors()
    bypasses = ent.get_bypass_detectors()
    n_data = len(prd.get_timeline())
    volumes = [-1] * n_data

    if merges:
        volumes = _total_volume(ddr, merges, prd)
    elif passages:
        volumes = _total_volume(ddr, passages, prd)
        if bypasses:
            b_vols = _total_volume(ddr, bypasses, prd)
            volumes = [ max(v - b_vols[idx], 0) if b_vols[idx] > 0 else v for idx, v in enumerate(volumes) ]
    elif bypasses:
        volumes = _total_volume(ddr, bypasses, prd)

    return volumes
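A usage sketch, assuming a hypothetical entrance rnode name and the `period.create_period` helper used in example #14 (the import path is assumed):

from pyticas import period  # import path assumed

infra = Infra.get_infra()
ent = infra.get_rnode('rnd_88550')  # hypothetical entrance rnode name
prd = period.create_period((2020, 1, 6, 7, 0), (2020, 1, 6, 9, 0), 30)
vols = ramp_passage_volume(ent, prd, infra=infra)  # -1 marks intervals without data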
Example #4
def create_route(srn_name, ern_name, name='', desc='', **kwargs):
    """ Return `Route` that is from `srn_name` to `ern_name`

    :param srn_name: start rnode name
    :type srn_name: str
    :param ern_name: end rnode name
    :type ern_name: str
    :type name: str
    :type desc: str
    :rtype: pyticas.ttypes.Route
    """
    now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    if not name:
        name = 'Route @ %s' % (now)
    if not desc:
        desc = 'Route created at %s ' % now

    infra = kwargs.get('infra', Infra.get_infra())
    r = Route(name, desc)
    r.infra_cfg_date = infra.cfg_date

    start_rnode = infra.get_rnode(srn_name)
    end_rnode = infra.get_rnode(ern_name)

    r.rnodes.append(start_rnode)
    betweens = infra.geo.between_rnodes(start_rnode, end_rnode)

    # if not betweens and start_rnode.name == end_rnode.name:
    #     raise ValueError(('Cannot find end of section in creating route. ',
    #                       'Make sure start and end rnode information : {0} - {1}'.format(srn_name, ern_name)))

    r.rnodes.extend([rn for rn in betweens])
    r.rnodes.append(end_rnode)

    return r
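A usage sketch with hypothetical rnode names:

# assumed usage; station names are placeholders
r = create_route('S100', 'S120', name='I-94 EB test section')
print(r.name, len(r.rnodes))  # start rnode, the rnodes between, and the end rnode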
Example #5
def get_traffic_data(rnode_list, prd, datatype, **kwargs):
    """

    :type rnode_list: list[RNodeObject]
    :type prd: Period
    :type datatype: str
    :rtype: list[RNodeData]
    """
    dc = kwargs.get('detector_checker', None)
    infra = kwargs.get('infra', Infra.get_infra())
    assert isinstance(infra, Infra)

    funcs = {
        'u': infra.rdr.get_speed,
        'tq': infra.rdr.get_total_flow,
        'aq': infra.rdr.get_average_flow,
        'v': infra.rdr.get_volume,
        'k': infra.rdr.get_density,
        'o': infra.rdr.get_occupancy,
        's': infra.rdr.get_scan,
    }

    # res = []
    # for rnode in rnode_list:
    #     res.append(funcs[datatype](rnode, prd, dc))
    # return res
    worker = Worker(n_threads=THREAD_LIMIT_PER_CALL)
    for rnode in rnode_list:
        worker.add_task(funcs[datatype], rnode, prd, dc)
    return worker.run()
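A usage sketch; the single-letter datatype codes map to the reader functions above ('u' speed, 'tq' total flow, 'aq' average flow, 'v' volume, 'k' density, 'o' occupancy, 's' scan):

# assumed usage; rnode names are placeholders, prd as in the earlier period sketch
infra = Infra.get_infra()
stations = [infra.get_rnode('S100'), infra.get_rnode('S101')]
speeds = get_traffic_data(stations, prd, 'u')     # list[RNodeData] of speeds
densities = get_traffic_data(stations, prd, 'k')  # list[RNodeData] of densities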
Example #6
def target_station_and_snowroute_info(year):
    infra = Infra.get_infra()

    logger = getLogger(__name__)
    logger.info(
        '>>> updating relations between target station and truck route')

    from pyticas_ncrtes.da.snowroute import SnowRouteDataAccess
    from pyticas_ncrtes.da.target_station import TargetStationDataAccess

    snrDA = SnowRouteDataAccess()
    tsDA = TargetStationDataAccess()

    snow_routes = snrDA.list_by_year(year)
    target_stations = tsDA.list_by_year(year, as_model=True)

    for tidx, ts in enumerate(target_stations):
        rnode = infra.get_rnode(ts.station_id)
        if not rnode:
            continue

        for snri in snow_routes:
            if rnode in snri.route1.rnodes or rnode in snri.route2.rnodes:
                ts.snowroute_id = snri.id
                ts.snowroute_name = snri._snowroute_group.name
                if tidx and tidx % 100 == 0:  # commit in batches of 100 stations
                    snrDA.commit()

    snrDA.commit()
    snrDA.close()
    tsDA.close()

    logger.info(
        '<<< end of updating relations between target station and truck route')
Example #7
def ramp_queue_flow(ent, prd, **kwargs):
    """ return total flow rates of queue detectors

    :type ent: pyticas.ttypes.RNodeObject
    :type prd: pyticas.ttypes.Period
    """
    infra = kwargs.get('infra', Infra.get_infra())
    volumes = ramp_queue_volume(ent, prd, infra=infra)
    return [(v * 3600 / prd.interval if v > 0 else cfg.MISSING_VALUE) for v in volumes]
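The list comprehension above scales a per-interval vehicle count to vehicles per hour; for example, with a 30-second interval a volume of 5 vehicles becomes 5 * 3600 / 30 = 600 veh/h. A minimal check:

interval = 30  # seconds, as in prd.interval
volume = 5     # vehicles counted in one interval
flow = volume * 3600 / interval
assert flow == 600.0  # veh/h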
Example #8
def _estimation_process(id, queue, counters, lock, data_path, DB_INFO, CAD_DB_INFO, IRIS_DB_INFO):
    """
    :type id: int
    :type queue: Queue
    :type counters: dict
    :type lock: Lock
    :type data_path: str
    :type DB_INFO: dict
    :type CAD_DB_INFO: dict
    :type IRIS_DB_INFO: dict
    """

    from pyticas_tetres.db.tetres import conn
    from pyticas_tetres.db.iris import conn as iris_conn
    from pyticas_tetres.db.cad import conn as cad_conn

    logger = getLogger(__name__)
    # initialize
    logger.debug('[EST WORKER %d] starting...' % (id))
    ticas.initialize(data_path)
    infra = Infra.get_infra()
    conn.connect(DB_INFO)
    cad_conn.connect(CAD_DB_INFO)
    iris_conn.connect(IRIS_DB_INFO)

    # db session is created here
    ttr_da = TTRouteDataAccess()

    logger.debug('[EST WORKER %d] is ready' % (id))
    while True:
        (a_route_id, eparam, uid) = queue.get()
        try:
            logger.debug('[EST WORKER %d] >>>>> start estimation (uid=%s, route=%d)' % (id, uid, a_route_id))
            _eparam = eparam.clone()
            try:
                _eparam.add_start_time_offset(offset=5)
            except Exception as e:
                logger.debug('Could not add five minutes offset to the starting time. Error: {}'.format(e))
            _eparam.travel_time_route = ttr_da.get_by_id(a_route_id)
            estimation.estimate(_eparam, uid)
            logger.debug('[EST WORKER %d] <<<<< end of estimation (uid=%s, route=%d)' % (id, uid, a_route_id))
        except Exception as ex:
            tb.traceback(ex)
            logger.debug('[EST WORKER %d] <<<<< end of task (exception occurred) (uid=%s)' % (id, uid))

        should_pack = False

        with lock:
            counters[uid] = counters[uid] - 1
            if counters[uid] <= 0:
                del counters[uid]
                should_pack = True

        if should_pack:
            logger.debug('[EST WORKER %d] >>> make compressed file (uid=%s)' % (id, uid))
            _pack_result(uid)
            logger.debug('[EST WORKER %d] <<< end of making compressed file (uid=%s)' % (id, uid))
Example #9
def _worker_process_to_specific_categorization(idx, queue, lck, data_path,
                                               db_info, **kwargs):
    from pyticas_tetres.db.tetres import conn
    from pyticas.infra import Infra
    from pyticas.tool import tb
    from pyticas_tetres.rengine.cats import incident, snowmgmt, specialevent, weather, workzone

    logger = getLogger(__name__)
    # initialize
    logger.debug('[TT-Categorization Worker %d] starting...' % (idx))
    ticas.initialize(data_path)
    infra = Infra.get_infra()
    conn.connect(db_info)
    categorizers = []
    categorizer_names = kwargs.get("categorizer_names")
    categorizer_map = {
        "incident": incident,
        "snowmgmt": snowmgmt,
        "specialevent": specialevent,
        "weather": weather,
        "workzone": workzone
    }
    for categorizer_name in categorizer_names:
        categorizers.append(categorizer_map.get(categorizer_name))
    da_route = TTRouteDataAccess()
    logger.debug('[TT-Categorization Worker %d] is ready' % (idx))
    while True:
        ttr_id, prd, num, total = queue.get()
        if prd is None:
            da_route.close_session()
            exit(1)
        try:
            ttri = da_route.get_by_id(ttr_id)
            if not ttri:
                logger.debug(
                    '[TT-Categorization Worker %d] route is not found (%s)' %
                    (idx, ttr_id))
                continue
            logger.debug(
                '[TT-Categorization Worker %d] (%d/%d) %s (id=%s) at %s' %
                (idx, num, total, ttri.name, ttri.id, prd.get_date_string()))

            tt_da = TravelTimeDataAccess(prd.start_date.year)
            tt_data_list = tt_da.list_by_period(ttri.id, prd)
            tt_da.close_session()

            for categorizer in categorizers:
                categorizer.categorize(ttri, prd, tt_data_list, lock=lck)

            gc.collect()

        except Exception as ex:
            tb.traceback(ex)
            continue
Example #10
def get_infra():
    """ returns `Infra` object. All modules must get `Infra` object from this function.

    :rtype: pyticas.infra.Infra
    """
    global infra

    infra = Infra.get_infra()
    return infra
Example #11
def run(route, prd, **kwargs):
    """

    :type route: pyticas.ttypes.Route
    :type prd: pyticas.ttypes.Period
    :return:
    """
    infra = kwargs.get('infra', Infra.get_infra())
    kwargs['detector_checker'] = route.get_detector_checker()
    return get_total_flow(infra, route.get_rnodes(), prd, **kwargs)
Example #12
def _worker_process_to_calculate_tt_and_categorize(idx, queue, lck, data_path,
                                                   db_info):
    from pyticas_tetres.db.tetres import conn
    from pyticas.infra import Infra
    from pyticas.tool import tb
    from pyticas_tetres.rengine.cats import weather, incident, snowmgmt, specialevent, workzone

    logger = getLogger(__name__)
    # initialize
    logger.debug('[TT-Categorization Worker %d] starting...' % (idx))
    ticas.initialize(data_path)
    infra = Infra.get_infra()
    conn.connect(db_info)

    categorizers = [weather, incident, workzone, specialevent, snowmgmt]
    da_route = TTRouteDataAccess()
    logger.debug('[TT-Categorization Worker %d] is ready' % (idx))
    while True:
        ttr_id, prd, num, total = queue.get()
        if prd is None:
            da_route.close_session()
            exit(1)
        try:
            ttri = da_route.get_by_id(ttr_id)
            if not ttri:
                logger.debug(
                    '[TT-Categorization Worker %d] route is not found (%s)' %
                    (idx, ttr_id))
                continue
            logger.debug(
                '[TT-Categorization Worker %d] (%d/%d) %s (id=%s) at %s' %
                (idx, num, total, ttri.name, ttri.id, prd.get_date_string()))
            is_inserted = traveltime.calculate_a_route(
                prd, ttri, dbsession=da_route.get_session(), lock=lck)
            if not is_inserted:
                logger.warning(
                    '[TT-Categorization Worker %d] failed to add travel time data'
                    % idx)

            tt_da = TravelTimeDataAccess(prd.start_date.year)
            tt_data_list = tt_da.list_by_period(ttri.id, prd)
            tt_da.close_session()

            for categorizer in categorizers:
                categorizer.categorize(ttri, prd, tt_data_list, lock=lck)

            gc.collect()

        except Exception as ex:
            tb.traceback(ex)
            continue
Example #13
def create_route_config(rnodes, **kwargs):
    """
    :type rnodes: list[pyticas.ttypes.RNodeObject]
    :rtype: pyticas.ttypes.RouteConfig
    """
    infra_cfg_date = kwargs.get('infra_cfg_date', None)
    infra = kwargs.get('infra', Infra.get_infra(infra_cfg_date))
    (rns, orns) = infra.geo.opposite_rnodes(rnodes)
    rc = RouteConfig()
    rc.infra_cfg_date = infra.cfg_date
    rc.add_nodes(rns, orns)
    organize(rc)
    return rc
Example #14
def _moe(moe_func, moe_name, **kwargs):
    """

    :type moe_func: callable
    :type moe_name: str
    :return:
    """
    try:
        route_json = request.form.get('route', None)
        periods = request.form.get('periods', None)

        if not route_json or not periods:
            return prot.response_error('Invalid Parameter')

        r = json2route(route_json)

        period_list = []
        for prdinfo in json.loads(periods):
            prd = period.create_period(
                (prdinfo['start_year'], prdinfo['start_month'], prdinfo['start_date'], prdinfo['start_hour'],
                 prdinfo['start_min']),
                (prdinfo['end_year'], prdinfo['end_month'], prdinfo['end_date'], prdinfo['end_hour'],
                 prdinfo['end_min']),
                prdinfo['interval']
            )
            period_list.append(prd)

        tmp_dir = Infra.get_infra().get_path('moe_tmp', create=True)
        uid = str(uuid.uuid4())
        est_file = os.path.join(tmp_dir, '%s.xlsx' % uid)
        res = moe_func(r, period_list)
        write = kwargs.get('write_function', writer.write)
        write(est_file, r, res, **kwargs)

        encoded = None
        with open(est_file, 'rb') as f:
            xlsx_content = f.read()
            encoded = base64.b64encode(xlsx_content)

        if not encoded:
            return prot.response_error('ERROR : %s' % moe_name)

        os.remove(est_file)

        return prot.response_success(obj=encoded.decode('utf-8'))

    except Exception as ex:
        tb.traceback(ex)
        return prot.response_error('ERROR : %s' % moe_name)
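The handler reads two form fields: `route` (a JSON-serialized route) and `periods` (a JSON list of period dicts). A payload sketch inferred from the keys read above; the values are placeholders:

periods_payload = [{
    'start_year': 2020, 'start_month': 1, 'start_date': 6,
    'start_hour': 7, 'start_min': 0,
    'end_year': 2020, 'end_month': 1, 'end_date': 6,
    'end_hour': 9, 'end_min': 0,
    'interval': 300,
}]
# POSTed as:  form = {'route': route_json, 'periods': json.dumps(periods_payload)}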
Example #15
def ramp_queue_volume(ent, prd, **kwargs):
    """ return total volume of queue detectors

    if there are no queue detectors at the entrance,
    use passage volume * PASSAGE_DEMAND_FACTOR

    :type ent: pyticas.ttypes.RNodeObject
    :type prd: pyticas.ttypes.Period
    """
    infra = kwargs.get('infra', Infra.get_infra())
    queues = ent.get_queue_detectors()
    if any(queues):
        return _total_volume(infra.ddr, queues, prd)
    else:
        return [-1]*len(prd.get_timeline())
Example #16
def _output_path(sub_dir='', create=True):
    infra = Infra.get_infra()
    if sub_dir:
        output_dir = infra.get_path('moe/%s' % sub_dir, create=create)
    else:
        output_dir = infra.get_path('moe', create=create)

    if create and not os.path.exists(output_dir):
        os.makedirs(output_dir)
        return os.path.abspath(output_dir)

    if os.path.exists(output_dir):
        return os.path.abspath(output_dir)
    else:
        return output_dir
Example #17
def tetres_admin_xlsx_content_from_route():
    route_content = request.form.get('route')
    r = json2route(route_content)
    try:
        tmp_dir = Infra.get_infra().get_path('tmp', create=True)
        uid = str(uuid.uuid4())
        filepath = os.path.join(tmp_dir, '%s.xlsx' % uid)
        if not r.cfg:
            r.cfg = route_config.create_route_config(r.rnodes)
        rc.writer.write(filepath, r)
        with open(filepath, 'rb') as f:
            file_content = f.read()
            encoded = base64.b64encode(file_content)
            return prot.response_success(obj=encoded.decode('utf-8'))
    except Exception as ex:
        tb.traceback(ex)
        return prot.response_fail('failed to write route')
Example #18
def _worker_process_to_create_or_update_tt_and_moe(idx, queue, lck, data_path,
                                                   db_info, **kwargs):
    from pyticas_tetres.db.tetres import conn
    from pyticas.infra import Infra
    from pyticas.tool import tb

    logger = getLogger(__name__)
    # initialize
    logger.debug('[TT-Categorization Worker %d] starting...' % (idx))
    ticas.initialize(data_path)
    infra = Infra.get_infra()
    conn.connect(db_info)
    rw_moe_param_json = kwargs.get("rw_moe_param_json")
    da_route = TTRouteDataAccess()
    logger.debug('[TT-Categorization Worker %d] is ready' % (idx))
    while True:
        ttr_id, prd, num, total = queue.get()
        if prd is None:
            da_route.close_session()
            exit(1)
        try:
            ttri = da_route.get_by_id(ttr_id)
            if not ttri:
                logger.debug(
                    '[TT-Categorization Worker %d] route is not found (%s)' %
                    (idx, ttr_id))
                continue
            logger.debug(
                '[TT-Categorization Worker %d] (%d/%d) %s (id=%s) at %s' %
                (idx, num, total, ttri.name, ttri.id, prd.get_date_string()))
            traveltime.calculate_tt_moe_a_route(
                prd,
                ttri,
                dbsession=da_route.get_session(),
                lock=lck,
                create_or_update=True,
                rw_moe_param_json=rw_moe_param_json)
            gc.collect()

        except Exception as ex:
            logger.warning(
                '[TT-Categorization Worker %d] failed to add travel time data'
                % idx)
            tb.traceback(ex)
            continue
Example #19
def ramp_queue_occupancy(ent, prd, agg_method='max', **kwargs):
    """ return average occupancy of queue detectors

    :type ent: pyticas.ttypes.RNodeObject
    :type prd: pyticas.ttypes.Period
    :param agg_method: aggregation method: 'avg', 'max', 'min' or 'sum'
    :type agg_method: str
    """
    infra = kwargs.get('infra', Infra.get_infra())
    ddr = infra.ddr
    queues = ent.get_queue_detectors()
    if any(queues):
        occs = []
        for q in queues:
            occs.append(ddr.get_occupancy(q, prd))

        occ_data = []
        q_count = len(queues)
        for didx in range(len(occs[0])):
            total = 0
            min_value = 999
            max_value = -999
            for qidx in range(q_count):
                o = occs[qidx][didx]
                min_value = min(min_value, o)
                max_value = max(max_value, o)
                total += o
            avg = total / q_count if total > 0 else 0
            if agg_method == 'avg':
                occ_data.append(avg)
            elif agg_method == 'min':
                occ_data.append(min_value)
            elif agg_method == 'max':
                occ_data.append(max_value)
            elif agg_method == 'sum':
                occ_data.append(total)

        del occs

        return occ_data

    return None
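A usage sketch comparing aggregation methods (inputs as in the other ramp examples; the function returns None when the entrance has no queue detectors):

# assumed usage
occ_max = ramp_queue_occupancy(ent, prd)                    # default: per-interval max
occ_avg = ramp_queue_occupancy(ent, prd, agg_method='avg')  # per-interval average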
Example #20
def route_setup(r):
    """ initialize `Route` converted from json
        (`rnode` and `corridor` must be connected to their instances)

    :type r: Route
    :return:
    """
    logger = getLogger(__name__)

    infra = Infra.get_infra()
    rnodes = []
    for ridx, rn in enumerate(r.rnodes):
        rnode_object = infra.get_rnode(rn)
        if rnode_object is None:
            logger.warning('rnode is not found : %s' % rn)
            continue
        rnodes.append(rnode_object)

    r.rnodes = rnodes

    if hasattr(r, 'cfg') and r.cfg:
        for nidx, ns in enumerate(r.cfg.node_sets):
            if hasattr(ns.node1, 'rnode') and ns.node1.rnode:
                r.cfg.node_sets[nidx].node1.rnode = infra.get_rnode(
                    ns.node1.rnode)
            else:
                r.cfg.node_sets[nidx].node1.rnode = None

            if hasattr(ns.node2, 'rnode') and ns.node2.rnode:
                r.cfg.node_sets[nidx].node2.rnode = infra.get_rnode(
                    ns.node2.rnode)
            else:
                r.cfg.node_sets[nidx].node2.rnode = None

            try:
                ns.node1.corridor = infra.get_corridor_by_name(
                    ns.node1.corridor)
                ns.node2.corridor = infra.get_corridor_by_name(
                    ns.node2.corridor)
            except AttributeError as ex:
                raise ex
Example #21
    def get_infra():
        infra = Infra.get_infra()
        corridors = []
        for corr in sorted(infra.get_corridors(), key=lambda c: c.name):
            corridors.append(corr.__dict__)

        try:
            rnodes = _get_items(infra.rnodes)
            dets = _get_items(infra.detectors)
            dmss = _get_items(infra.dmss)
            cams = _get_items(infra.cameras)
            meters = _get_items(infra.meters)
            configs = {}

            for k in SHARED_CONFIGS:
                configs[k] = getattr(ticas_cfg, k)

            configs['ROUTE_CLASS'] = types.Route.__name__
            configs['ROUTE_MODULE'] = types.Route.__module__

            api_urls_info = {}
            for k, v in api_urls.__dict__.items():
                if k.startswith('_'): continue
                api_urls_info[k] = v

            return jsonify({
                'config': configs,
                'api_urls' : api_urls_info,
                'corridor_list': corridors,
                'rnode_list': rnodes,
                'detector_list': dets,
                'dms_list': dmss,
                'camera_list': cams,
                'meter_list': meters,
            }, indent=4, only_name=True )

        except Exception as ex:
            import sys, traceback
            print('-' * 60)
            traceback.print_exc(file=sys.stdout)
            print('-' * 60)
Example #22
def _worker_process(id, queue, counters, lock, data_path, DB_INFO, CAD_DB_INFO,
                    IRIS_DB_INFO):
    """
    :type id: int
    :type queue: Queue
    :type counters: dict
    :type lock: Lock
    :type data_path: str
    :type DB_INFO: dict
    :type CAD_DB_INFO: dict
    :type IRIS_DB_INFO: dict
    """

    from pyticas_tetres.db.tetres import conn
    from pyticas_tetres.db.iris import conn as iris_conn
    from pyticas_tetres.db.cad import conn as cad_conn

    logger = getLogger(__name__)
    # initialize
    logger.debug('[ADMIN WORKER %d] starting...' % (id))
    ticas.initialize(data_path)
    infra = Infra.get_infra()
    conn.connect(DB_INFO)
    cad_conn.connect(CAD_DB_INFO)
    iris_conn.connect(IRIS_DB_INFO)

    logger.debug('[ADMIN WORKER %d] is ready' % (id))
    while True:
        (_uid, _task_added_time, _task, _args, _kwargs) = queue.get()
        try:
            logger.debug('[ADMIN WORKER %d] >>>>> start to run task (uid=%s)' %
                         (id, _uid))
            _task(*_args, **_kwargs)
            logger.debug('[ADMIN WORKER %d] <<<<< end of task (uid=%s)' %
                         (id, _uid))
        except Exception as ex:
            tb.traceback(ex)
            logger.debug(
                '[ADMIN WORKER %d] <<<<< end of task (exception occurred) (uid=%s)'
                % (id, _uid))
Example #23
def get_total_flow(infra, rnode_list, prd, **kwargs):
    """

    :type infra: Infra
    :type rnode_list: list[pyticas.ttypes.RNodeObject]
    :type prd: Period
    :rtype: list[RNodeData]
    """
    dc = kwargs.get('detector_checker', None)
    infra = kwargs.get('infra', infra)  # fall back to the positional argument instead of discarding it
    results = []
    for rnode in rnode_list:
        res = None
        if rnode.is_station():
            res = infra.rdr.get_total_flow(rnode, prd, dc)
        elif rnode.is_entrance():
            res = _tq_entrance(infra, rnode, prd, dc=dc)
        elif rnode.is_exit():
            res = _tq_exit(infra, rnode, prd, dc=dc)
        else:
            continue
        results.append(res)
    return results
Example #24
import time
import sys

sys.path.append("Server/src")
import global_settings
import dbinfo

if __name__ == '__main__':
    from pyticas import ticas
    from pyticas.infra import Infra
    from pyticas_tetres.db.cad import conn as conn_cad
    from pyticas_tetres.db.iris import conn as conn_iris
    from pyticas_tetres.db.tetres import conn

    ticas.initialize(global_settings.DATA_PATH)
    infra = Infra.get_infra()

    conn.connect(dbinfo.tetres_db_info())
    conn_cad.connect(dbinfo.cad_db_info())
    conn_iris.connect(dbinfo.iris_incident_db_info())

    time.sleep(1)

    print('')
    print(
        '!! Do not run multiple instances of this program. (DB sync problem can be caused in bulk-insertion and deletion)')
    print('!! Stop TeTRES Server if it is running.')
    print('')
    print('# loads weather data for the given time period')
    print('')
Example #25
def _output_dir(year):
    """
    :type year: int or str
    :rtype: str
    """
    return Infra.get_infra().get_path('%s/%s' % (ISD_DIR, year), create=True)
Example #26
def run(pid, stations, months, data_path, db_info):
    """ target station identification main process

    Parameters
    ===========
        - pid : process identification for multi-processing
        - stations : station list
        - months : month list
        - data_path : TICAS data path

    :type pid: int
    :type stations: list[str]
    :type months: list[(int, int)]
    :type data_path : str
    :type db_info: dict
    :return:
    """
    if db_info:
        Infra.initialize(data_path)
        infra = Infra.get_infra()

        if conn.Session is None:
            conn.connect(db_info)
    else:
        infra = ncrtes.get_infra()

    logger = getLogger(__name__)
    logger.info('starting target station identification')

    wsDA = WinterSeasonDataAccess()
    nfDA = NormalFunctionDataAccess()
    tsDA = TargetStationDataAccess()

    # process start time
    stime = time.time()
    n_stations = len(stations)
    cnt = 0
    for sidx, st in enumerate(stations):
        station = infra.get_rnode(st)
        logger.info('# PID=%d, SIDX=%d/%d, STATION=%s' % (pid, sidx, n_stations, st))
        try:
            nf = nsrf.get_normal_function(station, months, wsDA=wsDA, nfDA=nfDA, tsDA=tsDA, autocommit=True)
            if nf and nf.is_valid():
                logger.info('  - %s is valid' % station.station_id)
            else:
                logger.debug('  - %s is not valid (nf=%s)' % (station.station_id, nf))

            # cnt += 1
            #
            # if cnt and cnt % 20 == 0:
            #     logger.warning('  - commmit!!')
            #     wsDA.commit()
            #     nfDA.commit()
            #     tsDA.commit()

        except Exception as ex:
            logger.warning(tb.traceback(ex, False))

    # wsDA.commit()
    # nfDA.commit()
    # tsDA.commit()
    # logger.warning('  - commmit!! (final)')

    wsDA.close()
    nfDA.close()
    tsDA.close()

    etime = time.time()

    logger.info('end of target station identification (elapsed time=%s)' % timeutil.human_time(seconds=(etime - stime)))
Example #27
def _output_dir(year):
    """
    :type year: int or str
    :rtype: str
    """
    return Infra.get_infra().get_path(QCLCD_DATA_DIR, create=True)
Example #28
def cumulative_input_output(ent, prd, **kwargs):
    """ return estimated cumulative input and output of the entrance

    :type ent: pyticas.ttypes.RNodeObject
    :type prd: pyticas.ttypes.Period
    """
    infra = kwargs.get('infra', Infra.get_infra())
    ddr = infra.ddr
    queue_full_count = 0
    queue_empty_count = 0
    green_accum = 0
    occ = ramp_queue_occupancy(ent, prd, 'max')
    input = ramp_queue_volume(ent, prd)
    output = ramp_passage_volume(ent, prd)
    if occ is None or input is None or output is None:
        return None, None
    greens = ent.get_green_detectors()
    green_volumes = ddr.get_volume(greens[0], prd) if any(greens) else [0] * len(output)
    cumulative_input = []
    cumulative_output = []
    cumulative_input.append(max(input[0], 0))
    cumulative_output.append(max(output[0], 0))
    reset_count = 0
    met = ent.meters[0]

    for idx in range(1, len(input)):
        passage_failure = output[idx] < 0
        green_accum += (green_volumes[idx] if green_volumes[idx] > 0 else 0)
        queue_length = max(cumulative_input[idx - 1] - cumulative_output[idx - 1], 0)
        estimated_under_count = 0

        # if occupancy is high
        if occ[idx] > QUEUE_OCC_THRESHOLD:
            queue_full_count += 1
            max_storage = met.storage * ent.lanes * K_JAM_RAMP / tmc.feet_per_mile
            under = max_storage - queue_length
            queue_overflow_ratio = min(2 * queue_full_count * 30.0 / max(met.max_wait, 1), 1)
            min_demand_adjustment = int(round(queue_full_count * 30 / 60)) * 30 / 60.0
            estimated_under_count = max(queue_overflow_ratio * under, min_demand_adjustment)
            input_volume = input[idx] + estimated_under_count
        else:
            queue_full_count = 0
            input_volume = input[idx]

        ci = cumulative_input[idx - 1] + max(input_volume, 0)
        co = cumulative_output[idx - 1] + max(output[idx], 0)

        # reset the cumulative counts when the queue has looked empty
        # (low queue volume and low occupancy) for QUEUE_EMPTY_STEPS
        # consecutive time steps
        is_demand_below_passage = ci - co < -1
        is_passage_below_green = co - green_accum < -1
        is_queue_volume_low = is_demand_below_passage or is_passage_below_green
        if is_queue_volume_low and occ[idx] < QUEUE_OCC_THRESHOLD:
            queue_empty_count += 1
        else:
            queue_empty_count = 0

        if queue_empty_count >= QUEUE_EMPTY_STEPS:
            reset_count += 1
            ci = 0
            co = 0
            queue_full_count = 0
            queue_empty_count = 0
            green_accum = 0

        cumulative_input.append(ci)
        cumulative_output.append(co)

    return cumulative_input, cumulative_output
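Since the loop above estimates queue length as cumulative input minus cumulative output, a per-interval queue-length series can be derived from the return value; a sketch, assuming the same `ent` and `prd` inputs as the other ramp examples:

# assumed usage
ci, co = cumulative_input_output(ent, prd)
if ci is not None:
    queue_lengths = [max(i - o, 0) for i, o in zip(ci, co)]  # vehicles waiting per interval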
Example #29
def _json_decoder(args):
    """ Return object that is unserialized

    :type args: dict
    :rtype: object
    """

    # for `datetime.datetime`

    if '__type__' in args and args['__type__'] == 'datetime':
        inst = datetime.datetime.strptime(args['datetime'],
                                          '%Y-%m-%d %H:%M:%S')
    elif '__type__' in args and args['__type__'] == 'date':
        inst = datetime.datetime.strptime(args['date'], '%Y-%m-%d').date()
    elif '__type__' in args and args['__type__'] == 'time':
        inst = datetime.datetime.strptime(args['time'], '%H:%M:%S').time()
    elif '__type__' in args and args['__type__'] == 'numpy.ndarray':
        inst = numpy.array(args['list'])
    elif '__type__' in args and args['__type__'] in numpy_types:
        inst = eval(args['__type__'])(args['item'])  # instantiate a numpy scalar type (e.g. numpy.float64) by name
    elif '__enum__' in args:
        if '.' in args["__enum__"]:
            name, member = args["__enum__"].split(".")
        else:
            member = args["__enum__"]
            name = 'ValidState'
        enumObj = _find_serializable_class(name)
        return getattr(enumObj, member)

    # for `InfraObject` class
    elif '_obj_type_' in args and 'name' in args:
        _obj_type_ = args.pop('_obj_type_')
        getter = 'get_%s' % _obj_type_.lower()
        from pyticas.infra import Infra
        inst = getattr(Infra.get_infra(args.get('infra_cfg_date', '')),
                       getter)(args['name'])

    # for `Serializable` class
    elif '__class__' in args:
        class_name = args.pop('__class__')
        module_name = args.pop('__module__', None)
        try:
            module = import_module(module_name)
            cls = getattr(module, class_name)
        # the module may no longer be found if its name or path has changed
        except Exception as ex:
            tb.traceback(ex)
            getLogger(__name__).error('failed to unserialize %s.%s' %
                                      (module_name, class_name))
            cls = _find_serializable_class(class_name)

        if cls and hasattr(cls, 'unserialize'):
            args = dict(args)  # shallow copy of the remaining payload
            inst = cls.unserialize(args)
        else:
            inst = args

    else:
        inst = args

    if hasattr(inst, '__unserialized__'):
        inst.__unserialized__()

    return inst
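A round-trip sketch for the tagged-dict convention this decoder understands; the dict shapes are taken from the branches above, the values are illustrative:

_json_decoder({'__type__': 'datetime', 'datetime': '2020-01-06 07:00:00'})
# -> datetime.datetime(2020, 1, 6, 7, 0)
_json_decoder({'__type__': 'date', 'date': '2020-01-06'})
# -> datetime.date(2020, 1, 6)
_json_decoder({'__type__': 'numpy.ndarray', 'list': [1, 2, 3]})
# -> numpy.array([1, 2, 3])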
Example #30
def station_list_file():
    """
    :rtype: str
    """
    output_dir = Infra.get_infra().get_path(ISD_DIR, create=True)
    return os.path.join(output_dir, 'isd-history.csv')