Code example #1
def newtilt(tilt=None):
    logger.debug(f"tilt {tilt}")

    if not tilt:
        return

    return tilt + DELTA_TILT if tilt + DELTA_TILT < MAX_TILT else tilt
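A minimal usage sketch for newtilt, assuming illustrative values DELTA_TILT = 10 and MAX_TILT = 150 (hypothetical; the real constants come from the project's settings and may differ), run in the module context where the function and its logger are defined:

# DELTA_TILT and MAX_TILT are hypothetical values for illustration only
DELTA_TILT = 10
MAX_TILT = 150

print(newtilt(None))  # None: the guard clause returns early
print(newtilt(60))    # 70: 60 + DELTA_TILT is still below MAX_TILT
print(newtilt(145))   # 145: 145 + DELTA_TILT would reach MAX_TILT, so the tilt is kept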
Code example #2
File: rets_data.py Project: githubcj79/windows_ret_2
def rets_data(time_=None):
    logger.debug(f'ENV {ENV}')

    if not time_:
        logger.info(f'time_ {time_}')
        return

    when_ = time_
    period = when_.strftime("%Y-%m-%d")

    query_ = f'''
    select x.datetimeid, x.node, x.devicename, x.deviceno, x.tilt,
    x.subname, x.subunitno, y.localcellid, y.eci, y.cellname
    from (select
    ret.dateid as datetimeid,
    ret.node as node,
    ret.devicename as devicename,
    ret.deviceno as deviceno,
    sub.tilt as tilt,
    sub.subname as subname,
    sub.subunitno as subunitno
    from ret
    inner join retsubunit sub on
    date(ret.dateid) = date(sub.dateid) and
    ret.node = sub.node and
    ret.deviceno = sub.deviceno
    where date(ret.dateid) = current_date) as x
    inner join lcellreference as y
    on (x.node = y.node
    and (x.deviceno = y.localcellid or x.deviceno = y.localcellid + 10)
    and STR_TO_DATE(y.dateid, '%Y-%m-%d') = '{period}');
    '''

    return pd_sql(time_=time_, query_=query_)
Code example #3
def processor(time_=None):
    '''
    This function detects the transactions in the transactions table
    and executes them against the NBI.
    Failed transactions are not retried for now.
    '''
    logger.debug(f"time_ {time_}")

    if not time_:
        return

    engine = get_engine()
    session = get_session(engine=engine)

    # detect the transactions to process
    # trxs = session.query(Transaction).filter(Transaction.sent.is_(null()))

    # trxs = session.query(Transaction).filter(
    #     or_(Transaction.sent.is_(null()),
    #         Transaction.oldtilt != Transaction.newtilt)).first()

    trxs = session.query(Transaction).filter(
        or_(Transaction.sent.is_(null()),
            Transaction.oldtilt != Transaction.newtilt))

    if ENV == 'sim':
        for trx in trxs:
            # logger.info(f"trx \n{trx}")
            nbi_simulator(time_=time_, session_=session, trx_=trx)

    if ENV == 'prod':
        nbi_processor(time_=time_, session_=session, trxs_=trxs)

    session.commit()
    session.close()
Code example #4
def pd_sql(time_=None, query_=None):
    logger.debug(f'ENV {ENV}')

    if not time_:
        logger.info(f'time_ {time_}')
        return

    try:
        cnx = mysql.connector.connect(
            user=user_,
            password=password_,
            host=host_,
            database=database_,
            use_pure=True,
        )

        df = pd.read_sql(query_, cnx)
        logger.info(f'df.shape {df.shape}')
        cnx.close()
        return df

    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            logger.error("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            logger.error("Database does not exist")
        else:
            logger.error(err)
Code example #5
File: scheduler.py Project: githubcj79/windows_ret_2
def scheduler(time_=None):
    logger.debug(f"time_ {time_}")

    if not time_:
        return

    engine = get_engine()
    db_connection = engine.connect()

    # most recent set of 'High' overshooters on flat terrain
    query_ = '''
            select o.cellname
            from overshooters o, terrains t
            where o.cellname = t.cellname
                and o.overshooter and t.is_plain
                and o.intensity = 'High'
                and o.datetimeid = (select max(datetimeid) from overshooters)
                and t.datetimeid = (select max(datetimeid) from terrains);
            '''

    candidates_df = pd.read_sql(query_, db_connection)
    db_connection.close()

    # hand the candidates over to mid_term_evaluator()
    mid_term_evaluator(time_=time_, candidates_df=candidates_df)
Code example #6
def delta_percentaje(reference=None, value=None):
    logger.debug(f"reference {reference} value {value}")

    if not reference or not value:
        return

    delta = reference - value
    return delta * 100 / reference
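A quick worked example of the formula above; note that the falsy guard also returns None when either argument is 0:

print(delta_percentaje(reference=25.4, value=23.2))  # (25.4 - 23.2) * 100 / 25.4 ≈ 8.66
print(delta_percentaje(reference=25.4, value=0.0))   # None: 0.0 is falsy, so the guard returns early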
Code example #7
def neighborhood(time_=None):
    logger.debug(f'ENV {ENV}')

    if not time_:
        logger.info(f'time_ {time_}')
        return

    cells_df = get_cells_df(time_=time_)

    l = ['SITE', 'LAT', 'LON']
    sites_df = cells_df[l].drop_duplicates()

    sites_df['key'] = 1
    merged_df = pd.merge(sites_df, sites_df, on='key').drop(columns="key")

    merged_df = merged_df[merged_df['SITE_x'] != merged_df['SITE_y']]

    merged_df['distance_'] = haversine_distance(merged_df['LAT_x'].values,
                                                merged_df['LON_x'].values,
                                                merged_df['LAT_y'].values,
                                                merged_df['LON_y'].values)

    merged_df['bearing_'] = bearing(merged_df['LAT_x'].values,
                                    merged_df['LON_x'].values,
                                    merged_df['LAT_y'].values,
                                    merged_df['LON_y'].values)

    # logger.info(f'neighborhood: merged_df.columns {merged_df.columns}')
    l = ['SITE_x', 'SITE_y', 'distance_', 'bearing_']
    merged_df = merged_df[l]

    merged_df = merged_df[merged_df['distance_'] <= KM]

    l = ['SITE', 'CELLNAME', 'AZIMUTH']
    merged_df = pd.merge(cells_df[l],
                         merged_df,
                         how="inner",
                         left_on='SITE',
                         right_on='SITE_x')

    merged_df = merged_df[(merged_df['bearing_'] > merged_df['AZIMUTH'] - D)
                          & (merged_df['bearing_'] < merged_df['AZIMUTH'] + D)]

    l = ['CELLNAME', 'distance_']
    merged_df.sort_values(by=l, inplace=True)

    l = ['CELLNAME']
    neighborhood_df = merged_df.groupby(l).head(N_DISTANCE)

    l = ['CELLNAME', 'AZIMUTH', 'SITE_x', 'SITE_y', 'distance_', 'bearing_']

    logger.info(f'neighborhood_df[l].shape {neighborhood_df[l].shape}')
    return neighborhood_df[l], cells_df
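haversine_distance and bearing are project helpers not shown in these excerpts. As a rough orientation, a vectorized haversine usually looks like the sketch below (an assumption; the project's implementation may differ in radius or units):

import numpy as np

EARTH_RADIUS_KM = 6371.0  # mean Earth radius, so the result is in kilometres

def haversine_distance(lat1, lon1, lat2, lon2):
    # great-circle distance between arrays of points given in decimal degrees
    lat1, lon1, lat2, lon2 = map(np.radians, (lat1, lon1, lat2, lon2))
    dlat = lat2 - lat1
    dlon = lon2 - lon1
    a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2
    return 2 * EARTH_RADIUS_KM * np.arcsin(np.sqrt(a))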
Code example #8
def main():
    # time_=datetime.datetime(2021, 2, 25, 10, 30, 0, 0)
    # day_before = time_  - datetime.timedelta(days=1)

    now_ = datetime.datetime.now()
    time_ = now_
    period = time_.strftime("%Y-%m-%d")

    query_ = f'''
    select distinct * from lcellreference as l
    where STR_TO_DATE(l.dateid, '%Y-%m-%d') = '{period}';
    '''

    df = pd_sql(time_=time_, query_=query_)
    logger.debug(f'df.shape {df.shape}')
Code example #9
File: enabler.py Project: githubcj79/windows_ret_2
def enabler(cellnames=None):
    logger.debug(f'ENV {ENV}')

    engine = get_engine()
    session = get_session(engine=engine)

    for cellname in cellnames:
        # logger.debug(f'cellname {cellname}')
        antennas = session.query(Ret).filter(Ret.cellname == cellname, )
        for antenna in antennas:
            antenna.enabled = True
            # logger.info(f'node {antenna.node} deviceno {antenna.deviceno}')
            # session.commit()

    session.commit()
    session.close()
Code example #10
def average_kpis(time_=None):
    '''
    This function receives time_ so that the period can be inferred.
    For the previous period, it returns user_avg, user_thrp_dl and
    traffic_dl for every cell (cellname).

    These values are the averages of those samples over the period's
    data, per cell.

    The function returns a dataframe with that data.
    '''
    logger.debug(f"ENV {ENV} time_ {time_}")

    if not time_:
        return

    # period = time_.strftime("%Y%m%d")
    # logger.debug(f"period {period}")

    data_df = pd.DataFrame()  # empty df
    if ENV == 'sim':
        dict_ = {
            'eNodeB_Name': [
                'MBTS-AIS_3G_003',
                'MBTS-ARA_3G_013',
            ],
            # 'cellname': ['AIS_4G_003_3', 'ARA_4G_013_3',],
            'Cell_Name': [
                'AIS_4G_003_3',
                'ARA_4G_013_3',
            ],
            'user_avg': [
                81.0,
                200.0,
            ],
            'user_thrp_dl': [
                25.4,
                23.2,
            ],
            'traffic_dl': [8285.170, 7660.760],
        }
        data_df = pd.DataFrame.from_dict(dict_)

    if ENV == 'prod':
        data_df = get_ta_df(time_=time_)

    return data_df
Code example #11
def all_enabler(time_=None):
    '''
    The purpose of this function is to enable all cells,
    except those whose name matches the pattern _MM_.
    '''
    logger.debug(f'ENV {ENV} time_ {time_}')

    if not time_:
        logger.info(f'time_ {time_}')
        return

    df = cells_data(time_=time_)
    cellnames = df[~df['CELLNAME'].str.contains("_MM_")]['CELLNAME'].drop_duplicates().tolist()
    logger.debug(f'len(cellnames) {len(cellnames)}')

    enabler(cellnames=cellnames)
Code example #12
def cells_data(time_=None):
    logger.debug(f'ENV {ENV} time_ {time_}')

    if not time_:
        logger.info(f'time_ {time_}')
        return

    when_ = time_
    period = when_.strftime("%Y-%m-%d")
    logger.debug(f'period {period}')

    query_ = f'''
    select distinct * from lcellreference as l
    where STR_TO_DATE(l.dateid, '%Y-%m-%d') = '{period}';
    '''

    return pd_sql(time_=time_, query_=query_)
Code example #13
def trx_updater(commands=None, sent_=None):
    '''
    This function receives a list of dictionaries with the responses
    to the tilt-change commands executed on the NBI.
    If the result is successful, the rets and transactions tables
    are updated.
    '''
    logger.debug(f"ENV {ENV}")

    if not commands:
        return

    engine = get_engine()
    session = get_session(engine=engine)

    for command in commands:
        result = command['data']['result']
        logger.debug(f"result {result}")
        executed_time_stamp_str = command['data']['executed_time_stamp']
        executed_time_stamp = datetime.datetime.strptime(
            executed_time_stamp_str, '%Y-%m-%d %H:%M:%S')
        object_id = command['object_id']
        trx = session.query(Transaction).filter(
            Transaction.id == object_id).first()
        if not trx:
            session.commit()
            session.close()
            return

        trx.sent = sent_
        if result:
            trx.oldtilt = trx.newtilt
            trx.success = executed_time_stamp
            ret_updater(node=trx.node,
                        deviceno=trx.deviceno,
                        tilt=trx.newtilt,
                        session=session)
        else:
            logger.info(f"result {result}")
            trx.failure = executed_time_stamp

    session.commit()
    session.close()
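For reference, trx_updater reads object_id, data.result and data.executed_time_stamp from each element of commands; a hypothetical input (values invented for illustration) would look like:

commands = [
    {
        'object_id': 42,  # hypothetical Transaction.id the command was generated from
        'data': {
            'result': True,  # whether the tilt change succeeded on the NBI
            'executed_time_stamp': '2021-02-25 10:31:07',
        },
    },
]
trx_updater(commands=commands, sent_=datetime.datetime.now())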
Code example #14
def transactions(time_=None):
    '''
    This function detects the transactions in the transactions table
    and executes them against the NBI.
    Failed transactions are not retried for now.
    '''
    logger.debug(f"time_ {time_}")

    if not time_:
        return

    engine = get_engine()
    session = get_session(engine=engine)

    # detectar las transacciones a procesar
    trxs = session.query(Transaction).filter(Transaction.sent.is_(null()))
    for trx in trxs:
        # logger.info(f"trx \n{trx}")
        processor(time_=time_, session_=session, trx_=trx)

    session.commit()
    session.close()
Code example #15
def ret_updater(node=None, deviceno=None, tilt=None, session=None):
    logger.debug(f"ENV {ENV}")

    if not node or not deviceno or not tilt or not session:
        return

    ret = session.query(func.max(Ret.datetimeid)).first()
    logger.debug(f"ret {ret} type {type(ret)}")

    # unpack the single-column row to get the most recent datetimeid
    for datetimeid_ in ret:
        pass

    if not datetimeid_:
        return

    trx = session.query(Ret).filter(
        and_(Ret.node == node, Ret.deviceno == deviceno,
             Ret.datetimeid == datetimeid_)).first()

    if not trx:
        return

    trx.tilt = tilt
    session.commit()
Code example #16
def giver_of_times():
    '''
    This function is a generator of times.
    These times are associated with the cycles.
    It is the simulator's clock.
    '''
    logger.debug(f"")

    time_list = [
                    datetime.datetime(2021, 1, 10, 10, 30, 0, 0),
                    datetime.datetime(2021, 1, 11, 10, 30, 0, 0),
                    datetime.datetime(2021, 1, 12, 10, 30, 0, 0),
                    datetime.datetime(2021, 1, 13, 10, 30, 0, 0),
                ]

    seconds = 1

    if ENV == 'sim':
        for time_ in time_list:
            time.sleep(seconds)
            yield time_

    if ENV == 'prod':
        yield datetime.datetime.now()
Code example #17
def nbi_simulator(time_=None, session_=None, trx_=None):
    logger.debug(f"time_ {time_}")

    if not time_ or not session_ or not trx_:
        return

    logger.info(f"trx_ \n{trx_}")
    # logger.info(f"ENV {ENV}")

    # if ENV == 'sim':

    logger.info(f"ENV {ENV}")

    trx_.sent = datetime.now()

    nbi_response = failure_percentage()
    logger.info(f"nbi_response {nbi_response}")

    if nbi_response:
        trx_.success = datetime.now()
    else:
        trx_.failure = datetime.now()

    session_.commit()
Code example #18
def ta_data(time_=None):
    logger.debug(f'ENV {ENV}')

    if not time_:
        logger.info(f'time_ {time_}')
        return

    when_ = time_
    period = when_.strftime("%Y-%m-%d")

    # alternative without the hour filter, superseded by the query below
    # query_ = f'''
    # select distinct * from prs_lte_hour p
    # where STR_TO_DATE(p.dateid_date, '%Y-%m-%d')
    #  between '{period}' and '{period}';
    # '''

    query_ = f'''
    select distinct * from prs_lte_hour p
    where STR_TO_DATE(p.dateid_date, '%Y-%m-%d')
     between '{period}' and '{period}'
      and p.dateid_hour = '20';
    '''

    return pd_sql(time_=time_, query_=query_)
Code example #19
File: tables.py Project: githubcj79/windows_ret_2
def get_prod_engine():
    logger.debug('get_prod_engine:')
    return create_engine(PROD_DB_STR_CONNECTION, echo=ECHO)
Code example #20
def evaluator(time_=None, candidates_kpis_df=pd.DataFrame()):
    '''
    This function receives all candidate cells and their average KPIs
    for the current instant.
    Depending on whether the cell already exists in the transactions
    table, comparisons are made against the initial average KPIs.
    Based on rules, transactions may be inserted into the transactions
    table.
    '''
    logger.debug(f"time_ {time_}")

    if not time_:
        return

    if candidates_kpis_df.empty:
        return

    logger.debug(f"candidates_kpis_df \n{candidates_kpis_df}")

    engine = get_engine()
    session = get_session(engine=engine)

    for idx in candidates_kpis_df.index:  # overshooters plain terrain
        node = candidates_kpis_df['eNodeB_Name'][idx]
        user_avg = candidates_kpis_df['user_avg'][idx]
        user_thrp_dl = candidates_kpis_df['user_thrp_dl'][idx]
        traffic_dl = candidates_kpis_df['traffic_dl'][idx]

        antennas = session.query(Ret).filter(Ret.node == node, )
        for antenna in antennas:
            if not antenna.enabled:
                continue
            logger.debug(f"node {antenna.node} deviceno {antenna.deviceno}")
            trx = session.query(Transaction).filter(
                and_(Transaction.node == antenna.node,
                     Transaction.deviceno == antenna.deviceno)).first()
            if trx:
                # if the previous trx was not successful
                if not trx.success:
                    logger.debug(f"continue: success {trx.success}")
                    continue
                cond_ = delta_percentaje(
                    trx.user_thrp_dl_initial,
                    user_thrp_dl) > MAX_DELTA_USER_THRP_DL_PERCENTAJE
                cond_ = cond_ or delta_percentaje(
                    trx.traffic_dl_initial,
                    traffic_dl) > MAX_DELTA_TRAFFIC_DL_PERCENTAJE
                if cond_:
                    # rollback
                    logger.debug(f"rollback")
                    newtilt_ = trx.oldtilt
                else:
                    newtilt_ = newtilt(trx.newtilt)

                if trx.newtilt == newtilt_:
                    logger.debug(f"continue: newtilt_ {newtilt_}")
                    continue

                # if the new tilt differs from the last one
                trx.newtilt = newtilt_
                trx.generated = datetime.now()
            else:
                if not (user_avg >= MIN_USER_AVG and user_avg <= MAX_USER_AVG):
                    logger.debug(f"continue: user_avg {user_avg}")
                    continue
                if antenna.tilt == newtilt(antenna.tilt):
                    logger.debug(
                        f"continue: antenna.tilt == newtilt(antenna.tilt)")
                    continue
                # create an entry in the transactions table
                trx = Transaction(
                    node=antenna.node,
                    cellname=antenna.cellname,
                    deviceno=antenna.deviceno,
                    subunitno=antenna.subunitno,
                    tilt_initial=antenna.tilt,

                    # oldtilt = tilt_initial,
                    oldtilt=antenna.tilt,

                    # originally
                    # user_thrp_dl_initial = user_thrp_dl,
                    # traffic_dl_initial = traffic_dl,

                    # to see if it goes through
                    user_thrp_dl_initial=float(user_thrp_dl),
                    traffic_dl_initial=float(traffic_dl),
                    newtilt=newtilt(antenna.tilt),
                    datetimeid=time_,
                    generated=datetime.now(),
                )
                logger.debug(f"trx \n{trx}")
                session.add(trx)
            session.commit()

    session.commit()
    session.close()
Code example #21
File: tables.py Project: githubcj79/windows_ret_2
def create_tables():
    logger.debug(f'create_tables:')
    engine = get_engine()
    BASE.metadata.create_all(engine)
    return True
Code example #22
def nbi_processor(time_=None, session_=None, trxs_=None):
    '''
    This function receives the query (trxs_) with all the transactions
    to execute.
    It builds a message to the NBI containing all of them.
    It waits for the response message and, according to what is
    received, updates the transactions in the DB (transactions and rets).
    '''
    logger.debug(f"time_ {time_} ENV {ENV}")
    # logger.debug(f"trxs_ \n{trxs_} ENV {ENV}")

    if not time_ or not session_ or not trxs_:
        return

    # logger.debug(f"hello !!! ..")
    '''
    {
    'object_id': 14144,
    'data': {
       'command': 'MOD CELLDLSCHALGO:LOCALCELLID=0,DLEPFCAPACITYFACTOR=EPF_CAPC_FACTOR_1;',
        'network_element': 'MBTS-VAL_3G_138'
            }
    },
    '''

    # RET Command
    # MOD RETSUBUNIT:DEVICENO=0,SUBUNITNO=1,TILT=60;{MBT-RM2023}

    command_list = []

    for trx in trxs_:
        logger.debug(f"trx \n{trx}")
        object_id = 14144
        command = (
            f'MOD RETSUBUNIT:DEVICENO={trx.deviceno},'
            f'SUBUNITNO={trx.subunitno},'
            f'TILT={trx.newtilt};'
            # '{'
            # f'{trx.node}'
            # '}'
        )
        network_element = f'{trx.node}'
        dict_ = {
            # 'object_id': object_id, # original
            'object_id': trx.id,  # to test whether we can tie it back to the trx
            'data': {
                'command': command,
                'network_element': network_element
            }
        }
        command_list.append(dict_)

    logger.debug(f"command_list \n{command_list}")
    if not command_list:
        return

    random_script_id = (str(random.randint(0, 10**6)).zfill(6) + ' ' +
                        datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
    data = {
        'client_id': CLIENT_ID,
        'script_id': random_script_id,
        'command_type': ['MOD'],
        'timestamp': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        'timeout': -1,
        'priority': 0,
        'corre_id': 20,
        'command_list': command_list
    }

    event_ = Event(type_='mml_type', data=data)
    id_ = event_.id_
    logger.debug(f"id_ {id_} ENV {ENV}")

    sent_ = datetime.datetime.now()
    producer.send(MML_TOPIC, value=event_.as_dictionary())
    logger.debug(f"after producer.send(MML_TOPIC ..)")
    logger.debug(f"MML_TOPIC {MML_TOPIC}")

    for m in consumer:
        logger.debug(f"after for m in consumer:")
        if id_ == m.value['id_']:
            logger.debug("*** match")
            # logger.info(f"m.value \n{m.value}")
            pprint.pprint(m.value, indent=0, width=120)
            '''
            - for each executed command, examine the response and
                update trxs and rets where applicable
            '''
            trx_updater(commands=m.value['data']['command_list'], sent_=sent_)
            break
        else:
            print("continue", flush=True)
            continue
Code example #23
def main():
    for time_ in giver_of_times():
        logger.debug(f"time_ {time_}")
Code example #24
def overshooters(time_=None,
                 neighborhood_df=pd.DataFrame(),
                 cells_df=pd.DataFrame()):
    logger.debug(f'ENV {ENV}')

    if not time_:
        logger.info(f'time_ {time_}')
        return
    # ------------------------------------------
    # it was necessary to set fixed times to move the tests forward ..
    # now_ = datetime.datetime.now()

    if neighborhood_df.empty:
        neighborhood_df, cells_df = neighborhood(time_)

    day_before = time_ - datetime.timedelta(days=1)
    ta_df = get_ta_df(time_=day_before)

    # neighborhood_df.reset_index(inplace=True)
    # cells_df.reset_index(inplace=True)
    # ta_df.reset_index(inplace=True)
    # ------------------------------------------

    neighborhood_df, overshooters_df = overshooting(
        neighborhood_df=neighborhood_df, ta_df=ta_df)
    # neighborhood_df.to_excel(r'data/neighborhood_df.xlsx', index = False)
    # overshooters_df.to_excel(r'data/overshooters_df.xlsx', index = False)

    # neighborhood_df.reset_index(inplace=True)
    # overshooters_df.reset_index(inplace=True)

    neighborhood_df, overshooters_intensity_df = overshooting_intensity(
        neighborhood_df=neighborhood_df, ta_df=ta_df)
    # neighborhood_df.to_excel(r'data/neighborhood_df.xlsx', index = False)
    overshooters_intensity_df.to_excel(r'data/overshooters_intensity_df.xlsx',
                                       index=False)

    # neighborhood_df.reset_index(inplace=True)
    # overshooters_intensity_df.reset_index(inplace=True)

    intensity_df = overshooters_intensity_df.drop(['distance_'], axis=1)
    # intensity_df.reset_index(inplace=True)

    merged_df = pd.merge(overshooters_df,
                         intensity_df,
                         how="inner",
                         left_on='CELLNAME',
                         right_on='CELLNAME').drop_duplicates()
    # merged_df.to_excel(r'data/merged_df.xlsx', index = False)
    # merged_df.reset_index(inplace=True)

    # ------------------------------------------------
    l = ['CELLNAME', 'ta_', 'distance_', 'overshooter', 'overs_intensity']
    overshooters_df = merged_df[l].drop_duplicates()

    l = [
        'cellname', 'ta_calculated', 'average_distance', 'overshooter',
        'intensity'
    ]
    overshooters_df.columns = l

    overshooters_df['datetimeid'] = cells_df.iloc[0]['Dateid']
    # ------------------------------------------------

    # return overshooters_df, intensity_df, merged_df
    return overshooters_df
Code example #25
File: tables.py Project: githubcj79/windows_ret_2
def get_engine():
    logger.debug(f'get_engine:')
    return create_engine(LOCAL_DB_STR_CONNECTION, echo=ECHO)
Code example #26
from ret.config.settings import (
    ENV,
    CLIENT_ID,
    KAFKA_BROKER_URL,
    MML_TOPIC,
    RST_TOPIC,
)
from ret.utilities.trx_updater import trx_updater

producer = KafkaProducer(
    bootstrap_servers=[KAFKA_BROKER_URL],
    value_serializer=lambda msg: json.dumps(msg).encode(
        'utf-8'),  # we serialize our data to json for efficient transfer
)

logger.debug(f"MML_TOPIC {MML_TOPIC} RST_TOPIC {RST_TOPIC}")
consumer = KafkaConsumer(
    RST_TOPIC,
    bootstrap_servers=[KAFKA_BROKER_URL],
    auto_offset_reset='latest',  # where to start reading messages
    enable_auto_commit=True,
    #group_id='event-collector-group-2', # consumer group id
    value_deserializer=lambda m: json.loads(m.decode(
        'utf-8'))  # we deserialize our data from json
)


def nbi_processor(time_=None, session_=None, trxs_=None):
    '''
    This function receives the query (trxs_) with all the transactions
    to execute.
Code example #27
def main():
    for time_ in giver_of_times():
        dict_ = average_kpis(time_)
        logger.debug(f"dict_ \n{dict_}")
Code example #28
File: read_yaml.py Project: githubcj79/windows_ret_2
def read_yaml(file_path):
    logger.debug(f'read_yaml:')
    with open(file_path, "r") as f:
        return yaml.safe_load(f)
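A minimal usage sketch, with a hypothetical file path and keys:

# settings.yaml (hypothetical contents):
#   env: sim
#   kafka_broker_url: localhost:9092
config = read_yaml("settings.yaml")
print(config["env"])  # -> 'sim'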