Example #1
async def process_log_data(payload: LogRawBatch,
                           context: EventContext) -> Optional[LogBatch]:
    """
    Receives emitted batch of raw log lines from service handler, process and filter in order
    to emit processed batch to a stream.
    """
    config = context.settings(datatype=LogReaderConfig)
    logger.info(context,
                "Processing batch of log entries...",
                extra=extra(batch_size=len(payload.data)))
    try:
        entries: List[LogEntry] = []
        for entry in payload.data:
            processed_entry = await _process_log_entry(entry, context)
            if processed_entry is not None:
                entries.append(processed_entry)  # type: ignore
        if len(entries) > 0:
            return LogBatch(entries=entries)
        logger.info("Filtered out all entries in batch.")
        return None
    except Exception as e:  # pylint: disable=broad-except  # pragma: no cover
        logger.error(context, e)
        return None
    finally:
        await asyncio.sleep(config.batch_wait_interval_secs)
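The _process_log_entry helper invoked above is not part of this example. A minimal sketch of what it could look like, assuming raw entries are plain text lines and that LogEntry carries the fields shown here (both the line format and the fields are assumptions; the real plugin defines its own schema):

from dataclasses import dataclass
from typing import Optional

from hopeit.app.context import EventContext


@dataclass
class LogEntry:
    ts: str
    level: str
    msg: str


async def _process_log_entry(entry: str,
                             context: EventContext) -> Optional[LogEntry]:
    # Assumed raw format: "<timestamp> | <level> | <message>"
    parts = entry.split(" | ", 2)
    if len(parts) != 3:
        return None  # drop lines that don't match the expected format
    ts, level, msg = parts
    if level not in ("WARNING", "ERROR"):
        return None  # filter: only stream entries worth alerting on
    return LogEntry(ts=ts, level=level, msg=msg)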
Example #2
async def __service__(context: EventContext) -> Spawn[LogRawBatch]:
    config = context.settings(datatype=LogReaderConfig)
    event_handler = LogFileHandler(config, context)
    logger.info(context,
                "Starting LogFileHandler...",
                extra=extra(logs_path=config.logs_path,
                            checkpoint_path=config.checkpoint_path))
    observer = start_observer(event_handler, config.logs_path)
    logger.info(context, "LogFileHandler started.")

    try:
        while True:
            batch = await event_handler.get_and_reset_batch()
            if len(batch) == 0:
                logger.info(
                    context,
                    "LogFileHandler returned empty batch. Sleeping...")
                await asyncio.sleep(config.batch_wait_interval_secs)
            else:
                for i in range(0, len(batch), config.batch_size):
                    yield LogRawBatch(data=batch[i:i + config.batch_size])
                    await asyncio.sleep(config.batch_wait_interval_secs)
            event_handler.close_inactive_files()
    except KeyboardInterrupt:  # pragma: no cover
        pass
    except Exception as e:  # pylint: disable=broad-except  # pragma: no cover
        logger.error(context, e)
    finally:
        observer.stop()
        observer.join()
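start_observer is not shown in this example. One plausible implementation, assuming the watchdog library and that LogFileHandler subclasses watchdog.events.FileSystemEventHandler:

from watchdog.observers import Observer


def start_observer(event_handler, logs_path: str) -> Observer:
    # Watch the logs directory and dispatch file-system events to the handler;
    # the returned observer is stopped and joined by the service on shutdown
    observer = Observer()
    observer.schedule(event_handler, logs_path, recursive=True)
    observer.start()
    return observer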
Example #3
def authorize(context: EventContext, user_info: ContextUserInfo,
              now: datetime) -> AuthInfoExtended:
    """
    Authorize user and returns auth info containing tokens for api access and authorization renewal

    :param context: event context from app requesting authorization or login happened
    :param user_info: already validated user info to be encoded in tokens:
        Notice this method wont check if user is valid, invoking app should ensure this.
    :param now: current datetime, fixed as start of authorization process
    :return: AuthInfoExtended, containing new access and refresh tokens
    """
    cfg: AuthSettings = context.settings(key="auth", datatype=AuthSettings)
    # Renewal time in ms: token expiration minus a jittered renew window,
    # floored at half the (also jittered) expiration
    renew_in = int(1000.0 * max(
        1.0 * cfg.access_token_expiration -
        1.0 * cfg.access_token_renew_window * (1.0 + 0.5 * random.random()),
        0.5 * cfg.access_token_expiration * (0.5 * random.random() + 0.5)))
    token = _new_access_token(asdict(user_info), context, now,
                              cfg.access_token_expiration, renew_in)
    refresh_token = _new_refresh_token(asdict(user_info), context, now,
                                       cfg.refresh_token_expiration)
    result = AuthInfoExtended(
        app=context.app_key,
        access_token=token,
        refresh_token=refresh_token,
        token_type=AuthType.BEARER.name,
        access_token_expiration=cfg.access_token_expiration,
        refresh_token_expiration=cfg.refresh_token_expiration,
        renew=renew_in,
        user_info=user_info)
    return result
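To make the jittered renewal computation concrete, a quick worked example with assumed settings: with access_token_expiration=3600s and access_token_renew_window=600s, the first term falls in [2700, 3000] seconds and the fallback term in [900, 1800), so renew lands between roughly 2,700,000 and 3,000,000 ms:

import random


def renew_in_ms(expiration: float, renew_window: float) -> int:
    # Renew before expiration, jittered so clients don't all renew at once;
    # never earlier than half the (jittered) expiration
    return int(1000.0 * max(
        expiration - renew_window * (1.0 + 0.5 * random.random()),
        0.5 * expiration * (0.5 * random.random() + 0.5)))


print(renew_in_ms(3600, 600))  # e.g. 2851437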
Example #4
async def invoke_login(context: EventContext):
    auth_info = await login.login(None, context)
    cfg = context.settings(key='auth', datatype=AuthSettings)
    assert auth_info.token_type == 'BEARER'
    access_token_info = auth.decode_token(auth_info.access_token)
    assert access_token_info['app'] == 'test_app.test'
    assert access_token_info['id'] == 'id'
    assert access_token_info['email'] == 'test@email'
    assert access_token_info['user'] == 'test'
    iat = access_token_info['iat']
    assert access_token_info['exp'] == iat + cfg.access_token_expiration
    assert access_token_info['renew'] > 0
    assert access_token_info['renew'] < 1000.0 * (
        cfg.access_token_expiration - cfg.access_token_renew_window)

    refresh_token_info = auth.decode_token(auth_info.refresh_token)
    assert refresh_token_info['app'] == 'test_app.test'
    assert refresh_token_info['id'] == 'id'
    assert refresh_token_info['email'] == 'test@email'
    assert refresh_token_info['user'] == 'test'
    iat = refresh_token_info['iat']
    assert refresh_token_info['exp'] == iat + cfg.refresh_token_expiration

    assert auth_info.user_info == ContextUserInfo(id='id',
                                                  user='test',
                                                  email='test@email')
    assert auth_info.access_token_expiration == cfg.access_token_expiration
    assert auth_info.refresh_token_expiration == cfg.refresh_token_expiration
    assert auth_info.renew == access_token_info['renew']
    return auth_info
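auth.decode_token is provided by hopeit's auth module; a minimal sketch of equivalent decoding, assuming PyJWT with RS256 signing (the key argument here is illustrative, and the real helper presumably resolves keys from app configuration):

import jwt  # PyJWT


def decode_token(token: str, public_key: str) -> dict:
    # Verifies signature and expiration; raises jwt.ExpiredSignatureError
    # or jwt.InvalidTokenError on invalid tokens
    return jwt.decode(token, public_key, algorithms=["RS256"])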
Example #5
async def _save_partition(partition_key: str, items: List[DataObject],
                          context: EventContext):
    settings = context.settings(datatype=FileStorageSettings)
    path = Path(settings.path) / partition_key
    file = path / f"{uuid.uuid4()}{SUFFIX}"
    logger.info(context, f"Saving {file}...")
    os.makedirs(path.resolve(), exist_ok=True)
    async with aiofiles.open(file, 'w') as f:
        for item in items:
            await f.write(Payload.to_json(item) + "\n")
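A companion sketch for reading a saved partition back, assuming Payload.from_json is available as the counterpart of the Payload.to_json call used above:

from pathlib import Path
from typing import List, Type

import aiofiles
from hopeit.app.context import EventContext
from hopeit.dataobjects import DataObject
from hopeit.dataobjects.payload import Payload


async def _load_partition(file: Path, datatype: Type[DataObject],
                          context: EventContext) -> List[DataObject]:
    # Each line in the partition file is one JSON-serialized item
    items: List[DataObject] = []
    async with aiofiles.open(file, 'r') as f:
        async for line in f:
            items.append(Payload.from_json(line, datatype))
    return items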
Example #6
async def live_stats(collector: Collector, context: EventContext) -> Collector:
    settings = context.settings(key='apps_visualizer',
                                datatype=AppsVisualizerSettings)
    apps = await collector['runtime_apps']
    options = await collector['payload']
    host_pids = set((server.host_name, server.pid)
                    for _, runtime_info in apps.apps.items()
                    for server in runtime_info.servers
                    if options.host_filter in server.url)
    event_stats = get_stats(
        host_pids=host_pids,
        time_window_secs=settings.live_active_treshold_seconds,
        recent_secs=settings.live_recent_treshold_seconds)
    graph = await collector['cytoscape_data']
    if len(event_stats) == 0:
        return graph

    for item_id, item in graph.data.items():
        label = item['data'].get('label', '')
        source = item['data'].get('source', '')
        target = item['data'].get('target', '')
        if (label and source and source[0] == '>'
                and source[-len(label):] != label):
            source += '.' + label
        if target and target[0] == '>':
            target = ''
        keys = filter(bool, [item_id, source, target])

        classes = []
        for key in keys:
            s = event_stats.get(key)
            if s:
                if s.started:
                    classes.append('STARTED')
                if s.pending:
                    classes.append('PENDING')
                if s.recent:
                    classes.append('RECENT')
                if s.failed:
                    classes.append('FAILED')
                if s.ignored:
                    classes.append('IGNORED')
                item['classes'] = _classes(item, classes)
                target = item['data'].get('target', ' ')
                if target[0] == '>':
                    target_item = graph.data[target]
                    target_item['classes'] = _classes(target_item, classes)
                if len(source) > 5 and source.endswith(
                        (".POST", ".GET", "MULTIPART")):
                    source_item = graph.data[source]
                    source_item['classes'] = _classes(source_item, classes)
    return graph
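The _classes helper isn't shown; judging from its usage above, it merges newly computed status classes into an item's existing space-separated classes string. A plausible sketch (the merge semantics are an assumption):

from typing import List


def _classes(item: dict, classes: List[str]) -> str:
    # Merge existing classes with the new status markers, de-duplicated
    # while preserving order, for cytoscape styling
    current = item.get('classes', '').split()
    return ' '.join(dict.fromkeys(current + classes))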
Example #7
async def __service__(context: EventContext) -> Spawn[FlushSignal]:
    settings: FileStorageSettings = context.settings(
        datatype=FileStorageSettings)
    if settings.flush_seconds:
        while True:
            await asyncio.sleep(settings.flush_seconds)
            for partition_key in list(buffer.keys()):
                yield FlushSignal(partition_key=partition_key)
    else:
        if settings.flush_max_size == 0:
            logger.warning(
                context, "Flushing partitions by size and time is disabled. "
                "Specify either `flush_seconds` or `flush_max_size` "
                "to enable flushing the buffer periodically.")
        else:
            logger.info(context, "Flushing partitions by time disabled.")
Example #8
async def buffer_item(payload: DataObject,
                      context: EventContext) -> Optional[FlushSignal]:
    """
    Consumes any Dataobject type from stream and put it local memory buffer to be flushed later
    """
    settings: FileStorageSettings = context.settings(
        datatype=FileStorageSettings)
    partition_key = get_partition_key(payload, settings.partition_dateformat
                                      or "")
    async with buffer_lock:
        partition = buffer.get(partition_key, Partition())
        buffer[partition_key] = partition
    async with partition.lock:
        partition.items.append(payload)
    if settings.flush_max_size and len(
            partition.items) >= settings.flush_max_size:
        return FlushSignal(partition_key=partition_key)
    return None
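get_partition_key is not shown in these examples; a minimal sketch, assuming partitioning by UTC time formatted with partition_dateformat (e.g. '%Y/%m/%d/%H'), where an empty format collapses everything into a single partition. The real helper may derive the timestamp from the payload instead of the clock:

from datetime import datetime, timezone

from hopeit.dataobjects import DataObject


def get_partition_key(payload: DataObject, partition_dateformat: str) -> str:
    if not partition_dateformat:
        return ''
    return datetime.now(tz=timezone.utc).strftime(partition_dateformat)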
Example #9
async def get_runtime_apps(context: EventContext,
                           *, refresh: bool = False,
                           expand_events: bool = False) -> RuntimeApps:
    """
    Extracts current runtime app_config objects, cached until expiration.
    """
    global _apps, _expire
    if not refresh and _lock.locked():
        raise RuntimeError("Events graph request already in progress. Ignoring.")
    settings = context.settings(key="apps_visualizer", datatype=AppsVisualizerSettings)
    now_ts = datetime.now(tz=timezone.utc).timestamp()
    async with _lock:
        if _apps is None or refresh or now_ts > _expire:
            logger.info(context, "Contacting hosts config-manager...")
            _apps = await get_apps_config(settings.hosts, context, expand_events=expand_events)
            _expire = now_ts + settings.refresh_hosts_seconds
        else:
            logger.info(context, "Using cached runtime apps information.")
        return _apps
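Example #9 relies on module-level state declared elsewhere in the module; a minimal sketch of those globals, consistent with how they are used above:

import asyncio
from typing import Optional

_lock = asyncio.Lock()
_apps: Optional[RuntimeApps] = None
_expire: float = 0.0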