Example #1
def run():
    logging.basicConfig(format=settings.LOGGING_FORMAT,
                        datefmt="%Y-%m-%dT%H:%M:%S",
                        level=settings.LOGGING_LEVEL)

    logging.getLogger('sdc.rabbit').setLevel(logging.INFO)

    # These structlog settings allow bound fields to persist between classes
    structlog.configure(logger_factory=LoggerFactory(), context_class=wrap_dict(dict))
    logger = structlog.getLogger()

    logger.info('Starting SDX Downstream', version=__version__)

    message_processor = MessageProcessor()

    quarantine_publisher = QueuePublisher(
        urls=settings.RABBIT_URLS,
        queue=settings.RABBIT_QUARANTINE_QUEUE
    )

    message_consumer = MessageConsumer(
        durable_queue=True,
        exchange=settings.RABBIT_EXCHANGE,
        exchange_type='topic',
        rabbit_queue=settings.RABBIT_QUEUE,
        rabbit_urls=settings.RABBIT_URLS,
        quarantine_publisher=quarantine_publisher,
        process=message_processor.process
    )

    try:
        message_consumer.run()
    except KeyboardInterrupt:
        message_consumer.stop()
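A minimal hedged sketch (not from this project) of the behaviour the comment above relies on: with structlog.threadlocal.wrap_dict as the context class, fields bound through one logger are visible to loggers obtained elsewhere in the same thread. It assumes a structlog version that still ships the threadlocal helpers; the tx_id field and function names are illustrative only.

import structlog
from structlog.threadlocal import wrap_dict

structlog.configure(context_class=wrap_dict(dict))


def receive(tx_id):
    # Binding stores tx_id in the shared thread-local context.
    structlog.getLogger().bind(tx_id=tx_id)


def process():
    # A logger obtained in a different function still carries tx_id.
    structlog.getLogger().info("processing message")


receive("abc-123")
process()  # the printed event includes tx_id='abc-123'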
Example #2
    def __init__(self, id, server, address, description=None, path=None,
                 protocol_bindings=None, available=True,
                 authentication_required=False):

        self.id = id
        self.server = server

        self.address = address
        self.path = path

        self.description = description
        self.supported_protocol_bindings = (
            protocol_bindings or self.supported_protocol_bindings)

        self.available = available
        self.authentication_required = authentication_required

        self.log = structlog.getLogger(
            "{}.{}".format(self.__module__, self.__class__.__name__),
            service_id=id)

        if not self.supported_protocol_bindings:
            self.log.warning(
                "No protocol bindings specified, service will be invisible",
                service=self.id)
Example #3
# Get relative imports to work when the package is not installed on the PYTHONPATH.
if __name__ == "__main__" and __package__ is None:
    sys.path.append(os.path.dirname(os.path.dirname(
        os.path.abspath(__file__))))

import buildscripts.resmokelib.parser as _parser  # pylint: disable=wrong-import-position
import buildscripts.resmokelib.suitesconfig as suitesconfig  # pylint: disable=wrong-import-position
import buildscripts.util.read_config as read_config  # pylint: disable=wrong-import-position
import buildscripts.util.taskname as taskname  # pylint: disable=wrong-import-position
import buildscripts.util.teststats as teststats  # pylint: disable=wrong-import-position

# pylint: disable=wrong-import-position
from buildscripts.patch_builds.task_generation import TimeoutInfo, resmoke_commands
# pylint: enable=wrong-import-position

LOGGER = structlog.getLogger(__name__)

AVG_SETUP_TIME = int(timedelta(minutes=5).total_seconds())
DEFAULT_TEST_SUITE_DIR = os.path.join("buildscripts", "resmokeconfig",
                                      "suites")
CONFIG_FILE = "./.evergreen.yml"
MIN_TIMEOUT_SECONDS = int(timedelta(minutes=5).total_seconds())
MAX_EXPECTED_TIMEOUT = int(timedelta(hours=48).total_seconds())
LOOKBACK_DURATION_DAYS = 14
GEN_SUFFIX = "_gen"

HEADER_TEMPLATE = """# DO NOT EDIT THIS FILE. All manual edits will be lost.
# This file was generated by {file} from
# {suite_file}.
"""
Example #4
import structlog

from ...models import (
    Item,
    AddItemRequest,
    AddItemResponse,
    GetItemResponse,
    ListItemsResponse,
)

from fastapi import APIRouter, HTTPException
from redis.sentinel import Sentinel

router = APIRouter()
logger = structlog.getLogger("redis-items")

sentinel = Sentinel([('redis-0.redis', 26379)], socket_timeout=0.1)


@router.put(
    "/{key}",
    summary="Create redis item",
    response_model=AddItemResponse,
)
def add(key: str, req: AddItemRequest):
    logger.info("Adding redis item", key=key, value=req.value)
    master = sentinel.master_for(
        "redis")  # slaves are read-only; use master for writes
    master.set(key, req.value)
    return AddItemResponse(key=key, value=req.value)
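A hedged companion sketch (not in the original file) showing the read path: redis-py's Sentinel client exposes replicas via slave_for, so a GET can read from a replica while writes go to the master as above. It reuses the router, logger and sentinel objects defined above and assumes GetItemResponse carries key and value fields like AddItemResponse.

@router.get(
    "/{key}",
    summary="Get redis item",
    response_model=GetItemResponse,
)
def get(key: str):
    logger.info("Fetching redis item", key=key)
    replica = sentinel.slave_for("redis", socket_timeout=0.1)  # reads can go to a replica
    value = replica.get(key)
    if value is None:
        raise HTTPException(status_code=404, detail="Item not found")
    # redis returns bytes by default; decode before building the response
    return GetItemResponse(key=key, value=value.decode())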
Example #5
import atexit
import datetime
import os
import shutil
import tempfile
import uuid
from pathlib import Path
from typing import List, Iterable, Union, Tuple

import pathlib
import structlog
import logging

from datacube.utils import is_supported_document_type, read_documents, InvalidDocException, uri_to_local_path

_LOG = structlog.getLogger()

# This check is buggy when used with Tuple[] type: https://github.com/PyCQA/pylint/issues/867
# pylint: disable=invalid-sequence-index

# This may eventually go to a config file...
# ".trash" directories will be created at this level for any datasets contained within.
# TODO: Could these be inferred from the collections paths?
BASE_DIRECTORIES = [
    '/g/data/fk4/datacube',
    '/g/data/rs0/datacube',
    '/g/data/v10/reprocess',
    '/g/data/v10/archived',
    '/g/data/rs0/scenes',
    '/scratch/v10/scenes',
    '/g/data/v10/public/data',
Example #6
from datetime import datetime

import numpy as np
import requests
import structlog
from beard.metrics import b3_f_score, b3_precision_recall_fscore
from inspire_disambiguation import conf
from inspire_disambiguation.core.es.readers import (
    get_input_clusters, get_curated_signature_blocks, get_signatures)
from inspire_disambiguation.core.helpers import (process_clustering_output,
                                                 train_validation_split)
from inspire_disambiguation.core.ml.models import (Clusterer,
                                                   DistanceEstimator,
                                                   EthnicityEstimator)
from inspire_disambiguation.core.ml.sampling import sample_signature_pairs
from redis import StrictRedis

LOGGER = structlog.getLogger()


def train_and_save_ethnicity_model(load_data_path, save_model_path):
    """Train the ethnicity estimator model and save it to disk.

    Args:
        load_data_path (str): Full path to training data for ethnicity estimator.
        save_model_path (str): Full path where trained ethnicity model will be saved.
    """
    start_time = datetime.now()
    estimator = EthnicityEstimator()
    estimator.load_data(load_data_path)
    load_time = datetime.now()
    LOGGER.info("Training EthnicityEstimator. May take a while...")
    estimator.fit()
Example #7
import typing as t
import structlog as logging
from contextlib import contextmanager
from django.db import models

from apps.b3_migration.model_descriptors.utils import (
    get_model_class, )
from apps.b3_migration.sync.auto_synchronization_base \
    import AutoSynchronizationBase

logger = logging.getLogger(__name__)


def sync_source_and_target_models(
    source_instance,
    source_model_descriptor,
    target_model_descriptor,
    buddy_model_class,
    logging_prefix,
    update=False,
):
    """
    Create/update an instance of the target model based on the source model

    1. Using the target model's :dict: `fields_mapping` (
            which maps the names of fields in the source model to the name of
            fields in the new model
            e.g.
                fields_mapping = {'postcode': 'zip_code'}

                             leads to
Example #8
import click
import structlog

from click_didyoumean import DYMGroup

from presence import config
from presence.cli import cli
from presence.rpc import Broker, Client, Worker, ServiceClient
from presence.tasks.store import Store

log = structlog.getLogger()


@cli.group(cls=DYMGroup)
def run():
    pass


@run.command()
def broker():
    """Run the broker"""
    port = config['port']
    bind = 'tcp://*:{}'.format(port)

    log.info('Broker starting...', bind=bind)

    broker = Broker(bind)
    broker.start()


@run.command()
Example #9
"""Package initializer."""

from pyramid.config import Configurator
from pyramid.router import Router
from pyramid_heroku import expandvars_dict

import structlog
import typing as t

logger = structlog.getLogger("init")


def configure_logging() -> None:
    """Configure structlog logging.

    Whenever structlog is imported it only creates a proxy. Calling this
    method turns the proxy into an actual logger.
    """
    structlog.configure(
        processors=[
            structlog.stdlib.filter_by_level,
            structlog.stdlib.add_log_level,
            structlog.stdlib.add_logger_name,
            structlog.processors.format_exc_info,
            structlog.processors.KeyValueRenderer(key_order=("level", "logger",
                                                             "event"),
                                                  sort_keys=True),
        ],
        logger_factory=structlog.stdlib.LoggerFactory(),
        cache_logger_on_first_use=True,
    )
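A short hedged usage sketch: once configure_logging() has run, the module-level proxy becomes a stdlib-backed logger and the KeyValueRenderer puts level, logger and event first, then the remaining keys sorted. The basicConfig call is an assumption added here so the stdlib logger has a handler; it is not part of the original package.

import logging


def main() -> None:
    logging.basicConfig(level=logging.INFO)  # assumed stdlib handler for output
    configure_logging()
    logger.info("app_started", environment="dev")
    # renders roughly as: level='info' logger='init' event='app_started' environment='dev'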
Example #10
# -*- coding: utf-8 -*-
""" Sendgrid transport class
"""
import asyncio
import time

import structlog
import toolz
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Content
from sendgrid.helpers.mail import Email
from sendgrid.helpers.mail import Mail

from services.notification_sender.email_templates import EmailRenderer
from .base_transport import BaseTransport

logger = structlog.getLogger(__name__,
                             transport='SendGridTransport',
                             transport_type='email')


async def send_email(notification):
    await asyncio.sleep(1)


async def handle_email_transport(pool):
    conn = await pool.acquire()
    while True:
        loop_start = time.perf_counter()
        # begin transaction
        transport_type = TransportType['email']
        async with QItem(conn, transport_type=transport_type) as item:
Example #11
from typing import List

from sqlalchemy.exc import IntegrityError, DataError, ProgrammingError
from sqlalchemy.orm import sessionmaker, Session
from structlog import getLogger

from db.models.base import BaseModel

log = getLogger('DataBase')


class DBIntegrityException(Exception):
    pass


class DBDuplicateException(Exception):
    pass


class DBDataException(Exception):
    pass


class DBNoResultException(Exception):
    pass


class DBProgrammingException(Exception):
    pass

Example #12
import sys
import uuid

import structlog
import twisted

from twisted.internet import protocol, reactor

logger = structlog.getLogger()


class Counter(object):
    i = 0

    def inc(self):
        self.i += 1

    def __repr__(self):
        return str(self.i)


class Echo(protocol.Protocol):
    def connectionMade(self):
        self._counter = Counter()
        self._log = logger.new(
            connection_id=str(uuid.uuid4()),
            peer=self.transport.getPeer().host,
            count=self._counter,
        )

    def dataReceived(self, data):
Example #13
              stdlib.PositionalArgumentsFormatter(),
              processors.TimeStamper(fmt="iso"),
              processors.StackInfoRenderer(), processors.format_exc_info,
              processors.UnicodeDecoder(), stdlib.render_to_log_kwargs
          ])

LOGLEVEL = None
log = None
with open('logger/config.json') as f:
    dictConfig = json.loads(f.read())

    conf = env_config.CONFIG

    if conf["debug"] == True:
        LOGLEVEL = logging.DEBUG
    else:
        LOGLEVEL = logging.INFO

    if conf["log_file_handler"] == True:
        os.makedirs('/tmp/.event_server', exist_ok=True)
        dictConfig["handlers"]["json"]["class"] = 'logging.FileHandler'
        dictConfig["handlers"]["json"][
            "filename"] = '/tmp/.event_server/server.log'
    else:
        dictConfig["handlers"]["json"]["class"] = 'logging.StreamHandler'

    dictConfig["loggers"][""]["level"] = LOGLEVEL
    logging.config.dictConfig(dictConfig)

    log = structlog.getLogger(conf["system_log_code"])
    log.debug("logging initialized")
Example #14
    def map_fips(self, fips: str) -> None:
        """
        For a given fips code, for either a county or state, generate the CAN UI output format.

        Parameters
        ----------
        fips: str
            FIPS code to map.
        """
        log.info("Mapping output to WebUI.", state=self.state, fips=fips)
        shim_log = structlog.getLogger(fips=fips)
        pyseir_outputs = load_data.load_ensemble_results(fips)

        try:
            fit_results = load_inference_result(fips)
            t0_simulation = datetime.fromisoformat(fit_results["t0_date"])
        except (KeyError, ValueError):
            log.error("Fit result not found for fips. Skipping...", fips=fips)
            return
        population = self._get_population(fips)

        # We will shim all suppression policies by the same amount (since historical tracking error
        # for all policies is the same).
        baseline_policy = "suppression_policy__inferred"  # This could be any valid policy

        # We need the index in the model's temporal frame.
        idx_offset = int(fit_results["t_today"] - fit_results["t0"])

        # Get the latest observed values to use in calculating shims
        observed_latest_dict = combined_datasets.get_us_latest_for_fips(fips)

        observed_death_latest = observed_latest_dict[CommonFields.DEATHS]
        observed_total_hosps_latest = observed_latest_dict[
            CommonFields.CURRENT_HOSPITALIZED]
        observed_icu_latest = observed_latest_dict[CommonFields.CURRENT_ICU]

        # For Deaths
        model_death_latest = pyseir_outputs[baseline_policy]["total_deaths"][
            "ci_50"][idx_offset]
        model_acute_latest = pyseir_outputs[baseline_policy]["HGen"]["ci_50"][
            idx_offset]
        model_icu_latest = pyseir_outputs[baseline_policy]["HICU"]["ci_50"][
            idx_offset]
        model_total_hosps_latest = model_acute_latest + model_icu_latest

        death_shim = shim.calculate_strict_shim(
            model=model_death_latest,
            observed=observed_death_latest,
            log=shim_log.bind(type=CommonFields.DEATHS),
        )

        total_hosp_shim = shim.calculate_strict_shim(
            model=model_total_hosps_latest,
            observed=observed_total_hosps_latest,
            log=shim_log.bind(type=CommonFields.CURRENT_HOSPITALIZED),
        )

        # For ICU this one is a little more interesting, since we often don't have ICU
        # data. In this case we use information from the same aggregation level
        # (intralevel) to keep the ratios between general and ICU hospitalization.
        icu_shim = shim.calculate_intralevel_icu_shim(
            model_acute=model_acute_latest,
            model_icu=model_icu_latest,
            observed_icu=observed_icu_latest,
            observed_total_hosps=observed_total_hosps_latest,
            log=shim_log.bind(type=CommonFields.CURRENT_ICU),
        )

        # Iterate through each suppression policy.
        # Model output is interpolated to the dates desired for the API.
        suppression_policies = [
            key for key in pyseir_outputs.keys()
            if key.startswith("suppression_policy")
        ]
        for suppression_policy in suppression_policies:
            output_for_policy = pyseir_outputs[suppression_policy]
            output_model = pd.DataFrame()
            t_list = output_for_policy["t_list"]
            t_list_downsampled = range(0, int(max(t_list)),
                                       self.output_interval_days)

            output_model[schema.DAY_NUM] = t_list_downsampled
            output_model[schema.DATE] = [
                (t0_simulation + timedelta(days=t)).date().strftime("%Y-%m-%d")
                for t in t_list_downsampled
            ]
            output_model[schema.TOTAL] = population
            output_model[schema.TOTAL_SUSCEPTIBLE] = np.interp(
                t_list_downsampled, t_list, output_for_policy["S"]["ci_50"])
            output_model[schema.EXPOSED] = np.interp(
                t_list_downsampled, t_list, output_for_policy["E"]["ci_50"])
            output_model[schema.INFECTED] = np.interp(
                t_list_downsampled,
                t_list,
                np.add(output_for_policy["I"]["ci_50"],
                       output_for_policy["A"]["ci_50"]),
            )  # Infected + Asympt.
            output_model[schema.INFECTED_A] = output_model[schema.INFECTED]

            interpolated_model_acute_values = np.interp(
                t_list_downsampled, t_list, output_for_policy["HGen"]["ci_50"])
            output_model[schema.INFECTED_B] = interpolated_model_acute_values

            raw_model_icu_values = output_for_policy["HICU"]["ci_50"]
            interpolated_model_icu_values = np.interp(t_list_downsampled,
                                                      t_list,
                                                      raw_model_icu_values)
            output_model[schema.INFECTED_C] = (
                icu_shim + interpolated_model_icu_values).clip(min=0)

            # General + ICU beds. Don't include vents here because they are also counted in ICU.
            output_model[schema.ALL_HOSPITALIZED] = (
                interpolated_model_acute_values +
                interpolated_model_icu_values + total_hosp_shim).clip(min=0)

            output_model[schema.ALL_INFECTED] = output_model[schema.INFECTED]

            # Shim Deaths to Match Observed
            raw_model_deaths_values = output_for_policy["total_deaths"][
                "ci_50"]
            interp_model_deaths_values = np.interp(t_list_downsampled, t_list,
                                                   raw_model_deaths_values)
            output_model[schema.DEAD] = (interp_model_deaths_values +
                                         death_shim).clip(min=0)

            # Continue mapping
            final_beds = np.mean(output_for_policy["HGen"]["capacity"])
            output_model[schema.BEDS] = final_beds
            output_model[schema.CUMULATIVE_INFECTED] = np.interp(
                t_list_downsampled,
                t_list,
                np.cumsum(output_for_policy["total_new_infections"]["ci_50"]),
            )

            if fit_results:
                output_model[schema.Rt] = np.interp(
                    t_list_downsampled,
                    t_list,
                    fit_results["eps2"] * fit_results["R0"] *
                    np.ones(len(t_list)),
                )
                output_model[schema.Rt_ci90] = np.interp(
                    t_list_downsampled,
                    t_list,
                    2 * fit_results["eps2_error"] * fit_results["R0"] *
                    np.ones(len(t_list)),
                )
            else:
                output_model[schema.Rt] = 0
                output_model[schema.Rt_ci90] = 0

            output_model[schema.CURRENT_VENTILATED] = (
                icu_shim +
                np.interp(t_list_downsampled, t_list,
                          output_for_policy["HVent"]["ci_50"])).clip(min=0)
            output_model[schema.POPULATION] = population
            # Average capacity.
            output_model[schema.ICU_BED_CAPACITY] = np.mean(
                output_for_policy["HICU"]["capacity"])
            output_model[schema.VENTILATOR_CAPACITY] = np.mean(
                output_for_policy["HVent"]["capacity"])

            # Truncate date range of output.
            output_dates = pd.to_datetime(output_model["date"])
            output_model = output_model[
                (output_dates >= datetime(month=3, day=3, year=2020))
                & (output_dates < datetime.today() + timedelta(days=90))]
            output_model = output_model.fillna(0)

            # Fill in results for the Rt indicator.
            rt_results = load_Rt_result(fips)
            if rt_results is not None:
                rt_results.index = rt_results[
                    "Rt_MAP_composite"].index.strftime("%Y-%m-%d")
                merged = output_model.merge(
                    rt_results[["Rt_MAP_composite", "Rt_ci95_composite"]],
                    right_index=True,
                    left_on="date",
                    how="left",
                )
                output_model[schema.RT_INDICATOR] = merged["Rt_MAP_composite"]

                # With 90% probability the value is between rt_indicator - ci90
                # to rt_indicator + ci90
                output_model[schema.RT_INDICATOR_CI90] = (
                    merged["Rt_ci95_composite"] - merged["Rt_MAP_composite"])
            else:
                log.warning(
                    "No Rt Results found, clearing Rt in output.",
                    fips=fips,
                    suppression_policy=suppression_policy,
                )
                output_model[schema.RT_INDICATOR] = "NaN"
                output_model[schema.RT_INDICATOR_CI90] = "NaN"

            output_model[[schema.RT_INDICATOR,
                          schema.RT_INDICATOR_CI90]] = output_model[[
                              schema.RT_INDICATOR, schema.RT_INDICATOR_CI90
                          ]].fillna("NaN")

            int_columns = [
                col for col in output_model.columns if col not in (
                    schema.DATE,
                    schema.Rt,
                    schema.Rt_ci90,
                    schema.RT_INDICATOR,
                    schema.RT_INDICATOR_CI90,
                    schema.FIPS,
                )
            ]
            output_model.loc[:,
                             int_columns] = output_model[int_columns].fillna(
                                 0).astype(int)
            output_model.loc[:, [
                schema.Rt, schema.Rt_ci90, schema.RT_INDICATOR, schema.
                RT_INDICATOR_CI90
            ]] = output_model[[
                schema.Rt, schema.Rt_ci90, schema.RT_INDICATOR,
                schema.RT_INDICATOR_CI90
            ]].fillna(0)

            output_model[schema.FIPS] = fips
            intervention = Intervention.from_webui_data_adaptor(
                suppression_policy)
            output_model[schema.INTERVENTION] = intervention.value
            output_path = get_run_artifact_path(fips,
                                                RunArtifact.WEB_UI_RESULT,
                                                output_dir=self.output_dir)
            output_path = output_path.replace("__INTERVENTION_IDX__",
                                              str(intervention.value))
            output_model.to_json(output_path, orient=OUTPUT_JSON_ORIENT)
Example #15
# Installed
import datetime
import http
import flask
import json
import jwcrypto
from jwcrypto import jwk, jwt
import structlog

# Own modules
from dds_web import basic_auth, auth, mail
from dds_web.errors import AuthenticationError, AccessDeniedError, InviteError, TokenMissingError
from dds_web.database import models
import dds_web.utils

action_logger = structlog.getLogger("actions")

# VARIABLES ############################################################################ VARIABLES #

MFA_EXPIRES_IN = datetime.timedelta(hours=168)

####################################################################################################
# FUNCTIONS ############################################################################ FUNCTIONS #
####################################################################################################

# Error handler -------------------------------------------------------------------- Error handler #


@basic_auth.error_handler
def auth_error(status):
    """Handles 401 (Unauthorized) or 403 (Forbidden) for basic authentication."""
Example #16
    def format(self, record):
        for arg in ('exception', '_name'):
            val = getattr(record, arg, None)
            if isinstance(val, bytes):
                setattr(record, arg, val.decode('utf-8'))
        return super(ConsoleFormatter, self).format(record)


structlog.configure(
    processors=[
        structlog.stdlib.filter_by_level,
        structlog.stdlib.PositionalArgumentsFormatter(),
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
        structlog.processors.UnicodeEncoder(),
        simulate_stdlib_logging
    ],
    context_class=dict,
    logger_factory=structlog.stdlib.LoggerFactory(),
    wrapper_class=structlog.stdlib.BoundLogger,
    cache_logger_on_first_use=True,
)

LOGGER = structlog.getLogger('assembl')


def includeme(config):
    """add request.logger"""
    config.add_request_method(
        'assembl.lib.logging.logger_for_request', 'logger')
Example #17
import structlog

from .test_base import BaseTestCase

from tasktiger import Worker
from tasktiger.logging import tasktiger_processor

from .utils import get_tiger

tiger = get_tiger()

logger = structlog.getLogger("tasktiger")


def logging_task():
    log = logger.info("simple task")
    # Confirm tasktiger_processor injected task id
    assert log[1]["task_id"] == tiger.current_task.id


class TestLogging(BaseTestCase):
    """Test logging."""

    def test_structlog_processor(self):
        try:
            # Use ReturnLogger for testing
            structlog.configure(
                processors=[tasktiger_processor],
                context_class=dict,
                logger_factory=structlog.ReturnLoggerFactory(),
                wrapper_class=structlog.stdlib.BoundLogger,
Example #18
    def __init__(self, worker_port=None, log_directory=None):
        threading.Thread.__init__(self)

        structlog.configure(
            processors=[
                structlog.stdlib.filter_by_level,
                structlog.stdlib.add_logger_name,
                structlog.stdlib.add_log_level,
                structlog.stdlib.PositionalArgumentsFormatter(),
                structlog.processors.TimeStamper(fmt='iso'),
                structlog.processors.StackInfoRenderer(),
                structlog.processors.format_exc_info,
                structlog.processors.JSONRenderer()  # ,
                # zmq_processor
            ],
            context_class=dict,
            logger_factory=structlog.stdlib.LoggerFactory(),
            wrapper_class=structlog.stdlib.BoundLogger,
            cache_logger_on_first_use=True,
        )

        # create file handler which logs messages down to the debug level to a file for postprocessing
        if log_directory is None:
            latest_experiment = logging.FileHandler('../logs/octrl.log', mode='w')
        else:
            latest_experiment = logging.FileHandler(log_directory+'/octrl.log', mode='w')

        latest_experiment.setLevel(logging.DEBUG)
        # create console handler with a higher log level
        console = logging.StreamHandler(sys.stderr)
        console.setLevel(logging.WARNING)

        if log_directory is None:
            rotating = handlers.RotatingFileHandler('../logs/experiment_history.log', mode='w', maxBytes=128e+6,
                                                    backupCount=5, delay=True)
        else:
            rotating = handlers.RotatingFileHandler(log_directory+'/experiment_history.log', mode='w', maxBytes=128e+6,
                                                    backupCount=5, delay=True)

        self.logger = structlog.getLogger()
        self.logger.setLevel(logging.DEBUG)

        try:
            rotating.doRollover()
        except AttributeError:
            # python2.6 quirk
            pass
        rotating.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        console.setFormatter(formatter)
        self.logger.addHandler(latest_experiment)
        self.logger.addHandler(console)
        self.logger.addHandler(rotating)
        if worker_port is None:
            self.worker_port = 5555 + 128
        else:
            self.worker_port = worker_port
        self.logger.info('Logging thread started...')
        self.logger.info('Logging initialized in {0} on port {1}'.format(threading.current_thread().name, self.worker_port))
        self.context = None
        self.socket = None
Example #19
"""Public API application module."""

import structlog

from aiohttp import web
from db import db_manager
from api.middlewares import error_middleware, auth_middleware
from api.v1.handlers import (ping, auth, register, check)

logger = structlog.getLogger('api.' + __name__)


def init_server(loop=None):
    """Configure server before starting.

    :param loop: Event loop
    :return: Server app

    """
    app = web.Application(middlewares=[
        error_middleware,
        web.normalize_path_middleware(
            append_slash=True,
            merge_slashes=True,
            redirect_class=web.HTTPPermanentRedirect), auth_middleware
    ])
    add_routes(app)

    async def init(app):
        """Configure app before starting server.
Example #20
"""

from datetime import datetime
from getoffmylawn.auth.models import User
from getoffmylawn.urls.models import Url
from pyramid.paster import bootstrap
from pyramid.paster import setup_logging
from sqlalchemy.orm.session import Session

import argparse
import structlog
import sys
import transaction
import typing as t

logger = structlog.getLogger("populate")

USER_ONE_ID = "aaaaaaaa-bbbb-4ccc-aaaa-eeeeeeeeeee1"
USER_TWO_ID = "aaaaaaaa-bbbb-4ccc-aaaa-eeeeeeeeeee2"
USER_JOHNJACOB_ID = "aaaaaaaa-bbbb-4ccc-aaaa-eeeeeeeeeee3"
URL_FOO_ID = "aaaaaaaa-bbbb-4ccc-aaaa-eeeeeeeeeee1"
URL_BAR_ID = "aaaaaaaa-bbbb-4ccc-aaaa-eeeeeeeeeee2"
URL_JOHNJACOB_ID = "aaaaaaaa-bbbb-4ccc-aaaa-eeeeeeeeeee3"

# "secret", hashed
SECRET = "$argon2i$v=19$m=512,t=2,p=2$mRMCwLg3Rgih1JqTUooxxg$/bBw6iXly9rfryTkaoPX/Q"


def add_users(db: Session) -> None:
    """Add demo users to db."""
"""

from __future__ import (
    absolute_import, division, print_function, unicode_literals
)

import logging.config

import structlog

from distutils.version import StrictVersion

from config import logging_config
from config.service_name import MICROSERVICE_NAME
from local_mqtt_client.local_mqtt_client import LocalMQTTClient

version = StrictVersion("1.0.1")

# load the logging config
logging.config.dictConfig(logging_config.get_logging_conf())
mosquito_monitor_logger = structlog.getLogger(MICROSERVICE_NAME)
mosquito_monitor_logger.addHandler(logging.NullHandler())

mosquito_monitor_logger.info("Starting the Monitor")

lbc = LocalMQTTClient()

mosquito_monitor_logger.info("UpstreamMQTTClient Object Created")

lbc.run_loop(in_thread=False, forever=True)
Example #22
from typing import TypeVar
from asyncpg.pool import Pool
from asyncpg.connection import Connection
from asyncio.queues import Queue
from time import perf_counter

from sqlalchemy.dialects.postgresql import JSONB
import asyncpg
import structlog
import yo.db

from yo.db import metadata
from yo.schema import TransportType
from yo.json import loads

logger = structlog.getLogger(__name__, source='YoDB')

PoolOrConn = TypeVar('PoolOrConn', Pool, Connection)
QItemId = int
NotificationId = int


class QueueStorage:
    '''A mixin class to preserve compatibility with asyncio.Queue which
    calls len(self._queue)
    '''
    def __init__(self, loop, pool):
        self.loop = loop
        self.pool = pool

    def __len__(self) -> int:
Example #23
from __future__ import absolute_import

import structlog

from tasktiger import TaskTiger, Worker
from tasktiger.logging import tasktiger_processor

from .test_base import BaseTestCase
from .utils import get_tiger, get_redis

tiger = get_tiger()

logger = structlog.getLogger("tasktiger")


def logging_task():
    log = logger.info("simple task")
    # Confirm tasktiger_processor injected task id
    assert log[1]["task_id"] == tiger.current_task.id


class TestLogging(BaseTestCase):
    """Test logging."""
    def test_structlog_processor(self):
        try:
            # Use ReturnLogger for testing
            structlog.configure(
                processors=[tasktiger_processor],
                context_class=dict,
                logger_factory=structlog.ReturnLoggerFactory(),
                wrapper_class=structlog.stdlib.BoundLogger,
Example #24
# SPDX-FileCopyrightText: 2017 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0

import socket

import structlog

from decisionengine.framework.modules.logging_configDict import DELOGGER_CHANNEL_NAME, LOGGERNAME

logger = structlog.getLogger(LOGGERNAME)
logger = logger.bind(module=__name__.split(".")[-1],
                     channel=DELOGGER_CHANNEL_NAME)


def get_random_port():
    try:
        logger.debug("looking for random port in get_random_port")
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind(("127.0.0.1", 0))
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            return s.getsockname()[1]

    except OSError:  # pragma: no cover
        logger.exception("problem with get_random_port")
        raise
    except Exception:  # pragma: no cover
        logger.exception("Unexpected error!")
        raise
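A hedged usage sketch (not part of the module): bind a listener to the port that get_random_port() picked. Note there is a small race window between probing the port and reusing it.

import socket


def listen_on_random_port() -> socket.socket:
    port = get_random_port()
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind(("127.0.0.1", port))
    server.listen()
    logger.debug("listening on random port", port=port)
    return server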
Example #25
from dataclasses import dataclass

from structlog import getLogger

logger = getLogger(__name__)

@dataclass
class Color:
    """A color container object.

    Default color is black.
    """

    r: float = 0  #: Normalized red channel value.
    g: float = 0  #: Normalized green channel value.
    b: float = 0  #: Normalized blue channel value.
    a: float = 1  #: Normalized alpha value (influences the transparency).


Example #26
import structlog

logger = structlog.getLogger(__name__)


class Visualizer:
    """A class that creates different types of visualizations.
    """

    def draw_gantt(self, stats: SchedulerStats, filepath: str):
        """Draws a Gantt chart.

        Directories referenced by filepath are created similar to mkdir -p.

        Args:
            stats (SchedulerStats): A container object for the scheduler's \
            output statistics.
            filepath (str): The location for writing the resulting Gantt chart.
Example #27
import structlog
import requests
from functools import lru_cache
from telegram import InputMediaPhoto

from eduzen_bot.plugins.commands.series import keyboards
from eduzen_bot.plugins.commands.series.constants import EZTV_NO_RESULTS, EZTV_API_ERROR
from eduzen_bot.plugins.commands.series.api import prettify_serie, get_all_seasons, get_poster_url


logger = structlog.getLogger(filename=__name__)


def monospace(text):
    return f"```\n{text}\n```"


def prettify_episode(ep):
    """Episodes have name, season, episode, torrent, magnet, size, seeds and released attributes"""
    # Some episodes do not have a torrent download. But they do have a magnet link.
    # Since magnet links are not clickable on telegram, we leave them as a fallback.
    if ep.torrent:
        header = f"[{ep.name}]({ep.torrent})\n"
    elif ep.magnet:
        header = f"Magnet: {monospace(ep.magnet)}"
    else:
        header = "No torrent nor magnet available for this episode."

    return f"{header}" f"🌱 Seeds: {ep.seeds} | 🗳 Size: {ep.size or '-'}"

Example #28
import click
import structlog
import collections

from datacube import Datacube
from datacube.model import Dataset
from datacube.ui import click as ui
from digitalearthau import uiutil

_LOG = structlog.getLogger('archive-locationless')
_siblings_count = 0


@click.command()
@click.option('--check-locationless/--no-check-locationless',
              is_flag=True,
              default=False,
              help='Check any datasets without locations')
@click.option('--archive-locationless',
              is_flag=True,
              default=False,
              help="Archive datasets with no active locations (forces --check-locationless)")
@click.option('--check-ancestors',
              is_flag=True,
              default=False,
              help='Check if ancestor/source datasets are still active/valid')
@click.option('--check-siblings',
              is_flag=True,
              default=False,
              help='Check if ancestor datasets have other children of the same type (they may be duplicates)')
@click.option('--archive-siblings',
Example #29
from __future__ import print_function
from builtins import *                  # python3 types

__all__ = ['file_get_bytes',
           'file_put_bytes',
           'file_truncate',
           'file_update_attrs',
           'simple_get_record',]
import os
import sys

from time import sleep
from time import time
import structlog
logger = structlog.getLogger('fuse.log-mixin.' + __name__)
mylog = logger.bind(scope=__name__)
import inspect

# If we are running from the source directory, try
# to load the module from there first.
basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# zzz print('{} init: argv:{}, basedir:{}'.format(os.path.basename(basedir),
#                                            sys.argv[0],
#                                            basedir,))
if (os.path.exists(basedir)
    and os.path.exists(os.path.join(basedir, 'setup.py'))):
    add_dirs = [os.path.join(basedir, os.path.basename(basedir)),
                os.path.join(basedir, '../si446x'),
                os.path.join(basedir, '../tagnet')]
    for ndir in add_dirs:
Example #30
    def setUp(self):
        self.factory = RequestFactory()
        self.logger = structlog.getLogger(__name__)
Example #31
import asyncio
from concurrent.futures import ThreadPoolExecutor

import structlog
import uvloop

from funcy import flatten

from ...db.notifications import create_notification
from ...db import create_asyncpg_pool

from .handlers import handle_vote
from .handlers import handle_account_update
from .handlers import handle_send
from .handlers import handle_receive
from .handlers import handle_follow
from .handlers import handle_repost
from .handlers import handle_power_down
from .handlers import handle_mention
from .handlers import handle_comment

logger = structlog.getLogger(__name__, service_name='blockchain_follower')
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
logger = logger.bind()
EXECUTOR = ThreadPoolExecutor()
'''
{
            "block": 20000000,
            "op": [
                "author_reward",
                {
                    "author": "ivelina89",
                    "permlink": "friends-forever",
                    "bbd_payout": "2.865 BBD",
                    "dpay_payout": "0.000 BEX",
                    "vesting_payout": "1365.457442 VESTS"
                }
Example #32
def successful_task(foo=None):
    import structlog

    logger = structlog.getLogger(__name__)
    logger.info("This is a successful task")
Example #33
from sanic.exceptions import SanicException
from sanic.request import Request
from structlog import getLogger

from api.base import APIValidateException
from db.database import DataBase, DBSession, DBNoResultException
from transport.sanic.base import SanicEndpoint

log = getLogger('BaseSanicEndpoint')


class BaseSanicEndpoint(SanicEndpoint):

    async def _method(self, request: Request, body: dict, *args, **kwargs):
        try:
            database: DataBase = self.context.database
            session: DBSession = database.make_session()

            return await super()._method(request, body, session=session)

        except APIValidateException as e:
            return await self.make_response_json(code=e.status_code, message=str(e))

        except DBNoResultException as e:
            log.error(e)
            return await self.make_response_json(code=400)

        except SanicException as e:
            log.error(e)
            if hasattr(e, 'error_code'):
                return await self.make_response_json(code=e.status_code, error_code=e.error_code, message=str(e))
Example #34
import json
from jsonschema import SchemaError, ValidationError, validate
from mock import patch
from sdc.crypto.decrypter import decrypt
from structlog import getLogger

from tests.integration.integration_test_case import IntegrationTestCase
from app.keys import KEY_PURPOSE_SUBMISSION

logger = getLogger()

FEEDBACK_FORM_URL = '/feedback'
FEEDBACK_THANKYOU_URL = '/feedback/thank-you'


class Feedback(IntegrationTestCase):
    def setUp(self):
        self.patcher = patch('app.setup.LogSubmitter')
        mock_class = self.patcher.start()

        self.instance = mock_class.return_value
        self.instance.send_message.return_value = True

        super().setUp()
        self.launchSurvey('test', 'textfield')

    def tearDown(self):
        self.patcher.stop()

    def test_correct_feeback_link_in_page(self):
        soup = self.getHtmlSoup()
Example #35
class Help(HelpView):
    """
    Route: help
        This takes care of the help section, sending the data off
        to the relevant functions.
        This can only be reached if you are logged in.
    """
    # Specify the method(s) that are allowed to be used to reach this view
    methods = ["POST"]
    # Logger instance
    logger = logging.getLogger("netsocadmin.help")

    def dispatch_request(self) -> str:
        email = flask.request.form.get("email", "")
        subject = flask.request.form.get("subject", "")
        message = flask.request.form.get("message", "")
        # Ensure all fields are populated
        if not all([email, subject, message]):
            self.logger.info("not all fields specified")
            return self.render(help_error="Please specify all fields",
                               help_active=True)

        sent_email, sent_discord = True, True

        # Send the email
        try:
            email_resp = help_post.send_help_email(flask.session["username"],
                                                   email, subject, message)
            if not str(email_resp.status_code).startswith("20"):
                self.logger.error(
                    f"non 20x status code for help email: {email_resp.status_code} - {email_resp.body}"
                )
            sent_email = str(email_resp.status_code).startswith("20")
        except Exception as e:
            self.logger.error(f"failed to send help email: {str(e.body)}")

        # Try and send to Discord
        try:
            sent_discord = help_post.send_help_webhook(
                flask.session["username"], email, subject, message)
        except Exception as e:
            self.logger.error(f"failed to send help discord webhook: {str(e)}")

        # Check that at least one form of communication was sent
        if not sent_email and not sent_discord:
            # If not, report an error to the user
            return self.render(
                help_error=
                "There was a problem :( Please email [email protected] instead",
                help_active=True,
            )
        # Otherwise when things are okay, report back stating so
        message = ''
        if sent_email:
            message += "sent help email"
        if sent_discord and sent_email:
            message += " and "
        if sent_discord:
            message += "fired discord webhook"
        self.logger.info(message)
        return self.render(help_success=True, help_active=True)
Example #36
import anyconfig
import argparse
import structlog

from opentaxii.taxii.entities import CollectionEntity
from opentaxii.cli import app

log = structlog.getLogger(__name__)


def create_services():

    parser = argparse.ArgumentParser(
        description="Create services using OpenTAXII Persistence API",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument(
        "-c", "--services-config", dest="config",
        help="YAML file with services configuration", required=True)

    args = parser.parse_args()
    services_config = anyconfig.load(args.config, forced_type="yaml")

    with app.app_context():

        app.taxii_server.persistence.create_services_from_object(
            services_config)


def create_collections():
Example #37
            ],
            fmt=fmt, datefmt=datefmt
        )

    def format(self, record):
        return super(ConsoleFormatter, self).format(record)


structlog.configure(
    processors=[
        structlog.stdlib.filter_by_level,
        structlog.stdlib.PositionalArgumentsFormatter(),
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
        structlog.processors.UnicodeEncoder(),
        simulate_stdlib_logging
    ],
    context_class=dict,
    logger_factory=structlog.stdlib.LoggerFactory(),
    wrapper_class=structlog.stdlib.BoundLogger,
    cache_logger_on_first_use=True,
)

LOGGER = structlog.getLogger('assembl')


def includeme(config):
    """add request.logger"""
    config.add_request_method(
        'assembl.lib.logging.logger_for_request', 'logger')