Example no. 1
  def __init__(self, fmt, datefmt, options):
    """
    Constructor that just forwards the arguments to the two parent classes.
    """

    libJsonFormatter.__init__(self, fmt, datefmt)
    BaseFormatter.__init__(self, fmt, datefmt, options)
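Note that both base initializers are called explicitly rather than through super(); that is the usual workaround when the two base classes take incompatible constructor signatures, as JsonFormatter and BaseFormatter do here.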
Example no. 2
    def __init__(self, fmt, datefmt, options):
        """
        Constructor that just forwards the arguments to the two parent classes.
        """

        libJsonFormatter.__init__(self, fmt, datefmt)
        BaseFormatter.__init__(self, fmt, datefmt, options)
Example no. 3
def configure_json_formatter():
    """
    Configure the JSON log formatter.

    :return: a JsonFormatter covering the standard LogRecord attributes
    """
    log_keys = [
        'asctime',
        'created',
        'filename',
        'funcName',
        'levelname',
        'levelno',
        'lineno',
        'module',
        'msecs',
        'message',
        'name',
        'pathname',
        'process',
        'processName',
        'relativeCreated',
        'thread',
        'threadName',
    ]

    log_format = ' '.join(['%({0:s})'.format(i) for i in log_keys])

    return JsonFormatter(log_format)
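A minimal wiring sketch for the formatter above (the handler setup is illustrative and not part of the original source):

import logging

root = logging.getLogger()
root.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(configure_json_formatter())
root.addHandler(handler)
root.info('hello')  # emits one JSON object containing all the listed keys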
Example no. 4
def init_log():
    settings = LoggerSettings()
    logger = getLogger()
    logger.setLevel(NOTSET)

    logger_level = get_logger_level(settings.LOGGER_LEVEL)

    # Add stdout handler, with level INFO
    console = StreamHandler(stdout)
    console.setLevel(logger_level)
    formatter = Formatter(settings.LOGGER_FORMAT)
    console.setFormatter(formatter)
    logger.addHandler(console)

    # Add file rotating handler
    if settings.LOGGER_TO_FILE:
        file_log_handler = handlers.RotatingFileHandler(
            filename=settings.LOGGER_FILE,
            maxBytes=settings.LOGGER_FILE_SIZE,
            backupCount=settings.LOGGER_FILE_BACKUP_COUNT,
            encoding=settings.LOGGER_FILE_ENCODING)
        file_log_handler.setLevel(logger_level)
        if settings.LOGGER_JSON_FORMAT:
            formatter = JsonFormatter(settings.LOGGER_FORMAT)
        else:
            formatter = Formatter(settings.LOGGER_FORMAT)
        file_log_handler.setFormatter(formatter)
        logger.addHandler(file_log_handler)

    logger.info(f'Logger settings[{settings.dict()}].')
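For reference, the imports this example appears to rely on (a sketch; LoggerSettings and get_logger_level are project-specific helpers that are not shown):

from logging import Formatter, NOTSET, StreamHandler, getLogger, handlers
from sys import stdout
from pythonjsonlogger.jsonlogger import JsonFormatter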
Example no. 5
    def create_file_handler(self):
        handler = TimedRotatingFileHandler(
            filename=self.config.LOG_FILENAME,
            when=self.config.LOG_ROTATION_INTERVAL_UNIT,
            backupCount=self.config.LOG_BACKUP_COUNT,
            interval=self.config.LOG_ROTATION_INTERVAL)

        formatter = JsonFormatter(fmt=self.config.LOG_FILE_FORMAT,
                                  json_encoder=CustomJsonEncoder)

        handler.setFormatter(formatter)
        return handler
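CustomJsonEncoder is project-specific and not shown. Since python-json-logger passes json_encoder through to its JSON serializer (json.dumps by default) as the cls argument, a minimal sketch of such an encoder might look like this (hypothetical, assuming datetimes need serializing):

import json
from datetime import datetime

class CustomJsonEncoder(json.JSONEncoder):
    # Hypothetical: render datetimes as ISO 8601 strings, defer everything else.
    def default(self, obj):
        if isinstance(obj, datetime):
            return obj.isoformat()
        return super().default(obj)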
Example no. 6
def configure_logging_formatter(formatter: Formatter = Formatter.JSON) -> None:
    """
    Configure formatter for all existing loggers.

    Note: it only attaches a StreamHandler
    """
    if formatter is Formatter.JSON:
        formatter_instance = JsonFormatter(formatter.value)
    else:
        formatter_instance = logging.Formatter(formatter.value)
    handler = logging.StreamHandler()
    handler.setFormatter(formatter_instance)

    # Set a proper formatter on all existing loggers, including those created at import time
    for name in logging.getLogger().manager.loggerDict:  # type: ignore
        logger = logging.getLogger(name)
        logger.addHandler(handler)
        # Prevent duplicate messages from child loggers such as "aiohttp.access"
        logger.propagate = False
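Formatter here is a project-defined enum rather than logging.Formatter; a plausible sketch of its shape, inferred from the usage above (member names and format strings are assumptions):

import enum

class Formatter(enum.Enum):
    # Hypothetical members; each value is a logging format string.
    JSON = "%(asctime)s %(levelname)s %(name)s %(message)s"
    TEXT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"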
Example no. 7
def _get_json_handler():
    formatter = JsonFormatter(
        "(asctime) (levelname) (module) (funcName) (message)")
    log_handler = logging.StreamHandler()
    log_handler.setFormatter(formatter)
    return log_handler
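python-json-logger extracts field names from the format string with a regex, so the parenthesized names above work even without %-style conversion characters. A usage sketch (logger name and level are illustrative):

logger = logging.getLogger("myapp")  # hypothetical logger name
logger.setLevel(logging.INFO)
logger.addHandler(_get_json_handler())
logger.info("started")  # -> {"asctime": "...", "levelname": "INFO", ...}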
Example no. 8
def main():
    import argparse

    logging.basicConfig(level=logging.INFO)
    openeogeotrellis.backend.logger.setLevel(logging.DEBUG)

    handler = logging.StreamHandler(stream=sys.stdout)
    handler.formatter = JsonFormatter("%(asctime)s %(name)s %(levelname)s %(message)s", datefmt="%Y-%m-%dT%H:%M:%S%z")

    root_logger = logging.getLogger()
    root_logger.addHandler(handler)

    _log.info("argv: {a!r}".format(a=sys.argv))
    _log.info("ConfigParams(): {c}".format(c=ConfigParams()))

    # FIXME: there's no Java output because Py4J redirects the JVM's stdout/stderr to /dev/null unless JavaGateway's
    #  redirect_stdout/redirect_stderr are set (EP-4018)

    try:
        parser = argparse.ArgumentParser(usage="OpenEO AsyncTask --task <task>",
                                         formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser.add_argument("--py4j-jarpath", default="venv/share/py4j/py4j0.10.7.jar", help='Path to the Py4J jar')
        parser.add_argument("--py4j-classpath", default="geotrellis-extensions-2.2.0-SNAPSHOT.jar",
                            help='Classpath used to launch the Java Gateway')
        parser.add_argument("--principal", default="*****@*****.**", help="Principal to be used to login to KDC")
        parser.add_argument("--keytab", default="openeo-deploy/mep/openeo.keytab",
                            help="The full path to the file that contains the keytab for the principal")
        parser.add_argument("--task", required=True, dest="task_json", help="The task description in JSON")

        args = parser.parse_args()

        task = json.loads(args.task_json)
        task_id = task['task_id']
        if task_id not in [TASK_DELETE_BATCH_PROCESS_RESULTS, TASK_POLL_SENTINELHUB_BATCH_PROCESSES,
                           TASK_DELETE_BATCH_PROCESS_DEPENDENCY_SOURCES]:
            raise ValueError(f'unsupported task_id "{task_id}"')

        arguments: dict = task.get('arguments', {})

        def batch_jobs() -> GpsBatchJobs:
            java_opts = [
                "-client",
                "-Dsoftware.amazon.awssdk.http.service.impl=software.amazon.awssdk.http.urlconnection.UrlConnectionSdkHttpService"
            ]

            java_gateway = JavaGateway.launch_gateway(jarpath=args.py4j_jarpath,
                                                      classpath=args.py4j_classpath,
                                                      javaopts=java_opts,
                                                      die_on_exit=True)

            return GpsBatchJobs(get_layer_catalog(opensearch_enrich=True), java_gateway.jvm, args.principal,
                                args.keytab)

        if task_id in [TASK_DELETE_BATCH_PROCESS_RESULTS, TASK_DELETE_BATCH_PROCESS_DEPENDENCY_SOURCES]:
            batch_job_id = arguments['batch_job_id']
            dependency_sources = (arguments.get('dependency_sources') or [f"s3://{sentinel_hub.OG_BATCH_RESULTS_BUCKET}/{subfolder}"
                                                                          for subfolder in arguments['subfolders']])

            _log.info(f"removing dependency sources {dependency_sources} for batch job {batch_job_id}...",
                      extra={'job_id': batch_job_id})
            batch_jobs().delete_batch_process_dependency_sources(job_id=batch_job_id,
                                                                 dependency_sources=dependency_sources,
                                                                 propagate_errors=True)
        elif task_id == TASK_POLL_SENTINELHUB_BATCH_PROCESSES:
            batch_job_id = arguments['batch_job_id']
            user_id = arguments['user_id']

            while True:
                time.sleep(SENTINEL_HUB_BATCH_PROCESSES_POLL_INTERVAL_S)

                with JobRegistry() as registry:
                    job_info = registry.get_job(batch_job_id, user_id)

                if job_info.get('dependency_status') not in ['awaiting', "awaiting_retry"]:
                    break
                else:
                    try:
                        batch_jobs().poll_sentinelhub_batch_processes(job_info)
                    except Exception:
                        # TODO: retry in Nifi? How to mark this job as 'error' then?
                        _log.error("failed to handle polling batch processes for batch job {j}:\n{e}"
                                   .format(j=batch_job_id, e=traceback.format_exc()),
                                   extra={'job_id': batch_job_id})

                        with JobRegistry() as registry:
                            registry.set_status(batch_job_id, user_id, 'error')
                            registry.mark_done(batch_job_id, user_id)

                        raise

        else:
            raise AssertionError(f'unexpected task_id "{task_id}"')
    except Exception as e:
        _log.error(e, exc_info=True)
        raise
Example no. 9
import logging
from datetime import datetime, timedelta
from urllib.parse import urlsplit

from flask import Flask, flash, redirect, render_template, Response, request, session, url_for
from flask_wtf.csrf import CSRFProtect
from pythonjsonlogger.jsonlogger import JsonFormatter
from spotify_client import Config, SpotifyClient
from spotify_client.exceptions import SpotifyException

import config
from forms import AddSongToQueueForm
from lib.credentials_manager import CredentialsManager

file_log_handler = logging.FileHandler(filename='app.log')
json_formatter = JsonFormatter(
    fmt='%(levelname)s %(asctime)s %(pathname)s %(lineno)s %(name)s %(message)s')
file_log_handler.setFormatter(json_formatter)

spotify_logger = logging.getLogger('spotify_client')
spotify_logger.setLevel(logging.INFO)
spotify_logger.addHandler(file_log_handler)

app_logger = logging.getLogger('spotifydj')
app_logger.setLevel(logging.INFO)
app_logger.addHandler(file_log_handler)
Example no. 10
    def __init__(self, url, jwt):
        super().__init__()
        self.jwt = jwt
        self.url = f"{url}/events"
        self.setFormatter(
            JsonFormatter("%(asctime)s %(name)s %(levelname)s %(message)s"))
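This __init__ evidently belongs to a logging.Handler subclass that ships records to an HTTP endpoint: setFormatter is inherited from Handler, and the emit logic is simply not part of this excerpt.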
Example no. 11
def get_formatter() -> JsonFormatter:
    return JsonFormatter(fmt="%(asctime)s %(levelname)s %(name)s %(message)s")
Example no. 12
# log_format = dict([
#     ('asctime', 'asctime'),
#     ('name', 'name'),
#     ('levelname', 'levelname'),
#     ('message', 'message')])
#
# formatter = JsonFormatter(
#     fmt=log_format,
#     ensure_ascii=False,
#     mix_extra=True,
#     mix_extra_position='tail' # optional: head, mix
# )

log_format = '%(asctime)%(name)%(levelname):%(message)'
formatter = JsonFormatter(log_format)

# The snippet assumes a root logger and a stream handler; minimal definitions:
root = logging.getLogger()
sh = logging.StreamHandler()

sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
root.addHandler(sh)

for logg in [logging.getLogger()] + [
        logging.getLogger(name) for name in logging.root.manager.loggerDict
]:
    print(logg.name, logg.handlers)

root.info('test mix extra in fmt',
          extra={
              'extra1': 'extra content 1',
              'extra2': 'extra content 2'
          })
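python-json-logger merges any keys passed via extra into the emitted JSON object alongside the fields named in the format string, so the record above ends up with extra1 and extra2 as top-level keys. The commented-out block shows the equivalent mix_extra options, apparently from the alternative jsonformatter package.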
Example no. 13
    def _get_formatter(self):
        return JsonFormatter(self.base_fields, datefmt="%Y-%m-%d %H:%M:%S")
Example no. 14
def configure_logging(level_name, stream=None, filename=None):
    relevant_loggers = [
        logging.getLogger(),
        logging.getLogger('oldspeak'),
        logging.getLogger('sqlalchemy'),
        logging.getLogger('werkzeug'),
    ]
    available_handlers = []

    logging.Logger.manager.loggerDict.clear()

    TEST_MODE = os.getenv('TEST_MODE')
    TEST_NOLOGS = TEST_MODE in ['nologs', 'no-logs']

    if TEST_NOLOGS:
        stream = None
        filename = None
        level_name = 'WARNING'
    else:
        stream = sys.stderr
        filename = None
        level_name = 'DEBUG'

    # Resolve the level only after the TEST_MODE overrides above
    level = getattr(logging, level_name.upper(), logging.INFO)

    stream_handler = None
    file_handler = None

    if stream:
        stream_handler = logging.StreamHandler(stream=stream)
        available_handlers.append(stream_handler)

    if filename:
        file_handler = logging.FileHandler(filename=filename, encoding='utf-8')
        available_handlers.append(file_handler)

    if not stream and not filename:
        available_handlers.append(logging.NullHandler())

    def setup_logger(logger):
        logger.handlers = []
        logger.setLevel(level=level)
        for handler in available_handlers:
            logger.addHandler(handler)

    if stream_handler:
        stream_handler.setLevel(level)
        stream_handler.setFormatter(
            ColoredFormatter(
                fmt='%(asctime)s %(name)s %(levelname)s %(message)s'))

    if file_handler:
        json_formatter = JsonFormatter(
            '%(levelname)s %(asctime)s %(module)s %(process)d %(message)s %(pathname)s %(lineno)d %(funcName)s'
        )

        file_handler.setLevel(level)
        file_handler.setFormatter(json_formatter)

    for logger in relevant_loggers:
        setup_logger(logger)
Example no. 15
    def __init__(self):
        self.formatter = JsonFormatter(self.fmt, self.date_fmt)
Example no. 16
from app.middleware import RequestId
from app.views import bp as app_views_bp
from environment_settings import APP_X_FORWARDED_NUMBER
from payments.views import bp as payments_views_bp
from liqpay_int.api import bp as liqpay_resources_bp
from treasury.api.views import bp as treasury_resources_bp
from celery_ui.views import bp as celery_views_bp
from werkzeug.middleware.proxy_fix import ProxyFix
from celery_ui.events import events

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Logging

default_handler.setFormatter(
    JsonFormatter("%(levelname)s %(asctime)s %(module)s %(process)d "
                  "%(message)s %(pathname)s %(lineno)d %(funcName)s"))

logger_root = logging.getLogger()
logger_root.addHandler(default_handler)
logger_root.setLevel(logging.INFO)

# Flask

cache = Cache(config={'CACHE_TYPE': 'simple'})
app = Flask(__name__, template_folder="templates")
cache.init_app(app)

# Celery events thread
events.start()

# Swagger
Example no. 17
            if p.returncode:
                _log.warning("{c} returned exit code {r}".format(
                    c=" ".join(cmd), r=p.returncode))
        else:
            _log.warning("No Kerberos principal/keytab: will not refresh TGT")


if __name__ == '__main__':
    import argparse

    logging.basicConfig(level=logging.INFO)
    openeogeotrellis.backend.logger.setLevel(logging.DEBUG)

    handler = logging.StreamHandler(stream=sys.stdout)
    handler.formatter = JsonFormatter(
        "%(asctime)s %(name)s %(levelname)s %(message)s",
        datefmt="%Y-%m-%dT%H:%M:%S%z")

    root_logger = logging.getLogger()
    root_logger.addHandler(handler)

    _log.info("ConfigParams(): {c}".format(c=ConfigParams()))

    parser = argparse.ArgumentParser(
        usage="OpenEO JobTracker",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--principal",
                        default="*****@*****.**",
                        help="Principal to be used to login to KDC")
    parser.add_argument(
        "--keytab",
Example no. 18
from aiohttp.http_websocket import WSMessage
from pythonjsonlogger.jsonlogger import JsonFormatter

from fluxcd_teams_bot import settings
from fluxcd_teams_bot.parser import parse

# TODO move loggers to separate file
main_logger = logging.getLogger("flux2teams")
main_logger.setLevel(settings.LOG_LEVEL)
main_handler = logging.StreamHandler()
main_handler.setFormatter(
    JsonFormatter(
        fmt="%(levelname)s %(name)s %(pathname)s %(lineno)s %(message)s",
        rename_fields={
            "levelname": "level",
            "name": "logger",
            "pathname": "file",
        },
        timestamp=True,
    ))
main_logger.addHandler(main_handler)
logger = logging.getLogger("flux2teams.server")

# TODO needs work, don't like the output
http_logger = logging.getLogger("flux2teams.http")
http_logger.propagate = False
http_handler = logging.StreamHandler()
http_handler.setFormatter(
    JsonFormatter(
        fmt="%(levelname)s %(name)s",
        rename_fields={
Example no. 19
    def _get_formatter(self, json):
        if json:
            return JsonFormatter()
        else:
            return logging.Formatter(self.format_string)
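Note that JsonFormatter() with no fmt argument falls back to logging's default '%(message)s', so the JSON output carries only the message field plus any extra attributes attached to the record.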
Example no. 20
def get_json_formatter() -> JsonFormatter:
    return JsonFormatter(
        '%(asctime)s %(filename)s %(lineno)d %(levelname)s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')