Example #1
def get_json_log_handler(path, app_name, json_fields):
    handler = logging.FileHandler(path)
    formatter = LogstashFormatter()
    formatter.defaults['@tags'] = ['collector', app_name]
    formatter.defaults['@fields'] = json_fields
    handler.setFormatter(formatter)
    return handler
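A minimal usage sketch for this helper; the logger name, path, app name and
extra fields below are illustrative:

import logging

logger = logging.getLogger('collector')
handler = get_json_log_handler('/var/log/my-app.json.log', 'my-app',
                               {'environment': 'development'})
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.info('collector started')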
Example #2
def get_json_log_handler(path, app_name):
    handler = RotatingFileHandler(
        path, maxBytes=1024 * 1024 * 10, backupCount=5)
    formatter = LogstashFormatter()
    formatter.defaults['@tags'] = ['application', app_name]
    handler.setFormatter(formatter)
    return handler
Example #3
def get_json_log_handler(path, app_name, json_fields):
    handler = logging.FileHandler(path)
    formatter = LogstashFormatter()
    formatter.defaults['@tags'] = ['collector', app_name]
    formatter.defaults['@fields'] = json_fields
    handler.setFormatter(formatter)
    return handler
Example #4
def get_json_log_handler(path, app_name, json_fields):
    handler = ConcurrentRotatingFileHandler(
        path, "a", 2 * 1024 * 1024 * 1024, 1)
    formatter = LogstashFormatter()
    formatter.defaults['@tags'] = ['collector', app_name]
    formatter.defaults['@fields'] = json_fields
    handler.setFormatter(formatter)
    return handler
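ConcurrentRotatingFileHandler is not defined in this excerpt; it most likely
comes from the ConcurrentLogHandler package (module cloghandler) or its
successor concurrent-log-handler, where the positional arguments are
filename, mode, maxBytes and backupCount. A sketch of the presumed import
and what the call above configures:

# Presumed import; the package is an assumption, not shown in the excerpt
from cloghandler import ConcurrentRotatingFileHandler

# Rotate once the file reaches 2 GiB, keeping a single backup; the handler
# is safe for several processes appending to the same file.
handler = ConcurrentRotatingFileHandler('app.json.log', 'a',
                                        2 * 1024 * 1024 * 1024, 1)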
Example #5
def get_json_log_handler(path, app_name, json_fields):
    handler = ConcurrentRotatingFileHandler(path, "a", 2 * 1024 * 1024 * 1024,
                                            1)
    formatter = LogstashFormatter()
    formatter.defaults['@tags'] = ['collector', app_name]
    formatter.defaults['@fields'] = json_fields
    handler.setFormatter(formatter)
    return handler
Example #6
def create_logger(name,
                  filehandler_config=None,
                  environment='',
                  stream_config=None,
                  level=None):
    if name in _log_registry:
        return _log_registry[name]

    if level is None:
        level = LOGSTASH

    with _log_lock:
        logger = logging.getLogger(name)
        logger.environment = sanitize_name(environment)
        logger.setLevel(level)

        logstash_formatter = LogstashFormatter()

        if filehandler_config:
            fh_handler = logging.handlers.RotatingFileHandler(
                **filehandler_config)
            fh_handler.setLevel(level)
            fh_handler.setFormatter(logstash_formatter)
            logger.addHandler(fh_handler)

        if stream_config:
            stream_handler = logging.StreamHandler(
                stream=stream_config.get('stream'))
            stream_handler.setLevel(stream_config.get('level', level))
            stream_formatter = logging.Formatter(
                '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
            stream_handler.setFormatter(stream_formatter)
            logger.addHandler(stream_handler)

        _log_registry[name] = logger
        return logger
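A sketch of how this factory might be called; the dict keys mirror the
constructor arguments unpacked above, and the names and paths are
illustrative (LOGSTASH, sanitize_name and _log_registry are module-level
names not shown in this excerpt):

import logging
import sys

logger = create_logger(
    'my-service',
    filehandler_config={
        'filename': '/var/log/my-service.json.log',  # passed to RotatingFileHandler
        'maxBytes': 10 * 1024 * 1024,
        'backupCount': 5,
    },
    stream_config={'stream': sys.stderr, 'level': logging.WARNING},
    environment='staging',
    level=logging.INFO)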
Example #7
    REDIS=redis.StrictRedis(**flask_app.config['REDIS_SETTINGS']),
    MONGO=pymongo.MongoClient(**flask_app.config['MONGODB_SETTINGS']),
    CELERY_BROKER_URL='redis://%(host)s:%(port)i/%(db)i' %
    flask_app.config['REDIS_SETTINGS'])

celery = make_celery(flask_app)


def environment():
    return os.getenv("GOVUK_ENV", "development")


handler = logging.FileHandler("log/%s.json.log" % environment())
# keep Celery from hijacking the root logger
celery.conf.worker_hijack_root_logger = False

formatter = LogstashFormatter()
handler.setFormatter(formatter)
handler.setLevel(logging.INFO)
flask_app.logger.addHandler(handler)

flask_app.logger.setLevel(logging.INFO)

if flask_app.debug:
    handler = logging.FileHandler("log/development.log")
    handler.setLevel(logging.DEBUG)
    flask_app.logger.addHandler(handler)
    flask_app.logger.setLevel(logging.DEBUG)


def logstasher_request(request):
    """Returns the REQUEST line for logstasher"""
Example #8
def get_json_log_handler(path):
    handler = FileHandler(path)
    formatter = LogstashFormatter()
    formatter.defaults['@tags'] = ['application']
    handler.setFormatter(formatter)
    return handler
Example #9
import time
import json
from datetime import datetime
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
from elasticsearch_dsl import F, Q
from testing.clients import guid_lookup_table
from testing.testing import Inventory
from logstash_formatter import LogstashFormatter

# from base import Inventory
es_endpoint = '%s:9200' % Inventory().es
# es_endpoint = '54.152.5.133:9200'  # FIXME speedup hack
client = Elasticsearch(es_endpoint)

lsformatter = LogstashFormatter(defaults=dict())
es_index_name = 'logstash-%s' % datetime.utcnow().strftime('%Y.%m.%d')
es_doc_type = 'ethlog'


def pprint(x):
    print(json.dumps(x.to_dict(), indent=2))


def ip_from_guid(guid):
    entry = guid_lookup_table[guid]
    return '%s @ %s/%s' % (entry['guid_short'], entry['ip'], entry['impl'])


def time_range_filter(field="@timestamp", offset=60):
    start_time = datetime.utcfromtimestamp(time.time() - offset).strftime(
Example #10
    sys.exit(1)

es_endpoint = sys.argv[1]
extra = dict(x.split(',') for x in sys.argv[2:])

# ES
es = elasticsearch.Elasticsearch(es_endpoint)
# logstash-YYYY.MM.DD
es_index_name = 'logstash-%s' % datetime.datetime.utcnow().strftime('%Y.%m.%d')
es_doc_type = 'ethlog'


def es_log(doc):
    es.create(index=es_index_name, doc_type=es_doc_type, body=doc)
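Each document handed to es_log becomes one entry in today's logstash-* index;
a sketch with illustrative field names:

import datetime

es_log({'@timestamp': datetime.datetime.utcnow().isoformat() + 'Z',
        '@message': 'peer connected',
        '@fields': {'guid': 'abc123'}})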

lsformatter = LogstashFormatter(defaults=extra)

while True:
    l = sys.stdin.readline().strip()
    if not l:
        continue

    # workaround for multi-line jsons from cpp
    if l == '{':
        ml = l
        while True:
            l = sys.stdin.readline()

            # workaround for wrong time format in cpp, add subseconds and Z
            if '\"ts\"' in l and not 'Z\"' in l:
                l = l[:-2] + '.000000Z\"'
Example #11
def get_json_log_handler(path, app_name):
    handler = FileHandler(path)
    formatter = LogstashFormatter()
    formatter.defaults['@tags'] = ['application', app_name]
    handler.setFormatter(formatter)
    return handler
Example #12
import falcon
import os
import socket
import json
import logging
import logging.handlers
from logstash_formatter import LogstashFormatter

logger = logging.getLogger('cf-env')
logger.setLevel(logging.INFO)
ADDR = str(os.environ['SYSLOG_URL'])
PORT = int(os.environ['SYSLOG_PORT'])
handler = logging.handlers.SysLogHandler(address=(ADDR, PORT))
formatter = LogstashFormatter()

handler.setFormatter(formatter)
logger.addHandler(handler)


class EnvResources:
    def on_get(self, req, resp):
        """
        This GET shows you the env vars that will be logged to stdout
        and stderr
        """
        resp.status = falcon.HTTP_200
        msg = """Hey  I'm running at: %s:%s""" \
            % (self.get_host(), self.get_port())
        resp.body = msg
        env_vars = self.get_vcap_env()
        logger.info("Test message",
                    extra={
Example #13
import logging
#import logging.handlers
import logging.handlers as handlers
#import logstash

from logstash_formatter import LogstashFormatter
logger = logging.getLogger(__name__)

# Set logging level across the logger. Set to INFO in production
logger.setLevel(logging.DEBUG)

# create formatter
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter1 = LogstashFormatter()
# LogstashLog Handler
#logstash_handler = logstash.LogstashHandler("54.69.130.37",9299, version=1)
#logstash_handler.setFormatter(formatter1)
#Local File Handler
#create file handler which logs even debug messages
file_handler = logging.FileHandler(__name__ + '_file.log')
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)

# create console handler with debug level
# This should be changed to ERROR in production
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)

#adding Rotating file handler
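The excerpt ends at the comment above; a minimal sketch of the rotating
handler it announces, using the handlers alias imported at the top (file
name and rotation limits are illustrative):

rotating_handler = handlers.RotatingFileHandler(
    __name__ + '_rotating.log', maxBytes=5 * 1024 * 1024, backupCount=3)
rotating_handler.setLevel(logging.INFO)
rotating_handler.setFormatter(formatter1)  # JSON output via LogstashFormatter

logger.addHandler(file_handler)
logger.addHandler(console_handler)
logger.addHandler(rotating_handler)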
Example #14
def get_logger(logger_name=DEFAULT_LOGGER_NAME,
               log_level=DEFAULT_LOG_LEVEL,
               enable_machine_log_file=False,
               enable_human_log_file=False,
               enable_stream_log=True,
               ignore_log_failures=True,
               machine_log_file_path=None,
               human_log_file_path=None,
               human_log_format=DEFAULT_HUMAN_LOG_FORMAT,
               logstash_host=None,
               logstash_port=None,
               enable_logstash_log=False,
               **kwargs):
    # IMPORTANT NOTE:
    # depending on environment settings, log failure may be silently ignored.
    # typically, this is done when log failure should not fail the
    # parent process
    try:
        lgr = logging.getLogger(logger_name)
        # adding logger handlers only if no handlers exist.
        # the logger object acts as a singleton by nature,
        # but handlers may be unintentionally duplicated
        if not lgr.handlers:
            lgr.setLevel(int(log_level))

            # add stream handler if enabled
            if str(enable_stream_log).lower() == 'true':
                stream_handler = logging.StreamHandler()
                stream_handler.setFormatter(
                    logging.Formatter(human_log_format))
                lgr.addHandler(stream_handler)

            # add human readable log file handler
            if str(enable_human_log_file).lower() == 'true':
                file_handler = logging.FileHandler(human_log_file_path)
                file_handler.setFormatter(logging.Formatter(human_log_format))
                lgr.addHandler(file_handler)

            # add machine log handler
            if str(enable_machine_log_file).lower() == 'true':
                from logstash_formatter import LogstashFormatter
                file_handler = logging.FileHandler(machine_log_file_path)
                file_handler.setFormatter(LogstashFormatter())
                lgr.addHandler(file_handler)

            # add LogStash handler
            if str(enable_logstash_log).lower() == 'true':
                from logstash import LogstashHandler
                # from logstash_formatter import LogstashFormatter
                logstash_handler = LogstashHandler(logstash_host,
                                                   int(logstash_port),
                                                   version=1)
                # logstash_handler.setFormatter(LogstashFormatter())
                lgr.addHandler(logstash_handler)

        return lgr

    except Exception:
        if str(ignore_log_failures).lower() == 'true':
            pass
        else:
            raise
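Note that the enable_* switches are compared as lowercase strings, so both
booleans and 'true'/'false' strings are accepted. A hypothetical call
(names and paths are illustrative):

import logging

log = get_logger('worker',
                 log_level=logging.INFO,
                 enable_machine_log_file=True,
                 machine_log_file_path='/var/log/worker.json.log')
log.info('worker booted')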
Example #15
import logging
import os

from logstash_formatter import LogstashFormatter
from prometheus_client import REGISTRY, generate_latest, Summary

from spacy.symbols import ENT_TYPE, TAG, DEP
import spacy.about
import spacy.util

from .parse import Parse, Entities, Sentences, SentencesDependencies

logging.basicConfig(
    level=logging.DEBUG,
    format="[%(asctime)s] [%(process)d] [%(levelname)s] %(message)s")
log = logging.getLogger()
if os.getenv('REWE_BDP_STAGE') is not None:
    loghandler = logging.StreamHandler()
    loghandler.setFormatter(LogstashFormatter())
    log.handlers = []
    log.addHandler(loghandler)

MODELS = os.getenv("languages", "").split()

# Create a metric to track time spent and requests made.
REQUEST_TIME = Summary('request_processing_seconds',
                       'Time spent processing request',
                       labelnames=['method', 'endpoint'])
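The labelled Summary can then time each request, for instance as a context
manager; the method and endpoint values below are illustrative:

def timed_parse(handler, payload):
    with REQUEST_TIME.labels(method='POST', endpoint='/parse').time():
        return handler(payload)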

_models = {}


def get_model(model_name):
    if model_name not in _models: