Example No. 1
    def add_http_logstash_handler(self, host='http://logapi.misa.com.vn', port=80, app_id='test-log', level=logging.ERROR, database_path=None,
                                  headers=None):
        import requests
        from logstash_async.handler import AsynchronousLogstashHandler
        from logstash_async.formatter import LogstashFormatter

        class HttpTransport:

            def __init__(self, host, port, **kwargs):
                self._host = host
                self._port = port
                self._headers = kwargs.get('headers') or {'Authorization': 'Basic bWlzYTpNaXNhQDIwMTk=',
                                                          'Content-Type': 'application/json; charset=utf-8'}

            def close(self):
                pass

            def send(self, data, use_logging=None):
                # 'data' is a list of already-serialized event payloads; POST each one
                for event in data:
                    requests.post(url=self._host, data=event, headers=self._headers)

        class CustomFormatter(LogstashFormatter):
            def _move_extra_record_fields_to_prefix(self, message):
                super()._move_extra_record_fields_to_prefix(message)
                message['app_id'] = app_id

        handler = AsynchronousLogstashHandler(host, port, transport=HttpTransport(host, port, headers=headers),
                                              database_path=database_path or f'/tmp/logstash_{app_id}.db')

        handler.formatter = CustomFormatter(tags=[app_id])
        handler.setLevel(level)
        self.handlers.append(handler)
        return self
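
A hedged call-site sketch for the method above: since it appends to self.handlers and returns self, it is assumed to live on a custom logging.Logger subclass registered via logging.setLoggerClass (the logger name below is a placeholder):

log = logging.getLogger('my-service')  # instance of the custom logger class
log.add_http_logstash_handler(app_id='my-service', level=logging.WARNING)
log.error('shipped asynchronously to the HTTP endpoint')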
Example No. 2
def get_logger():
    global hd_log
    if hd_log is None:
        BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        print('base_dir', BASE_DIR)
        log_file_path = os.path.join(BASE_DIR, 'Log/python-web.log')
        err_log_file_path = os.path.join(BASE_DIR, 'Log/python-web-err.log')
        logger.add(log_file_path, rotation="50 MB",
                   encoding='utf-8')  # Automatically rotate too big file
        logger.add(err_log_file_path,
                   rotation="50 MB",
                   encoding='utf-8',
                   level='ERROR')  # Automatically rotate too big file
        # logstash
        logstash_ip = get_config('logstash', 'ip')
        if logstash_ip:
            logstash_port = get_config('logstash', 'port')
            logstash_handler = AsynchronousLogstashHandler(logstash_ip,
                                                           int(logstash_port),
                                                           database_path=None)
            logstash_formatter = LogstashFormatter(
                message_type='python-logstash',
                extra_prefix='',
                extra=dict(app_name='python-web'))
            logstash_handler.setFormatter(logstash_formatter)
            logger.add(sink=logstash_handler)
        hd_log = logger
    return hd_log
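
A short usage sketch: the function memoizes the configured loguru logger in the global hd_log, so repeated calls return the same instance:

log = get_logger()
log.info('written to Log/python-web.log and, when logstash is configured, forwarded there as well')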
Example No. 3
def a_async_handler(host='0.0.0.0', port=5000, formatter=None):
    async_handler = AsynchronousLogstashHandler(host, port, database_path=None)

    formatter = formatter if formatter else a_formatter()
    async_handler.setFormatter(formatter)

    return async_handler
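
A usage sketch for the factory above (a_formatter is assumed to come from the same module; host and port are placeholders):

import logging

logger = logging.getLogger('my-app')
logger.setLevel(logging.INFO)
logger.addHandler(a_async_handler(host='logstash.local', port=5959))
logger.info('routed through the asynchronous handler')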
Example No. 4
    def configure_logging(self):

        # Filters
        str_format_filter = dist_zero.logging.StrFormatFilter()
        context = {
            'env': settings.DIST_ZERO_ENV,
            'mode': runners.MODE_SIMULATED,
            'runner': True,
            'simulator_id': self.id,
            'start_at': self._start_datetime,
        }
        if settings.LOGZ_IO_TOKEN:
            context['token'] = settings.LOGZ_IO_TOKEN
        context_filter = dist_zero.logging.ContextFilter(context)

        # Formatters
        human_formatter = dist_zero.logging.HUMAN_FORMATTER
        json_formatter = dist_zero.logging.JsonFormatter(
            '(asctime) (levelname) (name) (message)')

        # Handlers
        stdout_handler = logging.StreamHandler(sys.stdout)
        human_file_handler = logging.FileHandler('./.tmp/simulator.log')
        json_file_handler = logging.FileHandler('./.tmp/simulator.json.log')
        logstash_handler = AsynchronousLogstashHandler(
            settings.LOGSTASH_HOST,
            settings.LOGSTASH_PORT,
            database_path='./.tmp/logstash.db',
        )

        stdout_handler.setLevel(logging.ERROR)
        human_file_handler.setLevel(logging.DEBUG)
        json_file_handler.setLevel(logging.DEBUG)
        logstash_handler.setLevel(logging.DEBUG)

        stdout_handler.setFormatter(human_formatter)
        human_file_handler.setFormatter(human_formatter)
        json_file_handler.setFormatter(json_formatter)
        logstash_handler.setFormatter(json_formatter)

        stdout_handler.addFilter(str_format_filter)
        human_file_handler.addFilter(str_format_filter)
        json_file_handler.addFilter(str_format_filter)
        json_file_handler.addFilter(context_filter)
        logstash_handler.addFilter(str_format_filter)
        logstash_handler.addFilter(context_filter)

        # Loggers
        dist_zero_logger = logging.getLogger('dist_zero')
        root_logger = logging.getLogger()

        dist_zero.logging.set_handlers(root_logger, [
            json_file_handler,
            human_file_handler,
            logstash_handler,
            stdout_handler,
        ])
Example No. 5
    def _bind_handlers(self):
        """ Binds Logstash Handlers to the Logging object """
        load_dotenv()
        handler = AsynchronousLogstashHandler(
            host=os.environ["LOGSTASH_HOST"],
            port=int(os.environ["LOGSTASH_PORT"]),
            database_path=None,
        )
        handler.setFormatter(FlaskLogstashFormatter())
        self._logger.addHandler(handler)
Example No. 6
def get_handler(extra=None, formatter=None):
    extra = extra or {}  # avoid the shared mutable-default pitfall
    extra['logstash_host'] = logstash_host
    extra['logstash_port'] = logstash_port
    if not formatter:
        formatter = get_formatter(extra)
    handler = AsynchronousLogstashHandler(logstash_host,
                                          logstash_port,
                                          database_path=database_path)
    handler.setFormatter(formatter)
    return handler
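
Sketch of a call site (logstash_host, logstash_port, database_path and get_formatter are module-level names the function relies on; the extra field is illustrative):

import logging

logger = logging.getLogger(__name__)
logger.addHandler(get_handler(extra={'service': 'billing'}))
logger.warning('carries service=billing in the extra fields')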
Example No. 7
    def add_logstash_handler(self, host, port, app_id, level=logging.ERROR, database_path=None):
        from logstash_async.handler import AsynchronousLogstashHandler
        from logstash_async.formatter import LogstashFormatter

        class CustomFormatter(LogstashFormatter):
            def _move_extra_record_fields_to_prefix(self, message):
                super()._move_extra_record_fields_to_prefix(message)
                message['app_id'] = app_id

        handler = AsynchronousLogstashHandler(host, port, database_path=database_path or f'/tmp/logstash_{app_id}.db')

        handler.formatter = CustomFormatter(tags=[app_id])
        handler.setLevel(level)
        self.handlers.append(handler)
        return self
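
As in Example No. 1, a hedged call-site sketch assuming the method is defined on a logging.Logger subclass (host, port and app_id are placeholders):

log = logging.getLogger('my-service')  # instance of the custom logger class
log.add_logstash_handler('logstash.local', 5959, app_id='my-service')
log.error('tagged with app_id by the custom formatter')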
Example No. 8
def logger_configure(
        log_level: int = logging.DEBUG) -> None:  # pragma: no cover
    formatter: Formatter = Formatter()

    logger.configure(
        **{
            'handlers': [
                {
                    'sink': sys.stdout,
                    'colorize': True,
                    'format': formatter.stdout_format
                },
            ],
            'extra': {
                'headers': '',
                'uuid': None,
            },
        })

    LoggerConfigure.add_levels()

    if setting.integrate.logstash.enable:
        logger.add(
            AsynchronousLogstashHandler(
                setting.integrate.logstash.host,
                setting.integrate.logstash.port,
                setting.integrate.logstash.database_path,
                **setting.integrate.logstash.options,
            ))
Example No. 9
def main(interface, logstash_config):
    global output
    if logstash_config is not None:
        global test_logger
        test_logger = logging.getLogger('python-logstash-logger')
        test_logger.setLevel(logging.INFO)
        test_logger.addHandler(
            AsynchronousLogstashHandler(logstash_config['host'],
                                        logstash_config['port'],
                                        database_path=None))

        test_logger.error(
            'python-logstash-async: test logstash error message.')
        logit()
    elif output is not None:
        writeit()
    else:
        printit()

    max_bytes = 1000000
    promiscuous = True
    read_timeout = 100  # in milliseconds
    pc = pcapy.open_live(interface, max_bytes, promiscuous, read_timeout)
    packet_limit = -1  # infinite
    pc.loop(packet_limit, recv_pkts)  # capture packets
Example No. 10
    def setup_logger(self):
        if os.environ.get("FRED_LOG_HOST") and os.environ.get("FRED_LOG_PORT"):
            self.logger = logging.getLogger("python-logstash-logger")
            self.logger.addHandler(
                AsynchronousLogstashHandler(
                    os.environ.get("FRED_LOG_HOST"),
                    int(os.environ.get("FRED_LOG_PORT")), ""))
            self.logger.addHandler(logging.StreamHandler())
        else:
            self.logger = logging.Logger("logger")
        self.logger.setLevel(logging.INFO)
Example No. 11
    def __init__(self):
        self.logger = logging.getLogger("logstash")
        self.logger.setLevel(logging.INFO)
        try:
            host = os.environ["LOGSTASH_HOST"]
        except KeyError:
            host = "localhost"
        try:
            port = int(os.environ["LOGSTASH_PORT"])
        except (KeyError, ValueError):
            port = 5044
        handler = AsynchronousLogstashHandler(host=host,
                                              port=port,
                                              ssl_enable=False,
                                              ssl_verify=False,
                                              database_path='')

        formatter = LogstashFormatter()
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)
        self.info("Init logger")
Example No. 12
def initiate_log():
    # Create the logger and set its logging level
    logger = logging.getLogger("logstash")
    logger.setLevel(logging.DEBUG)

    # Create the handler
    handler = AsynchronousLogstashHandler(
        host='ab5413e7-7e28-45a6-bdaa-d6c3e00cab46-ls.logit.io',
        port=27421,
        ssl_enable=True,
        ssl_verify=False,
        database_path='')

    # Here you can specify additional formatting on your log record/message
    formatter = LogstashFormatter()
    handler.setFormatter(formatter)

    # Assign handler to the logger
    logger.addHandler(handler)

    return logger
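
A typical call site for the function above (sketch):

logger = initiate_log()
logger.error('forwarded to the logit.io stack over TLS')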
Example No. 13
def get_log():
    host = 'localhost'
    port = 5000
    test_logger = logging.getLogger('breath-logger')
    # Set it to whatever level you want - default will be info
    test_logger.setLevel(logging.DEBUG)
    # Create a handler for it
    async_handler = AsynchronousLogstashHandler(host, port, database_path=None)
    # Add the handler to the logger
    test_logger.addHandler(async_handler)

    return test_logger
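
Usage sketch: with no explicit formatter set, the handler falls back to the library's default LogstashFormatter, so extra fields are serialized into the event (the field below is illustrative):

log = get_log()
log.info('breath sample processed', extra={'sample_id': 42})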
Example No. 14
def configure_logging(service_name):
    """
    Configure logging based on the settings in the settings file.
    This sets up a handler for each logging mode that is enabled.
    See `microservice.core.settings.LoggingMode` for the supported logging types.

    :param str service_name: Name of the service being served by this instance.
    """
    logger = logging.getLogger()
    logger.setLevel(settings.logging_level)

    formatter_kwargs = {
        'fmt': json.dumps({'extra': {
            'local_service': service_name,
            # Basic way to distinguish logs between instances of the same microservice.
            'instance_id': random.randint(100000, 999999)
        }})
    }

    formatter = LogstashFormatterV1(**formatter_kwargs)

    if settings.LoggingMode.FILE in settings.logging_modes:
        file_handler = logging.FileHandler('{}.log'.format(service_name))
        file_handler.setFormatter(formatter)
        file_handler.addFilter(RequestIDLogFilter())
        logger.addHandler(file_handler)

    if settings.LoggingMode.HUMAN in settings.logging_modes:
        stdout_handler = logging.StreamHandler(sys.stdout)
        stdout_handler.setFormatter(HumanReadableLogstashFormatter(**formatter_kwargs))
        stdout_handler.addFilter(RequestIDLogFilter())
        logger.addHandler(stdout_handler)

    if settings.LoggingMode.STDOUT in settings.logging_modes:
        stdout_handler = logging.StreamHandler(sys.stdout)
        stdout_handler.setFormatter(formatter)
        stdout_handler.addFilter(RequestIDLogFilter())
        logger.addHandler(stdout_handler)

    if settings.LoggingMode.LOGSTASH in settings.logging_modes:
        # TODO: test this
        raise Exception("Warning: untested")
        logstash_handler = AsynchronousLogstashHandler(
            **settings.logstash_settings)
        logstash_handler.setFormatter(formatter)
        logstash_handler.addFilter(RequestIDLogFilter())
        logger.addHandler(logstash_handler)

    if settings.LoggingMode.FLUENTD in settings.logging_modes:
        # TODO: test this
        raise Exception("Warning: untested")
        fluentd_handler = handler.FluentHandler(
            'pycroservices.follow',
            **settings.fluentd_settings,
            buffer_overflow_handler=overflow_handler)
        fluentd_handler.setFormatter(formatter)
        fluentd_handler.addFilter(RequestIDLogFilter())
        logger.addHandler(fluentd_handler)
Example No. 15
def get_logger(log_name="",
               log_path='.',
               single_log_file_size=1024 * 1024 * 600,
               log_to_file=True,
               backup_count=3,
               host=None,
               port=None,
               database_path=None):
    """:return a logger"""

    if not os.path.exists(log_path):
        try:
            os.makedirs(log_path)
        except Exception as e:
            print(str(e))

    logger = logging.getLogger("{}".format(log_name))
    logger.setLevel(logging.DEBUG)

    if log_name and log_to_file:
        # file
        log_file = "{}/{}.log".format(log_path, log_name)
        fh = logging.handlers.RotatingFileHandler(
            log_file, maxBytes=single_log_file_size, backupCount=backup_count)
        color_formatter = ColoredFormatter(
            fmt=
            '%(asctime)s %(funcName)s[line:%(lineno)d] [%(levelname)s]: %(message)s'
        )
        fh.setFormatter(color_formatter)
        fh.setLevel(logging.DEBUG)
        logger.addHandler(fh)

    # stdout
    sh = logging.StreamHandler()
    color_formatter = ColoredFormatter(
        fmt=
        '%(asctime)s %(funcName)s[line:%(lineno)d] [%(levelname)s]: %(message)s'
    )
    sh.setFormatter(color_formatter)
    sh.setLevel(logging.DEBUG)
    logger.addHandler(sh)

    if host:
        from logstash_async.handler import AsynchronousLogstashHandler
        alh = AsynchronousLogstashHandler(host,
                                          port,
                                          database_path=database_path)
        logger.addHandler(alh)

    return logger
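
A hedged example enabling the optional logstash output (host, port and paths are placeholders; when host is omitted, only the file and stdout handlers are attached):

log = get_logger(log_name='worker',
                 log_path='/var/log/worker',
                 host='logstash.internal',
                 port=5959,
                 database_path='/var/log/worker/logstash.db')
log.info('file, stdout and logstash handlers attached')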
Example No. 16
    def initLogger(appConfig: dict):
        # formatting for log stash
        logstashFormatter = LogstashFormatter(
            message_type='python-logstash',
            extra=dict(application='mis_weekly_report_gen_service'))

        # set app logger name and minimum logging level
        appLogger = logging.getLogger('python-logstash-logger')
        appLogger.setLevel(logging.INFO)

        # configure console logging
        streamHandler = logging.StreamHandler()
        # streamHandler.setFormatter(logstashFormatter)
        appLogger.addHandler(streamHandler)

        # configure logstash logging
        host = appConfig["logstashHost"]
        port = appConfig["logstashPort"]
        if not pd.isna(host) and not pd.isna(port):
            logstashHandler = AsynchronousLogstashHandler(
                host, port, database_path='logstash.db')
            logstashHandler.setFormatter(logstashFormatter)
            appLogger.addHandler(logstashHandler)
        AppLogger.__instance = appLogger
Example No. 17
def setup_logging(config):
    root_logger = logging.getLogger()
    if config.logging_level == 'DEBUG':
        root_logger.setLevel(logging.DEBUG)
    elif config.logging_level == 'INFO':
        root_logger.setLevel(logging.INFO)
    elif config.logging_level == 'WARNING':
        root_logger.setLevel(logging.WARNING)
    elif config.logging_level == 'ERROR':
        root_logger.setLevel(logging.ERROR)
    elif config.logging_level == 'CRITICAL':
        root_logger.setLevel(logging.CRITICAL)
    if config.external_logging:
        root_logger.addHandler(
            AsynchronousLogstashHandler(config.logging_host,
                                        config.logging_port,
                                        database_path=config.logging_local_db))
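
A minimal stand-in config for the function above (SimpleNamespace is used only for illustration; the project's real config object is assumed to expose the same attributes):

from types import SimpleNamespace

config = SimpleNamespace(logging_level='INFO',
                         external_logging=True,
                         logging_host='logstash.local',
                         logging_port=5959,
                         logging_local_db='logstash.db')
setup_logging(config)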
Example No. 18
if USE_LOGGER:

    import logging
    from logstash_async.handler import AsynchronousLogstashHandler

    # Setup elk stack
    # host_logger = 'localhost'
    host_logger = 'logstash'
    port_logger = 5000

    # Create a test logger
    test_logger = logging.getLogger('python-logstash-logger')
    # Set it to whatever level you want - default will be info
    test_logger.setLevel(logging.DEBUG)
    # Create a handler for it
    async_handler = AsynchronousLogstashHandler(host_logger, port_logger, database_path=None)
    # Add the handler to the logger
    test_logger.addHandler(async_handler)



###### Define possible cache statuses ######
SUCCESS = 1
CUSTOM_CACHE_FAILED = 2
REDIS_CACHE_FAILED = 3
BOTH_CACHES_FAILED = 4
##########################################


class ResponseCaching():
    delta_expire = 15 # save request in cache for delta seconds
Example No. 19
import logging
import sys
from logstash_async.handler import AsynchronousLogstashHandler

host = 'logstash'
port = 5005

test_logger = logging.getLogger('logstash')
test_logger.setLevel(logging.INFO)
test_logger.addHandler(
    AsynchronousLogstashHandler(host, port, database_path='logstash.db'))

# If you don't want to write to a SQLite database, then you do
# not have to specify a database_path.
# NOTE: Without a database, messages are lost between process restarts.
# test_logger.addHandler(AsynchronousLogstashHandler(host, port))

test_logger.error('python-logstash-async: test logstash error message.')
test_logger.info('python-logstash-async: test logstash info message.')
test_logger.warning('python-logstash-async: test logstash warning message.')

# add extra field to logstash message
extra = {
    'test_string': 'python version: ' + repr(sys.version_info),
    'test_boolean': True,
    'test_dict': {
        'a': 1,
        'b': 'c'
    },
    'test_float': 1.23,
    'test_integer': 123
}
Example No. 20
    logger.info("*************")
    try:
        compute(" AND Lot:" + str(lot), str(lot))
    except Exception:
        logger.error("Unable to compute Lot " + str(lot), exc_info=True)

    logger.info("<== "*10)

logging.basicConfig(level=logging.INFO,format='%(asctime)s %(levelname)s %(module)s - %(funcName)s: %(message)s', datefmt="%Y-%m-%d %H:%M:%S")
logger = logging.getLogger()

lshandler = None

if os.environ.get("USE_LOGSTASH") == "true":
    logger.info("Adding logstash appender")
    lshandler = AsynchronousLogstashHandler("logstash", 5001, database_path='logstash_test.db')
    lshandler.setLevel(logging.ERROR)
    logger.addHandler(lshandler)

handler = TimedRotatingFileHandler("logs/"+MODULE+".log",
                                when="d",
                                interval=1,
                                backupCount=30)

logFormatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',
                                 datefmt="%Y-%m-%d %H:%M:%S")  # datefmt avoids asctime's default ",ms" suffix duplicating %(msecs)
handler.setFormatter(logFormatter)
logger.addHandler(handler)

logger.info("==============================")
logger.info("Starting: %s" % MODULE)
logger.info("Module:   %s" %(VERSION))
Example No. 21
def prepare_logstash(args):
    """Prepare logstash handler based on the configuration stored in the Tango
    database.

    :param args: process execution arguments
    :type args: list<str>

    .. note::
        The prepare_logstash function has been included in Sardana
        on a provisional basis. Backwards incompatible changes
        (up to and including its removal) may occur if
        deemed necessary by the core developers.
    """
    log_messages = []

    try:
        from logstash_async.handler import AsynchronousLogstashHandler
    except ImportError:
        msg = ("Unable to import logstash_async. Skipping logstash "
               + "configuration...", )
        log_messages.append(msg,)
        return log_messages

    def get_logstash_conf(dev_name):
        try:
            props = db.get_device_property(dev_name, "LogstashHost")
            host = props["LogstashHost"][0]
        except IndexError:
            host = None
        try:
            props = db.get_device_property(dev_name, "LogstashPort")
            port = int(props["LogstashPort"][0])
        except IndexError:
            port = None
        try:
            props = db.get_device_property(dev_name, "LogstashCacheDbPath")
            cache_db_path = props["LogstashCacheDbPath"][0]
        except IndexError:
            cache_db_path = None
        return host, port, cache_db_path

    db = Database()

    bin_name = args[0]
    try:
        instance_name = args[1]
    except IndexError:
        msg = ("Unknown %s instance name. " % bin_name
               + "Skipping logstash configuration...")
        log_messages.append(msg, )
        return log_messages
    server_name = bin_name + "/" + instance_name
    if bin_name in ["Pool", "MacroServer"]:
        class_name = bin_name
        dev_name = get_dev_from_class_server(db, class_name, server_name)[0]
        host, port, cache = get_logstash_conf(dev_name)
    else:
        dev_name = get_dev_from_class_server(db, "Pool", server_name)[0]
        host, port, cache = get_logstash_conf(dev_name)
        if host is None:
            dev_name = get_dev_from_class_server(db, "MacroServer",
                                                 server_name)[0]
            host, port, cache = get_logstash_conf(dev_name)

    if host is not None:
        root = Logger.getRootLog()
        handler = AsynchronousLogstashHandler(host, port, database_path=cache)
        # don't use full path for program_name
        handler._create_formatter_if_necessary()
        _, handler.formatter._program_name = os.path.split(handler.formatter._program_name)
        root.addHandler(handler)
        msg = ("Log is being sent to logstash listening on %s:%d",
               host, port)
        log_messages.append(msg)

    return log_messages
Example No. 22
)


# Kafka initialize
consumer_obj = KafkaConsumer(
    globals.RECEIVE_TOPIC,
    bootstrap_servers=[globals.KAFKA_HOSTNAME + ':' + globals.KAFKA_PORT],
    auto_offset_reset="earliest",
    enable_auto_commit=True,
    group_id="my-group",
    value_deserializer=lambda x: json.loads(x.decode("utf-8")),
    security_protocol="SASL_PLAINTEXT",
    sasl_mechanism='PLAIN',
    sasl_plain_username=globals.KAFKA_USERNAME,
    sasl_plain_password=globals.KAFKA_PASSWORD
)

# Create a test logger
error_logger = logging.getLogger('python-logstash-logger')
# Set it to whatever level you want - default will be info
error_logger.setLevel(logging.DEBUG)
# Create a handler for it
async_handler = AsynchronousLogstashHandler(globals.LOGSTASH_HOSTNAME, int(globals.LOGSTASH_PORT), database_path=None)
# Add the handler to the logger
error_logger.addHandler(async_handler)


def err_logger(msg):
    msg = globals.RECEIVE_TOPIC + " " + msg
    error_logger.error(msg)
Example No. 23
def setup_bot(backend_name: str, logger, config, restore=None) -> ErrBot:
    # from here the environment is supposed to be set (daemon / non daemon,
    # config.py in the python path )

    bot_config_defaults(config)

    if hasattr(config, 'BOT_LOG_FORMATTER'):
        format_logs(formatter=config.BOT_LOG_FORMATTER)
    else:
        format_logs(theme_color=config.TEXT_COLOR_THEME)

        if hasattr(config, 'BOT_LOG_FILE') and config.BOT_LOG_FILE:
            hdlr = logging.FileHandler(config.BOT_LOG_FILE)
            hdlr.setFormatter(
                logging.Formatter(
                    "%(asctime)s %(levelname)-8s %(name)-25s %(message)s"))
            logger.addHandler(hdlr)

    if hasattr(config, 'BOT_LOG_LOGSTASH') and config.BOT_LOG_LOGSTASH:
        try:
            from logstash_async.handler import AsynchronousLogstashHandler
            from logstash_async.formatter import LogstashFormatter
        except ImportError:
            log.exception(
                "You have BOT_LOG_LOGSTASH enabled, but I couldn't import modules "
                "needed for Logstash integration. Did you install python-logstash-async? "
                "(See https://python-logstash-async.readthedocs.io/en/latest/installation.html for installation instructions)"
            )
            exit(-1)

        logger.addHandler(
            AsynchronousLogstashHandler(config.BOT_LOG_LOGSTASH_HOST,
                                        int(config.BOT_LOG_LOGSTASH_PORT),
                                        database_path=None))
        hdlr = logger.handlers[-1]
        hdlr.setFormatter(
            LogstashFormatter(
                extra=dict(application=config.BOT_LOG_LOGSTASH_APP,
                           environment=config.BOT_LOG_LOGSTASH_ENV)))

    if hasattr(config, 'BOT_LOG_SENTRY') and config.BOT_LOG_SENTRY:
        sentry_integrations = []

        try:
            import sentry_sdk
            from sentry_sdk.integrations.logging import LoggingIntegration

        except ImportError:
            log.exception(
                "You have BOT_LOG_SENTRY enabled, but I couldn't import modules "
                "needed for Sentry integration. Did you install sentry-sdk? "
                "(See https://docs.sentry.io/platforms/python for installation instructions)"
            )
            exit(-1)

        sentry_logging = LoggingIntegration(
            level=config.SENTRY_LOGLEVEL, event_level=config.SENTRY_EVENTLEVEL)

        sentry_integrations.append(sentry_logging)

        if hasattr(config,
                   'BOT_LOG_SENTRY_FLASK') and config.BOT_LOG_SENTRY_FLASK:
            try:
                from sentry_sdk.integrations.flask import FlaskIntegration
            except ImportError:
                log.exception(
                    "You have BOT_LOG_SENTRY enabled, but I couldn't import modules "
                    "needed for Sentry integration. Did you install sentry-sdk[flask]? "
                    "(See https://docs.sentry.io/platforms/python/flask for installation instructions)"
                )
                exit(-1)

            sentry_integrations.append(FlaskIntegration())

        try:
            if hasattr(config, 'SENTRY_TRANSPORT') and isinstance(
                    config.SENTRY_TRANSPORT, tuple):
                mod = importlib.import_module(config.SENTRY_TRANSPORT[1])
                transport = getattr(mod, config.SENTRY_TRANSPORT[0])

                sentry_sdk.init(dsn=config.SENTRY_DSN,
                                integrations=sentry_integrations,
                                transport=transport)
            else:
                sentry_sdk.init(dsn=config.SENTRY_DSN,
                                integrations=sentry_integrations)
        except ImportError:
            log.exception(
                f'Unable to import selected SENTRY_TRANSPORT - {config.SENTRY_TRANSPORT}'
            )
            exit(-1)

    logger.setLevel(config.BOT_LOG_LEVEL)

    storage_plugin = get_storage_plugin(config)

    # init the botplugin manager
    botplugins_dir = path.join(config.BOT_DATA_DIR, PLUGINS_SUBDIR)
    if not path.exists(botplugins_dir):
        makedirs(botplugins_dir, mode=0o755)

    plugin_indexes = getattr(config, 'BOT_PLUGIN_INDEXES',
                             (PLUGIN_DEFAULT_INDEX, ))
    if isinstance(plugin_indexes, str):
        plugin_indexes = (plugin_indexes, )

    # Extra backend is expected to be a list type, convert string to list.
    extra_backend = getattr(config, 'BOT_EXTRA_BACKEND_DIR', [])
    if isinstance(extra_backend, str):
        extra_backend = [extra_backend]

    backendpm = BackendPluginManager(config, 'errbot.backends', backend_name,
                                     ErrBot, CORE_BACKENDS, extra_backend)

    log.info(f'Found Backend plugin: {backendpm.plugin_info.name}')

    repo_manager = BotRepoManager(storage_plugin, botplugins_dir,
                                  plugin_indexes)

    try:
        bot = backendpm.load_plugin()
        botpm = BotPluginManager(
            storage_plugin,
            config.BOT_EXTRA_PLUGIN_DIR, config.AUTOINSTALL_DEPS,
            getattr(config, 'CORE_PLUGINS',
                    None), lambda name, clazz: clazz(bot, name),
            getattr(config, 'PLUGINS_CALLBACK_ORDER', (None, )))
        bot.attach_storage_plugin(storage_plugin)
        bot.attach_repo_manager(repo_manager)
        bot.attach_plugin_manager(botpm)
        bot.initialize_backend_storage()

        # restore the bot from the restore script
        if restore:
            # Prepare the context for the restore script
            if 'repos' in bot:
                log.fatal('You cannot restore onto a non empty bot.')
                sys.exit(-1)
            log.info(f'**** RESTORING the bot from {restore}')
            restore_bot_from_backup(restore, bot=bot, log=log)
            print('Restore complete. You can restart the bot normally')
            sys.exit(0)

        errors = bot.plugin_manager.update_plugin_places(
            repo_manager.get_all_repos_paths())
        if errors:
            log.error('Some plugins failed to load:\n' +
                      '\n'.join(errors.values()))
            bot._plugin_errors_during_startup = "\n".join(errors.values())
        return bot
    except Exception:
        log.exception("Unable to load or configure the backend.")
        exit(-1)
Example No. 24
LOGSTASH_DB_PATH = "python-elk-logstash.db"
LOGSTASH_TRANSPORT = "logstash_async.transport.BeatsTransport"
LOGSTASH_PORT = 5042

LOGSTASH_TRANSPORT = HttpTransport(
    LOGSTASH_HOST,
    LOGSTASH_PORT,
    ssl_verify=False,
    timeout=5.0,
    username="******",
    password="******"
)

logstash_handler = AsynchronousLogstashHandler(
    LOGSTASH_HOST,
    LOGSTASH_PORT,
    transport=LOGSTASH_TRANSPORT,
    database_path=LOGSTASH_DB_PATH
)

#logstash_formatter = FlaskLogstashFormatter(
#    message_type='python-logstash',
#    extra_prefix='dev',
#    extra=dict(application='example-app', environment='production'))
logstash_formatter = FlaskLogstashFormatter(metadata={"beat": "myapp"})
logstash_handler.setFormatter(logstash_formatter)
test_logger.addHandler(logstash_handler)

# logstash_handler.formatter = FlaskLogstashFormatter(metadata={"beat": "myapp"})
# app.logger.addHandler(logstash_handler)

@app.route('/')
Example No. 25
def create_app(settings):
    app = Eve(settings=settings, json_encoder=UUIDEncoder, validator=CustomValidator, auth=JWTAuth)

    app.name = 'Materials service'

    # We are using a document in the counters collection to generate sequential ids to be
    # used for barcodes. Here we're "seeding" the collection with the initial document
    with app.app_context():
        current_app.data.driver.db \
            .get_collection('counters') \
            .update({'_id': 'barcode'}, {'$setOnInsert': {'seq': 0}}, upsert=True)

    # Create a swagger.json
    app.register_blueprint(swagger)

    # Configure swagger ui to display docs using swagger.json @ SWAGGER_URL
    app.register_blueprint(get_swaggerui_blueprint(SWAGGER_URL, API_URL), url_prefix=SWAGGER_URL)

    login_manager = LoginManager()
    login_manager.init_app(app)

    @login_manager.user_loader
    def load_user(email):
        print(email)
        return User(email)

    # Application hooks
    def set_uuid(resource_name, items):
        for item in items:
            item['_id'] = str(uuid.uuid4())

    app.on_insert += set_uuid

    # Containers hooks
    def set_barcode_if_not_present(containers):
        for container in containers:
            if 'barcode' not in container:
                result = app.data.driver.db.counters.find_one_and_update(
                    {'_id': 'barcode'},
                    {'$inc': {'seq': 1}},
                    return_document=ReturnDocument.AFTER)

                container['barcode'] = 'AKER-%s' % result['seq']

    def insert_empty_slots(containers):
        for container in containers:
            addresser = Addresser(container['num_of_rows'],
                                  container['num_of_cols'],
                                  bool(container.get('row_is_alpha')),
                                  bool(container.get('col_is_alpha')))
            slots = container.get('slots')
            if not slots:
                container['slots'] = [{'address': address} for address in addresser]
            else:
                definedaddresses = {slot['address'] for slot in container['slots']}
                for address in addresser:
                    if address not in definedaddresses:
                        slots.append({'address': address})

    app.on_insert_containers += set_barcode_if_not_present
    app.on_insert_containers += insert_empty_slots

    # Materials hooks
    def set_owner_id(materials):
        for material in materials:
            if not material.get("owner_id"):
                material["owner_id"] = current_user.id

    app.on_insert_materials += set_owner_id

    # Very rudimentary validation method... just for development!
    @app.route('/materials/validate', methods=['POST'])
    def validate(**lookup):
        if 'materials' not in request.json:
            abort(422)

        if validate_existence(request.json['materials']):
            return "ok"
        else:
            return "not ok - some materials not found"

    def validate_existence(materials):
        validation_set = set(materials)
        result_set = set()

        for material in app.data.driver.db.materials.find(
                {'_id': {'$in': materials}}, {'_id': 1}):
            result_set.add(material['_id'])

        difference = validation_set - result_set

        return not difference

    @app.route('/materials/verify_ownership', methods=['POST'])
    def verify_ownership(**lookup):
        materials = request.json.get('materials')
        owner_id = request.json.get('owner_id')

        if materials is None or not owner_id:
            abort(422)

        if len(materials) == 0:
            # If materials is an empty list, then the check is logically
            # successful
            return Response(status=200, mimetype="application/json")

        if not validate_existence(materials):
            abort(422, description="There was at least one material that did not exist")

        find_args = {
            '$and': [
                {'_id': {'$in': materials}},
                {'owner_id': {'$ne': owner_id}}
            ]
        }

        materials_cursor = app.data.driver.db.materials.find(find_args)

        if materials_cursor.count() > 0:
            response_body = json.dumps({
                "_status": "ERR",
                "_error": "{0} material(s) do not belong to {1}".format(materials_cursor.count(),
                                                                        owner_id),
                "_issues": [material['_id'] for material in materials_cursor]
            })

            return Response(status=403, response=response_body, mimetype="application/json")

        return Response(status=200, mimetype="application/json")

    def cerberus_to_json_list(schema, quality):
        return [key for key, value in schema.iteritems() if value.get(quality)]

    def cerberus_to_json_change_type_for_datetime(schema):
        for value in schema.itervalues():
            if value['type'] == 'datetime':
                value['type'] = 'string'
                value['format'] = 'date'

    def cerberus_to_json_filter_parameters(schema, filter_list):
        for key in filter_list:
            schema.pop(key, None)

    def cerberus_to_json_change_allowed_with_one_of(schema):
        for value in schema.itervalues():
            if 'allowed' in value:
                value['enum'] = value['allowed']
                del value['allowed']

    def cerberus_to_json_only_id_is_required(schema):
        for key, value in schema.iteritems():
            if key == '_id':
                value['required'] = True
            elif value.get('required'):
                value['required'] = False

    def amend_required_order(required):
        if 'supplier_name' in required and required[0] != 'supplier_name':
            required.remove('supplier_name')
            required.insert(0, 'supplier_name')

    def form_field_order(field_name):
        """This function describes the order for the fields shown on the submission
        form, with any unspecified fields being displayed after the sorted ones."""
        return FORM_FIELD_ORDER.get(field_name, len(FORM_FIELD_ORDER))

    def cerberus_to_json_schema(schema_obj, patch=False):
        filter_list = ['meta', 'parent', 'ancestors']
        if not patch:
            filter_list.append('_id')
        schema = copy.deepcopy(schema_obj)
        cerberus_to_json_change_type_for_datetime(schema)
        cerberus_to_json_filter_parameters(schema, filter_list)
        if patch:
            cerberus_to_json_only_id_is_required(schema)
        cerberus_to_json_change_allowed_with_one_of(schema)
        required = cerberus_to_json_list(schema, 'required')
        searchable = cerberus_to_json_list(schema, 'searchable')
        amend_required_order(required)
        show_on_form = cerberus_to_json_list(schema, 'show_on_form')
        show_on_form.sort(key=form_field_order)

        return {'type': 'object',
                'properties': schema,
                'required': required,
                'searchable': searchable,
                'show_on_form': show_on_form}

    @app.route('/containers/json_schema', methods=['GET'])
    def containers_json_schema(**lookup):
        return json_schema_request('containers')

    @app.route('/materials/json_schema', methods=['GET'])
    def materials_json_schema(**lookup):
        return json_schema_request('materials')

    # Deprecated in favour of json_schema
    @app.route('/materials/schema', methods=['GET'])
    def bulk_schema(**lookup):
        return json_schema_request('materials')

    @app.route('/materials/json_patch_schema', methods=['GET'])
    def materials_json_patch_schema(**lookup):
        return json_schema_request('materials', True)

    def json_schema_request(model_name, patch=False):
        schema_obj = cerberus_to_json_schema(current_app.config['DOMAIN'][model_name]['schema'],
                                             patch)
        schema_str = json.dumps(schema_obj, default=json_util.default)
        return Response(response=schema_str, status=200, mimetype="application/json")

    def process_where(where, in_date_value=False):
        if not where:
            return where
        if isinstance(where, dict):
            for k, v in where.iteritems():
                where[k] = process_where(v, in_date_value or k == 'date_of_receipt')
        elif isinstance(where, (list, tuple)):
            return [process_where(x, in_date_value) for x in where]
        elif in_date_value and isinstance(where, basestring):
            try:
                return str_to_date(where)
            except ValueError:
                return where
        return where

    def _bulk_find(resource, args):

        find_args = {
          'filter': process_where(args.get('where')),
          'projection': args.get('projection'),
        }
        try:
            limit = max(int(args['max_results']), 0)
        except (ValueError, KeyError):
            limit = 0
        if limit:
            find_args['limit'] = limit

        try:
            page = max(int(args['page']), 1)
        except (ValueError, KeyError):
            page = 1
        if limit and page > 1:
            find_args['skip'] = limit*(page-1)

        try:
            find_args['sort'] = [(args['sort_by'], args['sort_order'])]
        except KeyError:
            find_args['sort'] = None

        cursor = app.data.driver.db[resource].find(**find_args)
        total = cursor.count()
        pages = ((total + limit-1) // limit) if limit else 1
        items = list(cursor)

        meta = {'max_results': limit, 'total': total, 'page': page}

        links = {}
        if page > 1:
            links['prev'] = {'page': (page-1)}
        if page < pages:
            links['next'] = {'page': (page+1)}
            links['last'] = {'page': pages}

        for item in items:
            for k, v in item.iteritems():
                if isinstance(v, datetime):
                    # date_to_str converts a datetime value to the format defined in the
                    #   configuration file
                    item[k] = date_to_str(v)
                if isinstance(v, unicode):
                    item[k] = str(v)

        msg = {'_items': items, '_meta': meta, '_links': links}

        msg_json = json.dumps(msg, default=json_util.default)

        return Response(response=msg_json,
                        status=200,
                        mimetype="application/json")

    @app.route('/materials/search', methods=['POST'])
    def bulk_find_materials(**lookup):
        return _bulk_find('materials', request.json)

    @app.route('/containers/search', methods=['POST'])
    def bulk_find_containers(**lookup):
        return _bulk_find('containers', request.json)

    # Logging
    app.logger.setLevel(app.config.get('LOGGING_LEVEL', logging.WARNING))

    # enable logging to 'app.log' file
    log_handlers = [
        logging.FileHandler('app.log'),
        logging.StreamHandler(sys.stdout),
    ]

    # set a custom log format, and add request
    # metadata to each log line
    for handler in log_handlers:
        handler.setFormatter(logging.Formatter(
                '%(asctime)s %(levelname)s: %(message)s '
                '[in %(filename)s:%(lineno)d] -- ip: %(clientip)s, '
                'url: %(url)s, method: %(method)s'))

    for handler in log_handlers:
        app.logger.addHandler(handler)

    def log_request_start(resource, request, lookup=None):
        message = "%s resource=%r, request=%r" % (request.method, resource, request)
        app.logger.info(message)
        app.logger.info("Request data:\n"+request.data)

    def log_request_end(resource, request, response):
        message = "%s resource=%r, request=%r, response=%r" % (request.method,
                                                               resource,
                                                               request,
                                                               response)
        app.logger.info(message)
        if response:
            app.logger.debug("Response data:\n"+response.data)

    def logstash_logger(resource, request, response):
        # Something similar to lograge for Rails apps
        # [200] GET /materials?available=true (materials)
        message = "[%d] %s %s (%r)" % (response.status_code, request.method, request.full_path, resource)
        app.logger.info(message)

    if app.config.get('LOGSTASH_ENABLE') is True:
        logstash_handler = AsynchronousLogstashHandler(
            app.config.get('LOGSTASH_HOST'),
            app.config.get('LOGSTASH_PORT'),
            database_path='logstash.db', transport='logstash_async.transport.UdpTransport'
        )
        app.logger.addHandler(logstash_handler)

    for method in 'GET POST PATCH PUT DELETE'.split():
        if app.config.get('LOGSTASH_ENABLE'):
            events = getattr(app, 'on_post_'+method)
            events += logstash_logger
        else:
            events = getattr(app, 'on_pre_'+method)
            events += log_request_start
            events = getattr(app, 'on_post_'+method)
            events += log_request_end

    return app
Example No. 26
from flask import Flask, jsonify
from pymongo import MongoClient
from flask_login import LoginManager
import logging
from logstash_async.handler import AsynchronousLogstashHandler
import time

app = Flask(__name__)
app.secret_key = 'secret secret'
client = MongoClient('mongodb')
db = client.test_db
users = db.users
deleted_users = db.deleted_users
manager = LoginManager(app)
devices = db.devices

host = 'logstash'
port = 5000
logger = logging.getLogger('simple-app')
logger.setLevel(logging.DEBUG)
async_handler = AsynchronousLogstashHandler(host, port, database_path=None)
logger.addHandler(async_handler)
startup_time = time.time()

from . import routes
Example No. 27
    def __init__(self, level=logging.DEBUG):

        self.xpr_config = XprConfigParser(
            config_file_path=XprConfigParser.DEFAULT_CONFIG_PATH_XPR_LOG)
        if self.xpr_config[self.LOGGING_SECTION][self.FIND_CONFIG_RECURSIVE]:
            self.xpr_config = self.load_config("xpr")

        self.name = self.xpr_config[self.PROJECT_NAME]
        super(XprLogger, self).__init__(self.name)

        self.setLevel(level)

        logger_formatter = XprCustomFormatter(
            self.xpr_config[self.LOGGING_SECTION][self.FORMATTER])
        logstash_formatter = XprLogstashCustomFormatter(
            self.xpr_config[self.LOGGING_SECTION][self.FORMATTER])

        log_folder = os.path.expanduser(
            self.xpr_config[self.LOGGING_SECTION][self.LOGS_FOLDER_PATH])
        if not os.path.exists(log_folder):
            try:
                os.makedirs(log_folder, 0o755)
            except IOError as err:
                print(
                    "Permission denied to create logs folder at the specified directory.\n{}".format(
                        str(err)))

        # Adding file handler for levels below warning
        try:
            if self.xpr_config[self.LOGGING_SECTION][self.LOGGING_FILE_BOOL]:
                try:
                    wfh = logging.FileHandler(os.path.join(
                        log_folder,
                        '.'.join((self.xpr_config[self.PROJECT_NAME], "log"))), 'w')
                except IOError as err:
                    print("Permission denied to create log files. "
                          "Saving log files in base directory . \n{}".format(
                        str(err)))
                    wfh = logging.FileHandler(
                        os.path.join(os.path.expanduser("~"),
                                     '.'.join((self.xpr_config[
                                                   self.PROJECT_NAME], "log"))),
                        'w')
                wfh.setFormatter(logger_formatter)
                wfh.setLevel(logging.DEBUG)
                self.addHandler(wfh)
        except Exception as err:
            print("Unable to add file handler to logger. \n{}".format(str(err)))
            raise err

        # Adding file handler for levels more critical than warning
        try:
            if self.xpr_config[self.LOGGING_SECTION][self.LOGGING_FILE_BOOL]:
                try:
                    efh = logging.FileHandler(os.path.join(
                        log_folder,
                        '.'.join((self.xpr_config[self.PROJECT_NAME], "err"))), 'w')
                except IOError as err:
                    print("Permission denied to create log files. "
                          "Saving log files in base directory . \n{}".format(
                        str(err)))
                    efh = logging.FileHandler(
                        os.path.join(os.path.expanduser("~"),
                                     '.'.join((self.xpr_config[
                                                   self.PROJECT_NAME], "err"))),
                        'w')
                efh.setFormatter(logger_formatter)
                efh.setLevel(logging.ERROR)
                self.addHandler(efh)
        except Exception as err:
            print(
                "Unable to add file handler to logger . \n{}".format(str(err)))
            raise err

        # Adding logstash logging handler
        try:
            if self.xpr_config[self.LOGGING_SECTION][
                self.LOGGING_LOGSTASH_BOOL]:
                cache_filename = ""
                if self.xpr_config[self.LOGGING_SECTION][
                    self.LOGSTASH_CACHE_BOOL]:
                    cache_filename = os.path.join(
                        log_folder, "cache.persistence")

                lh = AsynchronousLogstashHandler(
                    host=self.xpr_config[self.LOGGING_SECTION][
                        self.LOGSTASH_HOST],
                    port=self.xpr_config[self.LOGGING_SECTION][
                        self.LOGSTASH_PORT],
                    database_path=cache_filename)
                lh.setFormatter(logstash_formatter)
                self.addHandler(lh)
        except Exception as err:
            print("Unable to add logstash handler to logger. \n{}".format(
                str(err)))
            raise err
Example No. 28
# file services object
file_check_obj = FileCheck()
file_extract_obj = FileExtract()
file_convert_obj = FileConvert()

# logger
logging.basicConfig(
    format=
    '%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
    datefmt='%Y-%m-%d:%H:%M:%S',
    level=logging.DEBUG)

# Create a test logger
logger = logging.getLogger('python-logstash-logger')
# Set it to whatever level you want - default will be info
logger.setLevel(logging.DEBUG)
# Create a handler for it
async_handler = AsynchronousLogstashHandler(globals.LOGGER_SERVER_HOST,
                                            int(globals.LOGGER_SERVER_PORT),
                                            database_path=None)
# Add the handler to the logger
logger.addHandler(async_handler)


def send_log_msg(msg, error=False):
    msg = "INDEXING_MAIN " + msg
    if error:
        logger.error(msg)
    else:
        logger.info(msg)
Example No. 29
def prepare_logstash(args):
    """Prepare logstash handler based on the configuration stored in the Tango
    database.

    :param args: process execution arguments
    :type args: list<str>

    .. note::
        The prepare_logstash function has been included in Sardana
        on a provisional basis. Backwards incompatible changes
        (up to and including its removal) may occur if
        deemed necessary by the core developers.
    """
    log_messages = []

    try:
        from logstash_async.handler import AsynchronousLogstashHandler
    except ImportError:
        msg = ("Unable to import logstash_async. Skipping logstash " +
               "configuration...", )
        log_messages.append(msg, )
        return log_messages

    def get_logstash_conf(dev_name):
        try:
            props = db.get_device_property(dev_name, "LogstashHost")
            host = props["LogstashHost"][0]
        except IndexError:
            host = None
        try:
            props = db.get_device_property(dev_name, "LogstashPort")
            port = int(props["LogstashPort"][0])
        except IndexError:
            port = None
        try:
            props = db.get_device_property(dev_name, "LogstashCacheDbPath")
            cache_db_path = props["LogstashCacheDbPath"][0]
        except IndexError:
            cache_db_path = None
        return host, port, cache_db_path

    db = Database()

    bin_name = args[0]
    try:
        instance_name = args[1]
    except IndexError:
        msg = ("Unknown %s instance name. " % bin_name +
               "Skipping logstash configuration...")
        log_messages.append(msg, )
        return log_messages
    server_name = bin_name + "/" + instance_name
    if bin_name in ["Pool", "MacroServer"]:
        class_name = bin_name
        dev_name = get_dev_from_class_server(db, class_name, server_name)[0]
        host, port, cache = get_logstash_conf(dev_name)
    else:
        dev_name = get_dev_from_class_server(db, "Pool", server_name)[0]
        host, port, cache = get_logstash_conf(dev_name)
        if host is None:
            dev_name = get_dev_from_class_server(db, "MacroServer",
                                                 server_name)[0]
            host, port, cache = get_logstash_conf(dev_name)

    if host is not None:
        root = Logger.getRootLog()
        handler = AsynchronousLogstashHandler(host, port, database_path=cache)
        # don't use full path for program_name
        handler._create_formatter_if_necessary()
        _, handler.formatter._program_name = os.path.split(
            handler.formatter._program_name)
        root.addHandler(handler)
        msg = ("Log is being sent to logstash listening on %s:%d", host, port)
        log_messages.append(msg)

    return log_messages
Example No. 30
# formatting for log stash
logstash_formatter = LogstashFormatter(message_type='python-logstash',
                                       extra_prefix='dev',
                                       extra=dict(application='example-app',
                                                  environment='production'))

test_logger = logging.getLogger('python-logstash-logger')
test_logger.setLevel(logging.INFO)

streamHandler = logging.StreamHandler()
# streamHandler.setFormatter(logstash_formatter)
test_logger.addHandler(streamHandler)

logstashHandler = AsynchronousLogstashHandler(host,
                                              port,
                                              database_path='logstash.db')
logstashHandler.setFormatter(logstash_formatter)
test_logger.addHandler(logstashHandler)

# If you don't want to write to a SQLite database, then you do
# not have to specify a database_path.
# NOTE: Without a database, messages are lost between process restarts.
# test_logger.addHandler(AsynchronousLogstashHandler(host, port))

test_logger.error('python-logstash-async: test logstash error message.')
test_logger.info('python-logstash-async: test logstash info message.')
test_logger.warning('python-logstash-async: test logstash warning message.')

# add extra field to logstash message
extra = dict(test_string="python_version-" + repr(sys.version_info))
Example No. 31
import logging

from flask import Flask
app = Flask(__name__)


from logstash_async.handler import AsynchronousLogstashHandler
from logstash_async.formatter import LogstashFormatter

# Create the logger and set its logging level
logger = logging.getLogger("logstash")
logger.setLevel(logging.ERROR)

# Create the handler
handler = AsynchronousLogstashHandler(
    host='172.18.0.3', 
    port=5042, 
    ssl_enable=True, 
    ssl_verify=False,
    database_path='python-elk-logstash.db')
# Here you can specify additional formatting on your log record/message
formatter = LogstashFormatter()
handler.setFormatter(formatter)

# Assign handler to the logger
logger.addHandler(handler)

@app.route('/')
def hello_world():  
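    # NOTE: this logger's level is set to ERROR above, so the info() call
    # below is filtered out before it ever reaches the logstash handler.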
    logger.info("Hello there")
    return 'Hello, World!'
Example No. 32
# DEFAULT_DOWNLOADS_FOLDER = Path("media/downloads/")

# IMPORT ENV VARIABLES
# config_path = APP_ROOT / "config_environment.py"
# config_path_template = APP_ROOT / "TEMPLATE_config_environment.py"
config_path = os.path.join(APP_ROOT, "config_environment.py")
config_path_template = os.path.join(APP_ROOT, "TEMPLATE_config_environment.py")

# Sending logs to logit.io
# Create the logger and set its logging level
logger_logit = logging.getLogger("logstash")
logger_logit.setLevel(logging.ERROR)
# Create the handler
handler = AsynchronousLogstashHandler(
    host='fc652908-5b50-4887-8af2-89286e6febe1-ls.logit.io',
    port=17326,
    ssl_enable=True,
    ssl_verify=False,
    database_path='')
# Here you can specify additional formatting on your log record/message
formatter = LogstashFormatter(message_type='python-logstash',
                              extra_prefix='extra',
                              extra=dict(mikrostoritev='imageUpload',
                                         okolje='production'))
handler.setFormatter(formatter)
# Assign handler to the logger
logger_logit.addHandler(handler)

try:
    # if config_path.exists():
    if os.path.exists(config_path):
        exec(open(config_path).read())
Example No. 33
# -------------------------------------------
# Logging setup
# -------------------------------------------
# Create the logger and set its logging level
logger = logging.getLogger("logstash")
logger.setLevel(logging.INFO)        

log_endpoint_uri = str(environ["LOGS_URI"]).strip()
log_endpoint_port = int(environ["LOGS_PORT"].strip())


# Create the handler
handler = AsynchronousLogstashHandler(
    host=log_endpoint_uri,
    port=log_endpoint_port, 
    ssl_enable=True, 
    ssl_verify=False,
    database_path='')

# Here you can specify additional formatting on your log record/message
formatter = LogstashFormatter()
handler.setFormatter(formatter)

# Assign handler to the logger
logger.addHandler(handler)

# -------------------------------------------
# Models
# -------------------------------------------
class User(db.Model):
    __tablename__ = 'users'