import io
from logging.config import dictConfig


def _logging_stream(log_format='%(levelname)s %(message)s',
                    formatter='multiline_formatter.formatter.MultilineMessagesFormatter'):
    # The original snippet uses `stream` without defining it; an in-memory
    # buffer is assumed here so callers can inspect what was logged.
    stream = io.StringIO()
    dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'default': {
                '()': formatter,
                'format': log_format,
            },
        },
        'handlers': {
            'streamhandler': {
                'level': 'DEBUG',
                'class': 'logging.StreamHandler',
                'formatter': 'default',
                'stream': stream,
            },
        },
        'loggers': {
            '': {
                'handlers': ['streamhandler'],
                'level': 'DEBUG',
            }
        }
    })
    return stream
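A short usage sketch (assuming the in-memory buffer added above and that the multiline_formatter package is installed): anything logged through the root logger can be read back from the returned stream.

import logging

stream = _logging_stream()
logging.getLogger(__name__).info('hello')
print(stream.getvalue())  # INFO hello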
Example #2
 def __init__(self):
     try:
         with open('logging.json') as jl:
             dictConfig(json.load(jl))
         self.client = redis.Redis('db')
     except Exception as e:
         logging.exception('Problem instantiating batch/job repository (%s)', e)
Example #3
def setup_loghandlers(level=None):
    if not logging._handlers:
        dictConfig({
            "version": 1,
            "disable_existing_loggers": False,

            "formatters": {
                "console": {
                    "format": "%(asctime)s %(message)s",
                    "datefmt": "%H:%M:%S",
                },
            },

            "handlers": {
                "console": {
                    "level": "DEBUG",
                    #"class": "logging.StreamHandler",
                    "class": "rq.utils.ColorizingStreamHandler",
                    "formatter": "console",
                    "exclude": ["%(asctime)s"],
                },
            },

            "root": {
                "handlers": ["console"],
                "level": level or "INFO",
            }
        })
Example #4
def configure_from_file(filename, default=None):
    '''
    :param filename: file (JSON or YAML) to read configuration from (may be
        ``None``)
    :param default: configuration to use in case :obj:`filename` doesn't
        exist

    Function configures logging according to a dict config read from
    :obj:`filename`. If :obj:`filename` is missing and :obj:`default` was
    specified, logging is configured according to the latter. If neither an
    acceptable :obj:`filename` nor a :obj:`default` was provided,
    :exc:`RuntimeError` is raised.

    :obj:`default` should be a dict config, but as a shorthand, it may be
    ``"stderr"`` or ``"null"``. Logging will then be configured with
    :func:`log_config_stderr()` or :func:`log_config_null()`,
    respectively.
    '''
    if filename is not None and os.path.isfile(filename):
        # JSON is valid YAML, so we'll stick to the YAML parser; safe_load()
        # makes sure nothing as fancy as custom classes gets loaded
        with open(filename) as f:
            config = yaml.safe_load(f)
    elif default == "stderr":
        config = log_config_stderr()
    elif default == "null":
        config = log_config_null()
    elif isinstance(default, dict):
        config = default
    else:
        raise RuntimeError('no usable logging configuration specified')
    dictConfig(config)
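A hypothetical usage sketch (the path is illustrative): fall back to stderr logging when the file is absent.

configure_from_file('/etc/myapp/logging.yaml', default='stderr')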
Example #5
    def setup_logging(cls, formatter='elk'):
        logging_conf = {
            'version': 1,
            'disable_existing_loggers': True,
            'formatters': {
                'simple': {
                    'format': '%(asctime)s [%(levelname)s] %(message)s',
                    'datefmt': '%Y-%m-%dT%H:%M:%S',
                },
                'elk': {
                    '()': 'mtp_common.logging.ELKFormatter'
                }
            },
            'handlers': {
                'console': {
                    'level': 'DEBUG',
                    'class': 'tests.test_logging.Handler',
                    'formatter': formatter,
                },
            },
            'root': {
                'level': 'WARNING',
                'handlers': ['console'],
            },
            'loggers': {
                'mtp': {
                    'level': 'INFO',
                    'handlers': ['console'],
                    'propagate': False,
                },
            },
        }

        dictConfig(logging_conf)
        messages.clear()
Example #6
def newLogger(name, configuration=None, level=10):
    """
    Create and return a new logger connected to the main logging queue.

    :param name: Name (preferably of the calling module) that will show in the log records
    :param configuration: a dict configuration for the logger, or None to use the basic config
    :param level: log message level; default is 10 (DEBUG)
    :return: logger object
    """

    if __listener is None:
        setupLogListener()

    if configuration is not None:
        # logging.config.dictConfig(base_config(name))
        config.dictConfig(configuration)
    # else:

    q_handler = handlers.QueueHandler(__logging_queue)
    logger = logging.getLogger(name)

    try:
        # logger.setLevel(level.upper())
        logger.setLevel(level)
    except ValueError:
        pass # let it default to warning

    # check for handlers, or we could get one logger spitting out
    # dozens of duplicate messages every time it's called
    if not logger.hasHandlers():
        logger.addHandler(q_handler)

    return logger
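A usage sketch, assuming the module-level listener and queue set up above are in place:

log = newLogger(__name__, level=logging.INFO)
log.info('handled through the shared QueueHandler')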
Example #7
def commandline():
    parser = argparse.ArgumentParser(description=u'Build, orchestrate, run, and '
                                                 u'ship Docker containers with '
                                                 u'Ansible playbooks')
    parser.add_argument('--debug', action='store_true', dest='debug',
                        help=u'Enable debug output', default=False)
    parser.add_argument('--engine', action='store', dest='engine_name',
                        help=u'Select your container engine and orchestrator',
                        default='docker')
    parser.add_argument('--project', '-p', action='store', dest='base_path',
                        help=u'Specify a path to your project. Defaults to '
                             u'current working directory.', default=os.getcwd())
    subparsers = parser.add_subparsers(title='subcommand', dest='subcommand')
    for subcommand in AVAILABLE_COMMANDS:
        logger.debug('Registering subcommand %s', subcommand)
        subparser = subparsers.add_parser(subcommand, help=AVAILABLE_COMMANDS[subcommand])
        globals()['subcmd_%s_parser' % subcommand](parser, subparser)

    args = parser.parse_args()

    if args.subcommand == 'help':
        parser.print_help()
        sys.exit(0)

    if args.debug:
        LOGGING['loggers']['container']['level'] = 'DEBUG'
    config.dictConfig(LOGGING)

    try:
        getattr(engine, u'cmdrun_{}'.format(args.subcommand))(**vars(args))
    except exceptions.AnsibleContainerAlreadyInitializedException:
        logger.error('Ansible Container is already initialized')
        sys.exit(1)
Example #8
def main():
    # setup logging
    dictConfig(config.LOG_CONFIG)

    tcp_server = LogRecordSocketReceiver()
    print('About to start TCP server...')
    tcp_server.serve_until_stopped()
Example #9
    def from_configuration(cls, configuration):
        """
        Load configuration from a data structure loaded from the configuration
        file and only minimally processed.

        :param dict configuration: Agent configuration as returned by
            ``get_configuration``.

        :return: A new instance of ``cls`` with values loaded from the
            configuration.
        """
        if 'logging' in configuration:
            from logging.config import dictConfig
            dictConfig(configuration['logging'])

        host = configuration['control-service']['hostname']
        port = configuration['control-service']['port']

        node_credential = configuration['node-credential']
        ca_certificate = configuration['ca-certificate']

        api_args = configuration['dataset']
        backend_name = api_args.pop('backend')

        return cls(
            control_service_host=host,
            control_service_port=port,

            node_credential=node_credential,
            ca_certificate=ca_certificate,

            backend_name=backend_name.decode("ascii"),
            api_args=api_args,
        )
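A sketch of the configuration shape this method reads, inferred from the keys accessed above; the class name and values are illustrative:

configuration = {
    'logging': {'version': 1},
    'control-service': {'hostname': 'control.example.com', 'port': 4524},
    'node-credential': node_credential,  # credential object supplied by the caller
    'ca-certificate': ca_certificate,    # CA certificate object supplied by the caller
    'dataset': {'backend': b'zfs'},      # the remaining keys become api_args
}
service = AgentService.from_configuration(configuration)  # hypothetical class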
Example #10
 def __init__(self):
     LOGGING_CONF = dict(
         version=1,
         formatters=dict(
             bare={
                 "datefmt": "%Y-%m-%d %H:%M:%S",
                 "format": "[%(asctime)s][%(name)10s %(levelname)7s] %(message)s"
             },
         ),
         handlers=dict(
             console={
                 "class": "logging.StreamHandler",
                 "formatter": "bare",
                 "level": "DEBUG",
                 "stream": "ext://sys.stdout",
             }
         ),
         loggers=dict(
             bugyou={
                 "level": "DEBUG",
                 "propagate": False,
                 "handlers": ["console"],
             }
         ),
     )
     dictConfig(LOGGING_CONF)
Example #11
def main(argv):
    conf = {
        "debug":                    None,
        "logging":                  None,
        }
    conf.update(DataStore.CONFIG_DEFAULTS)

    args, argv = readconf.parse_argv(argv, conf,
                                     strict=False)
    if argv and argv[0] in ('-h', '--help'):
        print ("""Usage: python -m Abe.admin [-h] [--config=FILE] COMMAND...

Options:

  --help                    Show this help message and exit.
  --config FILE             Abe configuration file.

Commands:

  delete-chain-blocks NAME  Delete all blocks in the specified chain
                            from the database.

  delete-chain-transactions NAME  Delete all blocks and transactions in
                            the specified chain.

  delete-tx TX_ID           Delete the specified transaction.
  delete-tx TX_HASH

  link-txin                 Link transaction inputs to previous outputs.

  rewind-datadir DIRNAME    Reset the pointer to force a rescan of
                            blockfiles in DIRNAME.""")
        return 0

    logging.basicConfig(
        stream=sys.stdout,
        level=logging.DEBUG,
        format="%(message)s")
    if args.logging is not None:
        import logging.config as logging_config
        logging_config.dictConfig(args.logging)

    store = DataStore.new(args)

    while len(argv) != 0:
        command = argv.pop(0)
        if command == 'delete-chain-blocks':
            delete_chain_blocks(store, argv.pop(0))
        elif command == 'delete-chain-transactions':
            delete_chain_transactions(store, argv.pop(0))
        elif command == 'delete-tx':
            delete_tx(store, argv.pop(0))
        elif command == 'rewind-datadir':
            rewind_datadir(store, argv.pop(0))
        elif command == 'link-txin':
            link_txin(store)
        else:
            raise ValueError("Unknown command: " + command)

    return 0
Example #12
def setup_logging(level=None):
    conf = copy.deepcopy(default_log_config)

    if level is not None:
        conf['root']['level'] = level

    dictConfig(conf)
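default_log_config is not shown in the snippet; a minimal dict it could hold, consistent with setup_logging() overriding conf['root']['level'], might look like this (an assumption, not the project's actual config):

default_log_config = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {'class': 'logging.StreamHandler'},
    },
    'root': {'handlers': ['console'], 'level': 'INFO'},
}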
Example #13
def set_logging_config(config, debug, verbosity, uncaught_logger, uncaught_handler):
	# configure logging globally
	import logging.config as logconfig
	logconfig.dictConfig(config)

	# make sure we log any warnings
	log.captureWarnings(True)

	import warnings

	categories = (DeprecationWarning, PendingDeprecationWarning)
	if verbosity > 2:
		warnings.simplefilter("always")
	elif debug or verbosity > 0:
		for category in categories:
			warnings.simplefilter("always", category=category)

	# make sure we also log any uncaught exceptions
	if uncaught_logger is None:
		logger = log.getLogger(__name__)
	else:
		logger = log.getLogger(uncaught_logger)

	if uncaught_handler is None:
		def exception_logger(exc_type, exc_value, exc_tb):
			logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_tb))

		uncaught_handler = exception_logger
	sys.excepthook = uncaught_handler

	return logger
Example #14
    def configure_logging(self):
        logging = getattr(self, "LOGGING", None)
        if not logging:
            return

        self.remove_unused_handlers(logging, {})
        dictConfig(logging)
Example #15
    def make_logging_handlers_and_tools(self, multiproc=False):
        """Creates logging handlers and redirects stdout."""

        log_stdout = self.log_stdout
        if sys.stdout is self._stdout_to_logger:
            # If we already redirected stdout we don't need to redo it
            log_stdout = False

        if self.log_config:
            if multiproc:
                proc_log_config = self._mp_config
            else:
                proc_log_config = self._sp_config

            if proc_log_config:
                if isinstance(proc_log_config, dict):
                    new_dict = self._handle_dict_config(proc_log_config)
                    dictConfig(new_dict)
                else:
                    parser = self._handle_config_parsing(proc_log_config)
                    memory_file = self._parser_to_string_io(parser)
                    fileConfig(memory_file, disable_existing_loggers=False)

        if log_stdout:
            #  Create a logging mock for stdout
            std_name, std_level = self.log_stdout

            stdout = StdoutToLogger(std_name, log_level=std_level)
            stdout.start()
            self._tools.append(stdout)
Example #16
def _set_debug_dict(__loglevel__):
    """ set the debug dict """

    _lconfig.dictConfig({
    'version': 1,
    'disable_existing_loggers': False,

    'formatters': {
        'standard': {
            'format': "%(asctime)s \t"\
                     +"pid=%(process)d \t"\
                     +"[%(filename)s]\t"\
                     +"%(levelname)s \t"\
                     +"%(message)s"
        },
    },
    'handlers': {
        __name__: {
            'level':__loglevel__,
            'class':'logging.FileHandler',
            'filename':__debugfile__,
            'formatter':"standard",
            'mode':'a+'
        }
    },
    'loggers':{
        __name__: {
            'handlers': [__name__],
            'level': __loglevel__,
            'propagate': True
        }
    }
    })
Example #17
def enable_log(log_level=logging.INFO):
    config = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'simple': {
                'format': '%(asctime)s [%(levelname)s] %(message)s',
            },
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'formatter': 'simple',
            },
        },
        'loggers': {
            'requests.packages.urllib3': {
                'level': logging.WARNING
            },
            'paramiko': {
                'level': logging.WARNING
            },
        },
        'root': {
            'handlers': ['console'],
            'level': log_level,
        },
    }
    dictConfig(config)
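Usage sketch: the root logger emits at the requested level while the chatty third-party loggers stay pinned at WARNING.

enable_log(logging.DEBUG)
logging.getLogger(__name__).debug('visible on the console')
logging.getLogger('paramiko').debug('suppressed: paramiko stays at WARNING')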
Example #18
def init_defaults(argv=None, debug=False):
    argv = argv or sys.argv[1:]
    config = Config.from_defaults()
    config = config.dict_merge(config, Config.from_files(config.config_files.main,
                                                         config.config_root))
    main_parser = Argparser(argv=argv, description='Aminator: bringing AMIs to life', add_help=False,
                            argument_default=argparse.SUPPRESS)
    config.logging = LoggingConfig.from_defaults()
    config.logging = config.dict_merge(config.logging, LoggingConfig.from_files(config.config_files.logging,
                                                                                config.config_root))
    config.environments = EnvironmentConfig.from_defaults()
    config.environments = config.dict_merge(config.environments,
                                            EnvironmentConfig.from_files(config.config_files.environments,
                                                                         config.config_root))

    if config.logging.base.enabled:
        dictConfig(config.logging.base.config.toDict())
    if debug:
        logging.getLogger().setLevel(logging.DEBUG)
        for handler in logging.getLogger().handlers:
            handler.setLevel(logging.DEBUG)

    add_base_arguments(parser=main_parser, config=config)
    plugin_parser = Argparser(argv=argv, add_help=True, argument_default=argparse.SUPPRESS,
                              parents=[main_parser])
    log.info('Aminator {0} default configuration loaded'.format(aminator.__version__))
    return config, plugin_parser
Example #19
def main():
    fedmsg_config = fedmsg.config.load_config()
    dictConfig(fedmsg_config.get('logging', {'version': 1}))

    log.info("Listening to the bus via fedmsg.tail_messages()")
    for _, _, topic, msg in fedmsg.tail_messages():

        # XXX - if you want to debug whether or not this is receiving fedmsg
        # messages, you can put a print statement here, before the 'continue'
        # statement.

        if not topic.endswith(target):
            continue

        log.info("A meeting just ended!  Sleeping 2s.  %r" % msg.get('msg_id'))
        time.sleep(2)

        teams_cmd = "/usr/local/bin/meetings_by_team.sh"
        log.info("Running %r" % teams_cmd)
        proc = sp.Popen(teams_cmd.split(), stdout=sp.PIPE, stderr=sp.PIPE)
        stdout, stderr = proc.communicate()
        if proc.returncode:
            # Calling log.error in fedora infrastructure with fedmsg logging
            # configured, should send an email to the sysadmin-datanommer
            # group.
            log.error("Error %r running %r.\n  STDOUT:  %s\n  STDERR:  %s" % (
                proc.returncode, teams_cmd, stdout, stderr))

        log.info("Running soke.run()...")
        soke.run()

        log.info("Done.")
Example #20
def main():
    opts, args = parse_args()

    config = fedmsg.config.load_config()
    config.update({
        'name': 'relay_inbound',
        'active': True,
    })

    dictConfig(config.get('logging', {'version': 1}))
    log.info("Starting summershum ingestion")

    fedmsg.init(**config)

    session = summershum.model.create_session(
        config['summershum.sqlalchemy.url'],
        create=True,
    )

    datagrepper_url = config['summershum.datagrepper']
    messages = __get_messages(datagrepper_url, opts.msg_id)
    for message in messages:
        msg = message['msg']
        summershum.core.ingest(
            session=session,
            msg=msg,
            config=config,
            msg_id=message.get('msg_id', None),
            force=opts.force,
        )
Example #21
    def start(self):
        # Only two main green threads are required for APGW bgp-agent.
        # One for NetworkController, another for BGPS core.

        # If a configuration file was provided and loaded successfully, we
        # start the BGPS core using those settings. If no configuration file
        # is provided, or if it is missing the minimum required settings, the
        # BGPS core is not started.
        if self.config_file:
            LOG.debug('Loading config. from settings file.')
            settings = self.load_config(self.config_file)
            # Configure log settings, if available.
            if getattr(settings, 'LOGGING', None):
                dictConfig(settings.LOGGING)

            if getattr(settings, 'BGP', None):
                self._start_core(settings)

            if getattr(settings, 'SSH', None) is not None:
                hub.spawn(ssh.SSH_CLI_CONTROLLER.start, None, **settings.SSH)
        # Start the Network Controller to serve RPC peers.
        t = hub.spawn(net_ctrl.NET_CONTROLLER.start, *[],
                      **{net_ctrl.NC_RPC_BIND_IP: self.bind_ip,
                      net_ctrl.NC_RPC_BIND_PORT: self.bind_port})
        LOG.debug('Started Network Controller')

        super(RyuBGPSpeaker, self).start()

        return t
Example #22
def configure(settings):
    dictConfig(settings)
    for module, message in IGNORE_DJANGO_110_WARNINGS.items():
        warnings.filterwarnings(
            action='ignore',
            category=RemovedInDjango110Warning, module=module, message=message
        )
Example #23
 def start(self):
     try:
         logging.raiseExceptions = self.raise_exceptions
         dictConfig(self.config)
         self.bus.log("Loggers configured")
     except Exception:
         self.bus.log(traceback=True)
Example #24
def main(argv):
    conf = {"debug": None, "logging": None}
    conf.update(DataStore.CONFIG_DEFAULTS)

    args, argv = readconf.parse_argv(argv, conf, strict=False)
    if argv and argv[0] in ("-h", "--help"):
        print(
            """Usage: python -m Abe.reconfigure [-h] [--config=FILE] [--CONFIGVAR=VALUE]...

Apply configuration changes to an existing Abe database, if possible.

  --help                    Show this help message and exit.
  --config FILE             Read options from FILE.
  --use-firstbits {true|false}
                            Turn Firstbits support on or off.
  --keep-scriptsig false    Remove input validation scripts from the database.

All configuration variables may be given as command arguments."""
        )
        return 0

    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format="%(message)s")
    if args.logging is not None:
        import logging.config as logging_config

        logging_config.dictConfig(args.logging)

    store = DataStore.new(args)
    firstbits.reconfigure(store, args)
    keep_scriptsig_reconfigure(store, args)
    return 0
Example #25
def configure_logging(config):
    structlog.configure(processors=processors,
                        logger_factory=structlog.stdlib.LoggerFactory(),
                        wrapper_class=structlog.stdlib.BoundLogger,
                        cache_logger_on_first_use=True)
    config.setdefault('logging', {})
    config['logging'].setdefault('version', 1)
    config['logging'].setdefault('handlers', {})
    config['logging'].setdefault('formatters', {})
    config['logging'].setdefault('loggers', {})
    config['logging']['handlers'].setdefault('raw', {
        'level': 'DEBUG',
        'class': 'logging.StreamHandler',
        'formatter': 'raw',
    })
    config['logging']['loggers'].setdefault('root', {
        'handlers': ['raw'],
        'level': 'DEBUG',
        'propagate': False,
    })
    config['logging']['loggers'].setdefault('graphite_api', {
        'handlers': ['raw'],
        'level': 'DEBUG',
    })
    config['logging']['formatters']['raw'] = {'()': StructlogFormatter}
    dictConfig(config['logging'])
    if 'path' in config:
        logger.info("loading configuration", path=config['path'])
    else:
        logger.info("loading default configuration")
Example #26
def setup_logging():
    try:
        with open('logger/logging_config.json', 'rt') as file:
            config = json.load(file)
            dictConfig(config)
    except IOError as e:
        raise Exception('Failed to load logging configuration') from e
Example #27
def parse_logger(conf):
    # TODO: control logging via the configuration and command-line arguments
    conf_dict = {'version': 1,
                 'disable_existing_loggers': False,
                 'formatters': {'verbose': {'format': LOG_FORMATTER,
                                            'datefmt': DATE_FMT},
                                'simple': {'format': SIMPLE_FORMATTER,
                                           'datefmt': DATE_FMT}},
                 'handlers': {
                     'console': {'level': 'DEBUG',
                                 'class': 'logging.StreamHandler',
                                 'stream': 'ext://sys.stdout',
                                 'formatter': 'verbose'},
                     'file': {'level': 'DEBUG',
                              'class': 'logging.handlers.RotatingFileHandler',
                              'filename': '/tmp/robber.log',
                              'maxBytes': 10485760,
                              'backupCount': 9,
                              'formatter': 'verbose'}},
                 'loggers': {'robber': {'handlers': ['console'],
                                        'level': 'DEBUG'},
                             'robber.engine': {'handlers': [],
                                               'level': 'DEBUG'}}}

    dictConfig(conf_dict)
    return
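Note the design choice: 'robber.engine' declares an empty handler list, so its records propagate to 'robber' and reach the console handler exactly once. A quick sketch:

parse_logger(None)  # conf is currently unused (see the TODO above)
logging.getLogger('robber.engine').debug('emitted via the parent robber logger')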
Example #28
 def __init__(self, system, logDefs = None):
     self.system = system
     self._pendingSends = []
     if logDefs is not None:
         dictConfig(logDefs or defaultLoggingConfig)
     self._primaryActors = []
     self._primaryCount  = 0
     self._globalNames = {}
     self.procLimit = 0
     self._wakeUps = {}  # key = datetime for wakeup, value = list
                         # of (targetAddress, pending
                         # WakeupMessage) to restart at
                         # that time
     self._sources = {}  # key = sourcehash, value = encrypted zipfile data
     self._sourceAuthority = None  # ActorAddress of Source Authority
     asys = self._newRefAndActor(system, system.systemAddress,
                                 system.systemAddress,
                                 External)
     extreq = self._newRefAndActor(system, system.systemAddress,
                                   ActorAddress('System:ExternalRequester'),
                                   External)
     badActor = self._newRefAndActor(system, system.systemAddress,
                                     ActorAddress('System:BadActor'), BadActor)
     self.actorRegistry = {  # key=ActorAddress string, value=ActorRef
         system.systemAddress.actorAddressString: asys,
         'System:ExternalRequester': extreq,
         'System:BadActor': badActor,
     }
Example #29
    def __init__(self, system, logDefs = None):
        super(ActorSystemBase, self).__init__()
        self.system = system
        self._pendingSends = []
        if logDefs is not False: dictConfig(logDefs or defaultLoggingConfig)
        self._primaryActors = []
        self._primaryCount  = 0
        self._globalNames = {}
        self.procLimit = 0
        self._sources = {}  # key = sourcehash, value = encrypted zipfile data
        self._sourceAuthority = None  # ActorAddress of Source Authority
        self._sourceNotifications = [] # list of actor addresses to notify of loads
        asys = self._newRefAndActor(system, system.systemAddress,
                                    system.systemAddress,
                                    External)
        extreq = self._newRefAndActor(system, system.systemAddress,
                                      ActorAddress('System:ExternalRequester'),
                                      External)
        badActor = self._newRefAndActor(system, system.systemAddress,
                                        ActorAddress('System:BadActor'), BadActor)
        self.actorRegistry = {  # key=ActorAddress string, value=ActorRef
            system.systemAddress.actorAddressString: asys,
            'System:ExternalRequester': extreq,
            'System:BadActor': badActor,
        }
        self._internalAddresses = list(self.actorRegistry.keys())

        system.capabilities['Python Version'] = tuple(sys.version_info)
        system.capabilities['Thespian Generation'] = ThespianGeneration
        system.capabilities['Thespian Version'] = str(int(time.time()*1000))
        system.capabilities['Thespian ActorSystem Name'] = 'simpleSystem'
        system.capabilities['Thespian ActorSystem Version'] = 2
        system.capabilities['Thespian Watch Supported'] = False
        system.capabilities['AllowRemoteActorSources'] = 'No'
Example #30
    def test_dict_config(self):
        """Build the logger from a dictionary"""
        dictConfig({
            'version': 1,

            'formatters': {
                'colored': {
                    '()': 'colorlog.ColoredFormatter',
                    'format': self.LOGFORMAT,
                }
            },

            'handlers': {
                'stream': {
                    'class':        'logging.StreamHandler',
                    'formatter':    'colored',
                },
            },

            'loggers': {
                'dictConfig': {
                    'handlers':    ['stream'],
                    'level': 'DEBUG',
                },
            },
        })

        self.example_log_messages(getLogger('dictConfig'))
Example #31
"""
Module defining global configuration for the collab-compet package
"""

from logging.config import dictConfig
import yaml
import torch
import logging

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

with open("logging.yaml") as log_conf_file:
    log_conf = yaml.load(log_conf_file)
    dictConfig(log_conf)

log = logging.getLogger("config")

with open("config.yaml") as conf_file:
    config = yaml.load(conf_file)
    log.info(f"Running with config: {config}")


def set_config(new_config):
    global config
    config = new_config
Example #32
            'address': '/dev/log',
            'facility': "local6",
            'level': logging.DEBUG,
            'formatter': 'verbose',
        },
    },
    'loggers': {
        'user-commands': {
            'handlers': ['stdout', 'sys-logger6'],
            'level': logging.INFO,
            'propagate': True,
        },
    }
}

config.dictConfig(LOGGING)
logger = logging.getLogger("user-commands")

if __name__ == "__main__":
    try:
        if os.geteuid() != 0:
            logger.critical("User Must be ROOT!")
            exit(1)
        if args.e and args.c and args.u and not args.d:
            enable_command(args.e, args.u, args.c)
        elif args.e and args.c and not args.u:
            logger.critical("the option -u shall be used")
        elif args.e and not args.c and args.u:
            logger.critical("the option -c shall be used")
        elif not args.e and not args.c and args.u:
            logger.critical("Missing arguments")
Example #33
 def __init__(self):
     dictConfig(LOGGING_CONFIG)
     self._logger = logging.getLogger('nightsight')
Example #34
# -*- coding: utf-8 -*-

import json
import logging
import math
from collections import defaultdict
from logging.config import dictConfig

import requests

import config
from models import Quiz

dictConfig(config.LOGGING_CONFIG)
logger = logging.getLogger("app")

headers = {"Authorization": "Bearer " + config.API_KEY}
json_headers = {
    "Authorization": "Bearer " + config.API_KEY,
    "Content-type": "application/json",
}


def extend_quiz(course_id, quiz, percent, user_id_list):
    """
    Extends a quiz time by a percentage for a list of users.

    :param quiz: A quiz object from Canvas
    :type quiz: dict
    :param percent: The percent of original quiz time to be applied.
        e.g. 200 is double time, 100 is normal time, <100 is invalid.
Example #35
            "formatter": "json"
        }
    },
    "loggers": {
        "root": {
            "level": "INFO",
            "handlers": ["json"]
        },
        "werkzeug": {
            "level": "WARN",  # Disable werkzeug hardcoded logger
            "handlers": ["json"]
        }
    }
}

config.dictConfig(LOG_CONFIG)  # load log config from dict
logger = logging.getLogger("root")  # get root logger instance
FlaskGoogleCloudLogger(app)


@app.teardown_request  # log request and response info after extension's callbacks
def log_request_time(_exception):
    logger.info(
        f"{request.method} {request.path} - Sent {g.response.status_code}"
        f" in {g.request_time:.5f}ms")


if __name__ == '__main__':

    keep_scaler_alive = KeepScalerAlive(name="KeepScalerAlive")
    keep_scaler_alive.start()
Example #36
from logging.config import dictConfig

dictConfig({
    'version': 1,
    'formatters': {'default': {
        'format': '%(levelname)s/%(module)s %(message)s',
    }},
    'handlers': {'wsgi': {
        'class': 'logging.StreamHandler',
        'stream': 'ext://flask.logging.wsgi_errors_stream',
        'formatter': 'default'
    }},
    'root': {
        'level': 'INFO',
        'handlers': ['wsgi']
    }
})

import time,os,sched,random,threading,traceback,datetime
import re,base64
import zlib

import requests as r

import Identicon
Identicon._crop_coner_round = lambda a,b:a # don't cut corners, please
import mimetypes as mt

from commons import *

from flask_cors import CORS
Example #37
def _setup():
    dictConfig(settings.BROKENLINKS_LOGGING)
Example #38
dictConfig({
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "standard": {
            "format": "%(asctime)s [%(levelname)-8s] %(name)-12s: %(message)s"
        },
    },
    "handlers": {
        "default": {
            "level": "NOTSET",
            "formatter": "standard",
            "class": "logging.StreamHandler",
        }
    },
    "loggers": {
        "": {
            "handlers": ["default"],
            "level": "WARNING",
            "propagate": True
        },
        "cloudflare_ddns": {
            "handlers": ["default"],
            "level": log_level,
            "propagate": False
        },
        "__main__": {
            "handlers": ["default"],
            "level": log_level,
            "propagate": False
        }
    }
})
Example #39
            'formatter': 'detail',
            'encoding': 'utf-8',
        },
    },
    'loggers': {
        'download_logger': {
            'handlers': ['console', 'file'],
            'level': 'DEBUG',
        },
        'parser_logger': {
            'handlers': ['file'],
            'level': 'INFO',
        },
        'other_logger': {
            'handlers': ['console', 'file'],
            'level': 'INFO',
        },
        'db_logger': {
            'handlers': ['file'],
            'level': 'INFO',
        }
    }
}

log_conf.dictConfig(log_config)

db_logger = logging.getLogger("db_logger")
parse_logger = logging.getLogger("parser_logger")
download_logger = logging.getLogger("download_logger")
other_logger = logging.getLogger("other_logger")
Example #40
    },
)

if output_error_file != "":
    logging_config['handlers']['file_error'] = {
        'class': 'logging.handlers.RotatingFileHandler',
        'level': 'ERROR',
        'formatter': 'f',
        'filename': output_error_file,
        'mode': 'a',
        'maxBytes': 10485760,
        'backupCount': 5,
    }
    logging_config['root']['handlers'].append('file_error')

dictConfig(logging_config)
logger = logging.getLogger('SAMbot')
slack_client = SlackClient(token)
logger.info("Slack client created")
logger.info("Connecting to misp server")
misp = misp_custom(data['misp']['url'], data['misp']['key'],
                   data['misp']['ssl'])
logger.info("Connected to misp server successfully")
helperFunc = helper.TonyTheHelper(slack_client)
# starterbot's user ID in Slack: value is assigned after the bot starts up
starterbot_id = None

# constants
RTM_READ_DELAY = 1  # 1 second delay between reading from RTM
EXAMPLE_COMMAND = "Tell a joke"
MENTION_REGEX = "^<@(|[WU].+?)>(.*)"
Example #41
def setup_logging(file):
    with open(file, 'r') as f:
        config = yaml.safe_load(f.read())
        dictConfig(config)
Example #42
def reset_logging_config():
    # root_logger_level = getLogger().level
    root_logger_level = 'DEBUG'
    dictConfig({'version': 1, 'root': {'level': 'NOTSET'}})
    yield
    getLogger().setLevel(root_logger_level)
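A hypothetical pytest usage, assuming the generator above is registered with @pytest.fixture:

def test_with_clean_logging(reset_logging_config):
    # only the minimal root config applied by the fixture is active here
    getLogger(__name__).info('isolated from other tests')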
Example #43
import faust

from simple_settings import settings
from logging.config import dictConfig


app = faust.App(
    version=1,  # fmt: off
    autodiscover=True,
    origin="{{cookiecutter.project_slug}}",
    id="1",
    broker=settings.{{cookiecutter.kafka_server_environment_variable}},
    logging_config=dictConfig(settings.LOGGING),
)


def main() -> None:
    app.main()
Example #44
def configure_logger(args) -> logging.Logger:
    """Setup the global logging configurations and instanciate a specific logger for the current script

    Parameters
    ----------
    args : dict
        The arguments given to the script

    Returns
    --------
    the logger: logger.Logger
    """
    # create logger and formatter
    logger = logging.getLogger()

    # Verbose level => logging level
    log_level = args.verbosity
    if args.verbosity >= len(LEVEL):
        log_level = len(LEVEL) - 1
        # logging.warning("verbosity level is too high, I'm gonna assume you're taking the highest (%d)" % log_level)

    # Define the default logger configuration
    logging_config = dict(
        version=1,
        disable_existing_loggers=True,
        formatters={
            "f": {
                "format":
                "[%(asctime)s] [%(levelname)s] — [%(name)s — %(funcName)s:%(lineno)d] %(message)s",
                "datefmt": "%d/%b/%Y: %H:%M:%S ",
            }
        },
        handlers={
            "h": {
                "class": "logging.StreamHandler",
                "formatter": "f",
                "level": LEVEL[log_level],
            }
        },
        root={
            "handlers": ["h"],
            "level": LEVEL[log_level]
        },
    )

    # Add file handler if file logging required
    if args.log_file is not None:
        logging_config["handlers"]["f"] = {
            "class": "logging.FileHandler",
            "formatter": "f",
            "level": LEVEL[log_level],
            "filename": args.log_file,
        }
        logging_config["root"]["handlers"] = ["h", "f"]

    # Setup logging configuration
    dictConfig(logging_config)

    # Retrieve and return the logger dedicated to the script
    logger = logging.getLogger(__name__)
    return logger
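A hypothetical driver for configure_logger(); the LEVEL list mapping verbosity counts to logging levels is assumed, since it is not part of the snippet:

import argparse
import logging

LEVEL = [logging.WARNING, logging.INFO, logging.DEBUG]  # assumed mapping

parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbosity', action='count', default=0)
parser.add_argument('--log-file', dest='log_file', default=None)
logger = configure_logger(parser.parse_args())
logger.info('logging configured')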
Example #45
            'propagate': False,
        },
        'raven': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
        'sentry.errors': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
    },
}

dictConfig(LOGGING)

RAVEN_CONFIG = {
    'dsn':
    'https://*****:*****@sentry.io/98255',
    # If you are using git, you can also automatically configure the
    # release based on the git info.
    'release':
    raven.fetch_git_sha(os.path.join(os.path.dirname(__file__), '..')),
}

handler = SentryHandler(
    'https://*****:*****@sentry.io/53555'
)
raven.setup_logging(handler)
Example #46
 def setUp(self):
     dictConfig(LOGGING_CONFIG)
Example #47
from config import config, logfile

from logging.config import dictConfig

dictConfig({
    'version': 1,
    'formatters': {
        'default': {
            'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',
        }
    },
    'handlers': {
        'wsgi': {
            'class': 'logging.StreamHandler',
            'stream': 'ext://flask.logging.wsgi_errors_stream',
            'formatter': 'default'
        },
        'file': {
            'class': 'logging.FileHandler',
            'filename': logfile,
            'formatter': 'default'
        }
    },
    'root': {
        'level': 'INFO',
        'handlers': ['wsgi']
    }
})

bootstrap = Bootstrap()
# mail = Mail()
# moment = Moment()
Example #48
def create_app() -> Flask:
    """
    Sets up a Flask application
    """

    # Load the .env file into environment variables
    load_dotenv(ENV_FILE_PATH)
    # Configure logging
    dictConfig({
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "standard": {
                "format":
                "[%(asctime)s] {%(module)s:%(lineno)d} %(levelname)s - %(message)s"
            },
        },
        "handlers": {
            "stdout": {
                "class": "logging.StreamHandler",
                "formatter": "standard"
            },
            "file": {
                "class": "logging.handlers.RotatingFileHandler",
                "level": "DEBUG",
                "formatter": "standard",
                "filename": "logs/api.log",
                "mode": "a",
                "maxBytes": 1048576,
                "backupCount": 10,
            },
        },
        "loggers": {
            "": {
                "handlers": ["stdout", "file"],
                "level": get_env_var("DIVVYDOSE_LOG_LEVEL"),
            }
        },
    })
    # Instantiate a Flask application
    flask_app = Flask(get_env_var("DIVVYDOSE_API_NAME"), static_folder=None)
    # Set the Flask app's configuration from environment variables
    flask_app.config["API_BEARER_TOKEN"] = get_env_var(
        "DIVVYDOSE_API_BEARER_TOKEN")
    flask_app.config["GITHUB_API_TOKEN"] = get_env_var(
        "DIVYYDOSE_GITHUB_API_TOKEN")
    flask_app.config["BITBUCKET_USERNAME"] = get_env_var(
        "DIVYYDOSE_BITBUCKET_USERNAME")
    flask_app.config["BITBUCKET_PASSWORD"] = get_env_var(
        "DIVYYDOSE_BITBUCKET_PASSWORD")
    # Register HTTP error handlers
    flask_app.register_error_handler(401, http_unauthorized_response)
    flask_app.register_error_handler(403, http_forbidden_response)
    flask_app.register_error_handler(404, http_page_not_found_response)
    flask_app.register_error_handler(418, http_tea_pot_response)
    flask_app.register_error_handler(500, http_internal_server_error_response)
    flask_app.register_error_handler(Exception,
                                     http_internal_server_error_response)
    # Instantiate a Flask-RESTful instance
    rest_api = Api(flask_app)
    # Register a Flask-RESTful resource
    rest_api.add_resource(GitProfileResource,
                          "/api/git/profile/<profile>/",
                          endpoint="git-profile")

    return flask_app
Example #49
def create_app():
    configuration = randovania.get_configuration()

    dictConfig({
        'version': 1,
        'formatters': {
            'default': {
                'format':
                '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',
            }
        },
        'handlers': {
            'wsgi': {
                'class': 'logging.StreamHandler',
                'stream': 'ext://flask.logging.wsgi_errors_stream',
                'formatter': 'default'
            }
        },
        'root': {
            'level': 'INFO',
            'handlers': ['wsgi']
        }
    })

    app = flask.Flask(__name__)
    app.config['SECRET_KEY'] = configuration["server_config"]["secret_key"]
    app.config["GUEST_KEY"] = configuration["guest_secret"].encode(
        "ascii") if "guest_secret" in configuration else None
    app.config["DISCORD_CLIENT_ID"] = configuration["discord_client_id"]
    app.config["DISCORD_CLIENT_SECRET"] = configuration["server_config"][
        "discord_client_secret"]
    app.config[
        "DISCORD_REDIRECT_URI"] = "http://127.0.0.1:5000/callback/"  # Redirect URI.
    app.config["FERNET_KEY"] = configuration["server_config"][
        "fernet_key"].encode("ascii")
    version_checking = ClientVersionCheck(
        configuration["server_config"]["client_version_checking"])

    database.db.init(configuration["server_config"]['database_path'])
    database.db.connect(reuse_if_open=True)
    database.db.create_tables(database.all_classes)

    sio = ServerApp(app)
    app.sio = sio
    game_session.setup_app(sio)
    user_session.setup_app(sio)

    connected_clients = sio.metrics.info(
        "connected_clients", "How many clients are connected right now.")
    connected_clients.set(0)

    @app.route("/")
    def index():
        return "ok"

    server_version = randovania.VERSION

    @sio.sio.server.on("connect")
    def connect(sid, environ):
        if "HTTP_X_RANDOVANIA_VERSION" not in environ:
            raise ConnectionRefusedError("unknown client version")

        client_app_version = environ["HTTP_X_RANDOVANIA_VERSION"]
        check_client_version(version_checking, client_app_version,
                             server_version)
        connected_clients.inc()

        forwarded_for = environ.get('HTTP_X_FORWARDED_FOR')
        app.logger.info(
            f"Client at {environ['REMOTE_ADDR']} ({forwarded_for}) with "
            f"version {client_app_version} connected.")

    @sio.sio.server.on("disconnect")
    def disconnect(sid):
        connected_clients.dec()
        sio_environ = sio.get_server().environ

        forwarded_for = sio_environ[sid].get('HTTP_X_FORWARDED_FOR')
        app.logger.info(
            f"Client at {sio_environ[sid]['REMOTE_ADDR']} ({forwarded_for}) disconnected."
        )

        session = sio.get_server().get_session(sid)
        if "user-id" in session:
            game_session.report_user_disconnected(sio, session["user-id"],
                                                  app.logger)

    return app
Example #50
def init_logging():
    dictConfig(the_logging_config)
    # Rebind the module-level name so repeat calls are no-ops and logging
    # is configured only once
    global init_logging
    init_logging = lambda: None
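The rebinding makes the function idempotent: after the first call the module-level name points at a no-op.

init_logging()  # applies the_logging_config
init_logging()  # does nothing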
Example #51
def _setup_logging():
    try:
        dictConfig(LOGGING_CONFIG)
    except IOError as e:
        raise Exception('Failed to load logging configuration') from e
Example #52
else:
    DATA_DIR_RE += '$'

# -----------------------------------------------------------------------------

# Load logger configuration (from cwd)...
# But only if the logging configuration is present!
LOGGING_CONFIG_FILE = 'logging.yml'
if os.path.isfile(LOGGING_CONFIG_FILE):
    LOGGING_CONFIG = None
    with open(LOGGING_CONFIG_FILE, 'r') as stream:
        try:
            LOGGING_CONFIG = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)
    dictConfig(LOGGING_CONFIG)
# Our logger...
LOGGER = logging.getLogger(os.path.basename(sys.argv[0])[:-3])

# -----------------------------------------------------------------------------

LOGGER.info('SOURCE_DATA_ROOT="%s"', SOURCE_DATA_ROOT)
LOGGER.info('TARGET_IMAGE="%s"', TARGET_IMAGE)
LOGGER.info('FORCE_BUILD=%s', FORCE_BUILD)
LOGGER.info('HOURLY_DATA=%s', HOURLY_DATA)
LOGGER.info('INSIST_ON_READY=%s', INSIST_ON_READY)
LOGGER.info('READY_FILE=%s', READY_FILE)
LOGGER.info('REGISTRY_USER=%s', REGISTRY_USER)

# Does the root data directory exist?
# If it does not maybe the Agent volume mounts are missing - so it's bad!
Example #53
def startupASLogger(addrOfStarter, logEndpoint, logDefs, transportClass,
                    aggregatorAddress):
    # Dirty trick here to completely re-initialize logging in this
    # process... something the standard Python logging interface does
    # not allow via the API.  We also do not want to run
    # logging.shutdown() because (a) that does not do enough to reset,
    # and (b) it shuts down handlers, but we want to leave the
    # parent's handlers alone.
    logging.root = logging.RootLogger(logging.WARNING)
    logging.Logger.root = logging.root
    logging.Logger.manager = logging.Manager(logging.Logger.root)
    if logDefs:
        dictConfig(logDefs)
    else:
        logging.basicConfig()
    # Disable thesplog from within the logging process (by setting the
    # logfile size to zero) to try to avoid recursive logging loops.
    thesplog_control(logging.WARNING, False, 0)
    #logging.info('ActorSystem Logging Initialized')
    transport = transportClass(logEndpoint)
    setProcName('logger', transport.myAddress)
    transport.scheduleTransmit(
        None, TransmitIntent(addrOfStarter, LoggerConnected()))
    fdup = None
    last_exception = None
    last_exception_time = None
    exception_count = 0
    while True:
        try:
            r = transport.run(None)
            logrecord = r.message
            if isinstance(logrecord, LoggerExitRequest):
                logging.info('ActorSystem Logging Shutdown')
                return
            elif isinstance(logrecord, LoggerFileDup):
                fdup = getattr(logrecord, 'fname', None)
            elif isinstance(logrecord, logging.LogRecord):
                logging.getLogger(logrecord.name).handle(logrecord)
                if fdup:
                    with open(fdup, 'a') as ldf:
                        ldf.write('%s\n' % str(logrecord))
                if aggregatorAddress and \
                   logrecord.levelno >= logging.WARNING:
                    transport.scheduleTransmit(
                        None, TransmitIntent(aggregatorAddress, logrecord))
            else:
                logging.warning('Unknown message received by logger: %s' %
                                str(logrecord))
        except Exception:
            logging.error('Thespian Logger aborting (#%d) with error',
                          exception_count,
                          exc_info=True)
            if last_exception is None or datetime.now(
            ) - last_exception_time > timedelta(seconds=1):
                last_exception_time = datetime.now()
                exception_count = 0
            else:
                exception_count += 1
                if exception_count >= MAX_LOGGING_EXCEPTIONS_PER_SECOND:
                    logging.error(
                        'Too many Thespian Logger exceptions (#%d in %s); exiting!',
                        exception_count,
                        datetime.now() - last_exception_time)
                    return
Example #54
dictConfig({
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'standard': {
            'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
        },
    },
    'handlers': {
        'stream': {
            'level': 'DEBUG',
            'formatter': 'standard',
            'class': 'logging.StreamHandler',
        },
        'file': {
            'level': 'DEBUG',
            'formatter': 'standard',
            'class': 'logging.FileHandler',
            'filename': f"{os.getcwd()}/src/stream/logging/sync.log",
        }
    },
    'loggers': {
        'sync': {
            'handlers': ['stream', 'file'],
            'level': 'DEBUG',
            'propagate': True
        },
    }
})
Example #55
File: main.py Project: siwiwit/zato
def run(base_dir, start_gunicorn_app=True):

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Capture warnings to log files
    logging.captureWarnings(True)

    # Start initializing the server now
    os.chdir(base_dir)

    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocess
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    # We know we don't need warnings because users may explicitly configure no certificate validation.
    # We don't want urllib3 to warn us about it.
    import requests as _r
    _r.packages.urllib3.disable_warnings()

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName(TRACE1, 'TRACE1')

    with open(os.path.join(repo_location, 'logging.conf')) as f:
        dictConfig(yaml.safe_load(f))

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    config = get_config(repo_location, 'server.conf')

    # New in 2.0 - Start monitoring as soon as possible
    if config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(
            config.newrelic.config, config.newrelic.environment or None, config.newrelic.ignore_errors or None,
            config.newrelic.log_file or None, config.newrelic.log_level or None)

    # New in 2.0 - override gunicorn-set Server HTTP header
    gunicorn.SERVER_SOFTWARE = config.misc.get('http_server_header', 'Zato')

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(config.kvdb)
    kvdb_logger.info('Master process config `%s`', kvdb_config)

    # New in 2.0 hence optional
    user_locale = config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value,
                    locale.currency(value, grouping=True).decode('utf-8'))

    # Spring Python
    app_context = get_app_context(config)

    # Makes queries against Postgres asynchronous
    if asbool(config.odb.use_async_driver) and config.odb.engine == 'postgresql':
        make_psycopg_green()

    # New in 2.0 - Put HTTP_PROXY in os.environ.
    http_proxy = config.misc.get('http_proxy', False)
    if http_proxy:
        os.environ['http_proxy'] = http_proxy

    crypto_manager = get_crypto_manager(repo_location, app_context, config)
    parallel_server = app_context.get_object('parallel_server')

    zato_gunicorn_app = ZatoGunicornApplication(parallel_server, repo_location, config.main, config.crypto)

    parallel_server.crypto_manager = crypto_manager
    parallel_server.odb_data = config.odb
    parallel_server.host = zato_gunicorn_app.zato_host
    parallel_server.port = zato_gunicorn_app.zato_port
    parallel_server.repo_location = repo_location
    parallel_server.base_dir = base_dir
    parallel_server.tls_dir = os.path.join(parallel_server.base_dir, 'config', 'repo', 'tls')
    parallel_server.fs_server_config = config
    parallel_server.user_config.update(config.user_config_items)
    parallel_server.startup_jobs = app_context.get_object('startup_jobs')
    parallel_server.app_context = app_context

    # Remove all locks possibly left over by previous server instances
    kvdb = app_context.get_object('kvdb')
    kvdb.component = 'master-proc'
    clear_locks(kvdb, config.main.token, config.kvdb, crypto_manager.decrypt)

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = config.get('profiler', {}).get('enabled', False)

    # New in 2.0 so it's optional.
    sentry_config = config.get('sentry')

    dsn = sentry_config.pop('dsn', None)
    if dsn:

        from raven import Client
        from raven.handlers.logging import SentryHandler

        handler_level = sentry_config.pop('level')
        client = Client(dsn, **sentry_config)

        handler = SentryHandler(client=client)
        handler.setLevel(getattr(logging, handler_level))

        logger = logging.getLogger('')
        logger.addHandler(handler)

        for name in logging.Logger.manager.loggerDict:
            if name.startswith('zato'):
                logger = logging.getLogger(name)
                logger.addHandler(handler)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(os.path.join(base_dir, config.profiler.profiler_dir))
        parallel_server.on_wsgi_request = ProfileMiddleware(
            parallel_server.on_wsgi_request,
            log_filename = os.path.join(profiler_dir, config.profiler.log_filename),
            cachegrind_filename = os.path.join(profiler_dir, config.profiler.cachegrind_filename),
            discard_first_request = config.profiler.discard_first_request,
            flush_at_shutdown = config.profiler.flush_at_shutdown,
            path = config.profiler.url_path,
            unwind = config.profiler.unwind)

    # New in 2.0 - set environment variables for servers to inherit
    os_environ = config.get('os_environ', {})
    for key, value in os_environ.items():
        os.environ[key] = value

    # Run the app at last
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
Example #56
import tkinter as tk
from json import load
from logging import config, getLogger
with open('./config/log_conf.json', 'r') as f:
    config.dictConfig(load(f))


class BaseFrame(tk.Frame):
    def __init__(self, master):
        super().__init__(master)
        self.logger = getLogger("gui.frame")
        self.master = master
        self.bg = 'light gray'
        self.config(bg=self.bg)
        self.logger.debug("%sFrame is initialized.", self.__class__)

    def create_widgets(self):
        #self.pack(fill=tk.BOTH, expand=True, anchor=tk.CENTER)
        self.grid(row=0, column=0, sticky="nsew")

    def finish(self):
        self.master.remove_frame()
Example #57
def logging_configuration():

    from colorlog import default_log_colors
    colors = default_log_colors.copy()
    colors['DEBUG'] = 'purple'

    LOGGING = {
        'version': 1,
        'disable_existing_loggers': True,
        'formatters': {
            'verbose': {
                'format':
                '%(levelname)s %(asctime)s %(module)s %(name)s %(message)s'
            },
            'simple': {
                'format': '%(levelname)s %(message)s'
            },
            'colorful': {
                '()': 'colorlog.ColoredFormatter',
                'format':
                '%(threadName)s:%(asctime)s %(log_color)s%(name)s%(reset)s %(levelname)s:%(message)s',
                'log_colors': colors,
                # 'format': "%(log_color)s%(levelname)-8s%(reset)s %(blue)s%(message)s"
            },
            'old_caplog_format': {
                'format':
                '%(threadName)s:%(asctime)s:%(name)s:%(levelname)s:%(message)s'
            }
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                # 'formatter': 'verbose',
                'formatter': 'colorful',
                'level': 'DEBUG',
            },
            'caplog': {
                'class': 'automate.test_utils.CaptureLogHandler',
                'formatter': 'old_caplog_format',
                'level': 'DEBUG',
            }
        },
        'loggers': {
            '': {
                'handlers': ['caplog', 'console'],
                'level': 'DEBUG',
                'propagate': True,
            },
            'traits': {
                'handlers': ['caplog', 'console'],
                'level': 'DEBUG',
                'propagate': False,
            },
            'automate': {
                'handlers': ['caplog', 'console'],
                'level': 'DEBUG',
                'propagate': False,
            },
        },
    }
    dictConfig(LOGGING)
    yield None
Example #58
                i = 0
                click.echo(" ")
                click.echo("    --> ", nl=False)
            time.sleep(mining_every_x_seconds)


if __name__ == "__main__":
    # central and early configuring of logging see
    # https://flask.palletsprojects.com/en/1.1.x/logging/#basic-configuration
    dictConfig({
        "version": 1,
        "formatters": {
            "default": {
                "format":
                "[%(asctime)s] %(levelname)s in %(module)s: %(message)s"
            }
        },
        "handlers": {
            "wsgi": {
                "class": "logging.StreamHandler",
                "stream": "ext://flask.logging.wsgi_errors_stream",
                "formatter": "default",
            }
        },
        "root": {
            "level": "INFO",
            "handlers": ["wsgi"]
        },
    })
    cli()
Example #59
dictConfig({
    'version': 1,
    'formatters': {
        'default': {
            'format':
            '[%(asctime)s] %(process)s %(levelname)s in %(name)s: %(message)s',
        }
    },
    'handlers': {
        'wsgi': {
            'class': 'logging.StreamHandler',
            'stream': 'ext://flask.logging.wsgi_errors_stream',
            'formatter': 'default'
        }
    },
    'root': {
        'level': 'INFO',
        'handlers': ['wsgi']
    },
    'loggers': {
        'werkzeug': {
            'level': 'DEBUG'
        },
        'flask': {
            'level': 'DEBUG'
        },
        'openeo': {
            'level': 'DEBUG'
        },
        'openeo_driver': {
            'level': 'DEBUG'
        },
        'kazoo': {
            'level': 'WARN'
        },
    }
})
Example #60
def create_logger():
    config = get_logger_config()
    dictConfig(config)
    logger = logging.getLogger()
    return logger