Example #1
    def shutdown(self):
        self.__init__()
        try:
            logging.shutdown()
            logging.captureWarnings(None)
        except:
            pass
def __remove_temp_logging_handler():
    '''
    This function will run once logging has been configured. It just removes
    the temporary stream Handler from the logging handlers.
    '''
    if is_logging_configured():
        # In this case, the temporary logging handler has been removed, return!
        return

    # This should already be done, but...
    __remove_null_logging_handler()

    root_logger = logging.getLogger()
    global LOGGING_TEMP_HANDLER

    for handler in root_logger.handlers:
        if handler is LOGGING_TEMP_HANDLER:
            root_logger.removeHandler(LOGGING_TEMP_HANDLER)
            # Redefine the null handler to None so it can be garbage collected
            LOGGING_TEMP_HANDLER = None
            break

    if sys.version_info >= (2, 7):
        # Python versions >= 2.7 allow warnings to be redirected to the logging
        # system now that it's configured. Let's enable it.
        logging.captureWarnings(True)
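For context, here is a minimal sketch of the temporary-handler pattern that this function cleans up. Only the removal logic above is from the original code; that the temporary handler is a plain StreamHandler is an assumption.

import logging

# A throwaway console handler installed before real logging configuration exists.
LOGGING_TEMP_HANDLER = logging.StreamHandler()
logging.getLogger().addHandler(LOGGING_TEMP_HANDLER)

# ... later, once the real handlers are configured, drop it again:
logging.getLogger().removeHandler(LOGGING_TEMP_HANDLER)
LOGGING_TEMP_HANDLER = None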
Example #3
    def initialize_logging(self):

        log_file = self.config['main']['log_file']
        log_level = self.config['main']['log_level']

        level_map = {'CRITICAL': logging.CRITICAL,
                     'ERROR': logging.ERROR,
                     'WARNING': logging.WARNING,
                     'INFO': logging.INFO,
                     'DEBUG': logging.DEBUG
                     }

        handler = logging.FileHandler(os.path.expanduser(log_file))

        formatter = logging.Formatter(
            '%(asctime)s (%(process)d/%(threadName)s) '
            '%(name)s %(levelname)s - %(message)s')

        handler.setFormatter(formatter)

        root_logger = logging.getLogger('mycli')
        root_logger.addHandler(handler)
        root_logger.setLevel(level_map[log_level.upper()])

        logging.captureWarnings(True)

        root_logger.debug('Initializing mycli logging.')
        root_logger.debug('Log file %r.', log_file)
    def make_logger(self, f=None):
        """self.make_logger(f) -> logger. Creates a logging object to
        be used within the DebugPrint class that sends the debugging
        output to a file.
        We will configure the log so it outputs to both stderr and a file.

        """
        self.fhandler = None
        # Setup root logger
        self.logger = logging.getLogger('VisLog')
        self.logger.setLevel(logging.DEBUG)
        self.format = logging.Formatter("%(asctime)s %(levelname)s:\n%(message)s")

        # Setup warnings logger
        if hasattr(logging, 'captureWarnings'):
            wlogger = logging.getLogger('py.warnings')
            wlogger.propagate = False
            wlogger.addHandler(EmitWarnings(self.logger))
            logging.captureWarnings(True)

        # first we define a handler for logging to a file
        if f:
            self.set_logfile(f)

        #then we define a handler to log to the console
        self.console = logging.StreamHandler()
        self.console.setFormatter(self.format)
        self.console.setLevel(logging.WARNING)
        self.logger.addHandler(self.console)
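EmitWarnings is not shown in the snippet above. A plausible minimal version (an assumption, not the project's actual class) simply forwards records captured on the 'py.warnings' logger to the main logger:

import logging

class EmitWarnings(logging.Handler):
    """Forward captured warning records to another logger."""

    def __init__(self, target_logger):
        logging.Handler.__init__(self)
        self.target_logger = target_logger

    def emit(self, record):
        # Re-log the warning text on the target logger.
        self.target_logger.warning(record.getMessage())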
Example #5
    def setup(self, verbose_level, error_level, logdir):
        self.presetup()
        logger_dnf = logging.getLogger("dnf")

        # setup file logger
        logfile = os.path.join(logdir, dnf.const.LOG)
        handler = _create_filehandler(logfile)
        logger_dnf.addHandler(handler)
        # temporarily turn off stdout/stderr handlers:
        self.stdout_handler.setLevel(SUPERCRITICAL)
        self.stderr_handler.setLevel(SUPERCRITICAL)
        # put the marker in the file now:
        _paint_mark(logger_dnf)
        # bring std handlers to the preferred level
        self.stdout_handler.setLevel(verbose_level)
        self.stderr_handler.setLevel(error_level)

        # setup Python warnings
        logging.captureWarnings(True)
        logger_warnings = logging.getLogger("py.warnings")
        logger_warnings.addHandler(self.stderr_handler)
        logger_warnings.addHandler(handler)

        # setup RPM callbacks logger
        logger_rpm = logging.getLogger("dnf.rpm")
        logger_rpm.propagate = False
        logger_rpm.setLevel(SUBDEBUG)
        logfile = os.path.join(logdir, dnf.const.LOG_RPM)
        handler = _create_filehandler(logfile)
        logger_rpm.addHandler(handler)
        _paint_mark(logger_rpm)
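_create_filehandler, _paint_mark, SUPERCRITICAL and SUBDEBUG are dnf helpers that are not part of this snippet. Roughly, as a sketch with illustrative values rather than dnf's exact code:

import logging

SUPERCRITICAL = 100   # above CRITICAL; effectively mutes a handler
SUBDEBUG = 6          # below DEBUG; used for very verbose RPM output

def _create_filehandler(logfile):
    handler = logging.FileHandler(logfile)
    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
    return handler

def _paint_mark(logger):
    # Write a separator line so individual sessions are easy to tell apart in the file.
    logger.info("--- logging initialized ---")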
Example #6
def init_log(args):
    """Init loggers based on the argparse namespace passed."""
    level = args.loglevel.upper()
    try:
        numeric_level = getattr(logging, level)
    except AttributeError:
        raise ValueError("Invalid log level: {}".format(args.loglevel))

    if numeric_level > logging.DEBUG and args.debug:
        numeric_level = logging.DEBUG

    console, ram = _init_handlers(numeric_level, args.color, args.force_color,
                                  args.json_logging, args.loglines)
    root = logging.getLogger()
    if console is not None:
        if args.logfilter is not None:
            console.addFilter(LogFilter(args.logfilter.split(',')))
        root.addHandler(console)
    if ram is not None:
        root.addHandler(ram)
    root.setLevel(logging.NOTSET)
    logging.captureWarnings(True)
    _init_py_warnings()
    QtCore.qInstallMessageHandler(qt_message_handler)
    global _log_inited
    _log_inited = True
Example #7
    def start(self, level):

        self._level = level
        logging.captureWarnings(True)

        # Set the new log file path. Ensure that the directory exists.
        # Ensure that the log directory is not too full. We'll determine this by size rather than by date archiving.
        # This is suboptimal compared to native Windows or UNIX size listing, but Python does not offer anything faster
        #   without system scripting. However, given the size limit (10MB), this method will not take more than 5ms.
        if platform.system() == "Linux" or platform.system() == "Darwin":
            self.__config_unix__()
        # Windows case removed. TODO
        else:
            raise lexcep("Error: Incompatible OS detected. Only Linux, Mac, or Windows is supported").with_traceback(sys.exc_info()[2])

        # Verify that the file can be created and does not exist
        try:
            with open(self._fn) as f: pass
        except IOError as e:
            raise lexcep("log file\n\n" + self._fn + "\n\nexists. Uniquely named log file should not exist prior to initialization.\nPlease contact your System Administrator\n\n" + str(e)).with_traceback(sys.exc_info()[2])

        try:
            logging.basicConfig(filename=self._fn, level=self._level)
        except FileNotFoundError as e:
            raise lexcep("log file\n\n" + self._fn + "\n\nwas not created for logging.\nCheck log directory permissions or contact your System Administrator\n\n" + str(e)).with_traceback(sys.exc_info()[2])
        except ValueError as e:
            raise lexcep("ValueError was encountered while initializing logging:\n\n" + str(e)).with_traceback(sys.exc_info()[2])
        except Exception as e:
            raise lexcep("Internal Error: unhandled exception occured:\n\n" + str(e)).with_traceback(sys.exc_info()[2])
Example #8
    def config_logging(self):
        import logging.config
        logging_config = {'version': 1, 'incremental': False,
                          'formatters': {'standard': {'format': '-- %(levelname)-8s [%(name)s] -- %(message)s'},
                                         'debug':    {'format': '''\
-- %(levelname)-8s %(asctime)24s PID %(process)-12d TID %(thread)-20d
   from logger "%(name)s" 
   at location %(pathname)s:%(lineno)d [%(funcName)s()] 
   ::
   %(message)s
'''}},
                          'handlers': {'console': {'class': 'logging.StreamHandler',
                                                   'stream': 'ext://sys.stdout',
                                                   'formatter': 'standard'}},
                          'loggers': {'mdtools': {'handlers': ['console'], 'propagate': False},
                                      'work_managers': {'handlers': ['console'], 'propagate': False},
                                      'py.warnings': {'handlers': ['console'], 'propagate': False}},
                          'root': {'handlers': ['console']}}
        
        logging_config['loggers'][self.process_name] = {'handlers': ['console'], 'propagate': False}
            
        if self.verbosity == 'debug':
            logging_config['root']['level'] = 5 #'DEBUG'
            logging_config['handlers']['console']['formatter'] = 'debug'
        elif self.verbosity == 'verbose':
            logging_config['root']['level'] = 'INFO'
        else:
            logging_config['root']['level'] = 'WARNING'

        logging.config.dictConfig(logging_config)
        logging_config['incremental'] = True
        logging.captureWarnings(True)
Example #9
def configure_stream_logger(level, logger):
	"""
	Configure the default stream handler for logging messages to the console.
	This also configures the basic logging environment for the application.

	:param level: The level to set the logger to.
	:type level: int, str
	:param str logger: The logger to add the stream handler for.
	:return: The new configured stream handler.
	:rtype: :py:class:`logging.StreamHandler`
	"""
	if isinstance(level, str):
		level = getattr(logging, level)
	root_logger = logging.getLogger('')
	for handler in root_logger.handlers:
		root_logger.removeHandler(handler)

	logging.getLogger(logger).setLevel(logging.DEBUG)
	console_log_handler = logging.StreamHandler()
	console_log_handler.setLevel(level)
	if its.on_linux:
		console_log_handler.setFormatter(color.ColoredLogFormatter("%(levelname)s %(message)s"))
	else:
		console_log_handler.setFormatter(logging.Formatter("%(levelname)-8s %(message)s"))
	logging.getLogger(logger).addHandler(console_log_handler)
	logging.captureWarnings(True)
	return console_log_handler
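A usage sketch, assuming the surrounding project's its and color modules are importable; the logger name is illustrative:

import logging
import warnings

handler = configure_stream_logger('INFO', 'myapp')
logging.getLogger('myapp').info('console handler installed: %r', handler)

# With captureWarnings(True) in effect, this is routed to the 'py.warnings' logger.
warnings.warn('deprecated call')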
Example #10
    def _configure_logging(self):
        if self.options.debug:
            warnings.simplefilter('default')
            try:
                logging.captureWarnings(True)
            except AttributeError:
                pass

        root_logger = logging.getLogger('')
        root_logger.setLevel(logging.INFO)

        # Set up logging to a file
        if self.options.log_file:
            file_handler = logging.FileHandler(filename=self.options.log_file)
            formatter = logging.Formatter(self.LOG_GEARBOX_FORMAT, datefmt=self.LOG_DATE_FORMAT)
            file_handler.setFormatter(formatter)
            root_logger.addHandler(file_handler)

        # Always send higher-level messages to the console via stderr
        console = logging.StreamHandler(sys.stderr)
        console_level = {0: logging.WARNING,
                         1: logging.INFO,
                         2: logging.DEBUG,
                         }.get(self.options.verbose_level, logging.DEBUG)
        console.setLevel(console_level)
        formatter = logging.Formatter(self.LOG_GEARBOX_FORMAT, datefmt=self.LOG_DATE_FORMAT)
        console.setFormatter(formatter)
        root_logger.addHandler(console)
    def config_production(self):
        logger = logging.getLogger(self.name)
        if not logger.root.handlers:
            self._read_config_file()
            logging.captureWarnings(True)
            logger = logging.getLogger(self.name)
        return logger
def main(earliest_night, latest_night, data_dir, jar, xml, db, out, queue, walltime, engine, num_runs, vmem, log_level, port, source, conditions, max_delta_t, local, password):

    level = logging.INFO
    if log_level == 'DEBUG':
        level = logging.DEBUG
    elif log_level == 'WARN':
        level = logging.WARN
    elif log_level == 'INFO':
        level = logging.INFO

    logging.captureWarnings(True)
    logging.basicConfig(format=('%(asctime)s - %(name)s - %(levelname)s - ' +  '%(message)s'), level=level)

    jarpath = os.path.abspath(jar)
    xmlpath = os.path.abspath(xml)
    outpath = os.path.abspath(out)
    erna.ensure_output(out)
    db_path = os.path.abspath(db)
    output_directory = os.path.dirname(outpath)
    # create the dir if it doesn't exist
    os.makedirs(output_directory, exist_ok=True)
    logger.info("Writing output data to {}".format(out))
    factdb = sqlalchemy.create_engine("mysql+pymysql://factread:{}@129.194.168.95/factdata".format(password))
    data_conditions=dcc.conditions[conditions]
    df_runs = erna.load(earliest_night, latest_night, data_dir, source_name=source, timedelta_in_minutes=max_delta_t, factdb=factdb, data_conditions=data_conditions)

    logger.info("Would process {} jobs with {} runs per job".format(len(df_runs)//num_runs, num_runs))
    click.confirm('Do you want to continue processing and start jobs?', abort=True)

    job_list = make_jobs(jarpath, xmlpath, db_path, output_directory, df_runs,  engine, queue, vmem, num_runs, walltime)
    job_outputs = gridmap.process_jobs(job_list, max_processes=len(job_list), local=local)
    erna.collect_output(job_outputs, out, df_runs)
    def _configure_logging(self):
        """
        Setup logging from LOGGING_CONFIG and LOGGING settings.
        """
        try:
            # Route warnings through python logging
            logging.captureWarnings(True)
            # Allow DeprecationWarnings through the warnings filters
            warnings.simplefilter("default", DeprecationWarning)
        except AttributeError:
            # No captureWarnings on Python 2.6, DeprecationWarnings are on anyway
            pass

        if self.LOGGING_CONFIG:
            from django.utils.log import DEFAULT_LOGGING
            # First find the logging configuration function ...
            logging_config_path, logging_config_func_name = self.LOGGING_CONFIG.rsplit('.', 1)
            logging_config_module = importlib.import_module(logging_config_path)
            logging_config_func = getattr(logging_config_module, logging_config_func_name)

            logging_config_func(DEFAULT_LOGGING)

            if self.LOGGING:
                # Backwards-compatibility shim for #16288 fix
                compat_patch_logging_config(self.LOGGING)

                # ... then invoke it with the logging settings
                logging_config_func(self.LOGGING)
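For reference, a sketch of the settings this method consumes; the values are illustrative, and in recent Django the default LOGGING_CONFIG points at the stdlib dictConfig:

LOGGING_CONFIG = 'logging.config.dictConfig'
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {'console': {'class': 'logging.StreamHandler'}},
    'root': {'handlers': ['console'], 'level': 'INFO'},
}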
Example #14
def initialize_logger():
    """sets up the logger including a console, file and qt handler
    """
    # initialize logger
    logging.basicConfig(format="%(message)s", level=logging.INFO)
    logging.addLevelName(logging.CRITICAL, 'critical')
    logging.addLevelName(logging.ERROR, 'error')
    logging.addLevelName(logging.WARNING, 'warning')
    logging.addLevelName(logging.INFO, 'info')
    logging.addLevelName(logging.DEBUG, 'debug')
    logging.addLevelName(logging.NOTSET, 'not set')
    logging.captureWarnings(True)
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # set level of stream handler which logs to stderr
    logger.handlers[0].setLevel(logging.WARNING)

    # add file logger
    rotating_file_handler = logging.handlers.RotatingFileHandler(
        'qudi.log', maxBytes=10*1024*1024, backupCount=5)
    rotating_file_handler.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s %(name)s %(message)s',
        datefmt="%Y-%m-%d %H:%M:%S"))
    rotating_file_handler.doRollover()
    rotating_file_handler.setLevel(logging.DEBUG)
    logger.addHandler(rotating_file_handler)

    # add Qt log handler
    qt_log_handler = QtLogHandler()
    qt_log_handler.setLevel(logging.DEBUG)
    logging.getLogger().addHandler(qt_log_handler)

    for logger_name in ['core', 'gui', 'logic', 'hardware']:
        logging.getLogger(logger_name).setLevel(logging.DEBUG)
def main():
	parser = argparse.ArgumentParser(description='King Phisher SMTP Debug Server', conflict_handler='resolve')
	parser.add_argument('-v', '--version', action='version', version=parser.prog + ' Version: ' + version.version)
	parser.add_argument('-L', '--log', dest='loglvl', action='store', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], default='CRITICAL', help='set the logging level')
	parser.add_argument('-f', '--foreground', dest='foreground', action='store_true', default=False, help='run in foreground (do not fork)')
	parser.add_argument('-a', '--address', dest='address', default='127.0.0.1', help='address to listen on')
	parser.add_argument('-p', '--port', dest='port', type=int, default=2525, help='port to listen on')
	arguments = parser.parse_args()

	logging.getLogger('').setLevel(logging.DEBUG)
	console_log_handler = logging.StreamHandler()
	console_log_handler.setLevel(getattr(logging, arguments.loglvl))
	console_log_handler.setFormatter(logging.Formatter("%(levelname)-8s %(message)s"))
	logging.getLogger('').addHandler(console_log_handler)
	logging.captureWarnings(True)
	del parser

	if (not arguments.foreground) and os.fork():
		return

	bind_address = (arguments.address, arguments.port)
	server = smtp_server.KingPhisherSMTPServer(bind_address, None, debugging=True)
	color.print_status("smtp server listening on {0}:{1}".format(bind_address[0], bind_address[1]))
	try:
		server.serve_forever()
	except KeyboardInterrupt:
		color.print_status('keyboard interrupt caught, now exiting')
Example #16
def setuplogger(consolelevel, filename=None, filelevel=None):
    """ setup the python root logger to log to the console with defined log
        level. Optionally also log to file with the provided level """

    if filelevel is None:
        filelevel = consolelevel

    if hasattr(logging, "captureWarnings"):
        logging.captureWarnings(True)

    rootlogger = logging.getLogger()
    rootlogger.setLevel(min(consolelevel, filelevel))

    formatter = logging.Formatter('%(asctime)s.%(msecs)03d [%(levelname)s] %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')

    if filename is not None:
        filehandler = logging.FileHandler(filename)
        filehandler.setLevel(filelevel)
        filehandler.setFormatter(formatter)
        rootlogger.addHandler(filehandler)

    consolehandler = logging.StreamHandler()
    consolehandler.setLevel(consolelevel)
    consolehandler.setFormatter(formatter)
    rootlogger.addHandler(consolehandler)
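An illustrative call (the filename is made up): console output at INFO, file output at DEBUG.

import logging

setuplogger(logging.INFO, filename='example.log', filelevel=logging.DEBUG)
logging.getLogger().debug('only the file sees this')
logging.getLogger().info('both handlers see this')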
Example #17
def set_up_log(filename):

    # Add file extension.
    filename += '.log'

    print('Preparing log file: {}'.format(filename))

    # Capture warnings.
    logging.captureWarnings(True)

    # Set output format.
    formatter = logging.Formatter(fmt='%(asctime)s %(message)s',
                                  datefmt='%d/%m/%Y %H:%M:%S')

    # Create file handler.
    fh = logging.FileHandler(filename=filename, mode='w')
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(formatter)

    # Create log.
    log = logging.getLogger('log')
    log.setLevel(logging.DEBUG)
    log.addHandler(fh)

    # Send test message.
    log.info('The log file has been set up.')

    return log
Example #18
def load_log(level):
    """
    Load a log that controls the amount of information that is shown to the
    user.

    Parameter
    ---------
        level : logging.Level

    Return
    ------
        log : logging.log
    """

    logging.captureWarnings(False)

    formatter = logging.Formatter()

    handler = logging.StreamHandler()
    handler.setFormatter(formatter)

    log = logging.getLogger(__name__)
    log.addHandler(handler)
    log.setLevel(level)

    return log
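A small usage sketch; the level is arbitrary:

import logging

log = load_log(logging.DEBUG)
log.debug('verbose output enabled')
# captureWarnings(False) above means warnings.warn() keeps its normal behaviour
# instead of being routed into the logging system.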
Example #19
def main():

    logging.captureWarnings(True)

    # Request the list of contributors
    print('GET {}'.format(URL))
    resp = requests.get(URL)
    contributors = resp.json()

    lines = []
    for contributor in contributors:
        time.sleep(1.0)

        # Request each contributor individually to get the full name
        print('GET {}'.format(contributor['url']))
        resp = requests.get(contributor['url'])
        user = resp.json()

        name = user.get('name') or user['login']
        url = user['html_url']
        lines.append('* `{} <{}>`_'.format(name, url))

    print('Writing to {}'.format(FILENAME))
    text = HEADER + '\n'.join(lines)
    text = text.encode('utf-8')
    with open(FILENAME, 'wb') as fp:
        fp.write(text)
Example #20
def log(debug=False, path=None):
    """Log messages.

    If path is None, logging messages will be printed to the console (stdout).
    If it not None, logging messages will be appended to the file at that path.

    Typically someone using Nengo as a library will set up their own
    logging things, and Nengo will just populate their log.
    However, if the user is using Nengo directly, they can use this
    function to get log output.
    """
    level = logging.DEBUG if debug else logging.WARNING
    logging.root.setLevel(level)

    if path is None:
        handler = console_handler
    else:
        for handler in logging.root.handlers:
            if (isinstance(handler, logging.FileHandler)
                    and handler.baseFilename == path
                    and handler.formatter == file_formatter):
                break
        else:
            handler = logging.FileHandler(path, encoding='utf-8')
            handler.setFormatter(file_formatter)

    if handler not in logging.root.handlers:
        logging.root.addHandler(handler)
    handler.setLevel(level)
    try:
        logging.captureWarnings(True)
    except AttributeError:
        # logging.captureWarnings doesn't exist in Python 2.6; ignore it
        pass
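An illustrative call, assuming Nengo's module-level console_handler and file_formatter exist as the function expects; the file name is made up:

import logging

log(debug=True, path='nengo_debug.log')
logging.getLogger(__name__).debug('this ends up in nengo_debug.log')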
    def test_warnings(self):
        with warnings.catch_warnings():
            logging.captureWarnings(True)
            try:
                warnings.filterwarnings("always", category=UserWarning)
                file = io.StringIO()
                h = logging.StreamHandler(file)
                logger = logging.getLogger("py.warnings")
                logger.addHandler(h)
                warnings.warn("I'm warning you...")
                logger.removeHandler(h)
                s = file.getvalue()
                h.close()
                self.assertTrue(s.find("UserWarning: I'm warning you...\n") > 0)

                #See if an explicit file uses the original implementation
                file = io.StringIO()
                warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
                                        file, "Dummy line")
                s = file.getvalue()
                file.close()
                self.assertEqual(s,
                        "dummy.py:42: UserWarning: Explicit\n  Dummy line\n")
            finally:
                logging.captureWarnings(False)
Example #22
def configure_logger(level):
    """
    Configure a root logger to print records in pretty format.

    The format is more readable for end users, since they don't need to know
    a record's datetime or the source of the record.

    Examples::

        [INFO] message
        [WARN] message
        [ERRO] message

    :param level: a minimum logging level to be printed
    """
    class _Formatter(logging.Formatter):
        def format(self, record):
            record.levelname = record.levelname[:4]
            return super(_Formatter, self).format(record)

    # create stream handler with custom formatter
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(_Formatter('[%(levelname)s] %(message)s'))

    # configure root logger
    logger = logging.getLogger()
    logger.addHandler(stream_handler)
    logger.setLevel(level)

    # capture warnings issued by 'warnings' module
    logging.captureWarnings(True)
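A quick usage sketch showing the abbreviated level names produced by the custom formatter:

import logging

configure_logger(logging.INFO)
logging.getLogger(__name__).warning('disk space is low')   # printed as "[WARN] disk space is low"
logging.getLogger(__name__).error('cannot continue')       # printed as "[ERRO] cannot continue"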
Example #23
    def _configure_logging(self):
        """Configure logging for Insights.

        It will load configuration from logging.conf if present
        in root directory, otherwise custom logging format is used by
        default

        """

        if self.configured:
            LOGGER.info("Already configured")
            return

        # All output should be made by the logging module, including warnings
        logging.captureWarnings(True)

        # Allow overriding logging config based on the presence of logging.conf
        # file on Insights's project root
        logging_conf_path = os.path.join(get_project_root(), 'logging.conf')
        if os.path.isfile(logging_conf_path):
            config.fileConfig(logging_conf_path)
        else:
            logging.basicConfig(
                format='%(levelname)s %(module)s:%(lineno)d: %(message)s'
            )
Example #24
def set_logging_config(config, debug, verbosity, uncaught_logger, uncaught_handler):
	# configure logging globally
	import logging.config as logconfig
	logconfig.dictConfig(config)

	# make sure we log any warnings
	log.captureWarnings(True)

	import warnings

	categories = (DeprecationWarning, PendingDeprecationWarning)
	if verbosity > 2:
		warnings.simplefilter("always")
	elif debug or verbosity > 0:
		for category in categories:
			warnings.simplefilter("always", category=category)

	# make sure we also log any uncaught exceptions
	if uncaught_logger is None:
		logger = log.getLogger(__name__)
	else:
		logger = log.getLogger(uncaught_logger)

	if uncaught_handler is None:
		def exception_logger(exc_type, exc_value, exc_tb):
			logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_tb))

		uncaught_handler = exception_logger
	sys.excepthook = uncaught_handler

	return logger
Example #25
def sync_netscaler_config(collector, stack, **kwargs):
    """Sync netscaler configuration with the specified netscaler"""
    logging.captureWarnings(True)
    logging.getLogger("py.warnings").setLevel(logging.ERROR)
    configuration = collector.configuration

    if stack.netscaler is NotSpecified or stack.netscaler.configuration is NotSpecified:
        raise BespinError("Please configure {netscaler.configuration}")

    if stack.netscaler.syncable_environments is not NotSpecified:
        if configuration["environment"] not in stack.netscaler.syncable_environments:
            raise BespinError("Sorry, can only sync netscaler config for particular environments", wanted=configuration["environment"], available=list(stack.netscaler.syncable_environments))

    for_layers = []
    all_configuration = {}
    for vkey, value in stack.netscaler.configuration.items():
        for key, thing in value.items():
            if thing.environments is NotSpecified or configuration["environment"] in thing.environments:
                for_layers.append(thing.long_name)
                all_configuration[thing.long_name] = thing
                if vkey not in all_configuration:
                    all_configuration[vkey] = {}
                all_configuration[vkey][key] = thing

    layers = Layers(for_layers, all_stacks=all_configuration)
    layers.add_all_to_layers()

    stack.netscaler.syncing_configuration = True
    with stack.netscaler as netscaler:
        for layer in layers.layered:
            for _, thing in layer:
                netscaler.sync(all_configuration, configuration["environment"], thing)
Example #26
def cli(debug, config_dir, output, query):
    """CloudMunch CLI.\n
    This is a CLI tool for executing commands against CloudMunch API.
    """
    log = logging.getLogger('cloudmunch')

    stdoutHandler = logging.StreamHandler()
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    stdoutHandler.setFormatter(formatter)
    log.addHandler(stdoutHandler)

    if debug:
        log.setLevel(logging.DEBUG)
        import httplib
        httplib.HTTPConnection.debuglevel = 1

        requests_log = logging.getLogger("requests.packages.urllib3")
        requests_log.setLevel(logging.DEBUG)
        requests_log.propagate = True
        if sys.version_info >= (2,7):
            logging.captureWarnings(True)
    else:
        log.setLevel(logging.NOTSET)
        urllib3.disable_warnings()
        if sys.version_info >= (2,7):
            logging.captureWarnings(False)


    log.info('Setting config dir to %s' % config_dir)
    log.info('Python version is %s - %s' % (sys.version, sys.hexversion))
    cloudmunch.config.config_dir = config_dir
    cloudmunch.config.credentials = cloudmunch.config.Credentials(cloudmunch.config.config_dir)
    cloudmunch.config.config = cloudmunch.config.Config(cloudmunch.config.config_dir)
Example #27
def main():
    """
    main entry point for script
    """
    opts = getoptions()

    logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s', datefmt='%Y-%m-%dT%H:%M:%S', level=opts['log'])
    logging.captureWarnings(True)

    config = Config()

    threads = opts['threads']

    if threads <= 1:
        processjobs(config, opts, None)
        return
    else:
        proclist = []
        for procid in xrange(threads):
            p = Process(target=processjobs, args=(config, opts, procid))
            p.start()
            proclist.append(p)

        for proc in proclist:
            proc.join()
Example #28
    def __init__(self, default_logging_levels={}):

        logging.basicConfig(level=self.logging_levels.get('logging_level', logging.INFO))

        self.system_logger = logging.getLogger("system")
        self.world_logger = logging.getLogger("world")
        self.nodenet_logger = logging.getLogger("nodenet")

        self.system_logger.setLevel(self.logging_levels.get(default_logging_levels.get('system'), logging.WARNING))
        self.world_logger.setLevel(self.logging_levels.get(default_logging_levels.get('world'), logging.WARNING))
        self.nodenet_logger.setLevel(self.logging_levels.get(default_logging_levels.get('nodenet'), logging.WARNING))

        logging.captureWarnings(True)

        self.handlers = {
            'system': RecordWebStorageHandler(self.system_record_storage),
            'world': RecordWebStorageHandler(self.world_record_storage),
            'nodenet': RecordWebStorageHandler(self.nodenet_record_storage)
        }

        logging.getLogger("py.warnings").addHandler(self.handlers['system'])

        logging.getLogger("system").addHandler(self.handlers['system'])
        logging.getLogger("world").addHandler(self.handlers['world'])
        logging.getLogger("nodenet").addHandler(self.handlers['nodenet'])

        logging.getLogger("system").debug("System logger ready.")
        logging.getLogger("world").debug("World logger ready.")
        logging.getLogger("nodenet").debug("Nodenet logger ready.")
Example #29
def configLogging(build):
    global log
    logging.captureWarnings(True)
    term = os.environ.get('TERM')
    if term and term.startswith('screen.'):
        term = term[7:]
    record_format = '%(name)s: %(message)s'
    try:
        clifmt = utils.ColoredFormatter(
            blessings.Terminal(term, force_styling=build.force_color),
            record_format)
    except curses.error:
        try:
            # try falling back to basic term type
            clifmt = utils.ColoredFormatter(
                blessings.Terminal('linux', force_styling=build.force_color),
                record_format)
        except curses.error:
            # fall back to uncolored formatter
            clifmt = logging.Formatter(record_format)
    root_logger = logging.getLogger()
    clihandler = logging.StreamHandler(sys.stdout)
    clihandler.setFormatter(clifmt)
    if isinstance(build.log_level, str):
        build.log_level = build.log_level.upper()
    root_logger.setLevel(build.log_level)
    log.setLevel(build.log_level)
    root_logger.addHandler(clihandler)
    requests_logger = logging.getLogger("requests")
    requests_logger.setLevel(logging.WARN)
    urllib_logger = logging.getLogger("urllib3")
    urllib_logger.setLevel(logging.CRITICAL)
Example #30
    def init(self, parser, opts, args):
        if len(args) < 1:
            parser.error("No application module specified.")

        self.cfg.set("default_proc_name", args[0])
        self.app_uri = args[0]

        logging.captureWarnings(True)
        if opts.auto_config:
            # Default config_dir
            config_dir = os.path.join(self.cfg.chdir,
                                      'etc',
                                      self.app_uri.split(":", 1)[0])
            if os.path.exists(config_dir):
                self.cfg.set('config_dir', config_dir)  # Define dev etc folder as default config directory

            # generate config_dir directly: chicken-and-egg problem
            if opts.config_dir:
                self.cfg.set('config_dir', opts.config_dir)

            if not opts.config:
                opts.config = os.path.join(self.cfg.config_dir, 'api_hour/gunicorn_conf.py')

            if not self.cfg.logconfig:
                self.cfg.set('logconfig', os.path.join(self.cfg.config_dir, 'api_hour/logging.ini'))
        else:
            # Avoid the console being empty with Gunicorn 19 when you test
            if not opts.errorlog:
                opts.errorlog = '-'
            if not opts.accesslog:
                opts.accesslog = '-'
Example #31
pyFAI-average is a small utility that averages out a series of files,
for example for dark, flat, or calibration images
"""
__author__ = "Jerome Kieffer, Picca Frédéric-Emmanuel"
__contact__ = "*****@*****.**"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "05/03/2018"
__status__ = "production"

import os
import logging

logger = logging.getLogger("average")
logging.basicConfig(level=logging.INFO)
logging.captureWarnings(True)

import pyFAI.utils.shell
import pyFAI.utils.stringutil
from pyFAI import average
from pyFAI.third_party.argparse import ArgumentParser


class PreEmitStreamHandler(logging.Handler):
    """Handler allowing to hook a function before the emit function.

    The main logging feature is delegated to a sub handler.
    """

    def __init__(self, handler):
        self._handler = handler
def main(argv=None):
    """
    Handles command line arguments and gets things started.

    Parameters
    ----------
    argv : list of str
        List of arguments, as if specified on the command-line.
        If None, ``sys.argv[1:]`` is used instead.
    """

    parser = argparse.ArgumentParser(description="Prints out the weights of a \
                                                  given model.",
                                     conflict_handler='resolve',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('model_file', help='model file to load')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--k',
                       help='number of top features to print (0 for all)',
                       type=int, default=50)
    group.add_argument("--sort_by_labels", '-s', action='store_true',
                       default=False, help="order the features by classes")
    parser.add_argument('--sign',
                        choices=['positive', 'negative', 'all'],
                        default='all',
                        help='show only positive, only negative, ' +
                             'or all weights')
    parser.add_argument('--version', action='version',
                        version='%(prog)s {0}'.format(__version__))
    args = parser.parse_args(argv)

    # Make warnings from built-in warnings module get formatted more nicely
    logging.captureWarnings(True)
    logging.basicConfig(format=('%(asctime)s - %(name)s - %(levelname)s - ' +
                                '%(message)s'))

    k = args.k if args.k > 0 else None

    learner = Learner.from_file(args.model_file)
    (weights, intercept) = learner.model_params

    multiclass = False
    model = learner._model
    if (isinstance(model, LinearSVC) or
        (isinstance(model, LogisticRegression) and
            len(learner.label_list) > 2) or
        (isinstance(model, SVC) and
            model.kernel == 'linear')):
        multiclass = True
    weight_items = iteritems(weights)
    if args.sign == 'positive':
        weight_items = (x for x in weight_items if x[1] > 0)
    elif args.sign == 'negative':
        weight_items = (x for x in weight_items if x[1] < 0)

    if intercept is not None:
        # subclass of LinearModel
        if '_intercept_' in intercept:
            # Some learners (e.g. LinearSVR) may return an array of intercepts but
            # sometimes that array is of length 1 so we don't need to print that
            # as an array/list. First, let's normalize these cases.
            model_intercepts = intercept['_intercept_']
            intercept_is_array = isinstance(model_intercepts, np.ndarray)
            num_intercepts = len(model_intercepts) if intercept_is_array else 1
            if intercept_is_array and num_intercepts == 1:
                model_intercepts = model_intercepts[0]
                intercept_is_array = False

            # now print out the intercepts
            print("intercept = {:.12f}".format(model_intercepts))
        else:
            print("== intercept values ==")
            for (label, val) in intercept.items():
                print("{: .12f}\t{}".format(val, label))
        print()

    print("Number of nonzero features:", len(weights), file=sys.stderr)
    weight_by_class = defaultdict(dict)
    if multiclass and args.sort_by_labels:
        for label_feature, weight in weight_items:
            label, feature = label_feature.split()
            weight_by_class[label][feature] = weight
        for label in sorted(weight_by_class):
            for feat, val in sorted(weight_by_class[label].items(), key=lambda x: -abs(x[1])):
                print("{: .12f}\t{}\t{}".format(val, label, feat))
    else:
        for feat, val in sorted(weight_items, key=lambda x: -abs(x[1]))[:k]:
            print("{: .12f}\t{}".format(val, feat))
Example #33
def vls_init_logger():
    import logging

    logging.getLogger("pytorch_lightning").setLevel(logging.ERROR)
    logging.captureWarnings(True)
def main():

    logging.captureWarnings(True)

    parser = argparse.ArgumentParser(
        description="Used to register new users with a given homeserver when"
        " registration has been disabled. The homeserver must be"
        " configured with the 'registration_shared_secret' option"
        " set.")
    parser.add_argument(
        "-u",
        "--user",
        default=None,
        help="Local part of the new user. Will prompt if omitted.",
    )
    parser.add_argument(
        "-p",
        "--password",
        default=None,
        help="New password for user. Will prompt if omitted.",
    )
    parser.add_argument(
        "-t",
        "--user_type",
        default=None,
        help="User type as specified in synapse.api.constants.UserTypes",
    )
    admin_group = parser.add_mutually_exclusive_group()
    admin_group.add_argument(
        "-a",
        "--admin",
        action="store_true",
        help=("Register new user as an admin. "
              "Will prompt if --no-admin is not set either."),
    )
    admin_group.add_argument(
        "--no-admin",
        action="store_true",
        help=("Register new user as a regular user. "
              "Will prompt if --admin is not set either."),
    )

    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument(
        "-c",
        "--config",
        type=argparse.FileType("r"),
        help="Path to server config file. Used to read in shared secret.",
    )

    group.add_argument("-k",
                       "--shared-secret",
                       help="Shared secret as defined in server config file.")

    parser.add_argument(
        "server_url",
        default="https://localhost:8448",
        nargs="?",
        help="URL to use to talk to the homeserver. Defaults to "
        " 'https://localhost:8448'.",
    )

    args = parser.parse_args()

    if "config" in args and args.config:
        config = yaml.safe_load(args.config)
        secret = config.get("registration_shared_secret", None)
        if not secret:
            print("No 'registration_shared_secret' defined in config.")
            sys.exit(1)
    else:
        secret = args.shared_secret

    admin = None
    if args.admin or args.no_admin:
        admin = args.admin

    register_new_user(args.user, args.password, args.server_url, secret, admin,
                      args.user_type)
Example #35
    def config_logging(self):
        import logging.config
        logging_config = {
            'version': 1,
            'incremental': False,
            'formatters': {
                'standard': {
                    'format': '-- %(levelname)-8s [%(name)s] -- %(message)s'
                },
                'debug': {
                    'format':
                    '''\
-- %(levelname)-8s %(asctime)24s PID %(process)-12d TID %(thread)-20d
   from logger "%(name)s" 
   at location %(pathname)s:%(lineno)d [%(funcName)s()] 
   ::
   %(message)s
'''
                }
            },
            'handlers': {
                'console': {
                    'class': 'logging.StreamHandler',
                    'stream': 'ext://sys.stdout',
                    'formatter': 'standard'
                }
            },
            'loggers': {
                'sim': {
                    'handlers': ['console'],
                    'propagate': False
                },
                'ssa': {
                    'handlers': ['console'],
                    'propagate': False
                },
                'work_managers': {
                    'handlers': ['console'],
                    'propagate': False
                },
                'py.warnings': {
                    'handlers': ['console'],
                    'propagate': False
                }
            },
            'root': {
                'handlers': ['console']
            }
        }

        logging_config['loggers'][self.process_name] = {
            'handlers': ['console'],
            'propagate': False
        }

        if self.verbosity == 'debug':
            logging_config['root']['level'] = 5  #'DEBUG'
            logging_config['handlers']['console']['formatter'] = 'debug'
        elif self.verbosity == 'verbose':
            logging_config['root']['level'] = 'INFO'
        else:
            logging_config['root']['level'] = 'WARNING'

        logging.config.dictConfig(logging_config)
        logging_config['incremental'] = True
        logging.captureWarnings(True)
Example #36
def suppress_urllib_warnings():
    """Capture urllib warnings if possible, else disable them (python 2.6)."""
    try:
        lg.captureWarnings(True)
    except AttributeError:
        disable_warnings()
# treat humanfriendly as optional dependency
humanfriendly_available = False
try:
    from humanfriendly import AutomaticSpinner
    from humanfriendly.text import pluralize
    humanfriendly_available = True
except ImportError:  # pragma: no cover

    def pluralize(_1, _2, plural):
        return plural


logging.basicConfig()
log = logging.getLogger(sys.argv[0] if __name__ == "__main__" else __name__)
logging.captureWarnings(
    True
)  # see https://urllib3.readthedocs.org/en/latest/security.html#disabling-warnings


class DownloadError(Exception):
    """content could not be downloaded as requested."""
    pass


class Browser(object):
    """download relative or absolute url and return soup."""
    def __init__(self, args, root_url):
        """Construct a browser object with options."""
        self.save = args.save if hasattr(args, 'save') else False
        self.load = args.load if hasattr(args, 'load') else False
        self.load_dir = args.load_dir if hasattr(args, 'load_dir') else '.'
Example #38
def setup_logger(logfile="/tmp/dynafed_storagestats.log",
                 logid=False,
                 loglevel="WARNING",
                 verbose=False):
    """Setup the logger format to be used throughout the script.

    Arguments:
    logfile -- string defining path to write logs to.
    logid -- string defining an ID to be logged.
    loglevel -- string defining level to log: "DEBUG, INFO, WARNING, ERROR"
    verbose -- boolean. 'True' prints log messages to stderr.

    """
    # To capture warnings emitted by modules.
    logging.captureWarnings(True)

    # Create file logger.
    _logger = logging.getLogger("dynafed_storagestats")

    # Set log level to use.
    _num_loglevel = getattr(logging, loglevel.upper())
    _logger.setLevel(_num_loglevel)

    # Set file where to log and the mode to use and set the format to use.
    _log_handler_file = logging.handlers.TimedRotatingFileHandler(
        logfile,
        when="midnight",
        backupCount=15,
    )

    # Set the format depending whether a log id is requested.
    if logid:
        # Add ContextFilter
        _logid_context = ContextFilter(logid)

        # Add logid filter.
        _log_handler_file.addFilter(_logid_context)

        # Set logger format
        _log_format_file = logging.Formatter(
            '%(asctime)s - [%(logid)s] - [%(levelname)s]%(message)s')

    else:
        # Set logger format
        _log_format_file = logging.Formatter(
            '%(asctime)s - [%(levelname)s]%(message)s')

    # Set the format to the file handler.
    _log_handler_file.setFormatter(_log_format_file)

    # Add the file handler.
    _logger.addHandler(_log_handler_file)

    # Create STDERR handler if verbose is requested and add it to logger.
    if verbose:

        log_handler_stderr = logging.StreamHandler()

        if logid:
            log_format_stderr = logging.Formatter(
                '%(asctime)s - [%(logid)s] - [%(levelname)s]%(message)s')
            log_handler_stderr.addFilter(_logid_context)
        else:
            log_format_stderr = logging.Formatter(
                '%(asctime)s - [%(levelname)s]%(message)s')

        log_handler_stderr.setLevel(_num_loglevel)
        log_handler_stderr.setFormatter(log_format_stderr)
        # Add handler
        _logger.addHandler(log_handler_stderr)
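An illustrative invocation; the log id is a made-up value only used by the ContextFilter:

import logging

setup_logger(
    logfile='/tmp/dynafed_storagestats.log',
    logid='req-42',
    loglevel='DEBUG',
    verbose=True,
)
logging.getLogger('dynafed_storagestats').debug('logger ready')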
    def tearDown(self):
        # Be sure to reset the warning capture
        logging.captureWarnings(False)
        super(TestDaiquiri, self).tearDown()
Example #40
def init_logger():
    global _logger_init
    if _logger_init:
        return
    _logger_init = True

    old_factory = logging.getLogRecordFactory()

    def record_factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        record.perf_info = ""
        return record

    logging.setLogRecordFactory(record_factory)

    logging.addLevelName(25, "INFO")
    logging.captureWarnings(True)

    from .tools.translate import resetlocale
    resetlocale()

    # create a format for log messages and dates
    format = '%(asctime)s %(pid)s %(levelname)s %(dbname)s %(name)s: %(message)s %(perf_info)s'
    # Normal Handler on stderr
    handler = logging.StreamHandler()

    if tools.config['syslog']:
        # SysLog Handler
        if os.name == 'nt':
            handler = logging.handlers.NTEventLogHandler(
                "%s %s" % (release.description, release.version))
        elif platform.system() == 'Darwin':
            handler = logging.handlers.SysLogHandler('/var/run/log')
        else:
            handler = logging.handlers.SysLogHandler('/dev/log')
        format = '%s %s' % (release.description, release.version) \
                + ':%(dbname)s:%(levelname)s:%(name)s:%(message)s'

    elif tools.config['logfile']:
        # LogFile Handler
        logf = tools.config['logfile']
        try:
            # We check we have the right location for the log files
            dirname = os.path.dirname(logf)
            if dirname and not os.path.isdir(dirname):
                os.makedirs(dirname)
            if os.name == 'posix':
                handler = logging.handlers.WatchedFileHandler(logf)
            else:
                handler = logging.FileHandler(logf)
        except Exception:
            sys.stderr.write(
                "ERROR: couldn't create the logfile directory. Logging to the standard output.\n"
            )

    # Check that handler.stream has a fileno() method: when running OpenERP
    # behind Apache with mod_wsgi, handler.stream will have type mod_wsgi.Log,
    # which has no fileno() method. (mod_wsgi.Log is what is being bound to
    # sys.stderr when the logging.StreamHandler is being constructed above.)
    def is_a_tty(stream):
        return hasattr(stream, 'fileno') and os.isatty(stream.fileno())

    if os.name == 'posix' and isinstance(
            handler, logging.StreamHandler) and is_a_tty(handler.stream):
        formatter = ColoredFormatter(format)
        perf_filter = ColoredPerfFilter()
    else:
        formatter = DBFormatter(format)
        perf_filter = PerfFilter()
    handler.setFormatter(formatter)
    logging.getLogger().addHandler(handler)
    logging.getLogger('werkzeug').addFilter(perf_filter)

    if tools.config['log_db']:
        db_levels = {
            'debug': logging.DEBUG,
            'info': logging.INFO,
            'warning': logging.WARNING,
            'error': logging.ERROR,
            'critical': logging.CRITICAL,
        }
        postgresqlHandler = PostgreSQLHandler()
        postgresqlHandler.setLevel(
            int(
                db_levels.get(tools.config['log_db_level'],
                              tools.config['log_db_level'])))
        logging.getLogger().addHandler(postgresqlHandler)

    # Configure loggers levels
    pseudo_config = PSEUDOCONFIG_MAPPER.get(tools.config['log_level'], [])

    logconfig = tools.config['log_handler']

    logging_configurations = DEFAULT_LOG_CONFIGURATION + pseudo_config + logconfig
    for logconfig_item in logging_configurations:
        loggername, level = logconfig_item.split(':')
        level = getattr(logging, level, logging.INFO)
        logger = logging.getLogger(loggername)
        logger.setLevel(level)

    for logconfig_item in logging_configurations:
        _logger.debug('logger level set: "%s"', logconfig_item)
def main():
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('-u', '--user', help='User to analyze')
    parser.add_argument('-o', '--organization', help='Organization to analyze')
    parser.add_argument('-r', '--repo', help='Repository to analyze')
    parser.add_argument('-t', '--token', help='Personal access token')
    parser.add_argument('--token-file',
                        default='.token',
                        help='File to load personal access token from, if '
                        '--token isn\'t specified (default: %(default)s)')
    parser.add_argument('--lfs',
                        action='store_true',
                        help='Clone repo to calculate LFS usage')
    parser.add_argument(
        '-s',
        '--sort',
        default='full_name',
        help='Repository metadata field to sort by. Prefix with '
        '- to sort descending. (default: %(default)s)')
    parser.add_argument('--fields',
                        metavar='FIELD[,FIELD...]',
                        help='Comma-separated list of fields to show')
    parser.add_argument('-h',
                        '--humanize',
                        action='store_true',
                        help='Convert certain fields to human-readable format')
    parser.add_argument('--totals',
                        action='store_true',
                        help='Generate totals row')
    parser.add_argument(
        '-f',
        '--format',
        choices=('list', 'table', 'json', 'csv'),
        default='list',
        help='Format the output as a list, table, json, or csv '
        '(default: %(default)s)')
    parser.add_argument('-c',
                        '--csv',
                        action='store_const',
                        dest='format',
                        const='csv',
                        help='Output as CSV')
    parser.add_argument('-j',
                        '--json',
                        action='store_const',
                        dest='format',
                        const='json',
                        help='Output as JSON')
    parser.add_argument('-l',
                        '--log',
                        action='store',
                        metavar='FILE',
                        help='Log output to a file')
    parser.add_argument('-v',
                        '--verbose',
                        action='count',
                        default=1,
                        help='Increase verbosity.')
    parser.add_argument('-q',
                        '--quiet',
                        action='count',
                        default=0,
                        help='Decrease verbosity.')
    parser.add_argument('--help', action='help')

    args = parser.parse_args()

    args.verbose -= args.quiet

    console = logging.StreamHandler()
    console.setFormatter(logging.Formatter('%(levelname)-8s %(message)s'))
    console.setLevel(logging.WARNING)
    logging.getLogger().addHandler(console)
    if args.verbose >= 2:
        console.setLevel(logging.DEBUG)
    elif args.verbose >= 1:
        console.setLevel(logging.INFO)
    else:
        console.setLevel(logging.WARNING)

    if args.log:
        logfile = logging.FileHandler(args.log, 'w')
        logfile.setLevel(logging.DEBUG)
        logfile.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)-8s %(message)s'))
        logging.getLogger().addHandler(logfile)

    logging.getLogger().setLevel(logging.DEBUG)
    logging.captureWarnings(True)

    if not args.token and args.token_file and os.path.exists(args.token_file):
        logging.debug('Reading token from {}...'.format(args.token_file))
        with open(args.token_file) as fp:
            args.token = fp.read().strip()

    repos = fetch_repos(user=args.user,
                        organization=args.organization,
                        repo=args.repo,
                        access_token=args.token)

    if args.lfs:
        for repo in repos:
            try:
                repo['lfs'] = get_lfs_usage(repo)
            except:
                logging.exception('Error getting LFS usage for {}'.format(
                    repo['full_name']))

    repos.sort(key=sort_key(args.sort), reverse=args.sort.startswith('-'))

    multi_owners = len(set(repo['owner']['login'] for repo in repos)) > 1

    fields = get_fields(args.fields,
                        format=args.format,
                        multi_owners=multi_owners,
                        lfs=args.lfs)

    id_field = 'full_name' if multi_owners else 'name'
    if fields is not None:
        for field in ('full_name', 'name'):
            if field in fields:
                id_field = field
                break
        else:
            fields.insert(0, id_field)

    totals = {id_field: 'Totals'} if args.totals else None

    formatted = [
        format_repo(repo,
                    fields=fields,
                    format=args.format,
                    humanize=args.humanize,
                    totals=totals) for repo in repos
    ]

    if args.totals and fields:
        totals_row = collections.OrderedDict()
        for field in fields:
            value = totals.get(field, None)
            if (args.humanize and value is not None
                    and FIELD_ALIASES.get(field, field) in HUMANIZE):
                value = HUMANIZE[FIELD_ALIASES.get(field, field)](value)

            if args.format == 'csv':
                value = str(value) if value is not None else ''
            totals_row[field] = value
        formatted.append(totals_row)

    if args.format == 'json':
        print(json.dumps(formatted, indent=2))
    elif args.format == 'csv':
        writer = csv.DictWriter(sys.stdout, fields)
        writer.writeheader()
        writer.writerows(formatted)
    elif args.format == 'table':
        print(tabulate(formatted, headers='keys'))
    else:
        non_id_fields = [field for field in fields if field != id_field]
        if len(non_id_fields) == 1:
            widest_id = max(len(repo[id_field]) for repo in formatted)
            for repo in formatted:
                print('{1:{0}} {2}'.format(widest_id, repo[id_field],
                                           repo[non_id_fields[0]]))
        elif len(non_id_fields) == 0:
            for repo in formatted:
                print(repo[id_field])
        else:
            widest_field = max(len(field) for field in non_id_fields)
            for repo in formatted:
                print(repo[id_field])
                for field in fields:
                    if field == id_field:
                        continue
                    print(' - {1:>{0}}: {2}'.format(widest_field, field,
                                                    repo.get(field)))
                print()


def main():
    signal.signal(signal.SIGTERM, signal_handler)

    parser = argparse.ArgumentParser(
        description='Export ES query results to Prometheus.')
    parser.add_argument(
        '-e',
        '--es-cluster',
        default='localhost',
        help=
        'addresses of nodes in an Elasticsearch cluster to run queries on. Nodes should be separated by commas e.g. es1,es2. Ports can be provided if non-standard (9200) e.g. es1:9999 (default: localhost)'
    )
    parser.add_argument(
        '--ca-certs',
        help=
        'path to a CA certificate bundle. Can be absolute, or relative to the current working directory. If not specified, SSL certificate verification is disabled.'
    )
    parser.add_argument(
        '-p',
        '--port',
        type=int,
        default=9206,
        help='port to serve the metrics endpoint on. (default: 9206)')
    parser.add_argument('--basic-user',
                        help='User for authentication. (default: no user)')
    parser.add_argument(
        '--basic-password',
        help='Password for authentication. (default: no password)')
    parser.add_argument(
        '--query-disable',
        action='store_true',
        help=
        'disable query monitoring. Config file does not need to be present if query monitoring is disabled.'
    )
    parser.add_argument(
        '-c',
        '--config-file',
        default='exporter.cfg',
        help=
        'path to query config file. Can be absolute, or relative to the current working directory. (default: exporter.cfg)'
    )
    parser.add_argument('--cluster-health-disable',
                        action='store_true',
                        help='disable cluster health monitoring.')
    parser.add_argument(
        '--cluster-health-timeout',
        type=float,
        default=10.0,
        help=
        'request timeout for cluster health monitoring, in seconds. (default: 10)'
    )
    parser.add_argument(
        '--cluster-health-level',
        default='indices',
        choices=['cluster', 'indices', 'shards'],
        help=
        'level of detail for cluster health monitoring. (default: indices)')
    parser.add_argument('--nodes-stats-disable',
                        action='store_true',
                        help='disable nodes stats monitoring.')
    parser.add_argument(
        '--nodes-stats-timeout',
        type=float,
        default=10.0,
        help=
        'request timeout for nodes stats monitoring, in seconds. (default: 10)'
    )
    parser.add_argument(
        '--nodes-stats-metrics',
        type=nodes_stats_metrics_parser,
        help=
        'limit nodes stats to specific metrics. Metrics should be separated by commas e.g. indices,fs.'
    )
    parser.add_argument('--indices-stats-disable',
                        action='store_true',
                        help='disable indices stats monitoring.')
    parser.add_argument(
        '--indices-stats-timeout',
        type=float,
        default=10.0,
        help=
        'request timeout for indices stats monitoring, in seconds. (default: 10)'
    )
    parser.add_argument(
        '--indices-stats-mode',
        default='cluster',
        choices=['cluster', 'indices'],
        help='detail mode for indices stats monitoring. (default: cluster)')
    parser.add_argument(
        '--indices-stats-metrics',
        type=indices_stats_metrics_parser,
        help=
        'limit indices stats to specific metrics. Metrics should be separated by commas e.g. indices,fs.'
    )
    parser.add_argument(
        '--indices-stats-fields',
        type=indices_stats_fields_parser,
        help=
        'include fielddata info for specific fields. Fields should be separated by commas e.g. field1,field2. Use \'*\' for all.'
    )
    parser.add_argument(
        '--zero-query-gauges-on-exception',
        action='store_true',
        help=
        'set query gauges to zero after an exception occurs during an Elasticsearch query. (default: false)'
    )
    parser.add_argument('-j',
                        '--json-logging',
                        action='store_true',
                        help='turn on json logging.')
    parser.add_argument(
        '--log-level',
        default='INFO',
        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
        help='detail level to log. (default: INFO)')
    parser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        help='turn on verbose (DEBUG) logging. Overrides --log-level.')
    args = parser.parse_args()

    if args.basic_user and args.basic_password is None:
        parser.error('Username provided with no password.')
    elif args.basic_user is None and args.basic_password:
        parser.error('Password provided with no username.')
    elif args.basic_user:
        http_auth = (args.basic_user, args.basic_password)
    else:
        http_auth = None

    log_handler = logging.StreamHandler()
    log_format = '[%(asctime)s] %(name)s.%(levelname)s %(threadName)s %(message)s'
    formatter = JogFormatter(
        log_format) if args.json_logging else logging.Formatter(log_format)
    log_handler.setFormatter(formatter)

    log_level = getattr(logging, args.log_level)
    logging.basicConfig(handlers=[log_handler],
                        level=logging.DEBUG if args.verbose else log_level)
    logging.captureWarnings(True)

    port = args.port
    es_cluster = args.es_cluster.split(',')

    if args.ca_certs:
        es_client = Elasticsearch(es_cluster,
                                  verify_certs=True,
                                  ca_certs=args.ca_certs,
                                  http_auth=http_auth)
    else:
        es_client = Elasticsearch(es_cluster,
                                  verify_certs=False,
                                  http_auth=http_auth)

    scheduler = None

    if not args.query_disable:
        scheduler = sched.scheduler()

        config = configparser.ConfigParser()
        with open(args.config_file) as config_file:
            config.read_file(config_file)

        query_prefix = 'query_'
        queries = {}
        for section in config.sections():
            if section.startswith(query_prefix):
                query_name = section[len(query_prefix):]
                query_interval = config.getfloat(section,
                                                 'QueryIntervalSecs',
                                                 fallback=15)
                query_timeout = config.getfloat(section,
                                                'QueryTimeoutSecs',
                                                fallback=10)
                query_indices = config.get(section,
                                           'QueryIndices',
                                           fallback='_all')
                query = json.loads(config.get(section, 'QueryJson'))
                zero_query_gauges_on_exception = args.zero_query_gauges_on_exception

                queries[query_name] = (query_interval, query_timeout,
                                       query_indices, query,
                                       zero_query_gauges_on_exception)

        if queries:
            for name, (interval, timeout, indices, query,
                       zero_query_gauges_on_exception) in queries.items():
                func = partial(run_query, es_client, name, indices, query,
                               timeout, zero_query_gauges_on_exception)
                run_scheduler(scheduler, interval, func)
        else:
            logging.warning('No queries found in config file %s',
                            args.config_file)

    if not args.cluster_health_disable:
        REGISTRY.register(
            ClusterHealthCollector(es_client, args.cluster_health_timeout,
                                   args.cluster_health_level))

    if not args.nodes_stats_disable:
        REGISTRY.register(
            NodesStatsCollector(es_client,
                                args.nodes_stats_timeout,
                                metrics=args.nodes_stats_metrics))

    if not args.indices_stats_disable:
        parse_indices = args.indices_stats_mode == 'indices'
        REGISTRY.register(
            IndicesStatsCollector(es_client,
                                  args.indices_stats_timeout,
                                  parse_indices=parse_indices,
                                  metrics=args.indices_stats_metrics,
                                  fields=args.indices_stats_fields))

    logging.info('Starting server...')
    start_http_server(port)
    logging.info('Server started on port %s', port)

    try:
        if scheduler:
            scheduler.run()
        else:
            while True:
                time.sleep(5)
    except KeyboardInterrupt:
        pass

    shutdown()
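
A note on the query configuration read above: exporter.cfg itself is not shown here, so the snippet below is only a sketch of the kind of section the loop expects, inferred from the option names it reads (the 'query_' section prefix, QueryIntervalSecs, QueryTimeoutSecs, QueryIndices and QueryJson). The section name and values are illustrative assumptions, not the project's shipped defaults.

# Hypothetical config content; only the option names come from the code above.
import configparser
import json

SAMPLE_CONFIG = """
[query_all_docs]
QueryIntervalSecs = 30
QueryTimeoutSecs = 10
QueryIndices = _all
QueryJson = {"query": {"match_all": {}}}
"""

config = configparser.ConfigParser()
config.read_string(SAMPLE_CONFIG)

for section in config.sections():
    if section.startswith('query_'):
        name = section[len('query_'):]
        interval = config.getfloat(section, 'QueryIntervalSecs', fallback=15)
        timeout = config.getfloat(section, 'QueryTimeoutSecs', fallback=10)
        indices = config.get(section, 'QueryIndices', fallback='_all')
        query = json.loads(config.get(section, 'QueryJson'))
        print(name, interval, timeout, indices, query)
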
Exemple #43
0
def set_cli_logger(**kwargs):
    logging.captureWarnings(True)
    set_logger(name='atmcorr', **kwargs)
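
For reference, logging.captureWarnings(True) (as called in set_cli_logger above) re-routes warnings raised through the warnings module to the 'py.warnings' logger instead of writing them to stderr. A minimal, self-contained sketch of that standard-library behaviour, independent of the atmcorr setup:

import logging
import warnings

logging.basicConfig(level=logging.WARNING,
                    format='%(name)s %(levelname)s %(message)s')
logging.captureWarnings(True)

# Handled by the 'py.warnings' logger and its handlers, not printed to stderr.
warnings.warn('this configuration option is deprecated')

# Passing False restores normal warning output.
logging.captureWarnings(False)
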
Exemple #44
0
"""FEniCS Form Compiler (FFC).

FFC compiles finite element variational forms into C code.

"""

import logging
import pkg_resources

from FIAT import supported_elements

__version__ = pkg_resources.get_distribution("fenics-ffc").version


logging.basicConfig()
logger = logging.getLogger("ffc")
logging.captureWarnings(capture=True)

# Import main function, entry point to script
from ffc.main import main  # noqa: F401

# Import default parameters
from ffc.parameters import default_parameters  # noqa: F401

# Duplicate the list of supported elements from FIAT and remove elements
# from the list that we don't support or don't trust.
supported_elements = sorted(supported_elements.keys())
supported_elements.remove("Argyris")
supported_elements.remove("Hermite")
supported_elements.remove("Morley")
Exemple #45
0
def master(taskAlias, **kwargs):
    '''The main function.'''

    Tabc.pypmanConfigPath = os.path.join(os.getcwd(), 'pypman')
    logging.captureWarnings(True)
    log.info('Welcome to pypman!')
    log.info('Current dir: %s' % os.getcwd())
    log.info('PYPMAN_CONFIG_PATH:%s' % Tabc.pypmanConfigPath)

    mapClasses = {
        'gb': GlobalTask,
        'python': PythonNodeTask,
        'node': PythonNodeTask
    }
    file = os.path.join(Tabc.pypmanConfigPath, 'pypman.yml')
    log.info('Master config file: %s' % file)
    Tabc.masterConfig = util.loadYml(file)
    processInfo = collections.namedtuple('processInfo', 'process tname')
    processInfoList = []
    targetHosts = Tabc.masterConfig['targetHosts']
    for thost in targetHosts:
        Tabc.setHost(thost)
        log.debug(Tabc.host)
        Tabc.setProject(taskAlias)
        log.debug(Tabc.project)
        tList = Tabc.taskList(taskAlias)
        log.info('Task list %s' % tList)
        for tname in tList:
            Tabc.setTask(tname)
            log.info('Running task:%s' % tname)
            executer = {}
            if Tabc.task.fname == "node" or Tabc.task.fname == "python":
                executer = mapClasses[Tabc.task.fname](**kwargs)
            else:
                executer = mapClasses["gb"](**kwargs)  # global task
            log.info('executing task %s on host %s ' %
                     (tname, Tabc.host.cfg['name']))
            if Tabc.task.synch:
                # Tasks are executed in series (when user interaction is
                # needed or when one task depends on the other).
                executer.execute_task()
            else:
                # Asynchronous execution; tasks are executed in parallel.
                processInfoList.append(
                    processInfo(executer.execute_task(), tname))
    if not Tabc.task.synch:
        i = 0
        while processInfoList:
            i = i + 1
            log.debug('i= %s' % i)
            j = 0
            for pinf in processInfoList:
                j = j + 1
                log.debug('j=%s' % j)
                rcd = pinf.process.poll()
                log.debug(rcd)
                if rcd is None:  # No process is done, wait a bit and check again.
                    log.info('running task %s and buffering output.' %
                             pinf.tname)
                    # The sleep interval is a trade-off: too small or too
                    # large is not good; optimize case by case.
                    time.sleep(0.5)
                else:  # Process finished.
                    log.info('%s %s' %
                             (pinf.tname,
                              pinf.process.stdout.read().decode('utf-8')))
                    log.info('Task %s is done.' % pinf.tname)
                    processInfoList.remove(pinf)
                    break
    return
Exemple #46
0
def init(
    prog: Optional[str] = None,
    description: Optional[str] = None,
    add_help: bool = True,
    check_run: bool = False,
    argv: Optional[List[str]] = None,
    **load: bool,
) -> Tuple[argparse.ArgumentParser, List[str], Section]:

    argv = (argv or sys.argv)
    assert len(argv) > 0

    args_parser = argparse.ArgumentParser(prog=(prog or argv[0]),
                                          description=description,
                                          add_help=add_help)
    args_parser.add_argument("-c",
                             "--config",
                             dest="config_path",
                             default="/etc/kvmd/main.yaml",
                             metavar="<file>",
                             type=valid_abs_file,
                             help="Set config file path")
    args_parser.add_argument(
        "-o",
        "--set-options",
        dest="set_options",
        default=[],
        nargs="+",
        help="Override config options list (like sec/sub/opt=value)")
    args_parser.add_argument(
        "-m",
        "--dump-config",
        dest="dump_config",
        action="store_true",
        help="View current configuration (include all overrides)")
    if check_run:
        args_parser.add_argument("--run",
                                 dest="run",
                                 action="store_true",
                                 help="Run the service")
    (options, remaining) = args_parser.parse_known_args(argv)

    if options.dump_config:
        _dump_config(
            _init_config(
                config_path=options.config_path,
                override_options=options.set_options,
                load_auth=True,
                load_hid=True,
                load_atx=True,
                load_msd=True,
                load_gpio=True,
            ))
        raise SystemExit()
    config = _init_config(options.config_path, options.set_options, **load)

    logging.captureWarnings(True)
    logging.config.dictConfig(config.logging)

    if check_run and not options.run:
        raise SystemExit(
            "To prevent accidental startup, you must specify the --run option to start.\n"
            "Try the --help option to find out what this service does.\n"
            "Make sure you understand exactly what you are doing!")

    return (args_parser, remaining, config)
Exemple #47
0
    def setup(self):
        self.queues = queues.declare_queues(self)

        ################# PROVIDERS
        self.media_provider = select_provider(
            self.config,
            self.pkg_resources_working_set,
            "r2.provider.media",
            self.media_provider,
        )
        self.startup_timer.intermediate("providers")

        ################# CONFIGURATION
        # AMQP is required
        if not self.amqp_host:
            raise ValueError("amqp_host not set in the .ini")

        if not self.cassandra_seeds:
            raise ValueError("cassandra_seeds not set in the .ini")

        # heavy load mode is read only mode with a different infobar
        if self.heavy_load_mode:
            self.read_only_mode = True

        origin_prefix = self.domain_prefix + "." if self.domain_prefix else ""
        self.origin = "http://" + origin_prefix + self.domain

        self.trusted_domains = set([self.domain])
        if self.https_endpoint:
            https_url = urlparse(self.https_endpoint)
            self.trusted_domains.add(https_url.hostname)

        # load the unique hashed names of files under static
        static_files = os.path.join(self.paths.get('static_files'), 'static')
        names_file_path = os.path.join(static_files, 'names.json')
        if os.path.exists(names_file_path):
            with open(names_file_path) as handle:
                self.static_names = json.load(handle)
        else:
            self.static_names = {}

        # make python warnings go through the logging system
        logging.captureWarnings(capture=True)

        log = logging.getLogger('reddit')

        # when we're a script (paster run) just set up super simple logging
        if self.running_as_script:
            log.setLevel(logging.INFO)
            log.addHandler(logging.StreamHandler())

        # if in debug mode, override the logging level to DEBUG
        if self.debug:
            log.setLevel(logging.DEBUG)

        # attempt to figure out which pool we're in and add that to the
        # LogRecords.
        try:
            with open("/etc/ec2_asg", "r") as f:
                pool = f.read().strip()
            # clean up the pool name since we're putting stuff after "-"
            pool = pool.partition("-")[0]
        except IOError:
            pool = "reddit-app"
        self.log = logging.LoggerAdapter(log, {"pool": pool})

        # set locations
        self.locations = {}

        if not self.media_domain:
            self.media_domain = self.domain
        if self.media_domain == self.domain:
            print >> sys.stderr, ("Warning: g.media_domain == g.domain. " +
                   "This may give untrusted content access to user cookies")

        for arg in sys.argv:
            tokens = arg.split("=")
            if len(tokens) == 2:
                k, v = tokens
                self.log.debug("Overriding g.%s to %s" % (k, v))
                setattr(self, k, v)

        self.reddit_host = socket.gethostname()
        self.reddit_pid  = os.getpid()

        if hasattr(signal, 'SIGUSR1'):
            # not all platforms have user signals
            signal.signal(signal.SIGUSR1, thread_dump)

        locale.setlocale(locale.LC_ALL, self.locale)

        # Pre-calculate ratelimit values
        self.RL_RESET_SECONDS = self.config["RL_RESET_MINUTES"] * 60
        self.RL_MAX_REQS = int(self.config["RL_AVG_REQ_PER_SEC"] *
                                      self.RL_RESET_SECONDS)

        self.RL_OAUTH_RESET_SECONDS = self.config["RL_OAUTH_RESET_MINUTES"] * 60
        self.RL_OAUTH_MAX_REQS = int(self.config["RL_OAUTH_AVG_REQ_PER_SEC"] *
                                     self.RL_OAUTH_RESET_SECONDS)

        self.startup_timer.intermediate("configuration")

        ################# ZOOKEEPER
        # for now, zookeeper will be an optional part of the stack.
        # if it's not configured, we will grab the expected config from the
        # [live_config] section of the ini file
        zk_hosts = self.config.get("zookeeper_connection_string")
        if zk_hosts:
            from r2.lib.zookeeper import (connect_to_zookeeper,
                                          LiveConfig, LiveList)
            zk_username = self.config["zookeeper_username"]
            zk_password = self.config["zookeeper_password"]
            self.zookeeper = connect_to_zookeeper(zk_hosts, (zk_username,
                                                             zk_password))
            self.live_config = LiveConfig(self.zookeeper, LIVE_CONFIG_NODE)
            self.secrets = fetch_secrets(self.zookeeper)
            self.throttles = LiveList(self.zookeeper, "/throttles",
                                      map_fn=ipaddress.ip_network,
                                      reduce_fn=ipaddress.collapse_addresses)
        else:
            self.zookeeper = None
            parser = ConfigParser.RawConfigParser()
            parser.optionxform = str
            parser.read([self.config["__file__"]])
            self.live_config = extract_live_config(parser, self.plugins)
            self.secrets = extract_secrets(parser)
            self.throttles = tuple()  # immutable since it's not real

        self.startup_timer.intermediate("zookeeper")

        ################# PRIVILEGED USERS
        self.admins = PermissionFilteredEmployeeList(
            self.live_config, type="admin")
        self.sponsors = PermissionFilteredEmployeeList(
            self.live_config, type="sponsor")
        self.employees = PermissionFilteredEmployeeList(
            self.live_config, type="employee")

        ################# MEMCACHE
        num_mc_clients = self.num_mc_clients

        # the main memcache pool. used for most everything.
        self.memcache = CMemcache(
            self.memcaches,
            min_compress_len=50 * 1024,
            num_clients=num_mc_clients,
        )

        # a pool just used for @memoize results
        memoizecaches = CMemcache(
            self.memoizecaches,
            min_compress_len=50 * 1024,
            num_clients=num_mc_clients,
        )

        # a pool just for srmember rels
        srmembercaches = CMemcache(
            self.srmembercaches,
            min_compress_len=96,
            num_clients=num_mc_clients,
        )

        ratelimitcaches = CMemcache(
            self.ratelimitcaches,
            min_compress_len=96,
            num_clients=num_mc_clients,
        )

        # a smaller pool of caches used only for distributed locks.
        # TODO: move this to ZooKeeper
        self.lock_cache = CMemcache(self.lockcaches,
                                    num_clients=num_mc_clients)
        self.make_lock = make_lock_factory(self.lock_cache, self.stats)

        # memcaches used in front of the permacache CF in cassandra.
        # XXX: this is a legacy thing; permacache was made when C* didn't have
        # a row cache.
        if self.permacache_memcaches:
            permacache_memcaches = CMemcache(self.permacache_memcaches,
                                             min_compress_len=50 * 1024,
                                             num_clients=num_mc_clients)
        else:
            permacache_memcaches = None

        # the stalecache is a memcached local to the current app server used
        # for data that's frequently fetched but doesn't need to be fresh.
        if self.stalecaches:
            stalecaches = CMemcache(self.stalecaches,
                                    num_clients=num_mc_clients)
        else:
            stalecaches = None

        # rendercache holds rendered partial templates.
        rendercaches = CMemcache(
            self.rendercaches,
            noreply=True,
            no_block=True,
            num_clients=num_mc_clients,
            min_compress_len=480,
        )

        # pagecaches hold fully rendered pages
        pagecaches = CMemcache(
            self.pagecaches,
            noreply=True,
            no_block=True,
            num_clients=num_mc_clients,
            min_compress_len=1400,
        )

        self.startup_timer.intermediate("memcache")

        ################# CASSANDRA
        keyspace = "reddit"
        self.cassandra_pools = {
            "main":
                StatsCollectingConnectionPool(
                    keyspace,
                    stats=self.stats,
                    logging_name="main",
                    server_list=self.cassandra_seeds,
                    pool_size=self.cassandra_pool_size,
                    timeout=4,
                    max_retries=3,
                    prefill=False
                ),
        }

        permacache_cf = CassandraCache(
            'permacache',
            self.cassandra_pools[self.cassandra_default_pool],
            read_consistency_level=self.cassandra_rcl,
            write_consistency_level=self.cassandra_wcl
        )

        self.startup_timer.intermediate("cassandra")

        ################# POSTGRES
        self.dbm = self.load_db_params()
        self.startup_timer.intermediate("postgres")

        ################# CHAINS
        # initialize caches. Any cache-chains built here must be added
        # to cache_chains (closed around by reset_caches) so that they
        # can properly reset their local components
        cache_chains = {}
        localcache_cls = (SelfEmptyingCache if self.running_as_script
                          else LocalCache)

        if stalecaches:
            self.cache = StaleCacheChain(
                localcache_cls(),
                stalecaches,
                self.memcache,
            )
        else:
            self.cache = MemcacheChain((localcache_cls(), self.memcache))
        cache_chains.update(cache=self.cache)

        if stalecaches:
            self.memoizecache = StaleCacheChain(
                localcache_cls(),
                stalecaches,
                memoizecaches,
            )
        else:
            self.memoizecache = MemcacheChain(
                (localcache_cls(), memoizecaches))
        cache_chains.update(memoizecache=self.memoizecache)

        if stalecaches:
            self.srmembercache = StaleCacheChain(
                localcache_cls(),
                stalecaches,
                srmembercaches,
            )
        else:
            self.srmembercache = MemcacheChain(
                (localcache_cls(), srmembercaches))
        cache_chains.update(srmembercache=self.srmembercache)

        self.ratelimitcache = MemcacheChain(
                (localcache_cls(), ratelimitcaches))
        cache_chains.update(ratelimitcache=self.ratelimitcache)

        self.rendercache = MemcacheChain((
            localcache_cls(),
            rendercaches,
        ))
        cache_chains.update(rendercache=self.rendercache)

        self.pagecache = MemcacheChain((
            localcache_cls(),
            pagecaches,
        ))
        cache_chains.update(pagecache=self.pagecache)

        # the thing_cache is used in tdb_cassandra.
        self.thing_cache = CacheChain((localcache_cls(),))
        cache_chains.update(thing_cache=self.thing_cache)

        self.permacache = CassandraCacheChain(
            localcache_cls(),
            permacache_cf,
            memcache=permacache_memcaches,
            lock_factory=self.make_lock,
        )
        cache_chains.update(permacache=self.permacache)

        # hardcache is used for various things that tend to expire
        # TODO: replace hardcache w/ cassandra stuff
        self.hardcache = HardcacheChain(
            (localcache_cls(), self.memcache, HardCache(self)),
            cache_negative_results=True,
        )
        cache_chains.update(hardcache=self.hardcache)

        # I know this sucks, but we need non-request-threads to be
        # able to reset the caches, so we need them be able to close
        # around 'cache_chains' without being able to call getattr on
        # 'g'
        def reset_caches():
            for name, chain in cache_chains.iteritems():
                chain.reset()
                chain.stats = CacheStats(self.stats, name)
        self.cache_chains = cache_chains

        self.reset_caches = reset_caches
        self.reset_caches()

        self.startup_timer.intermediate("cache_chains")

        # try to set the source control revision numbers
        self.versions = {}
        r2_root = os.path.dirname(os.path.dirname(self.paths["root"]))
        r2_gitdir = os.path.join(r2_root, ".git")
        self.short_version = self.record_repo_version("r2", r2_gitdir)

        if I18N_PATH:
            i18n_git_path = os.path.join(os.path.dirname(I18N_PATH), ".git")
            self.record_repo_version("i18n", i18n_git_path)

        self.startup_timer.intermediate("revisions")
Exemple #48
0
    def __init__(self, config, name=None):
        self._logger = None
        self._is_master = is_master()

        self.timer = Timer()
        self.config = config
        self.save_dir = get_mmf_env(key="save_dir")
        self.log_format = config.training.log_format
        self.time_format = "%Y_%m_%dT%H_%M_%S"
        self.log_filename = "train_"
        self.log_filename += self.timer.get_time_hhmmss(None, format=self.time_format)
        self.log_filename += ".log"

        self.log_folder = os.path.join(self.save_dir, "logs")

        env_log_dir = get_mmf_env(key="log_dir")
        if env_log_dir:
            self.log_folder = env_log_dir

        if not PathManager.exists(self.log_folder):
            PathManager.mkdirs(self.log_folder)

        self.log_filename = os.path.join(self.log_folder, self.log_filename)

        if not self._is_master:
            return
        print("Logging to:", self.log_filename)

        logging.captureWarnings(True)

        if not name:
            name = __name__
        self._logger = logging.getLogger(name)
        self._file_only_logger = logging.getLogger(name)
        self._warnings_logger = logging.getLogger("py.warnings")

        # Set level
        level = config.training.logger_level
        self._logger.setLevel(getattr(logging, level.upper()))
        self._file_only_logger.setLevel(getattr(logging, level.upper()))

        # Capture stdout to logger
        self._stdout_logger = None
        if self.config.training.stdout_capture:
            self._stdout_logger = StreamToLogger(
                logging.getLogger("stdout"), getattr(logging, level.upper())
            )
            sys.stdout = self._stdout_logger

        formatter = logging.Formatter(
            "%(asctime)s | %(levelname)s | %(name)s : %(message)s",
            datefmt="%Y-%m-%dT%H:%M:%S",
        )

        # Add handler to file
        channel = logging.StreamHandler(PathManager.open(self.log_filename, mode="a"))
        channel.setFormatter(formatter)
        self.add_handlers(channel)

        # Add handler to train.log. train.log is full log that is also used
        # by slurm/fbl output
        channel = logging.StreamHandler(
            PathManager.open(os.path.join(self.save_dir, "train.log"), mode="a")
        )
        channel.setFormatter(formatter)
        self.add_handlers(channel)

        # Add handler to stdout. Only when we are not capturing stdout in
        # the logger
        if not self._stdout_logger:
            channel = logging.StreamHandler(sys.stdout)
            channel.setFormatter(formatter)

            self._logger.addHandler(channel)
            self._warnings_logger.addHandler(channel)

        should_not_log = self.config.training.should_not_log
        self.should_log = not should_not_log

        # Single log wrapper map
        self._single_log_map = set()
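
StreamToLogger is not defined in this excerpt. The sketch below is one common, minimal implementation of such a stdout-to-logger redirector, written against the interface used above (constructed with a logger and a level, then assigned to sys.stdout); it is an assumption, not MMF's actual class.

import logging
import sys


class StreamToLogger:
    """Minimal file-like object that forwards writes to a logger (hypothetical stand-in)."""

    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, message):
        # print() calls write() for the text and again for the trailing
        # newline; skip whitespace-only chunks so they don't become records.
        for line in message.rstrip().splitlines():
            self.logger.log(self.level, line.rstrip())

    def flush(self):
        # Nothing is buffered locally; the logging handlers flush themselves.
        pass


# Usage sketch: route print() output through the logging system.
logging.basicConfig(level=logging.INFO, format='%(name)s: %(message)s')
sys.stdout = StreamToLogger(logging.getLogger('stdout'), logging.INFO)
print('this line ends up as a log record')
sys.stdout = sys.__stdout__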