Example 1
def add_console_handler(level=logging.NOTSET, verbose=True):
    """
    Enable typical logging to the console.
    """

    # get the appropriate formatter
    if verbose:
        formatter = TTY_VerboseFormatter
    else:
        formatter = TTY_ConciseFormatter

    # build a handler
    from sys import stdout
    from cargo.temporal import utc_now

    handler = StreamHandler(stdout)

    handler.setFormatter(formatter(stdout))
    handler.setLevel(level)

    # add it
    logging.root.addHandler(handler)

    log.debug("added log handler for console at %s", utc_now())

    return handler
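A minimal usage sketch (hypothetical, assuming the module above is importable); the handler is attached to the root logger, so it can be detached the same way:

import logging

handler = add_console_handler(level=logging.INFO, verbose=False)
logging.getLogger(__name__).info("console logging enabled")
logging.root.removeHandler(handler)  # detach when no longer needed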
Example 2
 def __init__(self, filename, mode="a"):
     filename = os.path.abspath(filename)
     StreamHandler.__init__(self, open(filename, mode))
     self.baseFilename = filename
     self.mode = mode
     self._wr = weakref.ref(self, _remove_from_reopenable)
     _reopenable_handlers.append(self._wr)
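The constructor registers each handler in a module-level list through a weakref whose callback prunes dead entries. A hedged sketch of the module-level pieces the snippet assumes:

import weakref

_reopenable_handlers = []

def _remove_from_reopenable(ref):
    # weakref callback: drop the dead reference from the registry
    try:
        _reopenable_handlers.remove(ref)
    except ValueError:
        pass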
Example 3
def init_log(log_level=logging.ERROR, logger='qtile', log_path='~/.%s.log'):
    log = getLogger(logger)
    log.setLevel(log_level)

    if log_path:
        try:
            log_path = log_path % logger
        except TypeError:  # Happens if log_path doesn't contain formatters.
            pass
        log_path = os.path.expanduser(log_path)
        handler = logging.FileHandler(log_path)
        handler.setFormatter(
            logging.Formatter(
                "%(asctime)s %(levelname)s %(funcName)s:%(lineno)d %(message)s"
            )
        )
        log.addHandler(handler)

    handler = StreamHandler(sys.stdout)
    handler.setFormatter(
        ColorFormatter(
            '$RESET$COLOR%(asctime)s $BOLD$COLOR%(name)s'
            ' %(funcName)s:%(lineno)d $RESET %(message)s'
        )
    )
    log.addHandler(handler)

    log.warning('Starting %s' % logger.title())
    return log
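A hedged usage sketch (ColorFormatter is assumed importable as in the full module); the '%s' in the default log_path is filled with the logger name, so this writes to ~/.qtile.log and mirrors colored output to stdout:

import logging

log = init_log(log_level=logging.DEBUG, logger='qtile')
log.debug('configuration loaded')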
Example 4
def init(name, debug=True):
    ''' Initializes the named logger for the rest of this program's execution.
    All child loggers will assume this logger's log level if theirs is not
    set. Suggestion: put logger.init(__name__, debug) in the top __init__.py
    module of your package.'''

    global log

    if log:
        # Logger already initialized.
        return

    log = logging.getLogger(name)
    handler = StreamHandler()

    plugin_name = name.split('.')[0]

    if debug:
        log.setLevel(logging.DEBUG)
        handler.setFormatter(_getDebugFmt(plugin_name))
    else:
        log.setLevel(logging.INFO)
        handler.setFormatter(_getFmt(plugin_name))

    log.addHandler(handler)

    # Not shown if debug=False
    log.debug("Logger for %s initialized.", plugin_name)
Example 5
def provision(debug: bool = True, log_file_path: str = '/build.log'):
    try:
        try:
            basicConfig(
                level=DEBUG,
                format='%(asctime)-15s %(funcName)s %(levelname)s %(message)s',
                filename=log_file_path)
            console = StreamHandler()
            console.setLevel(INFO)
            getLogger().addHandler(console)

            info('Building container image ...')

            yield
        except CalledProcessError as exc:
            error('A subprocess errored. Standard error output: ' +
                  exc.stderr.decode(encoding=getpreferredencoding(False)))
            raise
    except:
        if debug:
            print_exc()
            error(
                'Caught the above exception. '
                'Sleeping now, to help debugging inside container. ')
            sleep(100000)
        else:
            raise
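Because provision() yields, it is presumably decorated with contextlib.contextmanager in the full module; under that assumption, usage would look like:

with provision(debug=False, log_file_path='/tmp/build.log'):
    build_image()  # hypothetical build step run inside the logging context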
Example 6
 def _setup_logger(self):
     logger = getLogger()
     logger.setLevel(DEBUG)
     handler = StreamHandler()
     handler.setFormatter(
         Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s'))
     logger.addHandler(handler)
Example 7
 def __init__(self):     
     my_logger = getLogger('py4j')
     handler = StreamHandler()
     handler.setLevel(DEBUG)
     my_logger.setLevel(DEBUG)
     my_logger.addHandler(handler)
     
     if glob.glob("./lib/py4j-0.8.1.jar"):
         jarpath = glob.glob("./lib/py4j-0.8.1.jar")[0]
     elif glob.glob("./py4j-0.8.1.jar"):
         jarpath = glob.glob("./py4j-0.8.1")
     else:
         jarpath = None
     
     if glob.glob("./lib/droidnavi-gateway-server*"):
         classpath = glob.glob("./lib/droidnavi-gateway-server*")
     elif glob.glob("./droidnavi-gateway-server*"):
         classpath = glob.glob("./droidnavi-gateway-server*")[0]
     else:
         classpath = None
         
     self.__gateway = JavaGateway.launch_gateway(jarpath=jarpath, classpath=classpath, die_on_exit=True)
     print(self.__gateway)
     app = QtGui.QApplication(sys.argv)
     mainWindow = MainWindow(self.__gateway)
     sys.exit(app.exec_())
Example 8
def make_file_logger(logfile, maxBytes=int(1e7), backupCount=10):
    """Create a logger that mimics the format of Products.LongRequestLogger"""
    if isinstance(logfile, Logger):
        # The Logger is already set up.
        return logfile

    logger = Logger('slowlog')

    if isinstance(logfile, Handler):
        # The Handler is already set up.
        handler = logfile
    else:
        if hasattr(logfile, 'write'):
            # Write to an open file.
            handler = StreamHandler(logfile)
        else:
            # Create a rotating file handler.
            handler = RotatingFileHandler(logfile,
                                          maxBytes=maxBytes,
                                          backupCount=backupCount)
        fmt = Formatter('%(asctime)s - %(message)s')
        handler.setFormatter(fmt)

    logger.addHandler(handler)
    return logger
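make_file_logger accepts a path, an open stream, a pre-built Handler, or a finished Logger; a short sketch of each form (paths are hypothetical):

import sys

path_logger = make_file_logger('/tmp/slow.log')  # rotating file handler
stream_logger = make_file_logger(sys.stderr)     # plain StreamHandler
same_logger = make_file_logger(stream_logger)    # a Logger is returned as-is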
Example 9
def setup_logging(log_dir=None, quiet=False):
    """Set up the logging infrastructure.

    If *log_dir* is given, logs will be written to that directory. If *quiet*
    is True, logs below ERROR level will not be written to standard error.
    """
    global _setup_done

    if _setup_done:
        return
    _setup_done = True

    _root.handlers = []  # Remove any handlers already attached
    _root.setLevel("DEBUG")

    stream = StreamHandler()
    stream.setLevel("ERROR" if quiet else "DEBUG")
    stream.setFormatter(_ColorFormatter())
    _root.addHandler(stream)

    if log_dir:
        _setup_file_logging(log_dir)

    if quiet:
        _disable_pywikibot_logging()
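A usage sketch; the _setup_done guard makes every call after the first a no-op:

setup_logging()                     # console only, DEBUG and up to stderr
setup_logging(log_dir='/tmp/logs',  # hypothetical; ignored on a second call
              quiet=True)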
Example 10
def init_logging(app):

    loggers = [app.logger, getLogger('sqlalchemy')] #, getLogger('sqlalchemy.engine')]

    handler = StreamHandler()
    handler.setFormatter(Formatter('%(asctime)s %(levelname)s\t%(filename)s:%(lineno)d: %(message)s'))

    # By default set the app logger to INFO
    app.logger.setLevel(logging.INFO)

    # By default set the sqlalchemy logger to WARN
    getLogger('sqlalchemy').setLevel(logging.WARN)

    # default: NOTSET
    # sqlalchemy: WARN

    # CRITICAL 	50
    # ERROR     40
    # WARNING   30
    # INFO      20
    # DEBUG     10
    # NOTSET    0

    for lgr in loggers:
        lgr.setLevel(app.config['LOG_LEVEL'])

        # Remove all handlers
        del lgr.handlers[:]

        # Add the default one
        lgr.addHandler(handler)

    app.logger.debug("Logging initialized") # with %s level", handler.getLevel())
Example 11
    def init_app(cls, app):
        Config.init_app(app)

        import logging
        from logging.handlers import SMTPHandler
        from logging import StreamHandler

        # email errors to admin
        credentials = None
        secure = None
        if getattr(cls, "MAIL_USERNAME", None) is not None:
            credentials = (cls.MAIL_USERNAME, cls.MAIL_PASSWORD)
            if getattr(cls, "MAIL_USE_TLS", None):
                secure = ()
        mail_handler = SMTPHandler(
            mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT),
            fromaddr=cls.MAIL_SENDER,
            toaddrs=[cls.ADMIN],
            subject=cls.MAIL_SUBJECT_PREFIX + u"Application Error",
            credentials=credentials,
            secure=secure,
        )
        mail_handler.setLevel(logging.ERROR)
        app.logger.addHandler(mail_handler)

        # log to stderr
        file_handler = StreamHandler()
        file_handler.setLevel(logging.WARNING)
        app.logger.addHandler(file_handler)

        # handle proxy server headers
        from werkzeug.contrib.fixers import ProxyFix

        app.wsgi_app = ProxyFix(app.wsgi_app)
Example 12
def configure_logging(app):
    """
    Sets up application wide logging.

    :param app:
    """
    if app.config.get('LOG_FILE'):
        handler = RotatingFileHandler(app.config.get('LOG_FILE'), maxBytes=10000000, backupCount=100)

        handler.setFormatter(Formatter(
            '%(asctime)s %(levelname)s: %(message)s '
            '[in %(pathname)s:%(lineno)d]'
        ))

        handler.setLevel(app.config.get('LOG_LEVEL', 'DEBUG'))
        app.logger.setLevel(app.config.get('LOG_LEVEL', 'DEBUG'))
        app.logger.addHandler(handler)

    stream_handler = StreamHandler()

    stream_handler.setFormatter(Formatter(
        '%(asctime)s %(levelname)s: %(message)s '
        '[in %(pathname)s:%(lineno)d]'
    ))

    stream_handler.setLevel(app.config.get('LOG_LEVEL', 'DEBUG'))
    app.logger.addHandler(stream_handler)
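A hedged wiring sketch with hypothetical config values; without LOG_FILE only the stream handler is attached:

app.config['LOG_FILE'] = '/var/log/myapp.log'  # hypothetical path
app.config['LOG_LEVEL'] = 'INFO'
configure_logging(app)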
Example 13
    def init_app(cls, app):
        Config.init_app(app)

        # email error logs to the admin
        import logging
        from logging.handlers import SMTPHandler

        # log to stderr
        from logging import StreamHandler
        file_handler = StreamHandler()
        file_handler.setLevel(logging.WARNING)
        app.logger.addHandler(file_handler)

        secure = None
        if getattr(cls, 'MAIL_USERNAME', None) is not None:
            credentials = (cls.MAIL_USERNAME, cls.MAIL_PASSWORD)
            if getattr(cls, 'MAIL_USE_TLS', None):
                secure = ()
            mail_handler = SMTPHandler(
                mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT),
                fromaddr=cls.ZBLOG_MAIL_SENDER,
                toaddrs=[cls.ZBLOG_ADMIN],
                subject=cls.ZBLOG_MAIL_SUBJECT_PREFIX + 'Application Error',
                credentials=credentials,
                secure=secure
            )
            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)
Example 14
 def __init__(self, job, level):
     
     self.job = job
     self.level = level
     
     # Create queue through which log records can be sent from various
     # processes and threads to the logging thread.
     self.queue = Queue()
     
     formatter = Formatter('%(asctime)s %(levelname)-8s %(message)s')
     
     # Create handler that writes log messages to the job log file.
     os_utils.create_parent_directory(job.log_file_path)
     file_handler = FileHandler(job.log_file_path, 'w')
     file_handler.setFormatter(formatter)
     
     # Create handler that writes log messages to stderr.
     stderr_handler = StreamHandler()
     stderr_handler.setFormatter(formatter)
     
     self._record_counts_handler = _RecordCountsHandler()
     
     # Create logging listener that will run on its own thread and log
     # messages sent to it via the queue.
     self._listener = QueueListener(
         self.queue, file_handler, stderr_handler,
         self._record_counts_handler)
Example 15
 def init_app(cls,app):
     ProductionConfig.init_app(app)
     import logging
     from logging import StreamHandler
     file_handler=StreamHandler()
     file_handler.setLevel(logging.WARNING)
     app.logger.addHandler(file_handler)
Example 16
def init_log(log_level=logging.WARNING, logger='simulation',
             log_path=None):
    
    log = getLogger(logger)
    log.setLevel(log_level)

    if log_path:
        try:
            log_path = log_path % logger
        except TypeError:  # Happens if log_path doesn't contain formatters.
            pass
        log_path = os.path.expanduser(log_path)
        with open(log_path, "w"):
            pass
        handler = logging.FileHandler(log_path)
        handler.setFormatter(
            logging.Formatter(
                "%(asctime)s %(levelname)s %(funcName)s:%(lineno)d %(message)s"
            )
        )
        log.addHandler(handler)

    handler = StreamHandler(sys.stdout)
    handler.setFormatter(
        ColorFormatter(
            '$RESET$COLOR%(asctime)s $BOLD$COLOR%(name)s'
            ' %(funcName)s:%(lineno)d $RESET %(message)s'
        )
    )
    log.addHandler(handler)

    log.info('Starting %s' % logger.title())
        
    return log
Example 17
def _enable_logger(logger):
    from logging import StreamHandler
    handler = StreamHandler()
    handler.setLevel(DEBUG)
    logger.setLevel(DEBUG)
    logger.addHandler(handler)
    return logger
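Usage sketch; DEBUG is assumed to be imported from logging in the enclosing module:

from logging import getLogger

logger = _enable_logger(getLogger('myapp'))  # hypothetical logger name
logger.debug('now visible on the console')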
Example 18
File: start.py Project: LMFDB/lmfdb
def start_logging():
    global logfocus, file_handler
    from lmfdb.utils.config import Configuration
    config = Configuration()
    logging_options = config.get_logging()

    file_handler = FileHandler(logging_options['logfile'])
    file_handler.setLevel(WARNING)

    if 'logfocus' in logging_options:
        logfocus = logging_options['logfocus']
        getLogger(logfocus).setLevel(DEBUG)

    root_logger = getLogger()
    root_logger.setLevel(INFO)
    root_logger.name = "LMFDB"

    formatter = Formatter(LmfdbFormatter.fmtString.split(r'[')[0])
    ch = StreamHandler()
    ch.setFormatter(formatter)
    root_logger.addHandler(ch)

    cfg = config.get_all()
    if "postgresql_options" in cfg and "password" in cfg["postgresql_options"]:
        cfg["postgresql_options"]["password"] = "******"
    info("Configuration = {}".format(cfg))
    check_sage_version()
Example 19
def main():
    # set logger
    formatter = Formatter(
        fmt='%(asctime)s %(levelname)s: %(message)s',
    )

    handler = StreamHandler()
    handler.setLevel(INFO)
    handler.formatter = formatter

    logger.setLevel(INFO)
    logger.addHandler(handler)

    # parse arguments
    parser = argparse.ArgumentParser(
        description=(
            'This script reads the downloaded xls.'
        )
    )

    parser.add_argument(
        '-s', '--src',
        action='store',
        nargs=1,
        type=str,
        required=True,
        help='source path of the xls to load',
        metavar='PATH',
    )

    args = parser.parse_args()
    logger.debug(args)

    # read
    read(args.src[0])
Example 20
def setup_logging():
    """
    Setup logging with support for colored output if available.
    """

    from logging import basicConfig, DEBUG

    FORMAT = (
        '  %(log_color)s%(levelname)-8s%(reset)s | '
        '%(log_color)s%(message)s%(reset)s'
    )

    logging_kwargs = {
        'level': DEBUG,
    }

    try:
        from logging import StreamHandler
        from colorlog import ColoredFormatter

        stream = StreamHandler()
        stream.setFormatter(ColoredFormatter(FORMAT))

        logging_kwargs['handlers'] = [stream]

    except ImportError:
        pass

    basicConfig(**logging_kwargs)
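If colorlog is absent, the except branch leaves logging_kwargs without 'handlers', so basicConfig falls back to a plain root handler; either way a single call suffices:

import logging

setup_logging()
logging.getLogger(__name__).debug('colored if colorlog is installed, plain otherwise')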
Example 21
def setup_logging():
    global log

    progname = basename(argv[0])
    log = getLogger()
    log.setLevel(DEBUG)

    handlers = []
    buildlog_handler = FileHandler(getenv("HOME") + "/build.log")
    buildlog_handler.setFormatter(
        Log8601Formatter("%(asctime)s " + progname + " %(levelname)s " +
                         "%(filename)s:%(lineno)s: %(message)s"))
    handlers.append(buildlog_handler)

    stderr_handler = StreamHandler(stderr)
    stderr_handler.setFormatter(
        Log8601Formatter("%(asctime)s %(name)s %(levelname)s " +
                         "%(filename)s:%(lineno)s: %(message)s"))
    handlers.append(stderr_handler)
    
    if exists("/dev/log"):
        syslog_handler = SysLogHandler(
            address="/dev/log", facility=LOG_LOCAL1)
        syslog_handler.setFormatter(
            Log8601Formatter(progname +
                             " %(asctime)s %(levelname)s: %(message)s"))
        handlers.append(syslog_handler)

    log.addHandler(MultiHandler(handlers))

    getLogger("boto").setLevel(INFO)
    getLogger("boto3").setLevel(INFO)
    getLogger("botocore").setLevel(INFO)
    return
Example 22
def create_app():
    app = Flask(__name__)

    # Append CORS headers to each request.
    app.after_request(cors_headers)

    # Register views.
    app.register_blueprint(main)
    app.register_blueprint(static)
    app.register_blueprint(status)

    # Use a dummy data generator while Yahoo BOSS access is being sorted out.
    app.register_blueprint(dummy)

    # Log using the mozlog format to stdout.
    handler = StreamHandler(stream=stdout)
    handler.setFormatter(MozLogFormatter(logger_name='universalSearch'))
    handler.setLevel(INFO)
    app.logger_name = 'request.summary'
    app.logger.addHandler(handler)
    app.logger.setLevel(INFO)

    # Use logging middleware.
    if not conf.TESTING:
        app.before_request(request_timer)
        app.after_request(request_summary)

    app.config.update(
        CELERY_BROKER_URL=conf.CELERY_BROKER_URL,
        DEBUG=conf.DEBUG
    )
    return app
Example 23
def log_server(level, queue, filename, mode='w'):
    """Run the logging server.

    This listens to the queue of log messages, and handles them using Python's
    logging handlers.  It prints to stderr, as well as to a specified file, if
    it is given.

    """
    formatter = _get_formatter()
    handlers = []

    sh = StreamHandler()
    sh.setFormatter(formatter)
    sh.setLevel(level)
    handlers.append(sh)

    if filename:
        fh = FileHandler(filename, mode)
        fh.setFormatter(formatter)
        fh.setLevel(level)
        handlers.append(fh)

    listener = QueueListener(queue, *handlers)
    listener.start()

    # For some reason, queuelisteners run on a separate thread, so now we just
    # "busy wait" until terminated.
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    finally:
        listener.stop()
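The queue is presumably fed from worker processes through logging.handlers.QueueHandler; a hedged sketch of the producing side:

import logging
from logging.handlers import QueueHandler
from multiprocessing import Queue

queue = Queue()  # shared with the log_server process
worker_logger = logging.getLogger('worker')
worker_logger.addHandler(QueueHandler(queue))  # records travel through the queue
worker_logger.setLevel(logging.INFO)
worker_logger.info('handled by the listener in log_server')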
Example 24
def main():
    parser = ArgumentParser(description=(__doc__),
                            formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('filename')
    parser.add_argument('--log',
                        default='INFO',
                        help=('Set log level. e.g. DEBUG, INFO, WARN'))
    parser.add_argument('-d', '--debug',
                        action='store_true',
                        help=('Aliased to --log=DEBUG'))
    parser.add_argument('-v', '--version',
                        action='version',
                        version=u"%(prog)s {}".format(VERSION),
                        help=u'Show version and exit.')
    parser.add_argument('-u', '--unacceptable_level',
                        action='store',
                        default='CRITICAL',
                        help=(u'Error level that aborts the check.'))
    args = parser.parse_args()
    if args.debug:
        args.log = 'DEBUG'

    logger = getLogger(__name__)
    handler = StreamHandler()
    logger.setLevel(args.log.upper())
    handler.setLevel(args.log.upper())
    logger.addHandler(handler)

    lint(args, logger)
Example 25
 def watch(self, level=INFO, out=stdout):
     self.stop()
     handler = StreamHandler(out)
     handler.setFormatter(self.formatter)
     self.handlers[self.logger_name] = handler
     self.logger.addHandler(handler)
     self.logger.setLevel(level)
Example 26
def setup_logger(app_name):
    """ Instantiate a logger object

        Usage:
            logger = setup_logger('foo')     # saved as foo.log
            logger.info("Some info message")
            logger.warn("Some warning message")
            logger.error("Some error message")
            ... [for more options see: http://docs.python.org/2/library/logging.html]
    """
    logger = getLogger(app_name)
    logger.setLevel(DEBUG)
    # create file handler which logs even debug messages
    fh = FileHandler(app_name + '.log')
    fh.setLevel(DEBUG)
    # create console handler with a higher log level
    ch = StreamHandler()
    ch.setLevel(ERROR)
    # create formatter and add it to the handlers
    formatter = Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)
    return logger
Example 27
def get_logger_to_stderr():
    logger = getLogger("kagura.stderr")
    handler = StreamHandler()
    handler.setLevel(DEBUG)
    logger.setLevel(DEBUG)
    logger.addHandler(handler)
    return logger  # writes to stderr only; no file handler is attached
Example 28
File: app.py Project: srt32/rodeo
def configure_logging(app):
    """Configure file(info) and email(error) logging."""
    if app.debug or app.testing:
        # Skip debug and test mode. Just check standard output.
        return

    # Set INFO level on the logger; handlers may override it.
    # Suppress DEBUG messages.
    app.logger.setLevel(INFO)

    info_file_handler = StreamHandler()
    info_file_handler.setLevel(INFO)
    info_file_handler.setFormatter(Formatter(
        '%(asctime)s %(levelname)s: %(message)s '
        '[in %(pathname)s:%(lineno)d]')
    )
    app.logger.addHandler(info_file_handler)

    # Testing
    # app.logger.info("testing info.")
    # app.logger.warn("testing warn.")
    # app.logger.error("testing error.")

    mail_handler = SMTPHandler(app.config['MAIL_SERVER'],
                               app.config['MAIL_USERNAME'],
                               app.config['ADMINS'],
                               'O_ops... %s failed!' % app.config['PROJECT'],
                               (app.config['MAIL_USERNAME'],
                                app.config['MAIL_PASSWORD']))
    mail_handler.setLevel(ERROR)
    mail_handler.setFormatter(Formatter(
        '%(asctime)s %(levelname)s: %(message)s '
        '[in %(pathname)s:%(lineno)d]')
    )
    app.logger.addHandler(mail_handler)
Example 29
def initialize_logging():
    initialize_root_logger()
    initialize_conda_logger()

    formatter = Formatter("%(message)s\n")

    stdout = getLogger('stdout')
    stdout.setLevel(INFO)
    stdouthandler = StreamHandler(sys.stdout)
    stdouthandler.setLevel(INFO)
    stdouthandler.setFormatter(formatter)
    stdout.addHandler(stdouthandler)
    stdout.addFilter(TokenURLFilter())
    stdout.propagate = False

    stderr = getLogger('stderr')
    stderr.setLevel(INFO)
    stderrhandler = StreamHandler(sys.stderr)
    stderrhandler.setLevel(INFO)
    stderrhandler.setFormatter(formatter)
    stderr.addHandler(stderrhandler)
    stderr.addFilter(TokenURLFilter())
    stderr.propagate = False

    binstar = getLogger('binstar')
    binstar.setLevel(CRITICAL+1)
    binstar.addHandler(NullHandler())
    binstar.propagate = False
    binstar.disabled = True
Example 30
def bootstrap(configfile, console_app):
    conf = config.Config(configfile)
    
    loglevels = {"debug": DEBUG, "info": INFO, "error": ERROR, "warning": WARNING }
    
    basicConfig(filename=conf.logfile,
                format="[%(asctime)s] %(name)s |%(levelname)s| %(message)s",
                datefmt="%Y-%m-%d %H:%M:%S",
                level=loglevels[conf.loglevel])
    
    if console_app:
        root_logger = getLogger()
        console_handler = StreamHandler(stdout)
        console_handler.setLevel(DEBUG)
        formatter = Formatter("[%(asctime)s] %(name)s |%(levelname)s| %(message)s", "%Y-%m-%d %H:%M:%S")
        console_handler.setFormatter(formatter)
        root_logger.addHandler(console_handler)
        info("started as console application")
    else:
        info("started as daemon")
    
    info("using config file: '{c}'".format(c=configfile))
    debug("working directory: '{w}', pid: '{pid}'".format(w=getcwd(), pid=getpid()))
    debug("config:\n" + str(conf.get_raw_cfg()))
    signal(SIGTERM, signal_handler)
Example 31
from logging import getLogger, StreamHandler, Formatter, NOTSET


ntfs_logger = getLogger('NTFS')
ntfs_handler = StreamHandler()
ntfs_formatter = Formatter(
    '[%(asctime)s] %(name)s:%(levelname)s:%(message)s')
ntfs_handler.setFormatter(ntfs_formatter)
ntfs_logger.addHandler(ntfs_handler)
ntfs_logger.setLevel(NOTSET)


def set_ntfs_log_level(log_level: int) -> None:
    global ntfs_logger
    ntfs_logger.setLevel(log_level)


__all__ = ['set_ntfs_log_level']
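Usage sketch; raising the level above NOTSET filters records without touching the handler:

import logging

set_ntfs_log_level(logging.ERROR)  # only errors from the 'NTFS' logger now pass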
Example 32
def configure_logs(app):
    basicConfig(filename='error.log', level=DEBUG)
    logger = getLogger()
    logger.addHandler(StreamHandler())
Example 33
#
# PY3K COMPATIBLE

import json
from logging import getLogger, StreamHandler, DEBUG
from flask import Flask, Blueprint, Response, request as request
from flask.views import MethodView
from traceback import format_exc

from rucio.api import config
from rucio.common.exception import ConfigurationError
from rucio.common.utils import generate_http_error_flask
from rucio.web.rest.flaskapi.v1.common import before_request, after_request, check_accept_header_wrapper_flask

LOGGER = getLogger("rucio.config")
SH = StreamHandler()
SH.setLevel(DEBUG)
LOGGER.addHandler(SH)

URLS = ('/(.+)/(.+)/(.*)', 'OptionSet', '/(.+)/(.+)', 'OptionGetDel', '/(.+)',
        'Section', '', 'Config')


class Config(MethodView):
    """ REST API for full configuration. """
    @check_accept_header_wrapper_flask(['application/json'])
    def get(self):
        """
        List full configuration.

        .. :quickref: Config; List full config.
Example 34
import logging
from logging import getLogger, Formatter, StreamHandler, DEBUG
import os
import sys

import numpy as np
from chainer import cuda, Variable

##############################
# logging

logger = getLogger("logger")
logger.setLevel(DEBUG)

handler = StreamHandler()
handler.setLevel(DEBUG)
handler.setFormatter(Formatter(fmt="%(message)s"))
logger.addHandler(handler)

def set_logger(filename):
    if os.path.exists(filename):
        logger.debug("[info] A file %s already exists." % filename)
        do_remove = raw_input("[info] Delete the existing log file? [y/n]: ")
        if (not do_remove.lower().startswith("y")) and (not len(do_remove) == 0):
            logger.debug("[info] Done.")
            sys.exit(0)
    logging.basicConfig(level=DEBUG, format="%(message)s", filename=filename, filemode="w")

##############################
# pre-trained word embeddings
Example 35
from logging import Formatter, StreamHandler, FileHandler
from datetime import datetime
from os import sep


def archivo_logs():
    with open('INFO' + sep + 'LeagueOfProgra.log', 'w',
              encoding='utf-8') as archivo:
        archivo.write(
            '\n ------------------------------------------------\n {}\n\n'.
            format(datetime.now()))


archivo = 'INFO' + sep + 'LeagueOfProgra.log'
formatter = Formatter('%(name)s, linea %(lineno)d]:   %(message)s ')
stream_handler = StreamHandler()
stream_handler.setFormatter(formatter)
file_handler = FileHandler(archivo)
file_handler.setFormatter(formatter)
Example 36
 def setUpClass(cls):
     root_logger = getLogger()
     if not root_logger.hasHandlers():
         root_logger.addHandler(StreamHandler())
Example 37
def setup_logging(level,
                  console_stream=None,
                  log_dir=None,
                  scope=None,
                  log_name=None):
    """Configures logging for a given scope, by default the global scope.

  :param str level: The logging level to enable, must be one of the level names listed here:
                    https://docs.python.org/2/library/logging.html#levels
  :param file console_stream: The stream to use for default (console) logging. If None (default),
                              this will disable console logging.
  :param str log_dir: An optional directory to emit logs files in.  If unspecified, no disk logging
                      will occur.  If supplied, the directory will be created if it does not already
                      exist and all logs will be tee'd to a rolling set of log files in that
                      directory.
  :param str scope: A logging scope to configure.  The scopes are hierarchical logger names, with
                    the '.' separator providing the scope hierarchy.  By default the root logger is
                    configured.
  :param str log_name: The base name of the log file (defaults to 'pants.log').
  :returns: A LoggingSetupResult holding the full path to the main log file (or `None` if file
            logging is not configured) and the underlying log stream.
  :rtype: LoggingSetupResult
  """

    # TODO(John Sirois): Consider moving to straight python logging.  The divide between the
    # context/work-unit logging and standard python logging doesn't buy us anything.

    # TODO(John Sirois): Support logging.config.fileConfig so a site can setup fine-grained
    # logging control and we don't need to be the middleman plumbing an option for each python
    # standard logging knob.

    log_filename = None
    log_stream = None

    logger = logging.getLogger(scope)
    for handler in logger.handlers:
        logger.removeHandler(handler)

    if console_stream:
        console_handler = StreamHandler(stream=console_stream)
        console_handler.setFormatter(
            Formatter(fmt='%(levelname)s] %(message)s'))
        console_handler.setLevel(level)
        logger.addHandler(console_handler)

    if log_dir:
        safe_mkdir(log_dir)
        log_filename = os.path.join(log_dir, log_name or 'pants.log')
        file_handler = RotatingFileHandler(log_filename,
                                           maxBytes=10 * 1024 * 1024,
                                           backupCount=4)
        log_stream = file_handler.stream

        class GlogFormatter(Formatter):
            LEVEL_MAP = {
                logging.FATAL: 'F',
                logging.ERROR: 'E',
                logging.WARN: 'W',
                logging.INFO: 'I',
                logging.DEBUG: 'D'
            }

            def format(self, record):
                datetime = time.strftime('%m%d %H:%M:%S',
                                         time.localtime(record.created))
                micros = int((record.created - int(record.created)) * 1e6)
                return '{levelchar}{datetime}.{micros:06d} {process} {filename}:{lineno}] {msg}'.format(
                    levelchar=self.LEVEL_MAP[record.levelno],
                    datetime=datetime,
                    micros=micros,
                    process=record.process,
                    filename=record.filename,
                    lineno=record.lineno,
                    msg=record.getMessage())

        file_handler.setFormatter(GlogFormatter())
        file_handler.setLevel(level)
        logger.addHandler(file_handler)

    logger.setLevel(level)

    # This routes warnings through our loggers instead of straight to raw stderr.
    logging.captureWarnings(True)

    return LoggingSetupResult(log_filename, log_stream)
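A hedged call sketch; LoggingSetupResult and safe_mkdir are assumed to be defined alongside this function, and the directory is hypothetical:

import sys

result = setup_logging('INFO',
                       console_stream=sys.stderr,
                       log_dir='/tmp/pants-logs',
                       log_name='pants.log')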
Example 38
def create_app(conf=None, verbose=0, logfile=None, **kwargs):
    """Initialize the whole application.

    :param conf: Configuration file to use
    :type conf: str

    :param verbose: Set the verbosity level
    :type verbose: int

    :param logfile: Store the logs in the given file
    :type logfile: str

    :param kwargs: Extra options:
                   - gunicorn (bool): Enable gunicorn engine instead of flask's
                   default. Default is True.
                   - unittest (bool): Are we running tests (used for test only).
                   Default is False.
                   - debug (bool): Enable debug mode. Default is False.
                   - cli (bool): Are we running the CLI. Default is False.
                   - reverse_proxy (bool): Are we behind a reverse-proxy.
                   Default is True if gunicorn is True
    :type kwargs: dict

    :returns: A :class:`burpui.server.BUIServer` object
    """
    from flask import g, request, session
    from flask_login import LoginManager
    from flask_bower import Bower
    from flask_babel import gettext
    from .utils import ReverseProxied, lookup_file, is_uuid
    from .security import basic_login_from_request
    from .server import BUIServer as BurpUI
    from .sessions import session_manager
    from .ext.cache import cache
    from .ext.i18n import babel, get_locale
    from .misc.auth.handler import BUIanon

    logger = logging.getLogger('burp-ui')

    gunicorn = kwargs.get('gunicorn', True)
    unittest = kwargs.get('unittest', False)
    debug = kwargs.get('debug', False)
    cli = kwargs.get('cli', False)
    reverse_proxy = kwargs.get('reverse_proxy', gunicorn)
    celery_worker = kwargs.get('celery_worker', False)
    websocket_server = kwargs.get('websocket_server', False)

    # The debug argument used to be a boolean so we keep supporting this format
    if isinstance(verbose, bool):
        if verbose:
            verbose = logging.DEBUG
        else:
            verbose = logging.CRITICAL
    else:
        levels = [
            logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO,
            logging.DEBUG
        ]
        if verbose >= len(levels):
            verbose = len(levels) - 1
        if not verbose:
            verbose = 0
        verbose = levels[verbose]

    if logfile:
        from logging.handlers import RotatingFileHandler
        handler = RotatingFileHandler(logfile,
                                      maxBytes=1024 * 1024 * 100,
                                      backupCount=5)
    else:
        from logging import StreamHandler
        handler = StreamHandler()

    if verbose > logging.DEBUG:
        LOG_FORMAT = ('[%(asctime)s] %(levelname)s in '
                      '%(module)s.%(funcName)s: %(message)s')
    else:
        LOG_FORMAT = ('-' * 27 + '[%(asctime)s]' + '-' * 28 + '\n' +
                      '%(levelname)s in %(module)s.%(funcName)s ' +
                      '[%(pathname)s:%(lineno)d]:\n' + '%(message)s\n' +
                      '-' * 80)

    handler.setLevel(verbose)
    handler.setFormatter(Formatter(LOG_FORMAT))

    logger.setLevel(verbose)

    logger.addHandler(handler)

    logger.debug('conf: {}\n'.format(conf) +
                 'verbose: {}\n'.format(logging.getLevelName(verbose)) +
                 'logfile: {}\n'.format(logfile) +
                 'gunicorn: {}\n'.format(gunicorn) +
                 'debug: {}\n'.format(debug) +
                 'unittest: {}\n'.format(unittest) + 'cli: {}\n'.format(cli) +
                 'reverse_proxy: {}'.format(reverse_proxy))

    if not unittest:  # pragma: no cover
        from ._compat import patch_json
        patch_json()

    # We initialize the core
    app = BurpUI()
    if verbose:
        app.enable_logger()
    app.gunicorn = gunicorn

    app.config['CFG'] = None

    # Some config
    app.config['BUI_CLI'] = cli

    # FIXME: strange behavior when bundling errors
    # app.config['BUNDLE_ERRORS'] = True

    app.config['REMEMBER_COOKIE_HTTPONLY'] = True

    if debug and not gunicorn:  # pragma: no cover
        app.config['DEBUG'] = True and not unittest
        app.config['TESTING'] = True and not unittest

    # Still need to test conf file here because the init function can be called
    # by gunicorn directly
    if conf:
        app.config['CFG'] = lookup_file(conf, guess=False)
    else:
        app.config['CFG'] = lookup_file()

    logger.info('Using configuration: {}'.format(app.config['CFG']))

    app.setup(app.config['CFG'], unittest, cli)

    if debug:
        app.config.setdefault('TEMPLATES_AUTO_RELOAD', True)
        app.config['TEMPLATES_AUTO_RELOAD'] = True
        app.config['DEBUG'] = True

    # manage application secret key
    if app.secret_key and \
            (app.secret_key.lower() == 'none' or
             (app.secret_key.lower() == 'random' and
              gunicorn)):  # pragma: no cover
        logger.critical('Your setup is not secure! Please consider setting a'
                        ' secret key in your configuration file')
        app.secret_key = 'Burp-UI'
    if not app.secret_key or app.secret_key.lower() == 'random':
        from base64 import b64encode
        app.secret_key = b64encode(os.urandom(256))

    app.wsgi_app = ReverseProxied(app.wsgi_app, app)

    # Manage reverse_proxy special tricks & improvements
    if reverse_proxy:  # pragma: no cover
        from werkzeug.contrib.fixers import ProxyFix

        app.wsgi_app = ProxyFix(app.wsgi_app)

    if app.storage and app.storage.lower() == 'redis':
        try:
            # Session setup
            if not app.session_db or \
                    str(app.session_db).lower() not in ['none', 'false']:
                from redis import Redis
                from .ext.session import sess
                host, port, pwd = get_redis_server(app)
                db = 0
                if app.session_db and \
                        str(app.session_db).lower() not \
                        in ['redis', 'default', 'true']:
                    try:  # pragma: no cover
                        (_, _, pwd, host, port, db) = \
                            parse_db_setting(app.session_db)
                    except ValueError as exp:
                        logger.warning(str(exp))
                try:
                    db = int(db)
                except ValueError:
                    db = 0
                logger.debug(
                    'SESSION: Using redis://guest:****@{}:{}/{}'.format(
                        host, port, db))
                red = Redis(host=host, port=port, db=db, password=pwd)
                app.config['WITH_SRV_SESSION'] = True
                app.config['SESSION_TYPE'] = 'redis'
                app.config['SESSION_REDIS'] = red
                app.config['SESSION_USE_SIGNER'] = app.secret_key is not None
                app.config['SESSION_PERMANENT'] = False
                sess.init_app(app)
                session_manager.backend = red
        except Exception as exp:  # pragma: no cover
            logger.warning('Unable to initialize session: {}'.format(str(exp)))
            app.config['WITH_SRV_SESSION'] = False
        try:
            # Cache setup
            if not app.cache_db or \
                    str(app.cache_db).lower() not in ['none', 'false']:
                host, port, pwd = get_redis_server(app)
                db = 1
                if app.cache_db and \
                        str(app.cache_db).lower() not \
                        in ['redis', 'default', 'true']:
                    try:  # pragma: no cover
                        (_, _, pwd, host, port, db) = \
                            parse_db_setting(app.cache_db)
                    except ValueError as exp:
                        logger.warning(str(exp))
                try:
                    db = int(db)
                except ValueError:
                    db = 1
                logger.debug('CACHE: Using redis://guest:****@{}:{}/{}'.format(
                    host, port, db))
                cache.init_app(app,
                               config={
                                   'CACHE_TYPE': 'redis',
                                   'CACHE_REDIS_HOST': host,
                                   'CACHE_REDIS_PORT': port,
                                   'CACHE_REDIS_PASSWORD': pwd,
                                   'CACHE_REDIS_DB': db
                               })
                # clear cache at startup in case we removed or added servers
                with app.app_context():
                    cache.clear()
            else:  # pragma: no cover
                cache.init_app(app)
        except Exception as exp:  # pragma: no cover
            logger.warning('Unable to initialize cache: {}'.format(str(exp)))
            cache.init_app(app)
        try:
            # Limiter setup
            if app.limiter and str(app.limiter).lower() not \
                    in ['none', 'false']:  # pragma: no cover
                from .ext.limit import limiter
                app.config['RATELIMIT_HEADERS_ENABLED'] = True
                if app.limiter and str(app.limiter).lower() not \
                        in ['default', 'redis', 'true']:
                    app.config['RATELIMIT_STORAGE_URL'] = app.limiter
                else:
                    db = 3
                    host, port, pwd = get_redis_server(app)
                    if pwd:
                        conn = 'redis://*****:*****@{}:{}/{}'.format(
                            pwd, host, port, db)
                    else:
                        conn = 'redis://{}:{}/{}'.format(host, port, db)
                    app.config['RATELIMIT_STORAGE_URL'] = conn

                (_, _, pwd, host, port,
                 db) = parse_db_setting(app.config['RATELIMIT_STORAGE_URL'])

                logger.debug(
                    'LIMITER: Using redis://guest:****@{}:{}/{}'.format(
                        host, port, db))
                limiter.init_app(app)
                app.config['WITH_LIMIT'] = True
        except ImportError:  # pragma: no cover
            logger.warning('Unable to load limiter. Did you run \'pip install '
                           'flask-limiter\'?')
        except Exception as exp:  # pragma: no cover
            logger.warning('Unable to initialize limiter: {}'.format(str(exp)))
    else:
        cache.init_app(app)

    # Initialize i18n
    babel.init_app(app)

    # Create SQLAlchemy if enabled
    create_db(app, cli, unittest, celery_worker=celery_worker)

    if not celery_worker:
        from .api import api, apibp
        from .routes import view, mypad

        app.jinja_env.globals.update(
            isinstance=isinstance,
            list=list,
            mypad=mypad,
            version_id='{}-{}'.format(__version__, __release__),
        )

        # We initialize the API
        api.load_all()
        app.register_blueprint(apibp)

        # Then we load our routes
        app.register_blueprint(view)

        # Initialize Bower ext
        app.config.setdefault('BOWER_COMPONENTS_ROOT',
                              os.path.join('static', 'vendor'))
        app.config.setdefault('BOWER_REPLACE_URL_FOR', True)
        bower = Bower()
        bower.init_app(app)

    # Order of the initialization matters!
    # The websocket must be configured prior to the celery worker for instance

    # Initialize Session Manager
    session_manager.init_app(app)

    # And the login_manager
    app.login_manager = LoginManager()
    app.login_manager.anonymous_user = BUIanon
    app.login_manager.login_view = 'view.login'
    app.login_manager.login_message_category = 'info'
    app.login_manager.session_protection = 'strong'
    # This is just to have the strings in the .po files
    app.login_manager.login_message = gettext(
        'Please log in to access this page.')
    app.login_manager.needs_refresh_message = gettext(
        'Please reauthenticate to access this page.')
    # This will be called at runtime and will then translate the strings
    app.login_manager.localize_callback = gettext
    app.login_manager.init_app(app)

    # Create WebSocket server
    create_websocket(app, websocket_server, celery_worker, gunicorn, cli)

    # Create celery app if enabled
    create_celery(app, warn=False)

    def _check_session(user, request, api=False):
        """Check if the session is in the db"""
        if user and not session_manager.session_in_db():  # pragma: no cover
            login = getattr(user, 'name', None)
            if login and not is_uuid(login):
                remember = session.get('persistent', False)
                if not remember:
                    from flask_login import decode_cookie
                    remember_cookie = request.cookies.get(
                        app.config.get('REMEMBER_COOKIE_NAME'), False)
                    # check if the remember_cookie is legit
                    if remember_cookie and decode_cookie(remember_cookie):
                        remember = True
                session_manager.store_session(
                    login, request.remote_addr,
                    request.headers.get('User-Agent'), remember, api)
            elif login:
                app.uhandler.remove(login)

    @app.before_request
    def setup_request():
        g.locale = get_locale()
        g.date_format = session.get('dateFormat', 'llll')
        # make sure to store secure cookie if required
        if app.scookie:
            criteria = [
                request.is_secure,
                request.headers.get('X-Forwarded-Proto', 'http') == 'https'
            ]
            app.config['SESSION_COOKIE_SECURE'] = \
                app.config['REMEMBER_COOKIE_SECURE'] = any(criteria)

    @app.login_manager.user_loader
    def load_user(userid):
        """User loader callback"""
        if app.auth != 'none':
            user = app.uhandler.user(userid)
            if not user:
                return None
            if 'X-Language' in request.headers:
                language = request.headers.get('X-Language')
                user.language = language
                session['language'] = language
            if '_id' not in session:
                from flask_login import login_user
                # if _id is not in session, it means we loaded the user from
                # cache/db using the remember cookie so we need to login it
                login_user(user, remember=user.is_authenticated, fresh=False)
            _check_session(user, request)
            return user
        return None

    @app.login_manager.request_loader
    def load_user_from_request(request):
        """User loader from request callback"""
        if app.auth != 'none':
            user = basic_login_from_request(request, app)
            _check_session(user, request, True)
            return user

    @app.after_request
    def after_request(response):
        if getattr(g, 'basic_session', False):
            if session_manager.invalidate_current_session():
                session_manager.delete_session()
        return response

    return app
Example 39
    def get_logger(self):
        """
        get_logger(self)

        Returns the logger object.

        Returns
        -------
        logging.Logger
            Logger object

        Examples
        --------
        >>> get_logger()

        """
        global loggers

        if loggers.get(self.name):
            return loggers.get(self.name)
        else:

            # Setup logging

            # create formatter to be used by multiple handlers
            formatter = logging.Formatter('%(asctime)s | '
                                          '%(pathname)s:%(lineno)d | '
                                          '%(funcName)s | '
                                          '%(levelname)s | '
                                          '%(message)s')

            # Create console / stream handler to write to Console
            stream_log_handler = StreamHandler()
            # Set log level to DEBUG
            stream_log_handler.setLevel(logging.DEBUG)
            # Set formatter to the console handler
            stream_log_handler.setFormatter(formatter)

            # Create file handler to write to a log file
            file_log_handler = RotatingFileHandler('logs/app.log',
                                                   maxBytes=10000000,
                                                   backupCount=10)
            # Set log level to DEBUG
            file_log_handler.setLevel(logging.DEBUG)
            # Set formatter to the file handler
            file_log_handler.setFormatter(formatter)

            # create logger
            logger = logging.getLogger(self.name)
            logger.setLevel(logging.DEBUG)

            # add console log handler to the logger
            logger.addHandler(stream_log_handler)

            # add file log handler to the logger
            logger.addHandler(file_log_handler)

            loggers[self.name] = logger

            # return the logger object
            return logger
Example 40
# coding: utf-8

import sys
from os.path import dirname
from logging import getLogger, StreamHandler

from .app import create_app
from .settings import BACKEND_CONF, BACKEND_NAME
from .backends.store import Backends

handler = StreamHandler(sys.stdout)
logger = getLogger(dirname(__name__))
logger.propagate = True
logger.addHandler(handler)

store = Backends.get_store(BACKEND_NAME, **BACKEND_CONF)
app = create_app(store)
Example 41
 def emit(self, record):
     if app.debug and _should_log_for(app, 'debug'):
         StreamHandler.emit(self, record)
Example 42
# Flag for feature extracting. When False, we finetune the whole model;
#   when True, we only update the reshaped layer params
feature_extract = True

# GPU ID to use; check available devices with the `nvidia-smi` command
gpu_id = 0

# output dir for logs, graphs, etc.
output_dir = "../outputs/{}_{}_{}".format(model_name, batch_size, num_epochs)
os.mkdir(output_dir)

# root logger setting
logger = getLogger()
formatter = Formatter(
    '%(asctime)s %(name)s %(funcName)s [%(levelname)s]: %(message)s')
handler = StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
file_handler = FileHandler(filename=output_dir + "/train.log")
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.setLevel(DEBUG)

# log torch versions
logger.info("PyTorch Version: {}".format(torch.__version__))
logger.info("Torchvision Version: {}".format(torchvision.__version__))

# Detect if we have a GPU available
device = torch.device(
    "cuda:{}".format(gpu_id) if torch.cuda.is_available() else "cpu")
Example 43
from lib.python.ui.js_plot_dialog import *

import urllib.request
blocklyURL = "file:" + urllib.request.pathname2url(
    os.path.join(currentDirPath, "lib", "editor", "index.html"))

# Log file setting.
# import logging
# logging.basicConfig(filename='MainWindow.log', level=logging.DEBUG)

# Log output setting.
# With handler = StreamHandler(), log output goes to the console (stderr by default).
from logging import getLogger, NullHandler, StreamHandler, DEBUG
logger = getLogger(__name__)
handler = NullHandler() if True else StreamHandler()
handler.setLevel(DEBUG)
logger.setLevel(DEBUG)
logger.addHandler(handler)

import math


def nCr(n, r):
    f = math.factorial
    if r > n:
        return 1
    else:
        return f(n) / f(r) / f(n - r)

Example 44
 def emit(self, record):
     if not app.debug and _should_log_for(app, 'production'):
         StreamHandler.emit(self, record)
Example 45
File: tests.py Project: michelp/pq
from psycopg2cffi.pool import ThreadedConnectionPool
from psycopg2cffi import ProgrammingError
from psycopg2cffi.extensions import cursor
from psycopg2cffi.extras import NamedTupleCursor

from pq import (
    PQ,
    Queue,
)

from pq.tasks import Queue as TaskQueue

# Set up logging such that we can quickly enable logging for a
# particular queue instance (via the `logging` flag).
getLogger('pq').setLevel(INFO)
getLogger('pq').addHandler(StreamHandler())

# Make rescheduling test suite less verbose
getLogger('pq.tasks').setLevel(CRITICAL)


def mean(values):
    return sum(values) / float(len(values))


def stdev(values, c):
    n = len(values)
    ss = sum((x - c)**2 for x in values)
    ss -= sum((x - c) for x in values)**2 / n
    return sqrt(ss / (n - 1))
Example 46
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import enum
import logging
from logging import StreamHandler

handler = StreamHandler()
logger = logging.getLogger(__name__)


class Level(enum.Enum):
    FATAL = logging.FATAL
    ERROR = logging.ERROR
    WARN = logging.WARN
    INFO = logging.INFO
    DEBUG = logging.DEBUG


class ColoredFormatter(logging.Formatter):
    RESET_SEQ = "\033[0m"
    COLOR_SEQ = "\033[1;%dm"
    COLOR_START = "COLOR_START"
    COLOR_END = "COLOR_END"
    BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
    COLORS = {
        'ERROR': RED,
        'WARNING': YELLOW,
        'INFO': WHITE,
        'DEBUG': CYAN,
    }
Example 47
OUT_DIR = '../input/custom/'
OUT_TRAIN_FILE = '201808231336_train_tsne.csv'
OUT_TEST_FILE = '201808231336_test_tsne.csv'

KEY_COL = 'SK_ID_CURR'
TGT_COL = 'TARGET'

LOG_DIR = '../log/'
LOG_FILE = 'tsne.py.log'

# Log Settings
logger = getLogger(__name__)
log_fmt = Formatter(
    '%(asctime)s %(name)s %(lineno)d [%(levelname)s][%(funcName)s] %(message)s'
)
log_handler = StreamHandler()
log_handler.setLevel(INFO)
log_handler.setFormatter(log_fmt)
logger.addHandler(log_handler)
log_handler = FileHandler(LOG_DIR + LOG_FILE, 'a')
log_handler.setFormatter(log_fmt)
logger.setLevel(DEBUG)
logger.addHandler(log_handler)

logger.info('start tsne.py')

logger.info('--- load data ---')
logger.info('loading: ' + IN_DIR + IN_TRAIN_FILE)
df_train = pd.read_csv(IN_DIR + IN_TRAIN_FILE)
train_rows = len(df_train)
logger.info('df_train: {}'.format(df_train.shape))
    logger.info("Output LP file")
    model.writeLP("santa2019_pulp.lp")

    logger.info("optimization starts")

    solver = COIN_CMD(presolve=1, threads=8, maxSeconds=3600, msg=1)

    model.solve(solver)


if __name__ == "__main__":
    from logging import StreamHandler, DEBUG, Formatter, FileHandler

    log_fmt = Formatter(
        "%(asctime)s %(name)s %(lineno)d [%(levelname)s][%(funcName)s] %(message)s "
    )

    handler = StreamHandler()
    handler.setLevel("INFO")
    handler.setFormatter(log_fmt)
    logger.setLevel("INFO")
    logger.addHandler(handler)

    handler = FileHandler(os.path.basename(os.path.abspath(__file__)) + ".log", "a")
    handler.setLevel(DEBUG)
    handler.setFormatter(log_fmt)
    logger.setLevel(DEBUG)
    logger.addHandler(handler)

    main()
Example 49
 def get_handler(self) -> Handler:
     return StreamHandler(sys.stdout)
Example 50
from ...const_value import TW_CONSUMER_KEY, TW_CONSUMER_SECRET
from ...boto3 import add_json_to_dynamo_tweet_json
from .twitter_image import TwitterImage, convert_twitter
from .base64_util import base64_encode_str
from ...const_value import IS_DEBUG
import requests
from typing import Any, Dict, Optional, cast, List
import os
import json
from logging import LogRecord, getLogger, StreamHandler, DEBUG, INFO

logger = getLogger(__name__)  # makes it clear from here on that log output came from this file
handler = StreamHandler()
handler.setLevel(DEBUG)
logger.setLevel(DEBUG)
logger.addHandler(handler)
logger.propagate = False


def mkdir_notexists(dirs: List[str]):
    for dirpath in dirs:
        if not os.path.exists(dirpath):
            os.makedirs(dirpath)
            logger.info(f"[mkdir_noexists] mkdir {dirpath}")


if not IS_DEBUG:
    logger.setLevel(INFO)

# const
TOKEN_FILENAME: str = ".token.json"
Example 51
import numpy as np
from logging import getLogger, DEBUG, StreamHandler

from edit_img import image_processing

logger = getLogger(__name__)
logger.setLevel(DEBUG)
handler = StreamHandler()
handler.setLevel(DEBUG)
logger.addHandler(handler)


def noise(img, noise_param=10000, debug=False):
    logger.debug("generating noise")
    row, col = img.shape
    logger.debug("noise" + str(noise_param / (row * col) * 100) + "%")

    # 白
    pts_x = np.random.randint(0, col - 1, noise_param)
    pts_y = np.random.randint(0, row - 1, noise_param)
    img[(pts_y, pts_x)] = 255  # y,xの順番になることに注意

    # 黒
    pts_x = np.random.randint(0, col - 1, noise_param)
    pts_y = np.random.randint(0, row - 1, noise_param)
    img[(pts_y, pts_x)] = 0
    if debug:
        image_processing.image_show(img)
    return img


def trim(img, left=0., right=1., top=0., bottom=1., debug=False):
Example 52
def get_logger(outdir):
    file_name = sys.argv[0]

    os.makedirs(outdir, exist_ok=True)

    logger = getLogger(__name__)
    log_fmt = Formatter(LOG_STR)
    handler = StreamHandler()
    handler.setLevel('INFO')
    handler.setFormatter(log_fmt)
    logger.addHandler(handler)

    handler = FileHandler('{0}/{1}.log'.format(outdir, file_name), 'a')
    handler.setLevel(DEBUG)
    handler.setFormatter(log_fmt)
    logger.setLevel(DEBUG)
    logger.addHandler(handler)

    return logger
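Usage sketch, assuming LOG_STR is a format string defined in the same module:

logger = get_logger('/tmp/out')  # hypothetical output directory
logger.info('INFO and up to the console; DEBUG and up to the log file')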
Example 53
import newrelic.agent
import sys
import os
import logging
import httplib
import traceback
from flask import jsonify
from logging import Formatter, StreamHandler
from logging.handlers import SysLogHandler

if not app.config.get('TESTING'):
    newrelic.agent.initialize('newrelic.ini')
os.environ['TZ'] = 'US/Eastern'

# Initialize logging
streamhandler = StreamHandler(sys.stdout)
sysloghandler = SysLogHandler(address=(PAPERTRAIL_URL, PAPERTRAIL_PORT))
formatter = Formatter(LOG_FORMAT)
streamhandler.setFormatter(formatter)
sysloghandler.setFormatter(formatter)
app.logger.addHandler(sysloghandler)
app.logger.addHandler(streamhandler)
app.logger.setLevel(logging.DEBUG)


def get_credentials(request_info):
    """Get credentials from request."""
    try:
        return getcredentials.get_credentials(request_info.get('args'))
    except ValueError as err:
        print "ValueError in credentials: " + err.message
Esempio n. 54
0
COPY_TAIL = PERIODS_TAIL + "/copy"

# HTTP communication defaults
DEFAULT_HOST = "http://127.0.0.1:5000"
DEFAULT_TIMEOUT = 10


def default_period_name():
    """The current year as string (format YYYY)."""
    return str(dt.today().year)


# Set up the package logger
LOGGER = getLogger(__package__)
LOGGER.setLevel(DEBUG)
_stream_handler = StreamHandler()
_stream_handler.setLevel(INFO)
LOGGER.addHandler(_stream_handler)
FORMATTER = Formatter(
    fmt='%(levelname)s %(asctime)s %(name)s:%(lineno)d %(message)s')


def init_logger(name):
    """Set up module logger. Library loggers are assigned the package logger as
    parent. Any records are propagated to the parent package logger.
    """
    logger = getLogger(name)
    logger.setLevel(DEBUG)

    if logger.parent.name == "root":
        # Library logger; probably has NullHandler
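The body of init_logger is truncated here. As a hedged sketch of what the docstring describes (the reparenting line is an assumption, not recovered code), the idea is to hang library loggers under the package logger so their records reach LOGGER's handlers:

def init_logger_sketch(name):
    # Hypothetical stand-in for the truncated init_logger body.
    logger = getLogger(name)
    logger.setLevel(DEBUG)
    if logger.parent.name == "root":
        # Library logger; probably has a NullHandler. Reparent it under the
        # package logger so its records propagate to LOGGER's handlers.
        logger.parent = LOGGER
    return logger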
Esempio n. 55
0
    def run(self):
        cls_name = self._device_def['cls'].__name__

        # If the multiprocessing start method is fork, the child
        # process gets a copy of the root logger.  The copy is
        # configured to sign the messages as "device-server", and
        # write to the main log file and stderr.  We remove those
        # handlers so that this DeviceServer is logged to a separate
        # file and the messages are signed with the device name.
        root_logger = logging.getLogger()
        # Get a new list of handlers because otherwise we are
        # iterating over the same list as removeHandler().
        for handler in list(root_logger.handlers):
            root_logger.removeHandler(handler)

        if __debug__:
            root_logger.setLevel(logging.DEBUG)
        else:
            root_logger.setLevel(logging.INFO)

        # Later, we'll log to one file per server, with a filename
        # based on a unique identifier for the device. Some devices
        # don't have UIDs available until after initialization, so
        # log to stderr until then.
        stderr_handler = StreamHandler(sys.stderr)
        stderr_handler.setFormatter(_create_log_formatter(cls_name))
        root_logger.addHandler(stderr_handler)
        root_logger.debug("Debugging messages on.")

        root_logger.addFilter(Filter())

        self._device = self._device_def['cls'](**self._device_def['conf'])
        while not self.exit_event.is_set():
            try:
                self._device.initialize()
            except Exception as e:
                _logger.info("Failed to start device. Retrying in 5s.",
                             exc_info=e)
                time.sleep(5)
            else:
                break
        if (isinstance(self._device, microscope.devices.FloatingDeviceMixin)
                and len(self._id_to_host) > 1):
            uid = str(self._device.get_id())
            if uid not in self._id_to_host or uid not in self._id_to_port:
                raise Exception("Host or port not found for device %s" %
                                (uid, ))
            host = self._id_to_host[uid]
            port = self._id_to_port[uid]
        else:
            host = self._device_def['host']
            port = self._device_def['port']
        pyro_daemon = Pyro4.Daemon(port=port, host=host)

        log_handler = RotatingFileHandler('%s_%s_%s.log' %
                                          (cls_name, host, port))
        log_handler.setFormatter(_create_log_formatter(cls_name))
        root_logger.addHandler(log_handler)

        _logger.info('Device initialized; starting daemon.')
        _register_device(pyro_daemon, self._device, obj_id=cls_name)

        # Run the Pyro daemon in a separate thread so that we can do
        # clean shutdown under Windows.
        pyro_thread = Thread(target=pyro_daemon.requestLoop)
        pyro_thread.daemon = True
        pyro_thread.start()
        _logger.info('Serving %s', pyro_daemon.uriFor(self._device))
        if isinstance(self._device, microscope.devices.FloatingDeviceMixin):
            _logger.info('Device UID on port %s is %s', port,
                         self._device.get_id())

        # Wait for termination event. We should just be able to call
        # wait() on the exit_event, but this causes issues with locks
        # in multiprocessing - see http://bugs.python.org/issue30975 .
        while self.exit_event and not self.exit_event.is_set():
            # This thread waits for the termination event.
            try:
                time.sleep(5)
            except (KeyboardInterrupt, IOError):
                pass
        pyro_daemon.shutdown()
        pyro_thread.join()
        self._device.shutdown()
Esempio n. 56
0
import logging.config
import mysql.connector

# Logging configuration
logger = getLogger(__name__)
loggerJSON = getLogger(__name__ + ".json")  # needs its own name: getLogger(__name__) twice returns the same logger
if not logger.handlers and not loggerJSON.handlers:
    fileHandler = FileHandler('twitter.log')
    fileHandlerJSON = FileHandler('json.log')
    formatter = Formatter(
        '%(asctime)s [%(levelname)s] [%(filename)s: %(funcName)s: %(lineno)d] %(message)s'
    )

    fileHandler.setFormatter(formatter)
    fileHandler.setLevel(INFO)
    streamHandler = StreamHandler()
    streamHandler.setLevel(INFO)
    logger.setLevel(INFO)
    logger.addHandler(fileHandler)
    logger.addHandler(streamHandler)

    fileHandlerJSON.setFormatter(formatter)
    fileHandlerJSON.setLevel(DEBUG)
    loggerJSON.setLevel(DEBUG)
    loggerJSON.addHandler(fileHandlerJSON)

# Twitter connection keys
CK = 'SnrvKbQoNiMG3bOVP3SpaxcLZ'  # Consumer Key
CS = 'pjWoIENdJ0IPBdO0nyhRVAS9QKKnKDpAnhT0fGOfal7a0kM8ec'  # Consumer Secret
AT = '982311862061576192-DXKZvi1rd5mH9ovRbEX1A66dLUJV7eO'  # Access Token
AS = '40WHp4iPnRtwGN5StqsQrLdIbAMsbVu1WY2QXHSjeEwrJ'  # Access Token Secret
Esempio n. 57
0
FMT_VALTESTPRED_PATH = MODEL_DIR + "/{}_eval_pred.h5"
FMT_VALTESTPOLY_PATH = MODEL_DIR + "/{}_eval_poly.csv"
FMT_VALTESTTRUTH_PATH = MODEL_DIR + "/{}_eval_poly_truth.csv"
FMT_VALTESTPOLY_OVALL_PATH = MODEL_DIR + "/eval_poly.csv"
FMT_VALTESTTRUTH_OVALL_PATH = MODEL_DIR + "/eval_poly_truth.csv"
FMT_VALMODEL_EVALTHHIST = MODEL_DIR + "/{}_val_evalhist_th.csv"

# ---------------------------------------------------------
# Prediction & polygon result
FMT_TESTPOLY_PATH = MODEL_DIR + "/{}_poly.csv"
FN_SOLUTION_CSV = "/data/{}.csv".format(MODEL_NAME)

# Logger
warnings.simplefilter("ignore", UserWarning)
warnings.simplefilter("ignore", FutureWarning)
handler = StreamHandler()
handler.setLevel(INFO)
handler.setFormatter(Formatter(LOGFORMAT))
fh_handler = FileHandler(".{}.log".format(MODEL_NAME))
fh_handler.setFormatter(Formatter(LOGFORMAT))
logger = getLogger('spacenet2')
logger.setLevel(INFO)

if __name__ == '__main__':
    logger.addHandler(handler)
    logger.addHandler(fh_handler)
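    # Handlers are attached only under __main__ so that importing this module
    # does not add duplicate log output.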


def directory_name_to_area_id(datapath):
    """
    Directory name to AOI number
Esempio n. 58
0
import json
import glob
import os
from logging import getLogger, StreamHandler, Formatter, DEBUG, INFO

logger = getLogger(__name__)
logger.setLevel(DEBUG)
handler = StreamHandler()
formatter = Formatter('{} - %(levelname)s - %(message)s'.format(__file__))
handler.setFormatter(formatter)
handler.setLevel(INFO)
logger.addHandler(handler)

# should include '/' at the end.
GITHUB_LINK = 'OpenJij/OpenJijTutorial/blob/master/'


def add_google_colab_link(nb_name, github_link, output_nb):
    with open(nb_name, "r", encoding='utf-8') as f:
        nb_json = json.load(f)

    # Check if the second cell has a Colab link
    def check_colab_link(cell):
        if cell['cell_type'] != 'markdown':
            return False
        first_line = cell['source'][0]
        return ('[![Open in Colab]' in first_line
                or '<a href="https://colab' in first_line)
Esempio n. 59
0
            level = "debug"
        elif record.levelno == WARNING:
            level = "warning"
        elif record.levelno == ERROR:
            level = "error"
        trans = str.maketrans({"%": "%25", "\n": "%0A", "\r": "%0D"})
        return f"::{level} ::{message.translate(trans)}"

    def _fmt_fallback(self, record: LogRecord) -> str:
        return Formatter.format(self, record)

    def format(self, record: LogRecord) -> str:
        if self._env == "actions":
            return self._fmt_actions(record)
        else:
            return self._fmt_fallback(record)


logger = getLogger("tagbot")
_ENV = "local"
if os.getenv("AWS_LAMBDA_FUNCTION_NAME"):
    _ENV = "lambda"
elif os.getenv("GITHUB_ACTIONS") == "true":
    _ENV = "actions"
log_handler = StreamHandler()
logger.addHandler(log_handler)
log_handler.setFormatter(LogFormatter(_ENV))
if _ENV != "local":
    log_handler.setStream(sys.stdout)
    logger.setLevel(DEBUG)
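A small usage sketch (constructing the formatter with the env string assumes the truncated __init__ stores it as self._env; the record below is illustrative): in the actions environment the formatter emits GitHub Actions workflow commands, percent-encoding %, CR, and LF so a multi-line message survives as a single ::level line.

from logging import ERROR, LogRecord

fmt = LogFormatter("actions")  # hypothetical: assumes __init__(self, env)
record = LogRecord("tagbot", ERROR, __file__, 0, "line one\nline two", None, None)
print(fmt.format(record))  # expected: ::error ::line one%0Aline two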
Esempio n. 60
0
import os
import logging
from logging import getLogger, StreamHandler, DEBUG
from datetime import datetime
from collections import OrderedDict
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, as_completed
#
import twitter
# pylint: disable=E0401
from serializer import Serializer
from fileutils import FileUtils
from events import SimpleEvent
import download
import ranking
# pylint: enable=E0401
# pylint: disable=C0103
# console output
logger = getLogger('myapp.tweetbot')
handler = StreamHandler()
handler.setLevel(DEBUG)
handler.setFormatter(logging.Formatter('%(threadName)s:%(message)s'))
logger.setLevel(DEBUG)
logger.addHandler(handler)


class TweetBot(object):
    """
        tweetbot main code.
    """
    def __init__(self, config):
        self.api = None
        self.dtnow = datetime.now()
        self.fillspace = 0
        self.isTweet = True