    def start(self):
        from logging import getLogger, FileHandler, Formatter, DEBUG
        self.log = getLogger(self.name)
        self.log.setLevel(DEBUG)

        formatter = Formatter(
            '%(asctime)s [%(process)d:%(threadName)s] %(levelname)-8s %(name)s:  %(message)s')
        # Unique log handler (single file)
        handler = FileHandler(self.uniquefile, "w")
        handler.setLevel(DEBUG)
        handler.setFormatter(formatter)
        self.log.addHandler(handler)

        # If you suspect that the diff stuff isn't working, uncomment the next
        # line.  You should see this show up once per process.
        # self.log.info("Here is a line that should only be in the first output.")

        # Setup output used for testing
        handler = self.getLogHandler(self.sharedfile)
        handler.setLevel(DEBUG)
        handler.setFormatter(formatter)
        self.log.addHandler(handler)

        # If this ever becomes a real "Thread", then remove this line:
        self.run()
Example #2
def addHandler(handler=None, stream=None, filename=None, filemode='a',
               format=None, datefmt=None, level=None, max_level=None,
               filters=(), logger=None):
    """stream, filename, filemode, format, datefmt: as per logging.basicConfig

       handler: use a precreated handler instead of creating a new one
       logging: logging to add the handler to (uses root logging if none specified)
       filters: an iterable of filters to add to the handler
       level: only messages of this level and above will be processed
       max_level: only messages of this level and below will be processed
    """
    # Create the handler if one hasn't been passed in
    if handler is None:
        if filename is not None:
            handler = FileHandler(filename, filemode)
        else:
            handler = StreamHandler(stream)
    # Set up the formatting of the log messages
    # New API, so it can default to str.format instead of %-formatting
    formatter = Formatter(format, datefmt)
    handler.setFormatter(formatter)
    # Set up filtering of which messages to handle
    if level is not None:
        handler.setLevel(level)
    if max_level is not None:
        handler.addFilter(LowPassFilter(max_level))
    for log_filter in filters:
        handler.addFilter(log_filter)
    # Add the fully configured handler to the specified logger
    if logger is None:
        logger = getLogger()
    logger.addHandler(handler)
    return handler
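
The LowPassFilter referenced above is not defined in this snippet. Below is a minimal sketch of what it presumably looks like (a logging.Filter that drops records above a maximum level), together with a typical split-stream use of addHandler; the class body is an assumption, not the original implementation.

import logging
import sys

class LowPassFilter(logging.Filter):
    """Hypothetical sketch: keep only records at or below max_level."""
    def __init__(self, max_level):
        super().__init__()
        self.max_level = max_level

    def filter(self, record):
        # True keeps the record, False drops it.
        return record.levelno <= self.max_level

# Typical split: INFO and below to stdout, WARNING and above to stderr.
addHandler(stream=sys.stdout, level=logging.INFO, max_level=logging.INFO)
addHandler(stream=sys.stderr, level=logging.WARNING)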
Example #3
	def setup_logger(self, fname):
		self.logger = logging.getLogger()
		self.logger.setLevel(logging.INFO)
		fhandler = FileHandler(filename=fname)
		formatter = logging.Formatter('%(asctime)-15s %(message)s')
		fhandler.setFormatter(formatter)
		self.logger.addHandler(fhandler)
Example #4
def get_logger(log_name='default.log', log_level=DEBUG):
    formatter = Formatter('[%(asctime)s][%(process)d][%(filename)s:%(lineno)s][%(levelname)s]: %(message)s')
    logger = getLogger('%s.%s' % (log_name.replace('/', '.'), time.time()))
    logger.handlers = []
    logger.setLevel(DEBUG)
    logger.propagate = False

    console = StreamHandler()
    console.setFormatter(formatter)
    console.setLevel(WARN)
    logger.addHandler(console)

    logfile = os.path.dirname(os.path.abspath(sys.argv[0]))
    logfile = os.path.join(logfile, log_name)
    logfiledebug = FileHandler(filename = logfile, mode='a')
    logfiledebug.setFormatter(formatter)
    logfiledebug.setLevel(log_level)
    logger.addHandler(logfiledebug)

    def _logger_die(logger, msg):
        logger.error(msg)
        raise AssertionError(msg)

    logger.die = lambda msg: _logger_die(logger, msg)

    return logger
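
A brief usage sketch for the logger above; the .die helper logs at ERROR and then raises, so callers can treat fatal conditions as exceptions (file name is illustrative):

logger = get_logger('worker.log')
logger.warning('low disk space')        # goes to console (WARN+) and file

try:
    logger.die('unrecoverable state')   # logs the error, then raises
except AssertionError:
    pass                                # handle or re-raise as appropriate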
Example #5
def configure_loggers(log, verbosity, log_file, log_verbosity):
    LOGFMT_CONSOLE = (
        "[%(asctime)s] %(name)-10s %(levelname)-7s in %(module)s.%(funcName)s()," " line %(lineno)d\n\t%(message)s"
    )

    LOGFMT_FILE = (
        "[%(asctime)s] [%(process)d]%(name)-10s %(levelname)-7s in %(module)s.%(funcName)s(),"
        " line %(lineno)d\n\t%(message)s"
    )

    # Configure root logger to log to stdout
    logging.basicConfig(level=verbosity, datefmt="%H:%M:%S", format=LOGFMT_CONSOLE)

    # Configure main logger to log to a rotating file. (Creating both a
    # RotatingFileHandler and a FileHandler on the same path would open the
    # file twice and truncate it, so a single guarded handler is used.)
    if log_file:
        fh = RotatingFileHandler(log_file, maxBytes=100000, backupCount=25)
        fh.setFormatter(Formatter(LOGFMT_FILE, "%Y-%m-%d %H:%M:%S"))
        fh.setLevel(log_verbosity)
        log.addHandler(fh)

    return log
Example #6
 def __init__(self, name, level=0):
     Log.__init__(self, name, level)
     del self.handlers[:]
     handler = FileHandler(join(Path.config_dir, "umitweb.log"))
     handler.setFormatter(self.formatter)
     
     self.addHandler(handler)
Example #7
    def register_local_log(self, path, level=None, purge_buffer=True):
        """The shinken logging wrapper can write to a local file if needed
        and return the file descriptor so we can avoid to
        close it.

        Add logging to a local log-file.

        The file will be rotated once a day
        """
        self.log_set = True
        # Todo : Create a config var for backup count
        if os.path.exists(path) and not stat.S_ISREG(os.stat(path).st_mode):
            # We don't have a regular file here. Rotate may fail
            # It can be one of the stat.S_IS* (FIFO? CHR?)
            handler = FileHandler(path)
        else:
            handler = TimedRotatingFileHandler(path, 'midnight', backupCount=5)
        if level is not None:
            handler.setLevel(level)
        if self.name is not None:
            handler.setFormatter(defaultFormatter_named)
        else:
            handler.setFormatter(defaultFormatter)
        self.addHandler(handler)

        # Ok now unstack all previous logs
        if purge_buffer:
            self._destack()

        # Todo : Do we need this now we use logging?
        return handler.stream.fileno()
Example #8
def setup_logging(verbose_level: int = 0, filename: str = None):
    level = None
    if verbose_level == -1:
        level = logging.CRITICAL
    elif verbose_level == 0:
        level = logging.INFO
    elif verbose_level >= 1:
        level = logging.DEBUG

    formatter = logging.Formatter(fmt="%(asctime)-10s%(message)s", datefmt="%H:%M:%S")

    stdout_handler = ColorizingStreamHandler()
    stdout_handler.setFormatter(formatter)
    stdout_handler.stream = sys.stdout

    if not filename:
        filename = "last.log"
    file_handler = FileHandler(filename=filename, mode="w")
    file_handler.setFormatter(formatter)

    root = logging.getLogger()
    root.addHandler(stdout_handler)
    root.addHandler(file_handler)

    root.setLevel(level)

    return logging.getLogger(__name__)
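
ColorizingStreamHandler is not shown in this snippet; below is a minimal sketch of one plausible implementation, wrapping each formatted record in an ANSI color code per level. The class body is an assumption, not the original code.

import logging

class ColorizingStreamHandler(logging.StreamHandler):
    """Hypothetical sketch: colorize records by level using ANSI escapes."""
    COLORS = {
        logging.DEBUG: '\x1b[36m',     # cyan
        logging.INFO: '\x1b[32m',      # green
        logging.WARNING: '\x1b[33m',   # yellow
        logging.ERROR: '\x1b[31m',     # red
        logging.CRITICAL: '\x1b[41m',  # red background
    }
    RESET = '\x1b[0m'

    def format(self, record):
        text = super().format(record)
        color = self.COLORS.get(record.levelno, '')
        return color + text + self.RESET if color else text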
Example #9
def get_task_logger(worker, task, subtask=None, workunit=None):
    """
    Initializes a logger for tasks and subtasks.  Logs for tasks are stored
    in separate files and aggregated.  This allows workunits to be viewed in a
    single log.  Otherwise a combined log could contain messages from many
    different workunits, making it much harder to grok.

    @param worker: there may be more than one Worker per Node.  Logs are
                      stored per worker.
    @param task: ID of the task instance.  Each task instance receives 
                             its own log.
    @param subtask: (optional) subtask_key.  see workunit_id
    @param workunit: (optional) ID of workunit.  workunits receive their
                         own log file so that the log can be read separately.
                         This is separate from the task instance log.
    """
    directory, filename = task_log_path(task, subtask, workunit, worker)
    makedirs(directory)

    logger_name = 'task.%s' % task

    if workunit:
        logger_name += '.%s' % workunit

    logger = logging.getLogger(logger_name)
    handler = FileHandler(filename)
    
    formatter = logging.Formatter(LOG_FORMAT % ("[%s]" % worker))
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(settings.LOG_LEVEL)

    return logger
Example #10
def init_app_logger(app):
    file_handler = FileHandler('flask.log')
    file_handler.setFormatter(Formatter(
        '%(asctime)s|%(levelname)s|%(pathname)s:%(lineno)d|%(message)s'
    ))
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
Example #11
class CaptureLog:
    """Context to capture log from a specific logger and write it to a file

    Parameters
    ----------
    filename : str
        Where to write the log file.
    mode : str
        Mode for opening the log file (default 'w').
    logger : str
        Name of the logger from which to capture (default 'mne').
    level : str
        Level at which to capture (default 'debug').
    """
    def __init__(self, filename, mode='w', logger='mne', level='debug'):
        self.logger = logger
        self.level = log_level(level)
        self.handler = FileHandler(filename, mode)
        self.handler.setLevel(self.level)
        self.handler.setFormatter(Formatter("%(levelname)-8s :%(message)s"))
        self._old_level = None

    def __enter__(self):
        logger = getLogger(self.logger)
        logger.addHandler(self.handler)
        if logger.level == 0 or logger.level > self.level:
            self._old_level = logger.level
            logger.setLevel(self.level)
        return logger

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.handler.close()
        logger = getLogger(self.logger)
        logger.removeHandler(self.handler)
        if self._old_level is not None:
            logger.setLevel(self._old_level)
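
A usage sketch for the context manager above, assuming the log_level() helper it calls maps 'debug' to logging.DEBUG (file name and message are illustrative):

with CaptureLog('mne_session.log', mode='w', logger='mne') as logger:
    logger.info('this line is captured to mne_session.log')
# On exit the handler is closed, removed, and the old level restored.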
Example #12
 def __init__(self, job, level):
     
     self.job = job
     self.level = level
     
     # Create queue through which log records can be sent from various
     # processes and threads to the logging thread.
     self.queue = Queue()
     
     formatter = Formatter('%(asctime)s %(levelname)-8s %(message)s')
     
     # Create handler that writes log messages to the job log file.
     os_utils.create_parent_directory(job.log_file_path)
     file_handler = FileHandler(job.log_file_path, 'w')
     file_handler.setFormatter(formatter)
     
     # Create handler that writes log messages to stderr.
     stderr_handler = StreamHandler()
     stderr_handler.setFormatter(formatter)
     
     self._record_counts_handler = _RecordCountsHandler()
     
     # Create logging listener that will run on its own thread and log
     # messages sent to it via the queue.
     self._listener = QueueListener(
         self.queue, file_handler, stderr_handler,
         self._record_counts_handler)
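
The _RecordCountsHandler above is referenced but not shown; a plausible minimal sketch, assuming it only tallies how many records of each level passed through the listener:

import logging

class _RecordCountsHandler(logging.Handler):
    """Hypothetical sketch: count handled records per level name."""
    def __init__(self):
        super().__init__()
        self.record_counts = {}

    def emit(self, record):
        name = record.levelname
        self.record_counts[name] = self.record_counts.get(name, 0) + 1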
Example #13
def init_webapp(app, db):
    print 'Initializing PRAC webapp...'

    pracApp.app = app
    # use html templates from prac app
    prac_loader = jinja2.ChoiceLoader([
        pracApp.app.jinja_loader,
        jinja2.FileSystemLoader(['/opt/practools/tools/prac/pracweb/gui/templates']),
    ])
    pracApp.app.jinja_loader = prac_loader
    pracApp.app.config['PRAC_STATIC_PATH'] = '/opt/practools/tools/prac/pracweb/gui/build'

    # settings for fileuploads and logging
    pracApp.app.config['ALLOWED_EXTENSIONS'] = {'mln', 'db', 'pracmln', 'emln'}
    pracApp.app.config['UPLOAD_FOLDER'] = '/home/ros/pracfiles'
    pracApp.app.config['PRAC_ROOT_PATH'] = '/opt/practools/tools/prac'
    pracApp.app.config['LOG_FOLDER'] = os.path.join('/home/ros/pracfiles/prac', 'log')

    if not os.path.exists(pracApp.app.config['LOG_FOLDER']):
        os.mkdir(pracApp.app.config['LOG_FOLDER'])

    # separate logger for user statistics
    root_logger = logging.getLogger('userstats')
    handler = FileHandler(os.path.join(pracApp.app.config['LOG_FOLDER'], "userstats.json"))
    formatter = logging.Formatter("%(message)s,")
    handler.setFormatter(formatter)
    root_logger.addHandler(handler)

    print 'Registering PRAC routes...'
    from pracweb.gui.pages import inference
    from pracweb.gui.pages import views
    from pracweb.gui.pages import utils
Example #14
def create_app(config_name):
    app.config.from_object(config[config_name])
    db.init_app(app)
    login_manager.init_app(app)
    login_manager.session_protection = 'strong'
    login_manager.login_view = 'admin.login'

    if not app.debug:
        import logging
        from logging import FileHandler, Formatter

        file_handler = FileHandler(Constant.LOG_DIR, encoding='utf8')
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(Formatter(
            '[%(asctime)s] %(levelname)s: %(message)s '
            '[in %(pathname)s:%(lineno)d]'))
        app.logger.addHandler(file_handler)

    from main import main as main_blueprint
    app.register_blueprint(main_blueprint)

    from admin import admin as admin_blueprint
    app.register_blueprint(admin_blueprint, url_prefix='/admin')

    patch_request_class(app, size=16*1024*1024) # 16MB
    configure_uploads(app, resource_uploader)

    return app
Example #15
def init_logging():
    # Disable the default "last resort" stderr output by giving the root
    # logger a NullHandler (addHandler returns None, so nothing to assign)
    getLogger().addHandler(logging.NullHandler())

    # Get the loggers used in pytelemetry.telemetry.telemetry file
    rx = getLogger("telemetry.rx")
    tx = getLogger("telemetry.tx")
    rx.setLevel(logging.DEBUG)
    tx.setLevel(logging.DEBUG)

    # Define how log records will be formatted
    formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s')

    # Create a handler to save logging output to a file
    dateTag = datetime.datetime.now().strftime("%Y-%b-%d_%H-%M-%S")
    in_handler = FileHandler('in-%s.log' % dateTag)
    in_handler.setLevel(logging.DEBUG) # Also pass all messages
    in_handler.setFormatter(formatter)

    out_handler = FileHandler('out-%s.log' % dateTag)
    out_handler.setLevel(logging.DEBUG) # Also pass all messages
    out_handler.setFormatter(formatter)

    # Attach the logger to the handler
    rx.addHandler(in_handler)
    tx.addHandler(out_handler)
Example #16
def setup_logging():
    global log

    progname = basename(argv[0])
    log = getLogger()
    log.setLevel(DEBUG)

    handlers = []
    buildlog_handler = FileHandler(getenv("HOME") + "/build.log")
    buildlog_handler.setFormatter(
        Log8601Formatter("%(asctime)s " + progname + " %(levelname)s " +
                         "%(filename)s:%(lineno)s: %(message)s"))
    handlers.append(buildlog_handler)

    stderr_handler = StreamHandler(stderr)
    stderr_handler.setFormatter(
        Log8601Formatter("%(asctime)s %(name)s %(levelname)s " +
                         "%(filename)s:%(lineno)s: %(message)s"))
    handlers.append(stderr_handler)
    
    if exists("/dev/log"):
        syslog_handler = SysLogHandler(
            address="/dev/log", facility=LOG_LOCAL1)
        syslog_handler.setFormatter(
            Log8601Formatter(progname +
                             " %(asctime)s %(levelname)s: %(message)s"))
        handlers.append(syslog_handler)


    log.addHandler(MultiHandler(handlers))

    getLogger("boto").setLevel(INFO)
    getLogger("boto3").setLevel(INFO)
    getLogger("botocore").setLevel(INFO)
    return
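
Neither MultiHandler nor Log8601Formatter is a standard library class (Log8601Formatter presumably formats asctime as ISO 8601). Below is a minimal sketch of the fan-out handler, assuming it just dispatches each record to a list of sub-handlers:

import logging

class MultiHandler(logging.Handler):
    """Hypothetical sketch: forward every record to several sub-handlers."""
    def __init__(self, handlers):
        super().__init__()
        self.subhandlers = list(handlers)

    def emit(self, record):
        for handler in self.subhandlers:
            if record.levelno >= handler.level:
                handler.handle(record)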
Example #17
def _initialize_logging(config):
    """
    Configure logging.

    Two loggers are established: ``tiddlyweb`` and ``tiddlywebplugins``.
    Modules which wish to log should use ``logging.getLogger(__name__)``
    to get a logger in the right part of the logging hierarchy.
    """
    logger = logging.getLogger('tiddlyweb')
    logger.propagate = False
    logger.setLevel(config['log_level'])

    plugin_logger = logging.getLogger('tiddlywebplugins')
    plugin_logger.propagate = False
    plugin_logger.setLevel(config['log_level'])

    from logging import FileHandler
    file_handler = FileHandler(
            filename=os.path.join(config['root_dir'], config['log_file']))
    formatter = logging.Formatter(
            '%(asctime)s %(levelname)s %(name)s: %(message)s')
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    plugin_logger.addHandler(file_handler)

    logger.debug('TiddlyWeb starting up as %s', sys.argv[0])
Example #18
def get_log(name):
    logger = getLogger(name)
    logger.setLevel(ERROR)
    file_handler = FileHandler('var/logs/stream.log')
    file_handler.setFormatter(Formatter("%(asctime)-15s %(name)s [%(levelname)s] - %(message)s"))
    logger.addHandler(file_handler)
    return logger
Example #19
    def __setup(self, name, logFolder):
        """
        Configure the Python logging system for our needs.

        Positional arguments:
        name -- name of root python logger which will be
        used as root for our logger object
        logFolder -- path to folder for logs
        """
        self.__rootLogger = getLogger(name)
        # Set level to INFO to enable handling of
        # all messages with level up to info
        self.__rootLogger.setLevel(INFO)
        # Clear any handlers this logger already may have
        for handler in self.__rootLogger.handlers:
            self.__rootLogger.removeHandler(handler)
        # Define log storage options
        logPath = os.path.join(logFolder, '{}.log'.format(name))
        os.makedirs(os.path.dirname(logPath), mode=0o755, exist_ok=True)
        handler = FileHandler(logPath, mode='a', encoding='utf-8', delay=False)
        # Set up formatter options
        msgFormat = '{asctime:19.19} | {levelname:7.7} | {name:23.23} | {message}'
        timeFormat = '%Y-%m-%d %H:%M:%S'  # datefmt must stay in old %-style, even with style='{' (as of Python 3.2)
        formatter = Formatter(fmt=msgFormat, datefmt=timeFormat, style='{')
        handler.setFormatter(formatter)
        self.__rootLogger.addHandler(handler)
Example #20
def setup_logging():
    log_formatter = Formatter(
        '''{"message_type":"%(levelname)s","location":"%(pathname)s","line_number":%(lineno)d,"module":"%(module)s","function":"%(funcName)s","time":"%(asctime)s","message":"%(message)s"}''')  # pylint: disable=C0301
    fh = FileHandler('flask_logs.log')
    fh.setLevel(INFO)
    fh.setFormatter(log_formatter)
    application.logger.addHandler(fh)
    application.logger.setLevel(INFO)
    if not application.debug:
        from application.models import Administrator
        dbadmins = Administrator.query.all()
        if dbadmins:  # query.all() returns a list, never None
            emails = [dbadmin.email for dbadmin in dbadmins]
            emailErrorHandler = TlsSMTPHandler(
                ('smtp.gmail.com', 587),
                '*****@*****.**',
                emails,
                'Server Error',
                ('*****@*****.**', 'ANRISNTPTV')
            )
            emailErrorHandler.setFormatter(Formatter(
                '''
    Message type:       %(levelname)s
    Location:           %(pathname)s:%(lineno)d
    Module:             %(module)s
    Function:           %(funcName)s
    Time:               %(asctime)s

    Message:

    %(message)s
    '''))
            application.logger.addHandler(emailErrorHandler)
Example #21
    def create_logger(self, debug, log_file):
        logger = logging.getLogger("ensime-{}".format(self.window))
        file_log_formatter = logging.Formatter(LOG_FORMAT)
        console_log_formatter = logging.Formatter(CONSOLE_LOG_FORMAT)

        logger.handlers.clear()
        with open(log_file, "w") as f:
            now = datetime.datetime.now()
            tm = now.strftime("%Y-%m-%d %H:%M:%S.%f")
            f.write("{}: {} - {}\n".format(tm, "Initializing project", self.project_root))
        file_handler = FileHandler(log_file)
        file_handler.setFormatter(file_log_formatter)
        logger.addHandler(file_handler)

        console_handler = logging.StreamHandler()
        console_handler.setFormatter(console_log_formatter)
        logger.addHandler(console_handler)

        if debug:
            logger.setLevel(logging.DEBUG)
        else:
            logger.setLevel(logging.INFO)

        logger.info("Logger initialised.")
        return logger
Example #22
 def __init__(self, logger_name=__name__, make_unique=True, loglevel=INFO, 
              filename=None, verbose=False, 
              stdout_format='%(created)f,%(message)s',
              tofile_format='%(created)f,%(message)s'):
     """
     :Parameters:
      - `logger_name`: name for logger
      - `make_unique`: whether to append a unique timestamp to the name
      - `loglevel`: which messages to log
      - `filename`: If this is set, log messages to a file
      - `verbose`: If this is True, log messages to stdout
      - `stdout_format`: string format for stdout messages
      - `tofile_format`: string format for messages in log file
     """
     if make_unique:
         logger_name = logger_name + str(time.time())
     self.logger = getLogger(logger_name)
     self.logger.setLevel(loglevel)
     if filename:
         handler = FileHandler(filename)
         handler.setFormatter(Formatter(fmt=tofile_format))
         self.logger.addHandler(handler)
     if verbose:
         handler = StreamHandler(sys.stdout)
         handler.setFormatter(Formatter(fmt=stdout_format))
         self.logger.addHandler(handler)
Example #23
def setup_logger(app_name):
    """ Instantiate a logger object

        Usage:
            logger = setup_logger('foo')     # saved as foo.log
            logger.info("Some info message")
            logger.warn("Some warning message")
            logger.error("Some error message")
            ... [for more options see: http://docs.python.org/2/library/logging.html]
    """
    logger = getLogger(app_name)
    logger.setLevel(DEBUG)
    # create file handler which logs even debug messages
    fh = FileHandler(app_name + '.log')
    fh.setLevel(DEBUG)
    # create console handler with a higher log level
    ch = StreamHandler()
    ch.setLevel(ERROR)
    # create formatter and add it to the handlers
    formatter = Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)
    return logger
Example #24
def add_disk_handler(prefix, level=logging.NOTSET):
    """
    Enable typical logging to disk.
    """

    # generate an unused log file path
    from os.path import lexists
    from itertools import count

    for i in count():
        path = "%s.%i" % (prefix, i)

        if not lexists(path):
            break

    # build a handler
    from cargo.temporal import utc_now

    handler = FileHandler(path, encoding="utf-8")

    handler.setFormatter(VerboseFileFormatter())
    handler.setLevel(level)

    # add it
    logging.root.addHandler(handler)

    log.debug("added log handler for file %s at %s", path, utc_now())

    return handler
Example #25
def __setup_logging(app):
    log_file_path = ""
    log_dir_path = ""
    log_level = app.config.get('LOG_LEVEL', logging.INFO)
    
    if os.path.isabs(app.config['LOG_DIR']):
        log_dir_path = app.config['LOG_DIR']
    else:
        here = os.path.dirname(os.path.abspath(__file__))
        log_dir_path = os.path.join(
            os.path.dirname(here), app.config['LOG_DIR'])
    # Join with os.path.join so a missing trailing slash cannot break the path
    log_file_path = os.path.join(log_dir_path, app.config['LOG_FILE'])
    
    if not os.path.isdir(log_dir_path):
        os.makedirs(log_dir_path, mode=app.config['LOG_FILE_MODE'])    
    
    if not os.path.isfile(log_file_path):
        open(log_file_path, 'a').close()
    
    log_file_handler = FileHandler(filename=log_file_path, encoding='utf-8')
    log_file_handler.setLevel(log_level)
    log_file_handler.setFormatter(Formatter(
        '[%(asctime)s] [%(levelname)s] %(message)s %(module)s:%(funcName)s:%(lineno)d'
    ))
    
    app.logger.addHandler(log_file_handler)
    app.logger.setLevel(log_level)
Example #26
    def __setup_logger(self, name, log_path):
        """
        Configure the Python logging system for our needs.

        Required arguments:
        name -- name of root python logger which will be
        used as root for our logger object
        log_path -- path to file into which log will be written
        """
        self.__root_logger = getLogger(name)
        # Set level to INFO to enable handling of
        # all messages with level up to info
        self.__root_logger.setLevel(INFO)
        # Clear any handlers this logger already may have
        for handler in self.__root_logger.handlers:
            self.__root_logger.removeHandler(handler)
        # Create folder for logger if it doesn't exist yet
        log_folder = os.path.dirname(log_path)
        if os.path.isdir(log_folder) is not True:
            os.makedirs(log_folder, mode=0o755)
        handler = FileHandler(log_path, mode='a', encoding='utf-8', delay=False)
        # Set up formatter options
        msg_format = '{asctime:19.19} | {levelname:7.7} | {name:23.23} | {message}'
        time_format = '%Y-%m-%d %H:%M:%S'  # datefmt must stay in old %-style, even with style='{' (as of Python 3.2)
        formatter = Formatter(fmt=msg_format, datefmt=time_format, style='{')
        handler.setFormatter(formatter)
        self.__root_logger.addHandler(handler)
Example #27
class Logging(logging.FileHandler):

    def __init__(self, user_connect_=None):

        self.user_connect = user_connect_
        self.application = app

    def create_login_info(self, user, time, url):
        self.logging = FileHandler('icollect_info.log')
        self.logging.setLevel(logging.DEBUG)
        self.logging.setFormatter(Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
        self.application.logger.addHandler(self.logging)
        create_dict_to_loggin_info = dict({'user_connect':user,'time':time,'url':url})
        self.application.logger.info('Info LogIn' + ":" + str(create_dict_to_loggin_info))

    def create_logout_info(self, user, time):
        self.logging = FileHandler('icollect_info.log')
        self.logging.setLevel(logging.DEBUG)
        self.logging.setFormatter(Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
        self.application.logger.addHandler(self.logging)
        create_dict_to_loggin_info = dict({'user_connect':user,'time':time})
        self.application.logger.info('Info Logout' + ":" + str(create_dict_to_loggin_info))
Example #28
        def setHandler(logger, lvl, path, _format):
            """
            Set right handler related to input lvl, path and format.

            :param Logger logger: logger on which add an handler.
            :param str lvl: logging level.
            :param str path: file path.
            :param str _format: logging message format.
            """

            class _Filter(Filter):
                """Ensure message will be given for specific lvl"""

                def filter(self, record):
                    return record.levelname == lvl

            # get the right formatter and filter to set on a file handler
            handler = FileHandler(path)
            handler.addFilter(_Filter())
            handler.setLevel(lvl)
            formatter = Formatter(_format)
            handler.setFormatter(formatter)

            # if an old handler exists, remove it from the logger
            if hasattr(logger, lvl):
                old_handler = getattr(logger, lvl)
                logger.removeHandler(old_handler)

            logger.addHandler(handler)
            setattr(logger, lvl, handler)
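
If setHandler were exposed at module level, usage would look like the sketch below: one file per level, each file receiving only records of exactly that level, since the filter compares record.levelname to lvl (paths are illustrative):

from logging import getLogger

logger = getLogger('app')
setHandler(logger, 'INFO', '/tmp/app_info.log', '%(asctime)s %(message)s')
setHandler(logger, 'ERROR', '/tmp/app_error.log', '%(asctime)s %(message)s')
logger.error('only lands in app_error.log')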
Example #29
def init_app(app, name=''):
    """
    Configures the provided app's logger.

    :param app: the application object to configure the logger
    :param name: the name of the logger to create and configure
    """

    # The flask app object automatically registers its own debug logger if
    # app.debug is True. Remove it because debug logging is handled here
    # instead.
    del app.logger.handlers[:]

    log_path = app.config['LOG_PATH']
    log_level = app.config['LOG_LEVEL'] or ''
    log_filter = app.config['LOG_FILTER']
    log_ignore = app.config['LOG_IGNORE']

    handler = FileHandler(log_path) if log_path else StreamHandler()
    handler.setLevel(log_level.upper() or ('DEBUG' if app.debug else 'WARNING'))  # noqa
    handler.addFilter(MultiNameFilter(log_filter, log_ignore))
    handler.setFormatter(Formatter(
        '%(asctime)s %(process)s %(thread)-15s %(name)-10s %(levelname)-8s %(message)s',  # noqa
        '%H:%M:%S' if app.debug else '%Y-%m-%d %H:%M:%S%z'))

    logger = getLogger(name)
    logger.setLevel(handler.level)
    logger.addHandler(handler)
Example #30
def log_server(level, queue, filename, mode='w'):
    """Run the logging server.

    This listens to the queue of log messages, and handles them using Python's
    logging handlers.  It prints to stderr, as well as to a specified file, if
    it is given.

    """
    formatter = _get_formatter()
    handlers = []

    sh = StreamHandler()
    sh.setFormatter(formatter)
    sh.setLevel(level)
    handlers.append(sh)

    if filename:
        fh = FileHandler(filename, mode)
        fh.setFormatter(formatter)
        fh.setLevel(level)
        handlers.append(fh)

    listener = QueueListener(queue, *handlers)
    listener.start()

    # The QueueListener runs on its own thread, so we just "busy wait"
    # here until terminated.
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    finally:
        listener.stop()
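
On the producer side, each worker process would typically attach a standard logging.handlers.QueueHandler pointed at the same queue; below is a sketch of wiring workers into the server above (all names other than log_server are illustrative):

import logging
import multiprocessing
from logging.handlers import QueueHandler

def init_worker_logging(queue):
    # Route this process's records into the shared queue; the log_server
    # process does the actual formatting and I/O.
    root = logging.getLogger()
    root.addHandler(QueueHandler(queue))
    root.setLevel(logging.DEBUG)

if __name__ == '__main__':
    queue = multiprocessing.Queue()
    server = multiprocessing.Process(
        target=log_server, args=(logging.INFO, queue, 'combined.log'))
    server.start()
    pool = multiprocessing.Pool(2, initializer=init_worker_logging,
                                initargs=(queue,))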
Example #31
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name",
                        type=str,
                        help="bert-name used for biencoder")
    parser.add_argument("--model_path", type=str, help="model save path")
    parser.add_argument("--index_path", type=str, help="model save path")
    parser.add_argument("--load_index",
                        action="store_true",
                        help="model save path")
    parser.add_argument("--mention_dataset",
                        type=str,
                        help="mention dataset path")
    parser.add_argument("--category", type=str, help="mention dataset path")
    parser.add_argument("--candidate_dataset",
                        type=str,
                        help="candidate dataset path")
    parser.add_argument("--candidate_preprocessed",
                        action="store_true",
                        help="whether candidate_dataset is preprocessed")
    parser.add_argument("--builder_gpu",
                        action="store_true",
                        help="bert-name used for biencoder")
    parser.add_argument("--max_ctxt_len",
                        type=int,
                        help="maximum context length")
    parser.add_argument("--max_title_len",
                        type=int,
                        help="maximum title length")
    parser.add_argument("--max_desc_len",
                        type=int,
                        help="maximum description length")
    parser.add_argument("--mlflow",
                        action="store_true",
                        help="whether using inbatch negative")
    parser.add_argument("--parallel",
                        action="store_true",
                        help="whether using inbatch negative")
    parser.add_argument("--fp16",
                        action="store_true",
                        help="whether using inbatch negative")
    parser.add_argument('--fp16_opt_level', type=str, default="O1")
    parser.add_argument("--logging",
                        action="store_true",
                        help="whether using inbatch negative")
    parser.add_argument("--log_file",
                        type=str,
                        help="whether using inbatch negative")

    args = parser.parse_args()

    if args.mlflow:
        mlflow.start_run()
        arg_dict = vars(args)
        for key, value in arg_dict.items():
            mlflow.log_param(key, value)

    logger = None

    if args.logging:
        logger = getLogger(__name__)
        #handler = StreamHandler()

        logger.setLevel(DEBUG)
        #handler.setLevel(DEBUG)
        formatter = Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        #handler.setFormatter(formatter)
        #logger.addHandler(handler)

        if args.log_file:
            fh = FileHandler(filename=args.log_file)
            fh.setLevel(DEBUG)
            fh.setFormatter(formatter)
            logger.addHandler(fh)

    return args, logger
Example #32

@app.errorhandler(404)
def not_found_error(error):
    return render_template('errors/404.html'), 404


@app.errorhandler(500)
def server_error(error):
    return render_template('errors/500.html'), 500


if not app.debug:
    file_handler = FileHandler('error.log')
    file_handler.setFormatter(
        Formatter(
            '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
        ))
    app.logger.setLevel(logging.INFO)
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.info('errors')

#----------------------------------------------------------------------------#
# Launch.
#----------------------------------------------------------------------------#

# Default port:
if __name__ == '__main__':
    app.run(debug=True)

# Or specify port manually:
Example #33
    df_que2 = pandas.concat([df1, df2], ignore_index=True)
    df_que2 = df_que2.drop_duplicates().fillna('')
    logger.info('df_que2 {}'.format(df_que2.shape))
    df_que2['qid'] = numpy.arange(df_que2.shape[0])

    map_test = dict(zip(df_que2['question'], range(df_que2.shape[0])))

    return map_train, map_test, train_num


if __name__ == '__main__':
    from logging import StreamHandler, DEBUG, Formatter, FileHandler

    log_fmt = Formatter(
        '%(asctime)s %(name)s %(lineno)d [%(levelname)s][%(funcName)s] %(message)s '
    )
    handler = FileHandler('doc2vec.py.log', 'w')
    handler.setLevel(DEBUG)
    handler.setFormatter(log_fmt)
    logger.setLevel(DEBUG)
    logger.addHandler(handler)

    handler = StreamHandler()
    handler.setLevel('INFO')
    handler.setFormatter(log_fmt)
    # Keep the logger itself at DEBUG; each handler filters on its own level.
    logger.addHandler(handler)

    # load_data()
    train()
Example #34
import codecs
import sys
import gzip
from tqdm import tqdm
'''
Takes a Twitter screen name (the name following @), a list in which to save
the fetched tweet data, and the parameters needed for the request as
arguments.
Returns the list containing the tweet data.
Exits forcibly if there is no network connection or something is wrong.
'''

# Logger settings
handler = FileHandler('getTweet.log', mode='a', encoding='utf_8')
handler.setLevel(DEBUG)
formatter = Formatter('%(asctime)s-%(name)s-%(levelname)s-%(message)s')
handler.setFormatter(formatter)


def getTweet(screen_name: str, params: dict, twitter_keys: list):
    """
    過去に遡ってツイートを取得する
    """
    # Set Logger
    logger_getTweet = getLogger('getTweet')
    logger_getTweet.setLevel(DEBUG)
    logger_getTweet.addHandler(handler)
    # initialize the list to save tweets
    save_list = []
    # Keep fetching until the API rate limit is reached; once hit, the
    # returned tweet array length drops to 0
    # 200 tweets per loop
    for i in range(20):
Example #35
import pytesseract
import numpy

from messages import RequestMessage, AddRequestMessage, QueryRequestMessage, QueryResponseMessage, QueryResponseEntry

LOG = getLogger(__name__)
LOG.setLevel(DEBUG)

SCREENSHOTS_DIRECTORY = Path('./screenshots')
ENTRY_LIST_PATH = Path('./entry_list.json')
STRING_ENCODING = 'utf-8'
IMG_HOST = 'http://localhost:4545'
LOG_PATH = './ocr_web_native_application.log'

file_handler = FileHandler(filename=LOG_PATH)
file_handler.setFormatter(
    Formatter('%(asctime)s - %(levelname)s - %(message)s'))
LOG.addHandler(hdlr=file_handler)


@dataclass
class Entry:
    url: str
    title: str
    timestamp_ms: int
    hash_value: bytes
    bloom_filter: BloomFilter

    @property
    def image_path(self) -> Path:
        return SCREENSHOTS_DIRECTORY / f'{self.timestamp_ms}_{self.hash_value.hex()}.png'
Example #36
import catboost as cat

import warnings
warnings.simplefilter('ignore')

utils.start(__file__)
#==============================================================================
# Logger
#==============================================================================
from logging import getLogger, FileHandler, Formatter, DEBUG
logger = getLogger(__name__)
logger.setLevel(DEBUG)

file_handler = FileHandler(os.path.join('logs', 'log_{}'.format(str(datetime.datetime.today().date()).replace('-', ''))))
formatter = Formatter('%(message)s')
file_handler.setFormatter(formatter)
file_handler.setLevel(DEBUG)

logger.addHandler(file_handler)
logger.propagate = False

#==============================================================================
PATH = os.path.join('..', 'data')

KEY = 'card_id'

SEED = 18
# SEED = np.random.randint(9999)

NTHREAD = cpu_count()
Example #37
def note_and_log(cls):
    """
    This will be used as a decorator on class to activate
    logging and store messages in the variable cls._notes
    This will allow quick access to events in the web app.

    A note can be added to cls._notes without logging by passing
    the argument log=False to the function note().
    Something can be logged without adding a note using the function log().
    """
    if hasattr(cls, 'DEBUG_LEVEL'):
        if cls.DEBUG_LEVEL == 'debug':
            file_level = logging.DEBUG
            console_level = logging.DEBUG
        elif cls.DEBUG_LEVEL == 'info':
            file_level = logging.INFO
            console_level = logging.INFO
    else:
        file_level = logging.INFO
        console_level = logging.WARNING
    # Notes object
    cls._notes = namedtuple('_notes', ['timestamp', 'notes'])
    cls._notes.timestamp = []
    cls._notes.notes = []

    # Defining log object
    cls.logname = '{} | {}'.format(cls.__module__, cls.__name__)
    # Note: a Logger is not a Handler, so it must not be passed to the root
    # logger's addHandler(); handlers are attached to cls._log below.
    cls._log = logging.getLogger('BAC0')
    
    # Console Handler
    ch = logging.StreamHandler()
    ch.set_name('stderr')
    ch2 = logging.StreamHandler(sys.stdout)
    ch2.set_name('stdout')
    ch.setLevel(console_level)
    ch2.setLevel(logging.CRITICAL)

    formatter = logging.Formatter(
        '{asctime} - {levelname:<8}| {message}', style='{')

    # File Handler
    _PERMISSION_TO_WRITE = True
    logUserPath = expanduser('~')
    logSaveFilePath = join(logUserPath, '.BAC0')

    logFile = join(logSaveFilePath, 'BAC0.log')
    if not os.path.exists(logSaveFilePath):
        try:
            os.makedirs(logSaveFilePath)
        except OSError:
            _PERMISSION_TO_WRITE = False
    if _PERMISSION_TO_WRITE:
        fh = FileHandler(logFile)
        fh.set_name('file_handler')
        fh.setLevel(file_level)
        fh.setFormatter(formatter)

    ch.setFormatter(formatter)
    ch2.setFormatter(formatter)
    # Add handlers the first time only...
    if not len(cls._log.handlers):
        if _PERMISSION_TO_WRITE:
            cls._log.addHandler(fh)
        cls._log.addHandler(ch)
        cls._log.addHandler(ch2)
    
#    cls._log.setLevel(logging.CRITICAL)
        
    def log_title(self, title, args=None, width=35):
        cls._log.info("")
        cls._log.info("#"*width)
        cls._log.info("# {}".format(title))
        cls._log.info("#"*width)
        if args:
            cls._log.debug("{!r}".format(args))
            cls._log.debug("#"*35)

    def log_subtitle(self, subtitle, args=None, width=35):
        cls._log.info("")
        cls._log.info("="*width)
        cls._log.info("{}".format(subtitle))
        cls._log.info("="*width)
        if args:
            cls._log.debug("{!r}".format(args))
            cls._log.debug("="*width)

    def log(self, note, *, level=logging.DEBUG):
        """
        Add a log entry...no note
        """
        if not note:
            raise ValueError('Provide something to log')
        note = '{} | {}'.format(cls.logname, note)
        cls._log.log(level, note)

    def note(self, note, *, level=logging.INFO, log=True):
        """
        Add note to the object. By default, the note will also
        be logged

        :param note: (str) The note itself
        :param level: (logging.level)
        :param log: (boolean) Enable or disable logging of note
        """
        if not note:
            raise ValueError('Provide something to log')
        note = '{} | {}'.format(cls.logname, note)
        cls._notes.timestamp.append(datetime.now())
        cls._notes.notes.append(note)
        if log:
            # Log via the underlying logger; cls.log() expects self first.
            cls._log.log(level, note)

    @property
    def notes(self):
        """
        Retrieve notes list as a Pandas Series
        """
        if not _PANDAS:
            return dict(zip(self._notes.timestamp, self._notes.notes))
        return pd.Series(self._notes.notes, index=self._notes.timestamp)

    def clear_notes(self):
        """
        Clear notes object
        """
        cls._notes.timestamp = []
        cls._notes.notes = []

    # Add the functions to the decorated class
    cls.clear_notes = clear_notes
    cls.note = note
    cls.notes = notes
    cls.log = log
    cls.log_title = log_title
    cls.log_subtitle = log_subtitle
    return cls
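
A usage sketch for the decorator above (class name and messages are illustrative; it assumes the module-level imports and _PANDAS flag of the original module are in place):

@note_and_log
class Device:
    DEBUG_LEVEL = 'info'

d = Device()
d.note('device connected')    # logged and stored in d.notes
d.log('polling started')      # logged only, no note kept
print(d.notes)                # timestamps mapped to notes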
Example #38
from logging import Logger, FileHandler, Formatter, WARN, INFO

DEBUG = False

### logger config
logger = Logger('cls')
WARN_LOGGER = "/tmp/cls_warn.log"
INFO_LOGGER = "/tmp/cls_info.log"
formatter = Formatter("%(asctime)s - %(levelname)s - %(message)s")

filewarnhandler = FileHandler(WARN_LOGGER, 'a')
filewarnhandler.setLevel(WARN)
filewarnhandler.setFormatter(formatter)

fileinfohandler = FileHandler(INFO_LOGGER, 'a')
fileinfohandler.setLevel(INFO)
fileinfohandler.setFormatter(formatter)

logger.addHandler(filewarnhandler)
logger.addHandler(fileinfohandler)

# origin_warning = logger.warning
#
#
# def my_warning(*args, **kwargs):
#     origin_warning(locals())
#     return origin_warning(*args, **kwargs)
#
#
# logger.warning = my_warning
Example #39
def main(argv: List[str]) -> None:

    parser = argparse.ArgumentParser(
        description="Modifies the BSP file, allowing additional entities "
        "and bugfixes.", )

    parser.add_argument("--nopack",
                        dest="allow_pack",
                        action="store_false",
                        help="Prevent packing of files found in the map.")
    parser.add_argument(
        "--propcombine",
        action="store_true",
        help="Allow merging static props together.",
    )
    parser.add_argument(
        "--showgroups",
        action="store_true",
        help="Show propcombined props, by setting their tint to 0 255 0",
    )

    parser.add_argument(
        "map",
        help="The path to the BSP file.",
    )

    args = parser.parse_args(argv)

    # The path is the last argument to the compiler.
    # Hammer adds wrong slashes sometimes, so fix that.
    # Also if it's the VMF file, make it the BSP.
    path = Path(args.map).with_suffix('.bsp')

    # Open and start writing to the map's log file.
    handler = FileHandler(path.with_suffix('.log'))
    handler.setFormatter(
        Formatter(
            # One letter for level name
            '[{levelname}] {module}.{funcName}(): {message}',
            style='{',
        ))
    LOGGER.addHandler(handler)

    LOGGER.info('Srctools postcompiler hook started at {}!',
                datetime.datetime.now().isoformat())
    LOGGER.info("Map path is {}", path)

    conf, game_info, fsys, pack_blacklist, plugins = config.parse(path)

    fsys.open_ref()

    packlist = PackList(fsys)

    LOGGER.info('Gameinfo: {}', game_info.path)
    LOGGER.info(
        'Search paths: \n{}',
        '\n'.join([sys.path for sys, prefix in fsys.systems]),
    )

    fgd = FGD.engine_dbase()

    LOGGER.info('Loading soundscripts...')
    packlist.load_soundscript_manifest(
        conf.path.with_name('srctools_sndscript_data.vdf'))
    LOGGER.info('Done! ({} sounds)', len(packlist.soundscripts))

    LOGGER.info('Reading BSP...')
    bsp_file = BSP(path)

    LOGGER.info('Reading entities...')
    vmf = bsp_file.read_ent_data()
    LOGGER.info('Done!')

    studiomdl_path = conf.get(str, 'studiomdl')
    if studiomdl_path:
        studiomdl_loc = (game_info.root / studiomdl_path).resolve()
        if not studiomdl_loc.exists():
            LOGGER.warning('No studiomdl found at "{}"!', studiomdl_loc)
            studiomdl_loc = None
    else:
        LOGGER.warning('No studiomdl path provided.')
        studiomdl_loc = None

    for plugin in plugins:
        plugin.load()

    use_comma_sep = conf.get(bool, 'use_comma_sep')
    if use_comma_sep is None:
        # Guess the format, by picking whatever the first output uses.
        for ent in vmf.entities:
            for out in ent.outputs:
                use_comma_sep = out.comma_sep
                break
        if use_comma_sep is None:
            LOGGER.warning(
                'No outputs in map, could not determine BSP I/O format!')
            LOGGER.warning('Set "use_comma_sep" in srctools.vdf.')
        use_comma_sep = False

    run_transformations(vmf, fsys, packlist, bsp_file, game_info,
                        studiomdl_loc)

    if studiomdl_loc is not None and args.propcombine:
        LOGGER.info('Combining props...')
        propcombine.combine(
            bsp_file,
            vmf,
            packlist,
            game_info,
            studiomdl_loc,
            [
                game_info.root / folder for folder in conf.get(
                    Property, 'propcombine_qc_folder').as_array(conv=Path)
            ],
            conf.get(int, 'propcombine_auto_range'),
            conf.get(int, 'propcombine_min_cluster'),
            debug_tint=args.showgroups,
        )
        LOGGER.info('Done!')
    else:  # Strip these if they're present.
        for ent in vmf.by_class['comp_propcombine_set']:
            ent.remove()

    bsp_file.lumps[BSP_LUMPS.ENTITIES].data = bsp_file.write_ent_data(
        vmf, use_comma_sep)

    if conf.get(bool, 'auto_pack') and args.allow_pack:
        LOGGER.info('Analysing packable resources...')
        packlist.pack_fgd(vmf, fgd)

        packlist.pack_from_bsp(bsp_file)

        packlist.eval_dependencies()

    packlist.pack_into_zip(bsp_file,
                           blacklist=pack_blacklist,
                           ignore_vpk=False)

    with bsp_file.packfile() as pak_zip:
        LOGGER.info('Packed files: \n{}'.format('\n'.join(pak_zip.namelist())))

    LOGGER.info('Writing BSP...')
    bsp_file.save()

    LOGGER.info("srctools VRAD hook finished!")
Example #40
# LoggingSetting - START
import logging
from logging import getLogger, StreamHandler, FileHandler,Formatter
logger = getLogger("LOG")
logger.setLevel(logging.DEBUG)

handler_format = Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

stream_handler = StreamHandler()
stream_handler.setLevel(logging.DEBUG)
stream_handler.setFormatter(handler_format)
logger.addHandler(stream_handler)

file_handler = FileHandler(filename="lsl.log")
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(handler_format)
logger.addHandler(file_handler)

# logger.debug("Hello World!")
# LoggingSetting - END

def Logger(*args):
    message = ""
    for a in args:
        message += str(a) + " "
    logger.debug(message)

def ListLogger(*args):
    message = ""
    print(args)
    for msg in args[0] :
Example #41
class ConfigRpmMaker(object):

    ERROR_MSG = """
------------------------------------------------------------------------
Your commit has been accepted by the SVN server, but due to the errors
it contains, no RPMs have been created.

See %s/%s.txt for details.

Please fix the issues and trigger the RPM creation with a dummy commit.
------------------------------------------------------------------------
"""

    def __init__(self, revision, svn_service):
        self.revision = revision
        self.svn_service = svn_service
        self.temp_dir = get_temporary_directory()
        self._assure_temp_dir_if_set()
        self._create_logger()
        self.work_dir = None
        self.host_queue = Queue()
        self.failed_host_queue = Queue()

    def __build_error_msg_and_move_to_public_access(self, revision):
        err_url = get_error_log_url()
        error_msg = self.ERROR_MSG % (err_url, revision)
        for line in error_msg.split('\n'):
            LOGGER.error(line)
        self._move_error_log_for_public_access()
        self._clean_up_work_dir()
        return error_msg

    def build(self):
        LOGGER.info('Working on revision %s', self.revision)
        self.logger.info("Starting with revision %s", self.revision)
        try:
            changed_paths = self.svn_service.get_changed_paths(self.revision)
            available_hosts = self.svn_service.get_hosts(self.revision)

            affected_hosts = list(
                self._get_affected_hosts(changed_paths, available_hosts))
            if not affected_hosts:
                LOGGER.info(
                    "No rpm(s) built. No host affected by change set: %s",
                    str(changed_paths))
                return

            log_elements_of_list(LOGGER.debug, 'Detected %s affected host(s).',
                                 affected_hosts)

            self._prepare_work_dir()
            rpms = self._build_hosts(affected_hosts)
            self._upload_rpms(rpms)
            self._move_configviewer_dirs_to_final_destination(affected_hosts)

        except BaseConfigRpmMakerException as exception:
            self.logger.error('Last error during build:\n%s' % str(exception))
            self.__build_error_msg_and_move_to_public_access(self.revision)
            raise exception

        except Exception as exception:
            self.logger.exception('Last error during build:')
            error_msg = self.__build_error_msg_and_move_to_public_access(
                self.revision)
            raise Exception(
                'Unexpected error occurred, stacktrace will follow.\n%s\n\n%s'
                % (traceback.format_exc(), error_msg))

        self._clean_up_work_dir()
        return rpms

    def _clean_up_work_dir(self):
        if self._keep_work_dir():
            LOGGER.info(
                'All working data can be found in "{working_directory}"'.
                format(working_directory=self.work_dir))
        else:
            if self.work_dir and exists(self.work_dir):

                if is_verbose_enabled():
                    log_directories_summary(LOGGER.debug, self.work_dir)

                LOGGER.debug('Cleaning up working directory "%s"',
                             self.work_dir)
                rmtree(self.work_dir)

            if exists(self.error_log_file):
                LOGGER.debug('Removing error log "%s"', self.error_log_file)
                remove(self.error_log_file)

    def _keep_work_dir(self):
        return is_no_clean_up_enabled()

    def _move_error_log_for_public_access(self):
        error_log_dir = os.path.join(get_error_log_directory())
        if error_log_dir:
            if not os.path.exists(error_log_dir):
                os.makedirs(error_log_dir)
            shutil.move(self.error_log_file,
                        os.path.join(error_log_dir, self.revision + '.txt'))

    def _read_integer_from_file(self, path):

        with open(path) as file_which_contains_integer:
            integer_from_file = int(file_which_contains_integer.read())

        return integer_from_file

    def _move_configviewer_dirs_to_final_destination(self, hosts):
        LOGGER.info("Updating configviewer data.")

        for host in hosts:
            temp_path = build_config_viewer_host_directory(
                host, revision=self.revision)
            dest_path = build_config_viewer_host_directory(host)

            if exists(dest_path):
                path_to_revision_file = join(dest_path, "%s.rev" % host)
                revision_from_file = self._read_integer_from_file(
                    path_to_revision_file)

                if revision_from_file > int(self.revision):
                    LOGGER.debug(
                        'Will not update configviewer data for host "%s" since the current revision file contains revision %d which is higher than %s',
                        host, revision_from_file, self.revision)
                    rmtree(temp_path)
                    continue

                rmtree(dest_path)

            LOGGER.debug('Updating configviewer data for host "%s"', host)
            move(temp_path, dest_path)

    def _notify_that_host_failed(self, host_name, stack_trace):
        failure_information = (host_name, stack_trace)
        self.failed_host_queue.put(failure_information)
        approximately_count = self.failed_host_queue.qsize()
        LOGGER.error(
            'Build for host "{host_name}" failed. Approximately {count} builds failed.'
            .format(host_name=host_name, count=approximately_count))

        maximum_allowed_failed_hosts = get_max_failed_hosts()
        if approximately_count >= maximum_allowed_failed_hosts:
            LOGGER.error(
                'Stopping to build more hosts since the maximum of %d failed hosts has been reached'
                % maximum_allowed_failed_hosts)
            self.host_queue.queue.clear()

    def _build_hosts(self, hosts):
        if not hosts:
            LOGGER.warn('Trying to build rpms for hosts, but no hosts given!')
            return

        for host in hosts:
            self.host_queue.put(host)

        rpm_queue = Queue()
        svn_service_queue = Queue()
        svn_service_queue.put(self.svn_service)

        thread_count = self._get_thread_count(hosts)
        thread_pool = [
            BuildHostThread(
                name='Thread-%d' % i,
                revision=self.revision,
                svn_service_queue=svn_service_queue,
                rpm_queue=rpm_queue,
                notify_that_host_failed=self._notify_that_host_failed,
                host_queue=self.host_queue,
                work_dir=self.work_dir,
                error_logging_handler=self.error_handler)
            for i in range(thread_count)
        ]

        for thread in thread_pool:
            LOGGER.debug('%s: starting ...', thread.name)
            thread.start()

        for thread in thread_pool:
            thread.join()

        failed_hosts = dict(self._consume_queue(self.failed_host_queue))
        if failed_hosts:
            failed_hosts_str = [
                '\n%s:\n\n%s\n\n' % (key, value)
                for (key, value) in failed_hosts.iteritems()
            ]
            raise CouldNotBuildSomeRpmsException(
                "Could not build config rpm for some host(s): %s" %
                '\n'.join(failed_hosts_str))

        LOGGER.info("Finished building configuration rpm(s).")
        built_rpms = self._consume_queue(rpm_queue)
        log_elements_of_list(LOGGER.debug, 'Built %s rpm(s).', built_rpms)

        return built_rpms

    @measure_execution_time
    def _upload_rpms(self, rpms):
        rpm_upload_cmd = get_rpm_upload_command()
        chunk_size = self._get_chunk_size(rpms)

        if rpm_upload_cmd:
            LOGGER.info("Uploading %s rpm(s).", len(rpms))
            LOGGER.debug(
                'Uploading rpm(s) using command "%s" and chunk_size "%s"',
                rpm_upload_cmd, chunk_size)

            pos = 0
            while pos < len(rpms):
                rpm_chunk = rpms[pos:pos + chunk_size]
                cmd = '%s %s' % (rpm_upload_cmd, ' '.join(rpm_chunk))
                process = subprocess.Popen(cmd,
                                           shell=True,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)
                stdout, stderr = process.communicate()
                if process.returncode:
                    error_message = 'Rpm upload failed with exit code %s. Executed command "%s"\n' % (
                        process.returncode, cmd)
                    if stdout:
                        error_message += 'stdout: "%s"\n' % stdout.strip()
                    if stderr:
                        error_message += 'stderr: "%s"\n' % stderr.strip()
                    raise CouldNotUploadRpmsException(error_message)
                pos += chunk_size
        else:
            LOGGER.info(
                "Rpms will not be uploaded since no upload command has been configured."
            )

    def _get_affected_hosts(self, changed_paths, available_hosts):
        result = set()
        for segment in OVERLAY_ORDER:
            for changed_path in changed_paths:
                result |= set(
                    self._find_matching_hosts(segment, changed_path,
                                              available_hosts))

        return result

    def _find_matching_hosts(self, segment, svn_path, available_hosts):
        result = []
        for host in available_hosts:
            for path in segment.get_svn_paths(host):
                if svn_path.startswith(path):
                    result.append(host)
                    break

        return result

    def _get_thread_count(self, affected_hosts):
        thread_count = int(get_thread_count())
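        # A configured value of 0 means "auto": use one thread per affected
        # host (hypothetically, 0 with 5 hosts -> 5 threads; 8 with 5 hosts
        # is likewise capped at 5).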
        if thread_count < 0:
            raise ConfigurationException(
                '%s is %s, values < 0 are not allowed' %
                (get_thread_count, thread_count))

        if not thread_count or thread_count > len(affected_hosts):
            if not thread_count:
                reason = 'Configuration property "%s" is %s' % (
                    get_thread_count, thread_count)
            elif thread_count > len(affected_hosts):
                reason = "More threads available than affected hosts"
            thread_count = len(affected_hosts)
            LOGGER.info("%s: using one thread for each affected host." %
                        (reason))
        return thread_count

    def _consume_queue(self, queue):
        items = []
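        # Drain without blocking; relying on empty() is safe here because the
        # producer threads have already been joined by the time this runs.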

        while not queue.empty():
            item = queue.get()
            queue.task_done()
            items.append(item)

        return items

    def _create_logger(self):
        self.error_log_file = tempfile.mktemp(dir=get_temporary_directory(),
                                              prefix='yadt-config-rpm-maker.',
                                              suffix='.revision-%s.error.log' %
                                              self.revision)
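        # mktemp only generates a unique name; the FileHandler below is what
        # actually creates the file.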
        self.error_handler = FileHandler(self.error_log_file)
        formatter = Formatter(configuration.LOG_FILE_FORMAT,
                              configuration.LOG_FILE_DATE_FORMAT)
        self.error_handler.setFormatter(formatter)
        self.error_handler.setLevel(ERROR)

        self.logger = getLogger('fileLogger')
        self.logger.addHandler(self.error_handler)
        self.logger.propagate = False

    def _assure_temp_dir_if_set(self):
        if self.temp_dir and not os.path.exists(self.temp_dir):
            os.makedirs(self.temp_dir)

    def _prepare_work_dir(self):
        LOGGER.debug('Preparing working directory "%s"', self.temp_dir)
        self.work_dir = mkdtemp(prefix='yadt-config-rpm-maker.',
                                suffix='.' + self.revision,
                                dir=self.temp_dir)

        self.rpm_build_dir = join(self.work_dir, 'rpmbuild')
        LOGGER.debug('Creating directory structure for rpmbuild in "%s"',
                     self.rpm_build_dir)
        for name in [
                'tmp', 'RPMS', 'RPMS/x86_64', 'RPMS/noarch', 'BUILD',
                'BUILDROOT', 'SRPMS', 'SPECS', 'SOURCES'
        ]:
            path = join(self.rpm_build_dir, name)
            if not exists(path):
                makedirs(path)

    def _get_chunk_size(self, rpms):
        chunk_size_raw = get_rpm_upload_chunk_size()
        try:
            chunk_size = int(chunk_size_raw)
        except ValueError:
            raise ConfigurationException(
                'rpm_upload_chunk_size (%s) is not a legal value (should be int)'
                % chunk_size_raw)
        if chunk_size < 0:
            raise ConfigurationException(
                "Config param 'rpm_upload_chunk_size' needs to be greater than or equal to 0"
            )

        if not chunk_size:
            chunk_size = len(rpms)

        return chunk_size
Beispiel #42
0
    def _file_handler(self):
        # self._pwd() and self._formatter() are helpers defined elsewhere in
        # this class; they supply the log file path and the Formatter.
        handler = FileHandler(self._pwd(), mode='a')
        handler.setFormatter(self._formatter())
        return handler
Beispiel #43
0
    configpass = f1.readline().rstrip()
app.config[
    'SQLALCHEMY_DATABASE_URI'] = "mysql+pymysql://%s:%s@localhost/restapi" % (
        configuser, configpass)

db = SQLAlchemy(app)

__all__ = ['views', 'models']

# Logging to external file
app.debug = True
import logging
from logging import Formatter
from logging import FileHandler
from logging.handlers import RotatingFileHandler
log = "/var/log/lighttpd/restapi.log"
FORMAT = '[%(asctime)s] [view: %(view)s] [ip: %(ip)s] [user: %(kuser)s] @context:%(context)s@ %(message)s'
file_handler_1 = RotatingFileHandler(log,
                                     maxBytes=2 * 1024 * 1024,
                                     backupCount=5)
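# Note: file_handler_1 above is created but never attached; only the plain
# FileHandler below is added to the app logger.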
file_handler = FileHandler(log)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(Formatter(FORMAT))
app.logger.addHandler(file_handler)

# Import all views from here
from softball.views import stats

# Load all blueprints
app.register_blueprint(stats.mod)
Beispiel #44
0
    def __init__(self):
        # Read config.ini.
        full_path = os.path.dirname(os.path.abspath(__file__))
        config = configparser.ConfigParser()
        config.read(os.path.join(full_path, 'config.ini'), encoding='utf-8')

        try:
            self.banner_delay = float(config['Common']['banner_delay'])
            self.report_date_format = config['Common']['date_format']
            self.modules_dir = config['Common']['module_path']
            self.log_dir = os.path.join(full_path,
                                        config['Common']['log_path'])
            if not os.path.exists(self.log_dir):
                os.mkdir(self.log_dir)
                self.print_message(
                    NOTE, 'Created directory : {}'.format(self.log_dir))
            self.log_file = config['Common']['log_file']
            self.log_path = os.path.join(self.log_dir, self.log_file)

            # Load fps rate.
            self.fps = int(config['Common']['fps'])
            if self.fps < 30:
                self.print_message(
                    WARNING,
                    'Too low fps. Change : {} -> 30'.format(str(self.fps)))
                self.fps = 30
            elif self.fps > 60:
                self.print_message(
                    WARNING,
                    'Too high fps. Change : {} -> 60'.format(str(self.fps)))
                self.fps = 60

        except Exception as e:
            self.print_message(FAIL,
                               'Reading config.ini failed: {}'.format(e))
            sys.exit(1)

        # Define image bank Id.
        self.image_id_0 = 0
        self.image_id_1 = 1
        self.image_id_2 = 2

        # Define tile map Id.
        self.tm_id_0 = 0
        self.tm_id_1 = 1
        self.tm_id_2 = 2
        self.tm_id_3 = 3
        self.tm_id_4 = 4
        self.tm_id_5 = 5
        self.tm_id_6 = 6
        self.tm_id_7 = 7

        # Define color key.
        self.color_0 = 0
        self.color_1 = 1
        self.color_2 = 2
        self.color_3 = 3
        self.color_4 = 4
        self.color_5 = 5
        self.color_6 = 6
        self.color_7 = 7
        self.color_8 = 8
        self.color_9 = 9
        self.color_10 = 10
        self.color_11 = 11
        self.color_12 = 12
        self.color_13 = 13
        self.color_14 = 14
        self.color_15 = 15

        # Define status.
        self.status_normal = 'normal'
        self.status_attack_main = 'main_gun'
        self.status_attack_sub = 'sub_gun'
        self.status_attack_se = 'special_enquipment'
        self.status_attack_probe = 'probe'

        # Setting logger.
        self.logger = getLogger('8Vana')
        self.logger.setLevel(20)  # 20 == logging.INFO
        file_handler = FileHandler(self.log_path)
        self.logger.addHandler(file_handler)
        formatter = Formatter('%(levelname)s,%(message)s')
        file_handler.setFormatter(formatter)
Beispiel #45
0
# Set the logger name for output
logger = getLogger(__name__)

# Set the log format (see the following markdown table for details)
fmt = Formatter('%(asctime)s %(name)s %(lineno)d %(levelname)s %(message)s')

# Configure console (stream) output for the log
shandler = StreamHandler()
shandler.setLevel('INFO')
shandler.setFormatter(fmt)

# Configure the log file output destination
fhandler = FileHandler('result_tmp/Input.log')
fhandler.setLevel(DEBUG)
fhandler.setFormatter(fmt)

# Set the log level
logger.setLevel(DEBUG)
logger.addHandler(shandler)
logger.addHandler(fhandler)
logger.propagate = False


def load_train_data():
    logger.info('read_train start')
    train = pd.read_csv('../../input/sales_train.csv')
    logger.info('train:\n{}'.format(train.head()))
    logger.debug('read_train end')
    return train
Beispiel #46
0
class Logger:
    """Writes system state to log files."""
    def __init__(self, name=None):
        self.__name = name
        self.__loggers = {
            'file': self.__file_logger,
            'console': self.__console_logger
        }
        self.__log_level = {'info': INFO, 'debug': DEBUG}
        self.__modes = []
        self.__logs_path = ''
        self.__common_log_handler = None
        self.__console_log_handler = None
        self.__log_format = None

        self.__logger = getLogger(self.__name)

        self.info = self.__logger.info
        self.debug = self.__logger.debug
        self.warning = self.__logger.warning
        self.error = self.__logger.error
        self.critical = self.__logger.critical

    def set_logs(self, mode=None, message_level='info', logs_directory=None):
        """Set logger handlers."""
        if mode not in self.__loggers:
            raise ValueError('Mode "{}" is not supported'.format(mode))
        self.__modes.append(mode)
        if mode == 'file':
            if not logs_directory:
                raise ValueError('"logs_path" should not be None')
            self.__logs_directory = logs_directory

        self.__logger.setLevel(self.__log_level[message_level])

        message_format = '%(levelname)-8s %(asctime)s (%(filename)s:%(lineno)d) %(message)-40s'
        self.__log_format = Formatter(fmt=message_format,
                                      datefmt="%y-%m-%d %H:%M:%S")
        self.__loggers[mode]()

    def __file_logger(self):
        """Create and start loggers file handler."""
        log_file = '{0}/{1}.log'.format(self.__logs_path, self.__name)

        # Remove any existing log file so it gets rewritten
        if os.path.exists(log_file):
            os.remove(log_file)

        self.__common_log_handler = FileHandler(log_file,
                                                mode='w',
                                                encoding='utf-8')
        self.__common_log_handler.setFormatter(self.__log_format)

        self.__logger.addHandler(self.__common_log_handler)

    def __console_logger(self):
        """Create and start loggers console handler."""
        self.__console_log_handler = StreamHandler()
        self.__console_log_handler.setFormatter(self.__log_format)
        self.__logger.addHandler(self.__console_log_handler)

    def close_logs(self, mode):
        """Close logger handlers."""
        if mode not in self.__modes:
            return
        if mode == 'file':
            self.__common_log_handler.close()
            self.__logger.removeHandler(self.__common_log_handler)
        elif mode == 'console':
            self.__console_log_handler.close()
            self.__logger.removeHandler(self.__console_log_handler)
        self.__modes.remove(mode)
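
# A minimal usage sketch for the Logger class above (mode names come from
# __loggers; the logger name, level and directory are illustrative):
#
#     log = Logger(name='system')
#     log.set_logs(mode='console', message_level='debug')
#     log.set_logs(mode='file', message_level='info',
#                  logs_directory='/tmp/logs')
#     log.info('system started')
#     log.close_logs('file')
#     log.close_logs('console')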
Beispiel #47
0
from datetime import datetime
import os
import sys

# Create the log file (see https://qiita.com/amedama/items/b856b2f30c2f38665701)
from logging import getLogger, StreamHandler, FileHandler, DEBUG, Formatter
logger = getLogger(__name__)
stream_handler = StreamHandler()
stream_handler.setLevel(DEBUG)
stream_handler.setFormatter(
    Formatter("%(asctime)s %(levelname)8s %(message)s"))
file_handler = FileHandler(filename=os.path.dirname(__file__) + '\\log\\' +
                           os.path.splitext(os.path.basename(__file__))[0] +
                           datetime.strftime(datetime.today(), '%Y%m%d') +
                           '_' + datetime.now().strftime('%H%M%S') + '.log')
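# The handler writes to e.g. <script_dir>\log\<script_name>YYYYMMDD_HHMMSS.log
# (illustrative name; the 'log' directory must already exist).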
file_handler.setLevel(DEBUG)
file_handler.setFormatter(Formatter("%(asctime)s %(levelname)8s %(message)s"))

logger.setLevel(DEBUG)
logger.addHandler(stream_handler)
logger.addHandler(file_handler)
logger.propagate = False

#variables
todayJPGFileName = ""


#methods
def get_today_JPGFile_name():

    print('JPGFileName is JPG_20180XX')
    i = input("Please input today's Number (20180XX)")
Beispiel #48
0
    def setUpClass(cls):
        """
        Perform class setup before running the testcase
        Remove shared memory files, start vpp and connect the vpp-api
        """
        gc.collect()  # run garbage collection first
        cls.logger = getLogger(cls.__name__)
        cls.tempdir = tempfile.mkdtemp(prefix='vpp-unittest-' + cls.__name__ +
                                       '-')
        file_handler = FileHandler("%s/log.txt" % cls.tempdir)
        file_handler.setFormatter(
            Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
                      datefmt="%H:%M:%S"))
        file_handler.setLevel(DEBUG)
        cls.logger.addHandler(file_handler)
        cls.shm_prefix = cls.tempdir.split("/")[-1]
        os.chdir(cls.tempdir)
        cls.logger.info("Temporary dir is %s, shm prefix is %s", cls.tempdir,
                        cls.shm_prefix)
        cls.setUpConstants()
        cls.reset_packet_infos()
        cls._captures = []
        cls._zombie_captures = []
        cls.verbose = 0
        cls.vpp_dead = False
        cls.registry = VppObjectRegistry()
        # need to catch exceptions here because if we raise, then the cleanup
        # doesn't get called and we might end with a zombie vpp
        try:
            cls.run_vpp()
            cls.vpp_stdout_deque = deque()
            cls.vpp_stderr_deque = deque()
            cls.pump_thread_stop_flag = Event()
            cls.pump_thread_wakeup_pipe = os.pipe()
            cls.pump_thread = Thread(target=pump_output, args=(cls, ))
            cls.pump_thread.daemon = True
            cls.pump_thread.start()
            cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls)
            if cls.step:
                hook = StepHook(cls)
            else:
                hook = PollHook(cls)
            cls.vapi.register_hook(hook)
            cls.sleep(0.1, "after vpp startup, before initial poll")
            hook.poll_vpp()
            try:
                cls.vapi.connect()
            except:
                if cls.debug_gdbserver:
                    print(
                        colorize(
                            "You're running VPP inside gdbserver but "
                            "VPP-API connection failed, did you forget "
                            "to 'continue' VPP from within gdb?", RED))
                raise
        except:
            t, v, tb = sys.exc_info()
            try:
                cls.quit()
            except:
                pass
            raise t, v, tb  # Python 2 re-raise, preserving the traceback
Beispiel #49
0
import os
import time
import pandas as pd

from logging import getLogger, StreamHandler, FileHandler, Formatter, DEBUG
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.parse import urlencode

logger = getLogger('dev-debug')
formatter = Formatter(
    '%(asctime)s [%(levelname)s] [%(filename)s: %(funcName)s: %(lineno)d] '
    '%(message)s')
handlerSh = StreamHandler()
handlerFile = FileHandler('error.log')
handlerSh.setFormatter(formatter)
handlerSh.setLevel(DEBUG)
handlerFile.setFormatter(formatter)
handlerFile.setLevel(DEBUG)
logger.setLevel(DEBUG)
logger.addHandler(handlerSh)
logger.addHandler(handlerFile)
logger.debug('log start')

# Yelp Fusion no longer uses OAuth as of December 7, 2017.
# You no longer need to provide Client ID to fetch Data
# It now uses private keys to authenticate requests (API Key)
# You can find it on
# https://www.yelp.com/developers/v3/manage_app
API_KEY = os.environ.get("YELP_API_KEY")

# API constants, you shouldn't have to change these.
API_HOST = 'https://api.yelp.com'
Beispiel #50
0
import os
from logging import FileHandler, Formatter

fileHandler = FileHandler("microservice.log", encoding="utf-8")
fileHandler.setFormatter(
    Formatter("[%(asctime)s] %(levelname)s in %(module)s: %(message)s")
)


class Config:
    SECRET_KEY = os.environ.get("SECRET_KEY") or "top secret"

    SQLALCHEMY_DATABASE_URI = (
        os.environ.get("DATABASE_URI") or "sqlite:///../gooutsafe.db"
    )
    SQLALCHEMY_TRACK_MODIFICATIONS = False

    # https://avatars.dicebear.com/api/avataaars/roma%20molesta.svg
    AVATAR_PROVIDER = "https://avatars.dicebear.com/api/avataaars/{seed}.svg"

    # Services
    URL_API_BOOKING = os.environ.get("URL_API_BOOKING") or "http://localhost:5002/"
    URL_API_RESTAURANT = (
        os.environ.get("URL_API_RESTAURANT") or "http://localhost:5003/"
    )
    URL_API_CELERY = os.environ.get("URL_API_CELERY") or "http://localhost:5004"
    READ_TIMEOUT = float(os.environ.get("READ_TIMEOUT") or 3.05)
    WRITE_TIMEOUT = float(os.environ.get("WRITE_TIMEOUT") or 9.1)

    @staticmethod
    def init_app(app):
Beispiel #51
0
def create_app(config):
    app = Flask(__name__)
    app.config.from_pyfile(config)

    # Automatically tear down SQLAlchemy.
    '''
    @app.teardown_request
    def shutdown_session(exception=None):
    db_session.remove()
    '''

    # Login required decorator.
    '''
    def login_required(test):
    @wraps(test)
    def wrap(*args, **kwargs):
        if 'logged_in' in session:
        return test(*args, **kwargs)
        else:
        flash('You need to login first.')
        return redirect(url_for('login'))
    return wrap
    '''

    #----------------------------------------------------------------------------#
    # Controllers.
    #----------------------------------------------------------------------------#


    @app.route('/')
    def index():
        return render_template('pages/index.html')

    @app.route('/login')
    def login():
        return render_template('pages/login.html')


    @app.route('/register')
    def register():
        form = RegisterForm(request.form)
        return render_template('forms/register.html', form=form)


    @app.route('/forgot')
    def forgot():
        form = ForgotForm(request.form)
        return render_template('forms/forgot.html', form=form)

    # Error handlers.


    @app.errorhandler(500)
    def internal_error(error):
        #db_session.rollback()
        return render_template('errors/500.html'), 500

    @app.errorhandler(404)
    def not_found_error(error):
        return render_template('errors/404.html'), 404

    if not app.debug:
        file_handler = FileHandler('error.log')
        file_handler.setFormatter(
            Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
        )
        app.logger.setLevel(logging.INFO)
        file_handler.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)
        app.logger.info('errors')

    return app
Beispiel #52
0
from flask import (Flask, request, render_template, send_from_directory,
                   url_for, jsonify)
from werkzeug.utils import secure_filename
import os, time, json, glob, sys
from pprint import pprint
from collections import OrderedDict

import plagiarism

basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)

from logging import Formatter, FileHandler
handler = FileHandler(os.path.join(basedir, 'log.txt'), encoding='utf8')
handler.setFormatter(
    Formatter("[%(asctime)s] %(levelname)-8s %(message)s",
              "%Y-%m-%d %H:%M:%S"))
app.logger.addHandler(handler)

app.config['ALLOWED_EXTENSIONS'] = set(
    ['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])


@app.route('/')
def index():
    return render_template('home.html')


@app.route('/fileUpload', methods=['POST'])
def fileUpload():
    if request.method == 'POST':
Beispiel #53
0
    def create_file_handler(fn):
        # get_console_format() is assumed to be defined elsewhere in this
        # module; it supplies the format string shared with the console.
        h = FileHandler(filename=fn)
        h.setFormatter(Formatter(get_console_format()))
        return h
Beispiel #54
0
            artifact_resp = GET(artifact_url, _LONGTIME)
            if artifact_resp.status_code != 200:
                raise IOError('Bad response from CircleCI: HTTP {}'.format(artifact_resp.status_code))
            content = artifact_resp.content
            mimetype = artifact_resp.headers.get('Content-Type', '')
    except IOError as err:
        return make_response(render_template('error-runtime.html', error=err, codes=err_codes), 500)
    
    return Response(content, headers={'Content-Type': mimetype, 'Cache-Control': 'no-store private'})

@app.route('/<path:path>')
@errors_logged
@handle_redirects
def all_other_paths(path):
    '''
    '''
    return u'¯\\_(ツ)_/¯'

if environ.get('app-logfile', None):
    handler = FileHandler(environ['app-logfile'])
    handler.setFormatter(Formatter('%(process)05s %(asctime)s %(levelname)06s: %(message)s'))

else:
    handler = StreamHandler()
    handler.setFormatter(Formatter('%(process)05s %(levelname)06s: %(message)s'))

getLogger('precog').addHandler(handler)

if __name__ == '__main__':
    app.run('localhost', debug=True)
Beispiel #55
0
    return render_template('pages/search_shows.html',
                           results=response,
                           search_term=search_term)


@app.errorhandler(404)
def not_found_error(error):
    return render_template('errors/404.html'), 404


@app.errorhandler(500)
def server_error(error):
    return render_template('errors/500.html'), 500


if not app.debug:
    f = '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
    file_handler = FileHandler('error.log')
    file_handler.setFormatter(Formatter(f))
    app.logger.setLevel(logging.INFO)
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.info('errors')

# ----------------------------------------------------------------------------#
# Launch.
# ----------------------------------------------------------------------------#

if __name__ == '__main__':
    app.run()
Beispiel #56
0
def main():
    parser = argparse.ArgumentParser(description='argparse example.')
    parser.add_argument('input', help="Root directory", metavar='<input>')
    parser.add_argument('output', help="Output directory", metavar='<output>')
    parser.add_argument('--ext',
                        help='File extension. default: %(default)s',
                        metavar='str',
                        default='.mha')
    parser.add_argument(
        '--prefix',
        help='Prefix of the output filename. default: %(default)s',
        metavar='str',
        default='SE')
    parser.add_argument('--compress',
                        help='Compress the output image. default: %(default)s',
                        type=str,
                        choices=['auto', 'true', 'false'],
                        default='auto')
    parser.add_argument('--offset',
                        help='Offset to the number. default: %(default)s',
                        type=int,
                        metavar='int',
                        default=1)
    parser.add_argument('--logdir',
                        help='Directory to store logs. default: %(default)s',
                        metavar='str',
                        default=None)
    parser.add_argument('--verbose',
                        help='Verbosity. default: %(default)s',
                        type=int,
                        metavar='level',
                        default=0)

    args = parser.parse_args()

    logger.setLevel(verbosity_to_level(args.verbose))
    if args.logdir is not None:
        logdir = Path(args.logdir)
        logdir.mkdir(parents=True, exist_ok=True)
        handler = FileHandler(
            logdir /
            '{}.log'.format(datetime.today().strftime("%y%m%d_%H%M%S")))
        handler.setLevel(verbosity_to_level(args.verbose))
        handler.setFormatter(log_format)
        logger.addHandler(handler)

    root_dir = Path(args.input)
    out_dir = Path(args.output)

    compression = {'auto': None, 'true': True, 'false': False}[args.compress]
    dtype = None
    prefix = args.prefix
    ext = args.ext
    offset = args.offset

    logger.info('Collect dicom information')
    all_files = [
        str(e) for e in tqdm.tqdm(root_dir.glob('**/*'), desc='list all files')
        if e.is_file()
    ]

    key_tags = [
        'PatientID', 'SeriesInstanceUID', 'AcquisitionDate', 'AcquisitionTime',
        'ImageOrientationPatient', 'ImagePositionPatient'
    ]
    dcm_files = []
    for fn in tqdm.tqdm(all_files):
        try:
            dcm = pydicom.dcmread(fn, stop_before_pixels=True)
            dcm_files.append([fn] + [dcm.get(tag) for tag in key_tags])
        except Exception as e:
            logger.warning({'filename': fn, 'exception': e})

    df = pd.DataFrame(dcm_files, columns=['filepath'] + key_tags)

    logger.info('Convert dicom files')

    def sort_dicom(df):
        orientation = np.array(df['ImageOrientationPatient'].iloc[0]).reshape(
            (2, 3))
        third_axis = np.cross(orientation[0], orientation[1])
        locs = df['ImagePositionPatient'].map(lambda p: np.dot(third_axis, p))
        sorted_index = np.argsort(locs)
        return df.iloc[sorted_index]

    FLOAT_TYPES = set([
        sitk.sitkFloat32, sitk.sitkFloat64, sitk.sitkVectorFloat32,
        sitk.sitkVectorFloat64
    ])

    for patient_id, df_patient in df.groupby('PatientID'):
        logger.info(patient_id)
        sids, times = [], []
        for series_id, df_series in df_patient.groupby('SeriesInstanceUID'):
            sids.append(series_id)
            dts = df_series.apply(
                lambda row: DT(row.AcquisitionDate + row.AcquisitionTime),
                axis=1).tolist()
            if len(df_series) <= 2:
                times.append(dts[0])
            else:
                dts.sort()
                times.append(dts[len(dts) // 2])
        nums = np.argsort(np.argsort(times))
        series_id2series_number = dict(zip(sids, nums))

        for series_id, df_series in df_patient.groupby('SeriesInstanceUID'):
            logger.debug(series_id)
            output_filename = out_dir / patient_id / (prefix + '{:d}'.format(
                series_id2series_number[series_id] + offset) + ext)
            output_filename.parent.mkdir(parents=True, exist_ok=True)
            filenames = sort_dicom(df_series)['filepath'].tolist()
            reader = sitk.ImageSeriesReader()
            reader.SetFileNames(filenames)
            image = reader.Execute()
            if image.GetPixelID() == sitk.sitkFloat64 and dtype is None:
                f = sitk.CastImageFilter()
                f.SetOutputPixelType(sitk.sitkFloat32)
                image = f.Execute(image)
            writer = sitk.ImageFileWriter()
            if compression is None:
                compression = image.GetPixelID() not in FLOAT_TYPES
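                # In 'auto' mode this decision is therefore made once, by the
                # first series processed, and reused for every later series.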
            writer.SetUseCompression(compression)
            writer.SetFileName(str(output_filename))
            writer.Execute(image)

    logger.info('End')
Beispiel #57
0
import logging
from logging import FileHandler
from logging import Formatter

import useful_paths

LOG_LEVEL = "INFO"
LOG_FORMAT = ("%(asctime)s [%(levelname)s]: %(message)s")

# ====================== CLUSTERING ALGO LOGGER ======================
# Log with the clustering algo specific information
CLUSTERING_LOG_FILE = useful_paths.FILE_LOG_CLUSTERING
clustering_logger = logging.getLogger("thesis.clusteringlog")

clustering_logger.setLevel(LOG_LEVEL)
clustering_file_handler = FileHandler(CLUSTERING_LOG_FILE)
clustering_file_handler.setLevel(LOG_LEVEL)
clustering_file_handler.setFormatter(Formatter(LOG_FORMAT))
clustering_logger.addHandler(clustering_file_handler)
clustering_logger.propagate = False


# ====================== COORDINATOR ALGO LOGGER ======================
# Log with the coordinator algo specific information
COORDINATOR_LOG_FILE = useful_paths.FILE_LOG_COORDINATOR
coordinator_logger = logging.getLogger("thesis.coordinatorlog")

coordinator_logger.setLevel(LOG_LEVEL)
coordinator_file_handler = FileHandler(COORDINATOR_LOG_FILE)
coordinator_file_handler.setLevel(LOG_LEVEL)
coordinator_file_handler.setFormatter(Formatter(LOG_FORMAT))
coordinator_logger.addHandler(coordinator_file_handler)
coordinator_logger.propagate = False
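
# Usage sketch (message text illustrative): each logger writes only to its
# own file, e.g. clustering_logger.info("clustering pass finished").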
Beispiel #58
0
import logging
from logging import Formatter, FileHandler, StreamHandler
import os
from os import path

from flask import Flask

app_path = os.environ['APP_PATH']
app = Flask(__name__,
            static_folder=path.join(app_path, 'static'),
            template_folder=path.join(app_path, 'templates'))

app.config.from_pyfile(path.join(app_path, 'config.defaults'))
app.config.from_pyfile(path.join(app_path, 'config.local'), silent=True)

app.secret_key = app.config['SESSION_KEY']

formatter = Formatter('[%(asctime)s] %(levelname)s: %(message)s')
app.logger.setLevel(logging.DEBUG)
if app.config['LOG_FILE']:
    filelog = FileHandler(app.config['LOG_FILE'])
    filelog.setLevel(logging.WARNING)
    filelog.setFormatter(formatter)
    app.logger.addHandler(filelog)
if app.config['LOG_CONSOLE']:
    console = StreamHandler()
    console.setLevel(logging.DEBUG)
    console.setFormatter(formatter)
    app.logger.addHandler(console)
Beispiel #59
0
    def __init__(self, args):
        # Meh...
        working_dir = args.project_dir
        project_name = args.service
        threads = args.threads
        # /Meh...

        self.args = args
        self.name = project_name
        self.threads = threads
        self.working_dir = os.path.join(working_dir, self.name)
        self.acquisition_dir = os.path.join(self.working_dir, "acquisition")

        if os.path.exists(self.working_dir):
            IO.put("Resuming project in " + self.working_dir, "highlight")
        else:
            os.makedirs(self.working_dir, exist_ok=True)
            IO.put("Initializing project in " + self.working_dir, "highlight")

        self.project_folders["data"] = os.path.join(self.acquisition_dir,
                                                    "data")
        self.project_folders["logs"] = os.path.join(self.working_dir, "logs")
        self.project_folders["metadata"] = os.path.join(
            self.acquisition_dir, "metadata")
        #self.project_folders["trash"] = os.path.join(self.acquisition_dir, "trash")
        #self.project_folders["trash_metadata"] = os.path.join(self.acquisition_dir, "trash_metadata")

        self.config_file = os.path.join(self.working_dir, "config.cfg")

        for f in self.project_folders:
            IO.put("{} path is {}".format(f, self.project_folders[f]))
            if not os.path.exists(self.project_folders[f]):
                IO.put("{} directory not found, creating from scratch.",
                       "warn")
                os.makedirs(self.project_folders[f], exist_ok=True)

        IO.put("Config file is " + self.config_file)

        if not os.path.isfile(self.config_file):
            IO.put("Config file not found, creating default config file",
                   "warn")
            with open(self.config_file, 'w') as f:
                f.write(DefaultConfigs.defaults)

        self.config = ConfigLoader.ConfigLoader()
        self.config.from_file(self.config_file)

        self.transaction_log = os.path.join(self.project_folders["logs"],
                                            "transaction.log")
        self.exception_log = os.path.join(self.project_folders["logs"],
                                          "exception.log")

        self.transaction_logger = logging.getLogger(project_name + "_t")
        self.exception_logger = logging.getLogger(project_name + "_e")

        self.transaction_logger.setLevel(logging.INFO)
        self.exception_logger.setLevel(logging.INFO)

        tfh = FileHandler(self.transaction_log)
        efh = FileHandler(self.exception_log)

        fmt = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

        fmt.converter = time.gmtime
        tfh.setFormatter(fmt)
        efh.setFormatter(fmt)

        self.transaction_logger.addHandler(tfh)
        self.exception_logger.addHandler(efh)
Beispiel #60
0
"""Set up logging for Yeti."""

import logging
import os

from logging import FileHandler
from logging import Formatter
from core.config.config import yeti_config

LOG_FORMAT = (
    "%(asctime)s [%(levelname)s]: %(message)s")
LOG_LEVEL = logging.INFO

# user logger
USER_LOG_FILE = yeti_config.get('logging', 'filename')
# Fall back to tmp if the logging directory does not exist
if not os.path.isdir(os.path.dirname(USER_LOG_FILE)):
    USER_LOG_FILE = '/tmp/yeti.log'


userLogger = logging.getLogger("userLogger.messaging")
userLogger.setLevel(LOG_LEVEL)
userLogger_file_handler = FileHandler(USER_LOG_FILE)
userLogger_file_handler.setLevel(LOG_LEVEL)
userLogger_file_handler.setFormatter(Formatter(LOG_FORMAT))
userLogger.addHandler(userLogger_file_handler)
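
# Usage sketch (illustrative): other modules fetch this logger by name, e.g.
#
#     from logging import getLogger
#     logger = getLogger("userLogger.messaging")
#     logger.info("user event recorded")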