Beispiel #1
0
def setup_logfile(filename, level=DEBUG, maxlevel=None):
    """Attach a UTF-8 file handler for *filename* to the root logger.

    Repeated calls with the same filename are no-ops.  If *maxlevel* is
    given, records above that level are filtered out.  Returns the new
    handler, or None when the file was already registered.
    """
    if filename in logfiles:
        return
    logfiles.add(filename)

    root = logging.getLogger()

    file_handler = FileHandler(filename, encoding='utf-8')
    add_common_filters(file_handler)
    file_handler.setLevel(level)
    file_handler.setFormatter(LogFormatter(
        fmt="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
        output_markers=(START_MARKER, END_MARKER)))

    if maxlevel:
        file_handler.addFilter(MaxFilter(maxlevel))

    root.addHandler(file_handler)
    # Keep the root logger at least as verbose as this handler.
    root.setLevel(min(root.level, level))

    # Re-emit any records buffered before file logging was configured.
    if cache_handler is not None:
        cache_handler.replay(file_handler)

    return file_handler
Beispiel #2
0
def addHandler(handler=None, stream=None, filename=None, filemode='a',
               format=None, datefmt=None, level=None, max_level=None,
               filters=(), logger=None):
    """Create (or reuse) a handler, configure it, and attach it to a logger.

       stream, filename, filemode, format, datefmt: as per logging.basicConfig

       handler: use a precreated handler instead of creating a new one
       logger: logger to add the handler to (uses root logger if none specified)
       filters: an iterable of filters to add to the handler
       level: only messages of this level and above will be processed
       max_level: only messages of this level and below will be processed

       Returns the fully configured handler.
    """
    # Create the handler if one hasn't been passed in
    if handler is None:
        if filename is not None:
            handler = FileHandler(filename, filemode)
        else:
            handler = StreamHandler(stream)
    # Set up the formatting of the log messages
    # New API, so it can default to str.format instead of %-formatting
    formatter = Formatter(format, datefmt)
    handler.setFormatter(formatter)
    # Set up filtering of which messages to handle
    if level is not None:
        handler.setLevel(level)
    if max_level is not None:
        handler.addFilter(LowPassFilter(max_level))
    # 'filt' rather than 'filter' so the builtin is not shadowed
    for filt in filters:
        handler.addFilter(filt)
    # Add the fully configured handler to the specified logger
    if logger is None:
        logger = getLogger()
    logger.addHandler(handler)
    return handler
Beispiel #3
0
def get_app(static=None, testing=False):
    """
    factory for app

    static: an already-built app to return as-is (short-circuits creation)
    testing: when True, skips registering the teardown_request hook
    """
    if static:
        #static var
        return static
    
    app =  Flask(__name__)
    init_app(app)
    # NOTE(review): this assigns to the *local* parameter only, so it does not
    # persist across calls -- the intended singleton caching is a no-op.
    static = app
    
    #debug mode static file serving
    if app.config['DEBUG']:
        from werkzeug.wsgi import SharedDataMiddleware
        import os
        app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {
          '/': os.path.join(os.path.dirname(__file__), 'static')
        })
        
    #email logger
    # (actually a plain file logger: INFO-and-above records go to LOG_FILE)
    if not app.debug:
        import logging
        from logging import FileHandler
        file_handler = FileHandler(LOG_FILE)
        file_handler.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)
    
    if not testing:
        # Close the SQLAlchemy session at the end of every request.
        @app.teardown_request
        def shutdown_session(exception=None):
            from db import db
            db.session.remove()
    
    return app
Beispiel #4
0
def create_app(config_name):
    """Application factory: build and wire up a Flask app for *config_name*."""
    application = Flask(__name__)
    application.config.from_object(config[config_name])

    # Every DEBUG-and-above record goes to errors.log with source location.
    error_log = FileHandler('errors.log')
    error_log.setLevel(logging.DEBUG)
    error_log.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s '
        '[in %(pathname)s:%(lineno)d]'
    ))
    application.logger.addHandler(error_log)
    application.logger.info('logging set up')

    initialize_blueprints(application)
    application.logger.info('blueprints initialized')

    initialize_api_resources(application)
    application.logger.info('api resources initialized')

    db.init_app(application)
    application.logger.info('db initialized')

    # Flask-Admin: one ModelView per managed model.
    admin.init_app(application)
    admin.name = 'GreenDeploy'
    admin.template_mode = 'bootstrap3'
    for model in (Project, DriveDestination, BuildHistory):
        admin.add_view(ModelView(model, db.session))

    application.logger.info('admin initialized')

    return application
Beispiel #5
0
    def register_local_log(self, path, level=None, purge_buffer=True):
        """Add logging to a local log-file and return its file descriptor.

        The shinken logging wrapper can write to a local file if needed
        and return the file descriptor so we can avoid to close it.

        The file will be rotated once a day (unless *path* is not a regular
        file, in which case a plain FileHandler is used).

        path: target log file path
        level: optional handler level; logger default applies when None
        purge_buffer: when True, replay records buffered before setup
        """
        self.log_set = True
        # Todo : Create a config var for backup count
        if os.path.exists(path) and not stat.S_ISREG(os.stat(path).st_mode):
            # We don't have a regular file here. Rotate may fail
            # It can be one of the stat.S_IS* (FIFO? CHR?)
            handler = FileHandler(path)
        else:
            handler = TimedRotatingFileHandler(path, 'midnight', backupCount=5)
        if level is not None:
            handler.setLevel(level)
        # Named loggers get the formatter that includes the logger name.
        if self.name is not None:
            handler.setFormatter(defaultFormatter_named)
        else:
            handler.setFormatter(defaultFormatter)
        self.addHandler(handler)

        # Ok now unstack all previous logs
        if purge_buffer:
            self._destack()

        # Todo : Do we need this now we use logging?
        return handler.stream.fileno()
def __setup_logging(app):
    """Attach a UTF-8 file handler to *app*'s logger.

    LOG_DIR may be absolute or relative to the package directory; the log
    directory and file are created when missing.  LOG_LEVEL defaults to
    logging.INFO.
    """
    log_level = app.config.get('LOG_LEVEL', logging.INFO)

    # Resolve the log directory (relative paths are rooted at the package dir).
    if os.path.isabs(app.config['LOG_DIR']):
        log_dir_path = app.config['LOG_DIR']
    else:
        here = os.path.dirname(os.path.abspath(__file__))
        log_dir_path = os.path.join(
            os.path.dirname(here), app.config['LOG_DIR'])

    # Bug fix: the file path used to be built with string concatenation
    # (log_dir_path + LOG_FILE), which breaks whenever LOG_DIR has no
    # trailing separator; use os.path.join instead.
    log_file_path = os.path.join(log_dir_path, app.config['LOG_FILE'])

    if not os.path.isdir(log_dir_path):
        os.makedirs(log_dir_path, mode=app.config['LOG_FILE_MODE'])

    # Touch the file so the handler can open it.
    if not os.path.isfile(log_file_path):
        open(log_file_path, 'a').close()

    log_file_handler = FileHandler(filename=log_file_path, encoding='utf-8')
    log_file_handler.setLevel(log_level)
    log_file_handler.setFormatter(Formatter(
        '[%(asctime)s] [%(levelname)s] %(message)s %(module)s:%(funcName)s:%(lineno)d'
    ))

    app.logger.addHandler(log_file_handler)
    app.logger.setLevel(log_level)
Beispiel #7
0
def add_disk_handler(prefix, level=logging.NOTSET):
    """
    Enable typical logging to disk.

    Picks the first unused "<prefix>.<N>" path, attaches a UTF-8 file
    handler to the root logger, and returns the handler.
    """

    # generate an unused log file path
    from os.path import lexists
    from itertools import count

    for suffix in count():
        path = "%s.%i" % (prefix, suffix)

        if not lexists(path):
            break

    # build and register the handler
    from cargo.temporal import utc_now

    disk_handler = FileHandler(path, encoding="utf-8")
    disk_handler.setLevel(level)
    disk_handler.setFormatter(VerboseFileFormatter())

    logging.root.addHandler(disk_handler)

    log.debug("added log handler for file %s at %s", path, utc_now())

    return disk_handler
Beispiel #8
0
class CaptureLog:
    """Context to capture log from a specific logger and write it to a file

    Parameters
    ----------
    filename : str
        Where to write the log file.
    mode : str
        Mode for opening the log file (default 'w').
    logger : str
        Name of the logger from which to capture (default 'mne').
    level : str
        Name of the minimum level of messages to capture (default 'debug').
    """
    def __init__(self, filename, mode='w', logger='mne', level='debug'):
        self.logger = logger
        # log_level() (project helper) converts the level name to a number
        self.level = log_level(level)
        self.handler = FileHandler(filename, mode)
        self.handler.setLevel(self.level)
        self.handler.setFormatter(Formatter("%(levelname)-8s :%(message)s"))
        # Previous level of the target logger; restored on exit (None = unchanged)
        self._old_level = None

    def __enter__(self):
        # Attach the file handler; if the logger would filter out our level
        # (unset, or set higher), temporarily lower it.
        logger = getLogger(self.logger)
        logger.addHandler(self.handler)
        if logger.level == 0 or logger.level > self.level:
            self._old_level = logger.level
            logger.setLevel(self.level)
        return logger

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close the file and restore the logger's previous configuration.
        self.handler.close()
        logger = getLogger(self.logger)
        logger.removeHandler(self.handler)
        if self._old_level is not None:
            logger.setLevel(self._old_level)
Beispiel #9
0
def setup_logger(app_name):
    """ Instantiate a logger object

        Usage:
            logger = setup_logger('foo')     # saved as foo.log
            logger.info("Some info message")
            logger.warn("Some warning message")
            logger.error("Some error message")
            ... [for more options see: http://docs.python.org/2/library/logging.html]
    """
    shared_format = Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # File handler: everything down to DEBUG goes to <app_name>.log.
    file_handler = FileHandler(app_name + '.log')
    file_handler.setLevel(DEBUG)
    file_handler.setFormatter(shared_format)

    # Console handler: only ERROR and above reach the terminal.
    console_handler = StreamHandler()
    console_handler.setLevel(ERROR)
    console_handler.setFormatter(shared_format)

    logger = getLogger(app_name)
    logger.setLevel(DEBUG)
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
    return logger
Beispiel #10
0
def create_app(config_name):
    """Application factory: configure the module-global Flask app in place."""
    global app
    app.config.from_object(config[config_name])

    # Setup database
    # Currently inits mongoDB
    init_db(app)

    # Todo make intializing blueprints consistent
    app.register_blueprint(bp_index)
    app.register_blueprint(bp_auth)
    app.register_blueprint(bp_timer)
    app.register_blueprint(v1_api)
    init_api(app)

    # Mirror INFO-and-above records into a local file.
    file_handler = FileHandler("flask.log")
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)

    config[config_name].init_app(app)
    #
    init_flask_security(app)

    # NOTE(review): 'mail' is created but never used or returned -- confirm
    # whether the Flask-Mail instance needs to be kept anywhere.
    mail = Mail(app)

    return app
    def start(self):
        """Set up DEBUG logging to a unique file plus a shared file, then run.

        Creates a logger named self.name with two handlers: a per-instance
        file (self.uniquefile, truncated) and a shared handler obtained via
        self.getLogHandler(self.sharedfile), then immediately calls
        self.run() on the current thread.
        """
        from logging import getLogger, FileHandler, Formatter, DEBUG
        self.log = getLogger(self.name)
        self.log.setLevel(DEBUG)

        formatter = Formatter(
            '%(asctime)s [%(process)d:%(threadName)s] %(levelname)-8s %(name)s:  %(message)s')
        # Unique log handler (single file)
        handler = FileHandler(self.uniquefile, "w")
        handler.setLevel(DEBUG)
        handler.setFormatter(formatter)
        self.log.addHandler(handler)

        # If you suspect that the diff stuff isn't working, un comment the next
        # line.  You should see this show up once per-process.
        # self.log.info("Here is a line that should only be in the first output.")

        # Setup output used for testing
        handler = self.getLogHandler(self.sharedfile)
        handler.setLevel(DEBUG)
        handler.setFormatter(formatter)
        self.log.addHandler(handler)

        # If this ever becomes a real "Thread", then remove this line:
        self.run()
Beispiel #12
0
 def create_app(self):
     # Wrap the module-level Flask app with Twill for functional testing.
     self.twill = Twill(app)        
     # Log everything at DEBUG level into 'loggingFile'.
     file_handler = FileHandler('loggingFile')
     file_handler.setLevel(logging.DEBUG)
     app.logger.addHandler(file_handler)
     app.logger.debug('WTFCAKES')
     return app
Beispiel #13
0
class Logging(logging.FileHandler):
    """Records login/logout events through the Flask app logger.

    NOTE(review): several interlocked defects to confirm before fixing:
      - __init__ is decorated with @classmethod, so Logging(...) receives the
        class itself as 'self' and stores state on the class, not instances;
        the later classmethods rely on that class-level 'application'.
      - the class subclasses logging.FileHandler but never calls its
        __init__, so instances are not usable as handlers themselves.
      - each create_* call attaches a brand-new FileHandler to the app
        logger, so repeated calls duplicate every log line.
      - the trailing backslash after 'app' is a stray line continuation.
    """

    @classmethod
    def __init__(self,user_connect_=None):

        self.user_connect = user_connect_
        self.application = app\


    @classmethod
    def create_login_info(self,user,time,url):
        # Attach a fresh DEBUG file handler and log the login event.
        self.logging = FileHandler('icollect_info.log')
        self.logging.setLevel(logging.DEBUG)
        self.logging.setFormatter(Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
        self.application.logger.addHandler(self.logging)
        create_dict_to_loggin_info = dict({'user_connect':user,'time':time,'url':url})
        self.application.logger.info('Info LogIn' + ":" + str(create_dict_to_loggin_info))

    @classmethod
    def create_logout_info(self,user,time):
        # Attach a fresh DEBUG file handler and log the logout event.
        self.logging = FileHandler('icollect_info.log')
        self.logging.setLevel(logging.DEBUG)
        self.logging.setFormatter(Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
        self.application.logger.addHandler(self.logging)
        create_dict_to_loggin_info = dict({'user_connect':user,'time':time})
        self.application.logger.info('Info Logout' + ":" + str(create_dict_to_loggin_info))
Beispiel #14
0
def start_logging():
    """Configure LMFDB logging.

    Creates a WARNING-level file handler (kept in the global file_handler),
    optionally raises a 'logfocus' module logger to DEBUG, and installs a
    console handler on the root logger.
    """
    global logfocus, file_handler
    from lmfdb.utils.config import Configuration
    config = Configuration()
    logging_options = config.get_logging()

    # NOTE(review): file_handler is created and stored in a global but never
    # attached to any logger here -- confirm whether a caller adds it.
    file_handler = FileHandler(logging_options['logfile'])
    file_handler.setLevel(WARNING)

    if 'logfocus' in logging_options:
        logfocus = logging_options['logfocus']
        getLogger(logfocus).setLevel(DEBUG)

    root_logger = getLogger()
    root_logger.setLevel(INFO)
    root_logger.name = "LMFDB"

    # Use only the prefix of the project format string (up to the first '[').
    formatter = Formatter(LmfdbFormatter.fmtString.split(r'[')[0])
    ch = StreamHandler()
    ch.setFormatter(formatter)
    root_logger.addHandler(ch)

    cfg = config.get_all()
    # Bug fix: the old condition was `if "postgresql_options" and "password"
    # in cfg[...]`, whose first operand is a constant truthy string (and the
    # subscript raised KeyError when the section was absent).  Check key
    # membership properly so the password is masked only when present.
    if "postgresql_options" in cfg and "password" in cfg["postgresql_options"]:
        cfg["postgresql_options"]["password"] = "******"
    info("Configuration = {}".format(cfg) )
    check_sage_version()
Beispiel #15
0
def root_doc(args, l, rc):
    """Serve the library documentation UI (Python 2 script).

    Configures the ambry Flask app, logs WARNING-and-above records to
    <doc-cache>/web.log, opens a browser (unless --debug) and then blocks
    in app.run().
    """

    from ambry.ui import app, configure_application, setup_logging
    import ambry.ui.views as views
    import os

    import logging
    from logging import FileHandler
    import webbrowser

    # Default port when none was given on the command line.
    port = args.port if args.port else 8085

    cache_dir = l._doc_cache.path('', missing_ok=True)

    config = configure_application(dict(port=port))

    file_handler = FileHandler(os.path.join(cache_dir, "web.log"))
    file_handler.setLevel(logging.WARNING)
    app.logger.addHandler(file_handler)

    print 'Serving documentation for cache: ', cache_dir

    if not args.debug:
        # Don't open the browser on debugging, or it will re-open on every
        # application reload
        webbrowser.open("http://localhost:{}/".format(port))

    app.run(host=config['host'], port=int(port), debug=args.debug)
Beispiel #16
0
def create_app(config_name):
    """Application factory: configure extensions, logging and blueprints."""
    app.config.from_object(config[config_name])
    db.init_app(app)
    login_manager.init_app(app)
    login_manager.session_protection = 'strong'
    login_manager.login_view = 'admin.login'

    # Outside debug mode, write DEBUG-and-above records to the log file.
    # NOTE(review): the handler path comes from Constant.LOG_DIR -- confirm
    # it is a file path, not a directory, despite the name.
    if not app.debug:
        import logging
        from logging import FileHandler, Formatter

        file_handler = FileHandler(Constant.LOG_DIR, encoding='utf8')
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(Formatter(
            '[%(asctime)s] %(levelname)s: %(message)s '
            '[in %(pathname)s:%(lineno)d]'))
        app.logger.addHandler(file_handler)

    from main import main as main_blueprint
    app.register_blueprint(main_blueprint)

    from admin import admin as admin_blueprint
    app.register_blueprint(admin_blueprint, url_prefix='/admin')

    # Cap the upload size and register the upload sets.
    patch_request_class(app, size=16*1024*1024) # 16MB
    configure_uploads(app, resource_uploader)

    return app
Beispiel #17
0
def init_logging():
    """Route the telemetry rx/tx loggers to timestamped per-run log files."""
    # Attach a NullHandler to the root logger so records that propagate up do
    # not trigger logging's "no handler could be found" fallback output.
    # (Bug fix: the old code bound addHandler()'s None return value to a
    # misleading 'root' variable.)
    getLogger().addHandler(logging.NullHandler())

    # Get the loggers used in pytelemetry.telemetry.telemetry file
    rx = getLogger("telemetry.rx")
    tx = getLogger("telemetry.tx")
    rx.setLevel(logging.DEBUG)
    tx.setLevel(logging.DEBUG)

    # Format how data will be .. formatted
    formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s')

    # Timestamped file handlers so each run logs to fresh files
    dateTag = datetime.datetime.now().strftime("%Y-%b-%d_%H-%M-%S")
    in_handler = FileHandler('in-%s.log' % dateTag)
    in_handler.setLevel(logging.DEBUG) # Also pass all messages
    in_handler.setFormatter(formatter)

    out_handler = FileHandler('out-%s.log' % dateTag)
    out_handler.setLevel(logging.DEBUG) # Also pass all messages
    out_handler.setFormatter(formatter)

    # Attach each handler to its logger
    rx.addHandler(in_handler)
    tx.addHandler(out_handler)
Beispiel #18
0
def init_app_logger(app):
    """Attach a pipe-delimited, INFO-level file log ('flask.log') to *app*."""
    handler = FileHandler('flask.log')
    handler.setLevel(logging.INFO)
    handler.setFormatter(Formatter(
        '%(asctime)s|%(levelname)s|%(pathname)s:%(lineno)d|%(message)s'
    ))
    app.logger.addHandler(handler)
Beispiel #19
0
def configure_logging(app, filename):
    """Configure app to log to that file (UTF-8, INFO and above)."""
    file_handler = FileHandler(filename, encoding='utf-8')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(Formatter('%(asctime)s %(funcName)s %(message)s'))
    app.logger.addHandler(file_handler)
Beispiel #20
0
    def actionWork(self, *args, **kwargs):
        """Performing the set of actions

        Runs each work unit from self.getWorks() in sequence, feeding each
        unit's output into the next one.  (Python 2 'except X, name' syntax.)
        """
        nextinput = args

        #set the logger to save the tasklog
        # NOTE(review): 'formatter' is built but never set on the handler.
        formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(module)s:%(message)s")
        taskdirname = "logs/tasks/%s/" % self._task['tm_username']
        if not os.path.isdir(taskdirname):
            os.mkdir(taskdirname)
        # One DEBUG log file per task, attached for the whole run.
        taskhandler = FileHandler(taskdirname + self._task['tm_taskname'] + '.log')
        taskhandler.setLevel(logging.DEBUG)
        self.logger.addHandler(taskhandler)

        for work in self.getWorks():
            self.logger.debug("Starting %s on %s" % (str(work), self._task['tm_taskname']))
            t0 = time.time()
            try:
                output = work.execute(nextinput, task=self._task)
            except StopHandler, sh:
                msg = "Controlled stop of handler for %s on %s " % (self._task, str(sh))
                self.logger.error(msg)
                nextinput = Result(task=self._task, result='StopHandler exception received, controlled stop')
                break #exit normally. Worker will not notice there was an error
            except TaskWorkerException, twe:
                self.logger.debug(str(traceback.format_exc())) #print the stacktrace only in debug mode
                raise WorkerHandlerException(str(twe)) #TaskWorker error, do not add traceback to the error propagated to the REST
Beispiel #21
0
def configure_loggers(log, verbosity, log_file, log_verbosity):
    """Set up console logging on the root logger and file logging on *log*.

    log: logger that receives the file handlers
    verbosity: level for the root/console logger
    log_file: path of the log file; when falsy, no file handlers are added
    log_verbosity: level for the plain (non-rotating) file handler
    Returns *log*.
    """
    LOGFMT_CONSOLE = (
        "[%(asctime)s] %(name)-10s %(levelname)-7s in %(module)s.%(funcName)s()," " line %(lineno)d\n\t%(message)s"
    )

    LOGFMT_FILE = (
        "[%(asctime)s] [%(process)d]%(name)-10s %(levelname)-7s in %(module)s.%(funcName)s(),"
        " line %(lineno)d\n\t%(message)s"
    )

    # Configure root logger to log to stdout
    logging.basicConfig(level=verbosity, datefmt="%H:%M:%S", format=LOGFMT_CONSOLE)

    if log_file:
        # Bug fix: the rotating handler used to be created unconditionally,
        # crashing when log_file was empty/None; guard it like the plain one.
        rh = RotatingFileHandler(log_file, maxBytes=100000, backupCount=25)
        log.addHandler(rh)

        # NOTE(review): both handlers write to the same path, so every record
        # is emitted twice -- confirm whether one of them should be dropped.
        fh = FileHandler(log_file, "w")
        fh.setFormatter(Formatter(LOGFMT_FILE, "%Y-%m-%d %H:%M:%S"))
        fh.setLevel(log_verbosity)
        log.addHandler(fh)

    return log
Beispiel #22
0
def setup_logging():
    """Attach a JSON-style INFO file log to the Flask app and, outside debug
    mode, an SMTP handler that mails server errors to all administrators."""
    log_formatter = Formatter(
        '''{"message_type":"%(levelname)s","location":"%(pathname)s","line_number":%(lineno)d,"module":"%(module)s","function":"%(funcName)s","time":"%(asctime)s","message":"%(message)s"}''')  # pylint: disable=C0301
    fh = FileHandler('flask_logs.log')
    fh.setLevel(INFO)
    fh.setFormatter(log_formatter)
    application.logger.addHandler(fh)
    application.logger.setLevel(INFO)
    if not application.debug:
        from application.models import Administrator
        dbadmins = Administrator.query.all()
        # NOTE(review): query.all() returns a list, never None, so this check
        # is always true -- an empty admin table still installs the mail
        # handler with an empty recipient list.
        if dbadmins is not None:
            emails = [dbadmin.email for dbadmin in dbadmins]
            emailErrorHandler = TlsSMTPHandler(
                ('smtp.gmail.com', 587),
                '*****@*****.**',
                emails,
                'Server Error',
                ('*****@*****.**', 'ANRISNTPTV')
            )
            emailErrorHandler.setFormatter(Formatter(
                '''
    Message type:       %(levelname)s
    Location:           %(pathname)s:%(lineno)d
    Module:             %(module)s
    Function:           %(funcName)s
    Time:               %(asctime)s

    Message:

    %(message)s
    '''))
            application.logger.addHandler(emailErrorHandler)
Beispiel #23
0
def init_app(app, name=''):
    """
    Configures the provided app's logger.

    :param app: the application object to configure the logger
    :param name: the name of the logger to create and configure
    """

    # flask app object automatically registers its own debug logger if
    # app.debug is True. Remove it becuase debug logging is handled here
    # instead.
    del app.logger.handlers[:]

    cfg = app.config
    log_path = cfg['LOG_PATH']
    log_level = cfg['LOG_LEVEL'] or ''

    # Log to a file when a path is configured, otherwise to a stream.
    if log_path:
        handler = FileHandler(log_path)
    else:
        handler = StreamHandler()
    handler.setLevel(log_level.upper() or ('DEBUG' if app.debug else 'WARNING'))  # noqa
    handler.addFilter(MultiNameFilter(cfg['LOG_FILTER'], cfg['LOG_IGNORE']))
    # Timestamps: time-only in debug, full date with UTC offset otherwise.
    datefmt = '%H:%M:%S' if app.debug else '%Y-%m-%d %H:%M:%S%z'
    handler.setFormatter(Formatter(
        '%(asctime)s %(process)s %(thread)-15s %(name)-10s %(levelname)-8s %(message)s',  # noqa
        datefmt))

    logger = getLogger(name)
    logger.setLevel(handler.level)
    logger.addHandler(handler)
Beispiel #24
0
def log_server(level, queue, filename, mode='w'):
    """Run the logging server.

    This listens to the queue of log messages, and handles them using Python's
    logging handlers.  It prints to stderr, as well as to a specified file, if
    it is given.

    """
    formatter = _get_formatter()

    stream_handler = StreamHandler()
    stream_handler.setFormatter(formatter)
    stream_handler.setLevel(level)
    handlers = [stream_handler]

    if filename:
        file_handler = FileHandler(filename, mode)
        file_handler.setFormatter(formatter)
        file_handler.setLevel(level)
        handlers.append(file_handler)

    listener = QueueListener(queue, *handlers)
    listener.start()

    # For some reason, queuelisteners run on a separate thread, so now we just
    # "busy wait" until terminated.
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    finally:
        listener.stop()
Beispiel #25
0
def get_logger(log_name = 'default.log', log_level = DEBUG):
    """Build a logger with a WARN console handler and a file handler.

    log_name: log file name (also used to derive the logger's name)
    log_level: level for the file handler (the console stays at WARN)
    The returned logger also carries a .die(msg) helper that logs the
    message as an error and raises AssertionError.
    """
    fmt = Formatter('[%(asctime)s][%(process)d][%(filename)s:%(lineno)s][%(levelname)s]: %(message)s')

    # Unique logger name per call so repeated setup never shares state.
    logger = getLogger('%s.%s' % (log_name.replace('/', '.'),
                                                time.time()))
    logger.handlers = []
    logger.setLevel(DEBUG)
    logger.propagate = False

    console_handler = StreamHandler()
    console_handler.setFormatter(fmt)
    console_handler.setLevel(WARN)
    logger.addHandler(console_handler)

    # The log file lives next to the running script.
    script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
    file_handler = FileHandler(filename = os.path.join(script_dir, log_name), mode='a')
    file_handler.setFormatter(fmt)
    file_handler.setLevel(log_level)
    logger.addHandler(file_handler)

    def _logger_die(logger, msg):
        logger.error(msg)
        raise AssertionError(msg)

    logger.die = lambda msg: _logger_die(logger, msg)

    return logger
Beispiel #26
0
        def setHandler(logger, lvl, path, _format):
            """
            Set right handler related to input lvl, path and format.

            :param Logger logger: logger on which add an handler.
            :param str lvl: logging level.
            :param str path: file path.
            :param str _format: logging message format.
            """

            class _Filter(Filter):
                """Ensure message will be given for specific lvl"""

                def filter(self, record):
                    return record.levelname == lvl

            # Build a file handler that accepts exactly this one level.
            new_handler = FileHandler(path)
            new_handler.addFilter(_Filter())
            new_handler.setLevel(lvl)
            new_handler.setFormatter(Formatter(_format))

            # Replace any handler previously registered for this level
            # (the handler is remembered as an attribute named after lvl).
            old_handler = getattr(logger, lvl, None)
            if old_handler is not None:
                logger.removeHandler(old_handler)

            logger.addHandler(new_handler)
            setattr(logger, lvl, new_handler)
Beispiel #27
0
def configureOutput(logger_p, path='./Log.txt'):
    """
    Configure *logger_p* with a DEBUG file handler and an INFO console
    handler.  Does nothing if the logger already has handlers, so it is
    safe to call repeatedly.
    """
    if logger_p.handlers:
        # already configured -- leave everything untouched
        return

    # Let everything pass by default; the handlers do the level filtering.
    logger_p.setLevel(logging.DEBUG)

    # File handler: records every message, including DEBUG, to *path*.
    file_handler = FileHandler(path, mode = 'w')
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(Formatter('%(levelname)-8s : %(message)s'))
    logger_p.addHandler(file_handler)

    # Console handler: INFO and above only.
    console_handler = StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(Formatter('%(message)s'))
    logger_p.addHandler(console_handler)
Beispiel #28
0
def _setup_task_logger(logger):
    """Configure a task logger to generate site- and task-specific logs."""
    if logger.handlers:  # Already processed
        return

    # Logger names look like "a.b.<site>.<task>"; bail out on anything shorter.
    parts = logger.name.split(".")
    if len(parts) < 4:  # Malformed
        return
    site, task = parts[2], parts[3]

    _ensure_dirs(os.path.join(_log_dir, site))

    base = os.path.join(_log_dir, site, task)

    # INFO and up, rotated nightly, keeping 30 days of history.
    info_handler = TimedRotatingFileHandler(base + ".log", "midnight", 1, 30)
    info_handler.setLevel("INFO")

    # Full DEBUG output, truncated on every run.
    debug_handler = FileHandler(base + ".log.verbose", "w")
    debug_handler.setLevel("DEBUG")

    # WARNING and up, size-rotated at 1 MiB with four backups.
    error_handler = RotatingFileHandler(base + ".err", maxBytes=1024**2,
                                        backupCount=4)
    error_handler.setLevel("WARNING")

    shared_formatter = Formatter(
        fmt="[%(asctime)s %(levelname)-7s] %(message)s",
        datefmt=_DATE_FORMAT)
    for handler in (info_handler, debug_handler, error_handler):
        handler.setFormatter(shared_formatter)
        logger.addHandler(handler)
Beispiel #29
0
    def actionWork(self, *args, **kwargs):
        """Performing the set of actions

        Runs each work unit from self.getWorks() in sequence, feeding each
        unit's output into the next.  A per-task DEBUG log file is attached
        for the duration of the run and the logfile is uploaded to the
        crabcache after every unit.
        """
        nextinput = args

        #set the logger to save the tasklog
        # NOTE(review): 'formatter' is built but never set on the handler.
        formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(module)s:%(message)s")
        taskdirname = "logs/tasks/%s/" % self._task['tm_username']
        if not os.path.isdir(taskdirname):
            os.mkdir(taskdirname)
        # One DEBUG log file per task; removed again before returning.
        taskhandler = FileHandler(taskdirname + self._task['tm_taskname'] + '.log')
        taskhandler.setLevel(logging.DEBUG)
        self.logger.addHandler(taskhandler)

        for work in self.getWorks():
            self.logger.debug("Starting %s on %s" % (str(work), self._task['tm_taskname']))
            t0 = time.time()
            try:
                output = work.execute(nextinput, task=self._task)
            except StopHandler as sh:
                msg = "Controlled stop of handler for %s on %s " % (self._task, str(sh))
                self.logger.error(msg)
                nextinput = Result(task=self._task, result='StopHandler exception received, controlled stop')
                break #exit normally. Worker will not notice there was an error
            except TaskWorkerException as twe:
                self.logger.debug(str(traceback.format_exc())) #print the stacktrace only in debug mode
                self.removeTaskLogHandler(taskhandler)
                raise WorkerHandlerException(str(twe)) #TaskWorker error, do not add traceback to the error propagated to the REST
            except Exception as exc:
                msg = "Problem handling %s because of %s failure, traceback follows\n" % (self._task['tm_taskname'], str(exc))
                msg += str(traceback.format_exc())
                self.logger.error(msg)
                self.removeTaskLogHandler(taskhandler)
                raise WorkerHandlerException(msg) #Errors not foreseen. Print everything!
            finally:
                #upload logfile of the task to the crabcache
                logpath = 'logs/tasks/%s/%s.log' % (self._task['tm_username'], self._task['tm_taskname'])
                if os.path.isfile(logpath) and 'user_proxy' in self._task: #the user proxy might not be there if myproxy retrieval failed
                    cacheurldict = {'endpoint': self._task['tm_cache_url'], 'cert' : self._task['user_proxy'], 'key' : self._task['user_proxy']}
                    try:
                        ufc = UserFileCache(cacheurldict)
                        logfilename = self._task['tm_taskname'] + '_TaskWorker.log'
                        ufc.uploadLog(logpath, logfilename)
                    except HTTPException as hte:
                        msg = ("Failed to upload the logfile to %s for task %s. More details in the http headers and body:\n%s\n%s" %
                               (self._task['tm_cache_url'], self._task['tm_taskname'], hte.headers, hte.result))
                        self.logger.error(msg)
                    except Exception as e:
                        msg = "Unknown error while uploading the logfile for task %s" % self._task['tm_taskname']
                        self.logger.exception(msg)
            t1 = time.time()
            self.logger.info("Finished %s on %s in %d seconds" % (str(work), self._task['tm_taskname'], t1-t0))
            # The work unit's result (if any) becomes the next unit's input.
            try:
                nextinput = output.result
            except AttributeError:
                nextinput = output

        self.removeTaskLogHandler(taskhandler)

        return nextinput
Beispiel #30
0
        endTime = unix_time_millis(datetime.datetime.now())
        totalTime = endTime - startTime
        response_data['vthResponse']['testDuration'] = totalTime
        response_data['vthResponse']['abstractMessage'] = 'error: ' + str(ex)
        app.logger.error('ERROR:{}'.format(str(ex)))
        return jsonify(response_data)

    #finish up building response
    endTime = unix_time_millis(datetime.datetime.now())
    totalTime = endTime - startTime
    response_data['vthResponse']['testDuration'] = totalTime
    if ret_url is not None:
        sendCallback(ret_url, response_data)
        return '', 200
    return jsonify(response_data), 200


@app.route("/otf/vth/oran/smo/v1/health", methods=['GET'])
def getHealth():
    # Liveness probe endpoint: always reports the service as up.
    return 'UP'


if __name__ == '__main__':
    # Append INFO-and-above app records to smo-o1-vth.log, then serve.
    logHandler = FileHandler('smo-o1-vth.log', mode='a')
    logHandler.setLevel(logging.INFO)
    app.logger.setLevel(logging.INFO)
    app.logger.addHandler(logHandler)
    # context = ('opt/cert/otf.pem', 'opt/cert/privateKey.pem')
    # app.run(debug = False, host = '0.0.0.0', port = 5000, ssl_context = context)
    app.run(debug=False, host='0.0.0.0', port=5000)
Beispiel #31
0
def note_and_log(cls):
    """
    Class decorator that activates logging for *cls* and stores messages in
    the variable cls._notes, allowing quick access to events in the web app.

    A note can be added to cls._notes without logging by passing log=False
    to note(); something can be logged without adding a note using log().
    """
    # Resolve handler levels from the optional DEBUG_LEVEL class attribute.
    # BUG FIX: unknown DEBUG_LEVEL values previously left file_level /
    # console_level unbound (UnboundLocalError); they now fall back to the
    # same defaults used when the attribute is absent.
    file_level = logging.WARNING
    console_level = logging.INFO
    debug_setting = getattr(cls, "DEBUG_LEVEL", None)
    if debug_setting == "debug":
        file_level = logging.DEBUG
        console_level = logging.DEBUG
    elif debug_setting == "info":
        file_level = logging.INFO
        console_level = logging.INFO

    # Notes storage: a namedtuple class used as a simple record holder with
    # two parallel lists (timestamps and note texts).
    cls._notes = namedtuple("_notes", ["timestamp", "notes"])
    cls._notes.timestamp = []
    cls._notes.notes = []

    # Per-class logger under the BAC0_Root hierarchy.
    cls.logname = "{} | {}".format(cls.__module__, cls.__name__)
    cls._log = logging.getLogger("BAC0_Root.{}.{}".format(
        cls.__module__, cls.__name__))

    # Logger itself stays wide open; filtering is done per-handler.
    cls._log.setLevel(logging.DEBUG)

    # Console handlers: stderr only receives CRITICAL, stdout receives the
    # configured console level.
    ch = logging.StreamHandler(sys.stderr)
    ch.set_name("stderr")
    ch.setLevel(logging.CRITICAL)

    ch2 = logging.StreamHandler(sys.stdout)
    ch2.set_name("stdout")
    ch2.setLevel(console_level)

    formatter = logging.Formatter("{asctime} - {levelname:<8}| {message}",
                                  style="{")

    # File handler: written to ~/.BAC0/BAC0.log when the directory can be
    # created; otherwise file logging is silently disabled.
    _PERMISSION_TO_WRITE = True
    logUserPath = expanduser("~")
    logSaveFilePath = join(logUserPath, ".BAC0")

    logFile = join(logSaveFilePath, "BAC0.log")
    if not os.path.exists(logSaveFilePath):
        try:
            os.makedirs(logSaveFilePath)
        except OSError:
            # Narrowed from a bare except: only directory-creation failures
            # should turn off file logging.
            _PERMISSION_TO_WRITE = False
    if _PERMISSION_TO_WRITE:
        fh = FileHandler(logFile)
        fh.set_name("file_handler")
        fh.setLevel(file_level)
        fh.setFormatter(formatter)

    ch.setFormatter(formatter)
    ch2.setFormatter(formatter)
    # Add handlers the first time only...
    if not len(cls._log.handlers):
        if _PERMISSION_TO_WRITE:
            cls._log.addHandler(fh)
        cls._log.addHandler(ch)
        cls._log.addHandler(ch2)

    LogList.LOGGERS.append(cls._log)

    def log_title(self, title, args=None, width=35):
        """Emit a '#'-framed title block at DEBUG level."""
        cls._log.debug("")
        cls._log.debug("#" * width)
        cls._log.debug("# {}".format(title))
        cls._log.debug("#" * width)
        if args:
            cls._log.debug("{!r}".format(args))
            cls._log.debug("#" * 35)

    def log_subtitle(self, subtitle, args=None, width=35):
        """Emit an '='-framed subtitle block at DEBUG level."""
        cls._log.debug("")
        cls._log.debug("=" * width)
        cls._log.debug("{}".format(subtitle))
        cls._log.debug("=" * width)
        if args:
            cls._log.debug("{!r}".format(args))
            cls._log.debug("=" * width)

    def log(self, note, *, level=logging.DEBUG):
        """
        Add a log entry...no note
        """
        if not note:
            raise ValueError("Provide something to log")
        note = "{} | {}".format(cls.logname, note)
        cls._log.log(level, note)

    def note(self, note, *, level=logging.INFO, log=True):
        """
        Add note to the object. By default, the note will also
        be logged
        :param note: (str) The note itself
        :param level: (logging.level)
        :param log: (boolean) Enable or disable logging of note
        """
        if not note:
            raise ValueError("Provide something to log")
        note = "{} | {}".format(cls.logname, note)
        cls._notes.timestamp.append(datetime.now())
        cls._notes.notes.append(note)
        if log:
            # BUG FIX: this used to call cls.log(level, note), which bound
            # the level to log()'s first positional ('self'-like) parameter
            # and logged at the default level; log on the logger directly.
            cls._log.log(level, note)

    @property
    def notes(self):
        """
        Retrieve notes list as a Pandas Series
        """
        if not _PANDAS:
            return dict(zip(self._notes.timestamp, self._notes.notes))
        return pd.Series(self._notes.notes, index=self._notes.timestamp)

    def clear_notes(self):
        """
        Clear notes object
        """
        cls._notes.timestamp = []
        cls._notes.notes = []

    # Add the functions to the decorated class
    cls.clear_notes = clear_notes
    cls.note = note
    cls.notes = notes
    cls.log = log
    cls.log_title = log_title
    cls.log_subtitle = log_subtitle
    return cls
Beispiel #32
0
import logging
import sys
from logging import FileHandler
from logging import Formatter

# Bare-message format: the log files contain only what was logged.
LOG_FORMAT = ("%(message)s")
LOG_LEVEL = logging.INFO

# results ("resultados") logger — file named after the first CLI argument
RESULTADOS_LOG_FILE = str(sys.argv[1]) + ".log"

resultados_logger = logging.getLogger(str(sys.argv[1]))
resultados_logger.setLevel(LOG_LEVEL)
resultados_logger_file_handler = FileHandler(RESULTADOS_LOG_FILE)
resultados_logger_file_handler.setLevel(LOG_LEVEL)
resultados_logger_file_handler.setFormatter(Formatter(LOG_FORMAT))
resultados_logger.addHandler(resultados_logger_file_handler)

# round-progress ("progresso rodadas") logger
PROGRESSO_LOG_FILE = str(sys.argv[1]) + "_progresso_rodadas.log"
progresso_logger = logging.getLogger(str(sys.argv[1]) + "_progresso_rodadas")

progresso_logger.setLevel(LOG_LEVEL)
progresso_file_handler = FileHandler(PROGRESSO_LOG_FILE)
progresso_file_handler.setLevel(LOG_LEVEL)
progresso_file_handler.setFormatter(Formatter(LOG_FORMAT))
progresso_logger.addHandler(progresso_file_handler)

# phase 1 ("1 fase") results log file
RESULTADOS_1FASE_LOG_FILE = str(sys.argv[1]) + "_1fase.log"
Beispiel #33
0
import time

import sqlite3
from tqdm import tqdm

# Constants
종목별매수상한 = 1000000  # per-stock purchase cap: one million won
매수수수료비율 = 0.00015  # buy fee rate — applied to the average price when selling
매도수수료비율 = 0.00015 + 0.003  # sell fee (incl. tax) rate — applied to the current price when selling
화면번호 = "1234"  # Kiwoom API screen number


# Log file handler (logs/debug.log next to this module)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
fh_log = FileHandler(os.path.join(BASE_DIR, 'logs/debug.log'), encoding='utf-8')
fh_log.setLevel(logging.DEBUG)

# Create the logger and register the handler
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(fh_log)


class SyncRequestDecorator:
	"""키움 API 비동기 함수 데코레이터
	"""

	@staticmethod
	def kiwoom_sync_request(func):
		def func_wrapper(self, *args, **kwargs):
			if kwargs.get('nPrevNext', 0) == 0:
Beispiel #34
0
import pandas as pd

from urllib.error import HTTPError
from urllib.parse import quote
from urllib.parse import urlencode

# Debug logger that mirrors every message to the console and to error.log.
logger = getLogger('dev-debug')
# BUG FIX: the format string used to be split with a backslash continuation
# *inside* the string literal, which embedded the next line's indentation
# (a run of spaces) into every log line; it is now one clean literal.
formatter = Formatter(
    '%(asctime)s [%(levelname)s] [%(filename)s: %(funcName)s: %(lineno)d] %(message)s')
handlerSh = StreamHandler()
handlerFile = FileHandler('error.log')
handlerSh.setFormatter(formatter)
handlerSh.setLevel(DEBUG)
handlerFile.setFormatter(formatter)
handlerFile.setLevel(DEBUG)
logger.setLevel(DEBUG)
logger.addHandler(handlerSh)
logger.addHandler(handlerFile)
logger.debug('log start')

# Yelp Fusion no longer uses OAuth as of December 7, 2017.
# You no longer need to provide Client ID to fetch Data
# It now uses private keys to authenticate requests (API Key)
# You can find it on
# https://www.yelp.com/developers/v3/manage_app
API_KEY = os.environ.get("YELP_API_KEY")

# API constants, you shouldn't have to change these.
API_HOST = 'https://api.yelp.com'
SEARCH_PATH = '/v3/businesses/search'
Beispiel #35
0
from spider.CFSpider import CFSpider
from spider.HDUSpider import HDUSpider
from spider.POJSpider import POJSpider
from spider.ZOJSpider import ZOJSpider
from spider.UVASpider import UVASpider
from spider.BNUSpider import BNUSpider
from spider.BCSpider import BCSpider
from spider.VJSpider import VJSpider
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy import or_

app = Flask(__name__)
app.config.from_pyfile('config.py')
db.init_app(app)
# Record ERROR-level events, with their source location, to the spider log.
file_handler = FileHandler(app.root_path + "/log/spider_errors.log")
file_handler.setLevel(logging.ERROR)
file_handler.setFormatter(
    Formatter('%(asctime)s %(levelname)s: %(message)s '
              '[in %(pathname)s:%(lineno)d]'))
app.logger.addHandler(file_handler)
# Lock shared by the spider threads; reentrant so a thread may re-acquire it.
spider_lock = RLock()


class AccountUpdateServer(Thread):
    def __init__(self, oj_name):
        Thread.__init__(self)
        self.oj_name = oj_name
        self.spider_module_name = sys.modules['spider.' +
                                              self.oj_name.upper() + 'Spider']
        self.spider_class_name = oj_name.upper() + 'Spider'
        self.spider = getattr(self.spider_module_name,
from flask import Flask, session, redirect, url_for, flash
from logging import FileHandler, WARNING
from functools import wraps

from isbnlib import is_isbn13, to_isbn10, is_isbn10
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker

# Set up app
app = Flask(__name__)

# Error Log: in production (debug off), write WARNING and above to a file.
if not app.debug:
    f_handler = FileHandler('error_log.txt')
    f_handler.setLevel(WARNING)
    app.logger.addHandler(f_handler)

# Change in production — hard-coded secret is for development only.
app.secret_key = 'secret'

# Check for environment variable
if not os.getenv("DATABASE_URL"):
    raise RuntimeError("DATABASE_URL is not set")

# Configure session to use filesystem
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"

# Set up database
engine = create_engine(os.getenv("DATABASE_URL"))
# Best-result score tables, one column per position (LEN columns).
# NOTE(review): presumably 20 rows = amino acids, 12 rows = nucleotide
# codes — confirm against the rest of the module.
best_aa = pd.DataFrame(np.zeros(shape=(20, LEN))) * 1.0
best_nt = pd.DataFrame(np.zeros(shape=(12, LEN))) * 1.0
# ----- initialize best results ------------

# ----- initialize logger ------------
logger = getLogger("IUPAC")
logger.setLevel(logging.DEBUG)
handler_format = Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

# Everything (DEBUG and above) goes both to the console...
stream_handler = StreamHandler()
stream_handler.setLevel(logging.DEBUG)
stream_handler.setFormatter(handler_format)

# ...and appended to iupac.log under OUTDIR.
file_handler = FileHandler(OUTDIR + 'iupac.log', 'a')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(handler_format)

logger.addHandler(stream_handler)
logger.addHandler(file_handler)

# ----- initialize logger ------------
#read csv
def readcsv(infile):
    df = pd.read_csv(infile, index_col=0, header=None)
    return df


def find_best_matches():
Beispiel #38
0
from logging import Logger, FileHandler, Formatter, WARN, INFO, DEBUG

# Preserve the imported logging level before the application flag below
# reuses (and clobbers) the name DEBUG.
_DEBUG_LEVEL = DEBUG

# Application-wide debug flag — NOT a logging level.
DEBUG = False

### logger config
logger = Logger('cls')
WARN_LOGGER = "/tmp/cls_warn.log"
INFO_LOGGER = "/tmp/cls_info.log"
DEBUG_LOGGER = "/tmp/cls_debug.log"
formatter = Formatter("%(asctime)s - %(levelname)s - %(message)s")

filewarnhandler = FileHandler(WARN_LOGGER, 'a')
filewarnhandler.setLevel(WARN)
filewarnhandler.setFormatter(formatter)

fileinfohandler = FileHandler(INFO_LOGGER, 'a')
fileinfohandler.setLevel(INFO)
fileinfohandler.setFormatter(formatter)

filedebughandler = FileHandler(DEBUG_LOGGER, 'a')
# BUG FIX: this previously read the module flag DEBUG (False == 0 == NOTSET)
# because the flag shadows the imported logging level; use the saved level.
filedebughandler.setLevel(_DEBUG_LEVEL)
filedebughandler.setFormatter(formatter)

logger.addHandler(filewarnhandler)
logger.addHandler(fileinfohandler)
logger.addHandler(filedebughandler)

# origin_warning = logger.warning
#
#
# def my_warning(*args, **kwargs):
Beispiel #39
0
@app.errorhandler(404)
def not_found_error(error):
    """Render the custom 404 page for unknown URLs."""
    page = render_template('errors/404.html')
    return page, 404

@app.errorhandler(500)
def server_error(error):
    """Render the custom 500 page for unhandled server errors."""
    page = render_template('errors/500.html')
    return page, 500


# In production (debug off), record INFO-and-above messages to error.log.
if not app.debug:
    file_handler = FileHandler('error.log')
    file_handler.setFormatter(
        Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
    )
    app.logger.setLevel(logging.INFO)
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.info('errors')

#----------------------------------------------------------------------------#
# Launch.
#----------------------------------------------------------------------------#

# Default port:
if __name__ == '__main__':
    # Run the Flask development server on its default host/port.
    app.run()

# Or specify port manually:
'''
if __name__ == '__main__':
    port = int(os.environ.get('PORT', 5000))
Beispiel #40
0
    # Test text extraction
    test_extract_text_from_news(la_republica)

    # Test filtering
    test_filter_news_by_keywords(la_republica)

    del la_republica


if __name__ == "__main__":
    # Configure logger: oddcrawler needsd to be the top logger
    logger = getLogger('oddcrawler')
    logger.setLevel(DEBUG)
    # create file file handler
    fh = FileHandler('extractor_test.log')
    fh.setLevel(DEBUG)
    # create console handler
    ch = StreamHandler()
    ch.setLevel(ERROR)
    # create formatter and add it to handlers
    formatter = Formatter('%(levelname)s %(asctime)-15s %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    logger.addHandler(fh)
    logger.addHandler(ch)

    complete_test_for_la_republica()
    complete_test_for_monumental()
    complete_test_for_cr_hoy()
    complete_test_for_la_prensa_libre()
Beispiel #41
0
        rate_s,
        rate_v,
        rate_o
    ]

    return list_ret


if __name__ == '__main__':
    from logging import StreamHandler, DEBUG, Formatter, FileHandler

    log_fmt = Formatter(
        '%(asctime)s %(name)s %(lineno)d [%(levelname)s][%(funcName)s] %(message)s '
    )
    # Fresh (mode 'w') DEBUG-level log file for this run.
    handler = FileHandler('svo.py.log', 'w')
    handler.setLevel(DEBUG)
    handler.setFormatter(log_fmt)
    logger.setLevel(DEBUG)
    logger.addHandler(handler)

    # Console handler at INFO; note setLevel accepts the level *name* too.
    # NOTE(review): this second setLevel raises the logger itself to INFO,
    # overriding the DEBUG set just above — confirm that is intended.
    handler = StreamHandler()
    handler.setLevel('INFO')
    handler.setFormatter(log_fmt)
    logger.setLevel('INFO')
    logger.addHandler(handler)
    # Process question pairs in parallel worker processes.
    p = Pool()

    df = pandas.read_csv('../data/train_clean2_rev.csv',
                         usecols=['question1', 'question2']).values

    ret = numpy.array(list(p.map(load_data, df)))
Beispiel #42
0

@app.route('/messages')
@login_required()
def messages_list():
    """Render the message overview page."""
    template = 'messages/messages_list.html'
    return render_template(template, active_nav='messages')


@app.route('/messages/<message_id>')
@login_required()
def message_detail(message_id):
    """Render the detail page for one message (message_id is not used in rendering here)."""
    template = 'messages/message_detail.html'
    return render_template(template, active_nav='messages')


userClass = user.User(app.config)
courseClass = course.Course(app.config)

# In production (debug off), log WARNING and above to the configured file.
if not app.config['DEBUG']:
    import logging
    from logging import FileHandler
    file_handler = FileHandler(app.config['LOG_FILE'])
    file_handler.setLevel(logging.WARNING)
    # BUG FIX: the handler was created and leveled but never attached, so
    # nothing was ever written to LOG_FILE.
    app.logger.addHandler(file_handler)

if __name__ == '__main__':
    # Serve on all interfaces; port from the environment (default 8080).
    app.run(host="0.0.0.0",
            port=int(os.environ.get("PORT", 8080)),
            debug=app.config['DEBUG'],
            threaded=True)
Beispiel #43
0
def create_app(config):
    """Application factory: build and configure a Flask app from *config*.

    Registers the page routes, the 404/500 error handlers, and (when not in
    debug mode) a file-based error log, then returns the app instance.
    """
    app = Flask(__name__)
    app.config.from_pyfile(config)

    # Automatically tear down SQLAlchemy.
    '''
    @app.teardown_request
    def shutdown_session(exception=None):
    db_session.remove()
    '''

    # Login required decorator.
    '''
    def login_required(test):
    @wraps(test)
    def wrap(*args, **kwargs):
        if 'logged_in' in session:
        return test(*args, **kwargs)
        else:
        flash('You need to login first.')
        return redirect(url_for('login'))
    return wrap
    '''

    #----------------------------------------------------------------------------#
    # Controllers.
    #----------------------------------------------------------------------------#


    @app.route('/')
    def index():
        return render_template('pages/index.html')

    @app.route('/login')
    def login():
        return render_template('pages/login.html')


    @app.route('/register')
    def register():
        form = RegisterForm(request.form)
        return render_template('forms/register.html', form=form)


    @app.route('/forgot')
    def forgot():
        form = ForgotForm(request.form)
        return render_template('forms/forgot.html', form=form)

    # Error handlers.


    @app.errorhandler(500)
    def internal_error(error):
        #db_session.rollback()
        return render_template('errors/500.html'), 500

    @app.errorhandler(404)
    def not_found_error(error):
        return render_template('errors/404.html'), 404

    # In production (debug off), record INFO-and-above messages to error.log.
    if not app.debug:
        file_handler = FileHandler('error.log')
        file_handler.setFormatter(
            Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
        )
        app.logger.setLevel(logging.INFO)
        file_handler.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)
        app.logger.info('errors')

    return app
Beispiel #44
0
import warnings
warnings.simplefilter('ignore')

utils.start(__file__)
#==============================================================================
# Logger
#==============================================================================
from logging import getLogger, FileHandler, Formatter, DEBUG
logger = getLogger(__name__)
logger.setLevel(DEBUG)

# Date-stamped log file, e.g. logs/log_20190131 (dashes stripped).
file_handler = FileHandler(os.path.join('logs', 'log_{}'.format(str(datetime.datetime.today().date()).replace('-', ''))))
formatter = Formatter('%(message)s')
file_handler.setFormatter(formatter)
file_handler.setLevel(DEBUG)

logger.addHandler(file_handler)
# Keep messages out of the root logger's handlers (no duplicate output).
logger.propagate = False

#==============================================================================
PATH = os.path.join('..', 'data')

# Join key used across the data files.
KEY = 'card_id'

SEED = 18
# SEED = np.random.randint(9999)

NTHREAD = cpu_count()

NFOLD = 11
Beispiel #45
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from project import app
from logging import FileHandler, WARNING

# Attach a WARNING-level file log to the Flask app's logger.
file_handler = FileHandler('errorlog.txt')
file_handler.setLevel(WARNING)
app.logger.addHandler(file_handler)

if __name__ == '__main__':
    # Serve on all interfaces; port taken from the environment (default 8080).
    port = int(os.environ.get("PORT", 8080))
    app.run('0.0.0.0', port=port)
def parse_args():
    """Parse CLI arguments for the bi-encoder entity-linking run.

    Returns a tuple ``(args, logger)`` where ``logger`` is a DEBUG-level
    file logger when ``--logging`` (and ``--log_file``) are given, else None.
    When ``--mlflow`` is set, every argument is logged as an MLflow param.
    """
    # NOTE: several help strings below were copy-paste artifacts
    # ("whether using inbatch negative" etc.) and have been corrected.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name",
                        type=str,
                        help="bert-name used for biencoder")
    parser.add_argument("--model_path", type=str, help="model save path")
    parser.add_argument("--index_path", type=str, help="index save path")
    parser.add_argument("--load_index",
                        action="store_true",
                        help="load a prebuilt index instead of building one")
    parser.add_argument("--mention_dataset",
                        type=str,
                        help="mention dataset path")
    parser.add_argument("--category", type=str, help="mention category to process")
    parser.add_argument("--candidate_dataset",
                        type=str,
                        help="candidate dataset path")
    parser.add_argument("--candidate_preprocessed",
                        action="store_true",
                        help="whether candidate_dataset is preprocessed")
    parser.add_argument("--builder_gpu",
                        action="store_true",
                        help="build the index on GPU")
    parser.add_argument("--max_ctxt_len",
                        type=int,
                        help="maximum context length")
    parser.add_argument("--max_title_len",
                        type=int,
                        help="maximum title length")
    parser.add_argument("--max_desc_len",
                        type=int,
                        help="maximum description length")
    parser.add_argument("--mlflow",
                        action="store_true",
                        help="log run parameters to MLflow")
    parser.add_argument("--parallel",
                        action="store_true",
                        help="enable parallel execution")
    parser.add_argument("--fp16",
                        action="store_true",
                        help="use fp16 mixed precision")
    parser.add_argument('--fp16_opt_level', type=str, default="O1")
    parser.add_argument("--logging",
                        action="store_true",
                        help="enable logging to a file")
    parser.add_argument("--log_file",
                        type=str,
                        help="path of the log file")

    args = parser.parse_args()

    if args.mlflow:
        mlflow.start_run()
        arg_dict = vars(args)
        for key, value in arg_dict.items():
            mlflow.log_param(key, value)

    logger = None

    if args.logging:
        logger = getLogger(__name__)
        #handler = StreamHandler()

        logger.setLevel(DEBUG)
        #handler.setLevel(DEBUG)
        formatter = Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        #handler.setFormatter(formatter)
        #logger.addHandler(handler)

        if args.log_file:
            fh = FileHandler(filename=args.log_file)
            fh.setLevel(DEBUG)
            fh.setFormatter(formatter)
            logger.addHandler(fh)

    return args, logger
Beispiel #47
0
"""


# Tab-separated format including the source location of each message.
LOG_FORMAT = (
    "%(asctime)s \t [%(levelname)s]: \t %(message)s \t in %(pathname)s:%(lineno)d"
)
LOG_LEVEL = logging.INFO


# canvas logger
CANVAS_LOG_FILE = "ACP/logs/canvas.log"

canvas_logger = logging.getLogger("Canvas")
canvas_logger.setLevel(LOG_LEVEL)
canvas_logger_file_handler = FileHandler(CANVAS_LOG_FILE)
canvas_logger_file_handler.setLevel(LOG_LEVEL)
canvas_logger_file_handler.setFormatter(Formatter(LOG_FORMAT))
canvas_logger.addHandler(canvas_logger_file_handler)

# crf logger

CRF_LOG_FILE = "ACP/logs/crf.log"

crf_logger = logging.getLogger("CRF")
crf_logger.setLevel(LOG_LEVEL)
crf_file_handler = FileHandler(CRF_LOG_FILE)
crf_file_handler.setLevel(LOG_LEVEL)
crf_file_handler.setFormatter(Formatter(LOG_FORMAT))
crf_logger.addHandler(crf_file_handler)

# email logger
Beispiel #48
0
from logging import Logger, FileHandler, Formatter, WARN, INFO

# Application debug flag (not a logging level; the DEBUG logging constant is
# not imported here, so no name clash arises in this snippet).
DEBUG = False

### logger config
logger = Logger('cls')
WARN_LOGGER = "/tmp/cls_warn.log"
INFO_LOGGER = "/tmp/cls_info.log"
formatter = Formatter("%(asctime)s - %(levelname)s - %(message)s")

# WARN and above are appended to the warn log...
filewarnhandler = FileHandler(WARN_LOGGER, 'a')
filewarnhandler.setLevel(WARN)
filewarnhandler.setFormatter(formatter)

# ...and INFO and above to the info log.
fileinfohandler = FileHandler(INFO_LOGGER, 'a')
fileinfohandler.setLevel(INFO)
fileinfohandler.setFormatter(formatter)

logger.addHandler(filewarnhandler)
logger.addHandler(fileinfohandler)

# origin_warning = logger.warning
#
#
# def my_warning(*args, **kwargs):
#     origin_warning(locals())
#     return origin_warning(*args, **kwargs)
#
#
# logger.warning = my_warning
Beispiel #49
0
    with gzip.open(FEATURE_FOLDER + patient_id + '.pkl.gz', 'rb') as f:
        img = pickle.load(f)
    img = np.array([rotate(im, -10) for im in img])
    #img = rotate(img, -10).astype(np.int8)

    with gzip.open(FEATURE_FOLDER_OUT + patient_id + '.pkl.gz', 'wb') as f:
        pickle.dump(img, f, -1)

    logger.debug('{} img size] {}'.format(patient_id, img.shape))


if __name__ == '__main__':
    from logging import StreamHandler, DEBUG, Formatter, FileHandler

    log_fmt = Formatter(
        '%(asctime)s %(name)s %(lineno)d [%(levelname)s][%(funcName)s] %(message)s '
    )
    # Fresh (mode 'w') DEBUG log file for this run.
    handler = FileHandler('rotate.log', 'w')
    handler.setLevel(DEBUG)
    handler.setFormatter(log_fmt)
    logger.setLevel(DEBUG)
    logger.addHandler(handler)

    # Mirror the same messages to the console.
    handler = StreamHandler()
    handler.setLevel(DEBUG)
    handler.setFormatter(log_fmt)
    logger.setLevel(DEBUG)
    logger.addHandler(handler)

    load_data()
class ConfigRpmMaker(object):

    ERROR_MSG = """
------------------------------------------------------------------------
Your commit has been accepted by the SVN server, but due to the errors
that it contains no RPMs have been created.

See %s/%s.txt for details.

Please fix the issues and trigger the RPM creation with a dummy commit.
------------------------------------------------------------------------
"""

    def __init__(self, revision, svn_service):
        """Set up the build context for one SVN revision: temp dir, a
        per-revision logger, and the queues shared with builder threads."""
        self.revision = revision
        self.svn_service = svn_service
        self.temp_dir = get_temporary_directory()
        self._assure_temp_dir_if_set()
        self._create_logger()
        # Lazily created by _prepare_work_dir() during build().
        self.work_dir = None
        self.host_queue = Queue()
        self.failed_host_queue = Queue()

    def __build_error_msg_and_move_to_public_access(self, revision):
        """Compose the commit-rejection message, publish the error log, clean up, and return the message."""
        error_msg = self.ERROR_MSG % (get_error_log_url(), revision)
        for line in error_msg.split('\n'):
            LOGGER.error(line)
        self._move_error_log_for_public_access()
        self._clean_up_work_dir()
        return error_msg

    def build(self):
        """Build, upload, and publish config rpms for all hosts affected by
        this revision.

        Returns the list of built rpms, or None when no host is affected.
        Known build errors re-raise as-is; unexpected ones are wrapped with
        the full traceback. Both paths publish the error log first.
        """
        LOGGER.info('Working on revision %s', self.revision)
        self.logger.info("Starting with revision %s", self.revision)
        try:
            changed_paths = self.svn_service.get_changed_paths(self.revision)
            available_hosts = self.svn_service.get_hosts(self.revision)

            affected_hosts = list(
                self._get_affected_hosts(changed_paths, available_hosts))
            if not affected_hosts:
                LOGGER.info(
                    "No rpm(s) built. No host affected by change set: %s",
                    str(changed_paths))
                return

            log_elements_of_list(LOGGER.debug, 'Detected %s affected host(s).',
                                 affected_hosts)

            self._prepare_work_dir()
            rpms = self._build_hosts(affected_hosts)
            self._upload_rpms(rpms)
            self._move_configviewer_dirs_to_final_destination(affected_hosts)

        except BaseConfigRpmMakerException as exception:
            # Expected failure: publish the error log, then propagate as-is.
            self.logger.error('Last error during build:\n%s' % str(exception))
            self.__build_error_msg_and_move_to_public_access(self.revision)
            raise exception

        except Exception as exception:
            # Unexpected failure: publish the error log and wrap with the
            # stack trace for the commit message.
            self.logger.exception('Last error during build:')
            error_msg = self.__build_error_msg_and_move_to_public_access(
                self.revision)
            raise Exception(
                'Unexpected error occurred, stacktrace will follow.\n%s\n\n%s'
                % (traceback.format_exc(), error_msg))

        self._clean_up_work_dir()
        return rpms

    def _clean_up_work_dir(self):
        """Remove the working directory and error log, unless the no-clean-up
        option is enabled (then just report where the data lives)."""
        if self._keep_work_dir():
            LOGGER.info(
                'All working data can be found in "{working_directory}"'.
                format(working_directory=self.work_dir))
        else:
            if self.work_dir and exists(self.work_dir):

                if is_verbose_enabled():
                    log_directories_summary(LOGGER.debug, self.work_dir)

                LOGGER.debug('Cleaning up working directory "%s"',
                             self.work_dir)
                rmtree(self.work_dir)

            if exists(self.error_log_file):
                LOGGER.debug('Removing error log "%s"', self.error_log_file)
                remove(self.error_log_file)

    def _keep_work_dir(self):
        # Preserve the working directory when the no-clean-up option is set.
        return is_no_clean_up_enabled()

    def _move_error_log_for_public_access(self):
        """Publish the per-revision error log to the configured public
        directory (as <revision>.txt), creating the directory if needed."""
        # NOTE(review): os.path.join() with a single argument is a no-op here.
        error_log_dir = os.path.join(get_error_log_directory())
        if error_log_dir:
            if not os.path.exists(error_log_dir):
                os.makedirs(error_log_dir)
            shutil.move(self.error_log_file,
                        os.path.join(error_log_dir, self.revision + '.txt'))

    def _read_integer_from_file(self, path):

        with open(path) as file_which_contains_integer:
            integer_from_file = int(file_which_contains_integer.read())

        return integer_from_file

    def _move_configviewer_dirs_to_final_destination(self, hosts):
        """Promote each host's per-revision configviewer directory to its
        final location, unless a newer revision was already published."""
        LOGGER.info("Updating configviewer data.")

        for host in hosts:
            temp_path = build_config_viewer_host_directory(
                host, revision=self.revision)
            dest_path = build_config_viewer_host_directory(host)

            if exists(dest_path):
                path_to_revision_file = join(dest_path, "%s.rev" % host)
                revision_from_file = self._read_integer_from_file(
                    path_to_revision_file)

                # A higher revision on disk means newer data was already
                # published for this host: drop our temp dir, keep theirs.
                if revision_from_file > int(self.revision):
                    LOGGER.debug(
                        'Will not update configviewer data for host "%s" since the current revision file contains revision %d which is higher than %s',
                        host, revision_from_file, self.revision)
                    rmtree(temp_path)
                    continue

                rmtree(dest_path)

            LOGGER.debug('Updating configviewer data for host "%s"', host)
            move(temp_path, dest_path)

    def _notify_that_host_failed(self, host_name, stack_trace):
        """Record one failed host build; once the configured maximum of
        failures is reached, stop feeding new hosts to the workers.

        qsize() is only approximate under concurrency, hence "approximately".
        """
        failure_information = (host_name, stack_trace)
        self.failed_host_queue.put(failure_information)
        approximately_count = self.failed_host_queue.qsize()
        LOGGER.error(
            'Build for host "{host_name}" failed. Approximately {count} builds failed.'
            .format(host_name=host_name, count=approximately_count))

        maximum_allowed_failed_hosts = get_max_failed_hosts()
        if approximately_count >= maximum_allowed_failed_hosts:
            LOGGER.error(
                'Stopping to build more hosts since the maximum of %d failed hosts has been reached'
                % maximum_allowed_failed_hosts)
            # Drain pending hosts so worker threads stop picking up work.
            self.host_queue.queue.clear()

    def _build_hosts(self, hosts):
        """Build config rpms for *hosts* using a pool of BuildHostThread
        workers.

        Returns the list of built rpm paths (None when no hosts are given);
        raises CouldNotBuildSomeRpmsException when any host build failed.
        """
        if not hosts:
            LOGGER.warn('Trying to build rpms for hosts, but no hosts given!')
            return

        for host in hosts:
            self.host_queue.put(host)

        rpm_queue = Queue()
        svn_service_queue = Queue()
        svn_service_queue.put(self.svn_service)

        thread_count = self._get_thread_count(hosts)
        thread_pool = [
            BuildHostThread(
                name='Thread-%d' % i,
                revision=self.revision,
                svn_service_queue=svn_service_queue,
                rpm_queue=rpm_queue,
                notify_that_host_failed=self._notify_that_host_failed,
                host_queue=self.host_queue,
                work_dir=self.work_dir,
                error_logging_handler=self.error_handler)
            for i in range(thread_count)
        ]

        for thread in thread_pool:
            LOGGER.debug('%s: starting ...', thread.name)
            thread.start()

        for thread in thread_pool:
            thread.join()

        failed_hosts = dict(self._consume_queue(self.failed_host_queue))
        if failed_hosts:
            # BUG FIX: dict.iteritems() does not exist on Python 3; items()
            # is equivalent here and also works on Python 2.
            failed_hosts_str = [
                '\n%s:\n\n%s\n\n' % (key, value)
                for (key, value) in failed_hosts.items()
            ]
            raise CouldNotBuildSomeRpmsException(
                "Could not build config rpm for some host(s): %s" %
                '\n'.join(failed_hosts_str))

        LOGGER.info("Finished building configuration rpm(s).")
        built_rpms = self._consume_queue(rpm_queue)
        log_elements_of_list(LOGGER.debug, 'Built %s rpm(s).', built_rpms)

        return built_rpms

    @measure_execution_time
    def _upload_rpms(self, rpms):
        """Upload the built rpms in chunks via the configured shell command.

        Raises CouldNotUploadRpmsException (with captured stdout/stderr) on a
        non-zero exit code; does nothing when no upload command is set.
        """
        rpm_upload_cmd = get_rpm_upload_command()
        chunk_size = self._get_chunk_size(rpms)

        if rpm_upload_cmd:
            LOGGER.info("Uploading %s rpm(s).", len(rpms))
            LOGGER.debug(
                'Uploading rpm(s) using command "%s" and chunk_size "%s"',
                rpm_upload_cmd, chunk_size)

            pos = 0
            while pos < len(rpms):
                # Append one chunk of rpm paths to the upload command line.
                rpm_chunk = rpms[pos:pos + chunk_size]
                cmd = '%s %s' % (rpm_upload_cmd, ' '.join(rpm_chunk))
                process = subprocess.Popen(cmd,
                                           shell=True,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)
                stdout, stderr = process.communicate()
                if process.returncode:
                    error_message = 'Rpm upload failed with exit code %s. Executed command "%s"\n' % (
                        process.returncode, cmd)
                    if stdout:
                        error_message += 'stdout: "%s"\n' % stdout.strip()
                    if stderr:
                        error_message += 'stderr: "%s"\n' % stderr.strip()
                    raise CouldNotUploadRpmsException(error_message)
                pos += chunk_size
        else:
            LOGGER.info(
                "Rpms will not be uploaded since no upload command has been configured."
            )

    def _get_affected_hosts(self, changed_paths, available_host):
        """Collect every host that is affected by at least one changed svn
        path, checked across all segments in OVERLAY_ORDER."""
        affected = set()
        for segment in OVERLAY_ORDER:
            for path in changed_paths:
                affected.update(
                    self._find_matching_hosts(segment, path, available_host))
        return affected

    def _find_matching_hosts(self, segment, svn_path, available_hosts):
        result = []
        for host in available_hosts:
            for path in segment.get_svn_paths(host):
                if svn_path.startswith(path):
                    result.append(host)
                    break

        return result

    def _get_thread_count(self, affected_hosts):
        """Determine how many worker threads to use.

        Reads the configured thread count; a value of 0 (or a value larger
        than the number of affected hosts) is clamped to one thread per
        affected host. Raises ConfigurationException for negative values.
        """
        thread_count = int(get_thread_count())
        if thread_count < 0:
            # Fixed: the original interpolated the function object itself
            # (printing its repr) and had an unbalanced ')' in the message.
            raise ConfigurationException(
                '%s is %s, values <0 are not allowed' %
                (get_thread_count.__name__, thread_count))

        if not thread_count or thread_count > len(affected_hosts):
            if not thread_count:
                reason = 'Configuration property "%s" is %s' % (
                    get_thread_count.__name__, thread_count)
            else:
                # Within the outer condition this is the only other case.
                reason = "More threads available than affected hosts"
            thread_count = len(affected_hosts)
            LOGGER.info("%s: using one thread for each affected host.", reason)
        return thread_count

    def _consume_queue(self, queue):
        items = []

        while not queue.empty():
            item = queue.get()
            queue.task_done()
            items.append(item)

        return items

    def _create_logger(self):
        """Create a dedicated non-propagating file logger that captures only
        ERROR records into a per-revision temporary log file."""
        # NOTE(review): tempfile.mktemp only reserves a name, not the file —
        # there is a small race window before FileHandler opens it; consider
        # tempfile.mkstemp / NamedTemporaryFile. TODO confirm acceptable here.
        self.error_log_file = tempfile.mktemp(dir=get_temporary_directory(),
                                              prefix='yadt-config-rpm-maker.',
                                              suffix='.revision-%s.error.log' %
                                              self.revision)
        self.error_handler = FileHandler(self.error_log_file)
        formatter = Formatter(configuration.LOG_FILE_FORMAT,
                              configuration.LOG_FILE_DATE_FORMAT)
        self.error_handler.setFormatter(formatter)
        # Only ERROR and above end up in the error log file.
        self.error_handler.setLevel(ERROR)

        self.logger = getLogger('fileLogger')
        self.logger.addHandler(self.error_handler)
        # Do not forward records to ancestor loggers (keeps root output clean).
        self.logger.propagate = False

    def _assure_temp_dir_if_set(self):
        if self.temp_dir and not os.path.exists(self.temp_dir):
            os.makedirs(self.temp_dir)

    def _prepare_work_dir(self):
        """Create a fresh temporary working directory containing the standard
        rpmbuild directory layout (RPMS, SPECS, SOURCES, ...)."""
        LOGGER.debug('Preparing working directory "%s"', self.temp_dir)
        self.work_dir = mkdtemp(prefix='yadt-config-rpm-maker.',
                                suffix='.' + self.revision,
                                dir=self.temp_dir)

        self.rpm_build_dir = join(self.work_dir, 'rpmbuild')
        LOGGER.debug('Creating directory structure for rpmbuild in "%s"',
                     self.rpm_build_dir)
        subdirectories = ('tmp', 'RPMS', 'RPMS/x86_64', 'RPMS/noarch',
                          'BUILD', 'BUILDROOT', 'SRPMS', 'SPECS', 'SOURCES')
        for subdirectory in subdirectories:
            target = join(self.rpm_build_dir, subdirectory)
            if not exists(target):
                makedirs(target)

    def _get_chunk_size(self, rpms):
        """Resolve the configured upload chunk size.

        A configured value of 0 (or empty) means "upload everything in a
        single chunk". Raises ConfigurationException for non-integer or
        negative values.
        """
        raw_value = get_rpm_upload_chunk_size()
        try:
            chunk_size = int(raw_value)
        except ValueError:
            raise ConfigurationException(
                'rpm_upload_chunk_size (%s) is not a legal value (should be int)'
                % raw_value)
        if chunk_size < 0:
            raise ConfigurationException(
                "Config param 'rpm_upload_cmd_chunk_size' needs to be greater or equal 0"
            )

        # Falsy (zero) chunk size: use one chunk covering all rpms.
        return chunk_size if chunk_size else len(rpms)
Beispiel #51
0
from flask import Flask, request, Response
import os
from copr_keygen.exceptions import BadRequestException, \
    KeygenServiceBaseException

app = Flask(__name__)
# Load defaults first, then optionally override from the file named by the
# COPR_KEYGEN_CONFIG environment variable (a missing file is tolerated).
app.config.from_object("copr_keygen.default_settings")
app.config.from_envvar("COPR_KEYGEN_CONFIG", silent=True)


# setup logger
# File logging is active in production, or in debug mode when DEBUG_WITH_LOG
# is set; it is silently skipped when LOG_DIR does not exist.
if not app.config["DEBUG"] or app.config["DEBUG_WITH_LOG"]:
    filename = os.path.join(app.config["LOG_DIR"], "main.log")
    if os.path.exists(app.config["LOG_DIR"]):
        # NOTE(review): FileHandler/getLogger are used unqualified here —
        # confirm they are imported elsewhere in this module.
        handler = FileHandler(filename)
        handler.setLevel(app.config["LOG_LEVEL"])
        handler.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s'
            '[%(module)s:%(pathname)s:%(lineno)d]'
            ': %(message)s '
        ))
        logger = getLogger(__name__)
        logger.addHandler(handler)
        logger.setLevel(app.config["LOG_LEVEL"])

# end setup logger


from .logic import create_new_key, user_exists

# Module-level logger used by the request handlers below.
log = logging.getLogger(__name__)
Beispiel #52
0
 def setUpClass(cls):
     """
     Perform class setup before running the testcase
     Remove shared memory files, start vpp and connect the vpp-api
     """
     gc.collect()  # run garbage collection first
     cls.logger = getLogger(cls.__name__)
     # Per-testcase temp dir; its basename doubles as the VPP shm prefix below.
     cls.tempdir = tempfile.mkdtemp(prefix='vpp-unittest-' + cls.__name__ +
                                    '-')
     file_handler = FileHandler("%s/log.txt" % cls.tempdir)
     file_handler.setFormatter(
         Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
                   datefmt="%H:%M:%S"))
     file_handler.setLevel(DEBUG)
     cls.logger.addHandler(file_handler)
     cls.shm_prefix = cls.tempdir.split("/")[-1]
     os.chdir(cls.tempdir)
     cls.logger.info("Temporary dir is %s, shm prefix is %s", cls.tempdir,
                     cls.shm_prefix)
     cls.setUpConstants()
     cls.reset_packet_infos()
     cls._captures = []
     cls._zombie_captures = []
     cls.verbose = 0
     cls.vpp_dead = False
     cls.registry = VppObjectRegistry()
     # need to catch exceptions here because if we raise, then the cleanup
     # doesn't get called and we might end with a zombie vpp
     try:
         cls.run_vpp()
         cls.vpp_stdout_deque = deque()
         cls.vpp_stderr_deque = deque()
         # Background thread pumping VPP's stdout/stderr into the deques.
         cls.pump_thread_stop_flag = Event()
         cls.pump_thread_wakeup_pipe = os.pipe()
         cls.pump_thread = Thread(target=pump_output, args=(cls, ))
         cls.pump_thread.daemon = True
         cls.pump_thread.start()
         cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls)
         if cls.step:
             hook = StepHook(cls)
         else:
             hook = PollHook(cls)
         cls.vapi.register_hook(hook)
         cls.sleep(0.1, "after vpp startup, before initial poll")
         hook.poll_vpp()
         try:
             cls.vapi.connect()
         except:
             if cls.debug_gdbserver:
                 print(
                     colorize(
                         "You're running VPP inside gdbserver but "
                         "VPP-API connection failed, did you forget "
                         "to 'continue' VPP from within gdb?", RED))
             raise
     except:
         # Best-effort shutdown, then re-raise the original exception.
         t, v, tb = sys.exc_info()
         try:
             cls.quit()
         except:
             pass
         # NOTE(review): Python 2 re-raise syntax; under Python 3 this would
         # be 'raise v.with_traceback(tb)'.
         raise t, v, tb
Beispiel #53
0
class MainBase(wx.App):
	def __init__(self):
		"""Initialize the application."""
		super().__init__()

		# Detect the execution environment (frozen exe vs. Python interpreter).
		self.frozen=hasattr(sys,"frozen")

		# Basic initialisation steps.
		self.InitLogger()
		self.LoadSettings()
		# Apply the configured LC_TIME locale; fall back to the system default
		# (and clear the bad value from the config) when it cannot be applied.
		try:
			if self.config["general"]["locale"]!=None:
				locale.setlocale(locale.LC_TIME,self.config["general"]["locale"])
			else:
				locale.setlocale(locale.LC_TIME)
		except:
			locale.setlocale(locale.LC_TIME)
			self.config["general"]["locale"]=""
		self.SetTimeZone()
		self.InitTranslation()
		self.InitSpeech()
		# Warn the user when the log file handler could not be installed.
		if not self.log.hasHandlers():
			simpleDialog.errorDialog(_("ログ機能の初期化に失敗しました。下記のファイルへのアクセスが可能であることを確認してください。") + "\n" + os.path.abspath(constants.LOG_FILE_NAME))

	def InitSpeech(self):
		# Prepare speech output; fall back to the silent backend on failure.
		try:
			self._InitSpeech()
		except OutputError as e:
			self.log.error("Failed to initialize speech output.")
			self.log.error(traceback.format_exc())
			simpleDialog.winDialog(_("音声エンジンエラー"), _("音声読み上げ機能の初期化に失敗したため、読み上げ機能を使用できません。出力先の変更をお試しください。"))
			self.speech = accessible_output2.outputs.nospeech.NoSpeech()

	def _InitSpeech(self):
		# Instantiate the screen-reader output backend selected in the config.
		reader=self.config["speech"]["reader"]
		if(reader=="PCTK"):
			self.log.info("use reader 'PCTalker'")
			self.speech=accessible_output2.outputs.pc_talker.PCTalker()
		elif(reader=="NVDA"):
			self.log.info("use reader 'NVDA'")
			self.speech=accessible_output2.outputs.nvda.NVDA()
		# SAPI4 appears to be buggy, so it is kept disabled.
#		elif(reader=="SAPI4"):
#			self.log.info("use reader 'SAPI4'")
#			self.speech=accessible_output2.outputs.sapi4.Sapi4()
		elif(reader=="SAPI5"):
			self.log.info("use reader 'SAPI5'")
			self.speech=accessible_output2.outputs.sapi5.SAPI5()
		elif(reader=="AUTO"):
			self.log.info("use reader 'AUTO'")
			self.speech=accessible_output2.outputs.auto.Auto()
		elif(reader=="JAWS"):
			self.log.info("use reader 'JAWS'")
			self.speech=accessible_output2.outputs.jaws.Jaws()
		elif(reader=="CLIPBOARD"):
			self.log.info("use reader 'CLIPBOARD'")
			self.speech=accessible_output2.outputs.clipboard.Clipboard()
		elif(reader=="NOSPEECH"):
			self.log.info("use reader 'NOSPEECH'")
			self.speech=accessible_output2.outputs.nospeech.NoSpeech()
		else:
			# Unknown value: reset the setting and use automatic detection.
			self.config.set("speech","reader","AUTO")
			self.log.warning("Setting missed! speech.reader reset to 'AUTO'")
			self.speech=accessible_output2.outputs.auto.Auto()

	def InitLogger(self):
		"""Initialize and prepare the logging facility."""
		try:
			self.hLogHandler=FileHandler(constants.LOG_FILE_NAME, mode="w", encoding="UTF-8")
			self.hLogHandler.setLevel(logging.DEBUG)
			self.hLogFormatter=Formatter("%(name)s - %(levelname)s - %(message)s (%(asctime)s)")
			self.hLogHandler.setFormatter(self.hLogFormatter)
			logger=getLogger(constants.LOG_PREFIX)
			logger.setLevel(logging.DEBUG)
			logger.addHandler(self.hLogHandler)
		except Exception as e:
			# Logging must never prevent startup; __init__ warns later via
			# hasHandlers() when no handler could be attached.
			traceback.print_exc()
		self.log=getLogger(constants.LOG_PREFIX+".Main")
		r="executable" if self.frozen else "interpreter"
		# NOTE(review): missing space after "Starting" in the logged message.
		self.log.info("Starting"+constants.APP_NAME+" "+constants.APP_VERSION+" as %s!" % r)

	def LoadSettings(self):
		"""Load the settings file; if absent, apply defaults and write the file."""
		self.config = DefaultSettings.DefaultSettings.get()
		if not self.config.read(constants.SETTING_FILE_NAME):
			# First launch: persist the default settings.
			self.config.read_dict(DefaultSettings.initialValues)
			self.config.write()
		# Apply the configured log level (default 20 = INFO, clamped to 0..50).
		self.hLogHandler.setLevel(self.config.getint("general","log_level",20,0,50))

	def InitTranslation(self):
		"""Initialize the gettext-based message translation."""
		loc = locale.getdefaultlocale()[0].replace("_", "-")
		lang=self.config.getstring("general","language","",constants.SUPPORTING_LANGUAGE.keys())
		if lang == "":
			if loc in list(constants.SUPPORTING_LANGUAGE.keys()):
				self.config["general"]["language"] = loc
			else:
				# Unsupported system locale: show the language selection dialog.
				langSelect = views.langDialog.langDialog()
				langSelect.Initialize()
				langSelect.Show()
				self.config["general"]["language"] = langSelect.GetValue()
			lang = self.config["general"]["language"]
		self.translator=gettext.translation("messages","locale", languages=[lang], fallback=True)
		self.translator.install()

	def GetFrozenStatus(self):
		"""Return True when running as a compiled exe, False when running under the interpreter."""
		return self.frozen

	def say(self,s,interrupt=False):
		"""Speak (and braille) the given string via the screen reader."""
		self.speech.speak(s, interrupt=interrupt)
		self.speech.braille(s)

	def SetTimeZone(self):
		# Derive the local timezone from the Windows timezone bias
		# (GetTimeZoneInformation reports minutes west of UTC, hence * -1).
		bias=win32api.GetTimeZoneInformation(True)[1][0]*-1
		hours=bias//60
		minutes=bias%60
		self.timezone=datetime.timezone(datetime.timedelta(hours=hours,minutes=minutes))

	def getAppPath(self):
		"""Return the absolute path of the application.
		"""
		if self.frozen:
			# Running from the compiled exe.
			return sys.executable
		else:
			# Running from a .py file.
			return os.path.join(os.path.dirname(__file__), os.path.basename(sys.argv[0]))
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy

from .config import test_config
from .config import app_config

app = Flask("Entity matching tool")
# app.config.from_object(test_config)
app.config.from_object(app_config)
db = SQLAlchemy(app)
api = Api(app)

# Write INFO-and-above application log records to the file 'log_file'
# (relative to the working directory).
handler = FileHandler('log_file')
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)

# try:
#     mongo = connect(test_config.MONGO['db'])
#     conn = psycopg2.connect("dbname='{}' user='******' "
#                             "host='{}' password='******'".format(test_config.POSTGRES['db'],
#                                                              test_config.POSTGRES['user'],
#                                                              test_config.POSTGRES['host'],
#                                                              test_config.POSTGRES['pw']))
# except psycopg2.OperationalError as e:
#     engine = sqlalchemy.create_engine("postgres://{}:{}@{}/{}".format(test_config.POSTGRES['user'],
#                                                                       test_config.POSTGRES['pw'],
#                                                                       test_config.POSTGRES['host'],
#                                                                       test_config.POSTGRES['db']))
#     if not database_exists(engine.url):
Beispiel #55
0
# Format used for the file log: timestamp, level, message, origin location.
LOG_FORMAT = (
    "%(asctime)s [%(levelname)s]: %(message)s in %(pathname)s:%(lineno)d")
LOG_LEVEL = logging.INFO

# messaging logger
MESSAGING_LOG_FILE = "project.log"

# Root logging goes to stdout with its own, simpler format.
logging.basicConfig(format='%(asctime)s | %(levelname)s : %(message)s',
                    level=logging.INFO,
                    stream=sys.stdout)

# Dedicated quest-generation logger that additionally writes to
# MESSAGING_LOG_FILE (records still propagate to the stdout root handler).
quest_gen_logger = logging.getLogger("project.quest_gen")
quest_gen_logger.setLevel(LOG_LEVEL)
quest_gen_logger_file_handler = FileHandler(MESSAGING_LOG_FILE)
quest_gen_logger_file_handler.setLevel(LOG_LEVEL)
quest_gen_logger_file_handler.setFormatter(Formatter(LOG_FORMAT))
quest_gen_logger.addHandler(quest_gen_logger_file_handler)


class RandomGenerator:
    """ Random generator controlling the games generation. """

    def __init__(self, seed=None):
        """Create a generator, optionally fixing its seed."""
        self.set_seed(seed)

    def set_seed(self, seed):
        """Store the seed controlling this generator."""
        self._seed = seed

    @property
    def seed(self):
        """The currently configured seed (None when unseeded)."""
        return self._seed
Beispiel #56
0
# -*- coding: utf-8 -*-

import os
import json
import paho.mqtt.client as mqtt
import camera_mode_selector
from logging import getLogger, FileHandler, StreamHandler, DEBUG
logger = getLogger(__name__)
# Configure handlers only once, even if this module is imported repeatedly.
if not logger.handlers:
    # NOTE(review): FileHandler raises if ./log/ does not exist — confirm the
    # directory is created by deployment.
    fileHandler = FileHandler(r'./log/camera_runner.log')
    fileHandler.setLevel(DEBUG)
    # NOTE(review): "streamHander" is a typo for "streamHandler" (module-level
    # name, so left unchanged here).
    streamHander = StreamHandler()
    streamHander.setLevel(DEBUG)
    logger.setLevel(DEBUG)
    logger.addHandler(fileHandler)
    logger.addHandler(streamHander)


# MQTT broker server
# NOTE(review): int() raises TypeError when SSS_MQTT_PORT is unset — confirm
# the environment always provides it.
host = os.getenv('SSS_MQTT_HOST')
port = int(os.getenv('SSS_MQTT_PORT'))

# subscribe topic
sub_topic = 'sensor/event'
# publish topic
pub_topic = 'sensor/feedback/result/'


def on_connect(client, data, flags, response_code):
    """MQTT on-connect callback: log the connection result code, then switch
    the camera mode."""
    logger.info('status {0}'.format(response_code))
    # Reset the camera to mode '2' on (re)connect; the meaning of '2' is
    # defined in camera_mode_selector — TODO confirm.
    camera_mode_selector.change_mode('2')
Beispiel #57
0
import os
import json
import time
import datetime
import traceback

import dropbox


#
from logging import getLogger, StreamHandler, FileHandler, DEBUG, INFO, WARN, ERROR
# Log INFO and above to both the console and a fixed log file.
logger = getLogger(__name__)
sh = StreamHandler()
# NOTE(review): FileHandler raises if /pws/log/ does not exist — confirm the
# directory is provisioned on the target system.
fh = FileHandler("/pws/log/uploader.log")
sh.setLevel(INFO)
fh.setLevel(INFO)
logger.setLevel(INFO)
logger.addHandler(sh)
logger.addHandler(fh)

#
def get_log_header():
    """Return a '<timestamp> <script-name>' prefix for hand-rolled log lines."""
    timestamp = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
    script_name = os.path.splitext(os.path.basename(__file__))[0]
    return "{0} {1}".format(timestamp, script_name)

#
class DropboxConfig(object):
  """Names of the Dropbox credential keys as they appear in the config source."""
  APP_KEY_NAME = "app_key"            # key for the Dropbox application key
  APP_SECRET_NAME = "app_secret"      # key for the Dropbox application secret
  ACCESS_TOKEN_NAME = "access_token"  # key for the OAuth access token

  #
Beispiel #58
0
# encoding: utf-8

from webapp import app
from logging import FileHandler, INFO, Formatter
from config import current_config

# File handler for the Flask application log; the path comes from the
# currently active configuration.
handler = FileHandler(current_config.flask_log, encoding='UTF-8')

logging_format = Formatter(
    '%(levelname)s %(asctime)s [%(name)s:%(module)s:%(funcName)s:%(lineno)s] %(message)s'
)
handler.setFormatter(logging_format)
# Only INFO and above are written to the file.
handler.setLevel(INFO)

app.logger.addHandler(handler)
Beispiel #59
0
def main():
    """Group DICOM files under <input> by patient and series, and write one
    volume image per series under <output>."""
    parser = argparse.ArgumentParser(description='argparse example.')
    parser.add_argument('input', help="Root directory", metavar='<input>')
    parser.add_argument('output', help="Output directory", metavar='<output>')
    parser.add_argument('--ext',
                        help='File extension. default: %(default)s',
                        metavar='str',
                        default='.mha')
    parser.add_argument(
        '--prefix',
        help='Prefix of the output filename. default: %(default)s',
        metavar='str',
        default='SE')
    parser.add_argument('--compress',
                        help='Compress the output image. default: %(default)s',
                        type=str,
                        choices=['auto', 'true', 'false'],
                        default='auto')
    parser.add_argument('--offset',
                        help='Offset to the number. default: %(default)s',
                        type=int,
                        metavar='int',
                        default=1)
    parser.add_argument('--logdir',
                        help='Directory to store logs. default: %(default)s',
                        metavar='str',
                        default=None)
    parser.add_argument('--verbose',
                        help='Verbosity. default: %(default)s',
                        type=int,
                        metavar='level',
                        default=0)

    args = parser.parse_args()

    # Optionally mirror the log output to a timestamped file in --logdir.
    logger.setLevel(verbosity_to_level(args.verbose))
    if args.logdir is not None:
        logdir = Path(args.logdir)
        logdir.mkdir(parents=True, exist_ok=True)
        handler = FileHandler(
            logdir /
            '{}.log'.format(datetime.today().strftime("%y%m%d_%H%M%S")))
        handler.setLevel(verbosity_to_level(args.verbose))
        handler.setFormatter(log_format)
        logger.addHandler(handler)

    root_dir = Path(args.input)
    out_dir = Path(args.output)

    # 'auto' is encoded as None and resolved later from the pixel type.
    compression = {'auto': None, 'true': True, 'false': False}[args.compress]
    # NOTE(review): dtype is never reassigned, so the 'dtype is None' check
    # further down is always true — presumably a hook for a future option.
    dtype = None
    prefix = args.prefix
    ext = args.ext
    offset = args.offset

    logger.info('Collect dicom information')
    all_files = [
        str(e) for e in tqdm.tqdm(root_dir.glob('**/*'), desc='list all files')
        if e.is_file()
    ]

    # Tags needed to group files into series and to sort slices spatially.
    key_tags = [
        'PatientID', 'SeriesInstanceUID', 'AcquisitionDate', 'AcquisitionTime',
        'ImageOrientationPatient', 'ImagePositionPatient'
    ]
    dcm_files = []
    for fn in tqdm.tqdm(all_files):
        try:
            dcm = pydicom.dcmread(fn, stop_before_pixels=True)
            dcm_files.append([fn] + [dcm.get(tag) for tag in key_tags])
        except Exception as e:
            # Non-DICOM files under the root are expected; log and skip.
            logger.warning({'filename': fn, 'exception': e})

    df = pd.DataFrame(dcm_files, columns=['filepath'] + key_tags)

    logger.info('Convert dicom files')

    def sort_dicom(df):
        # Sort slices by their position along the axis normal to the image
        # plane (cross product of the two in-plane orientation vectors).
        orientation = np.array(df['ImageOrientationPatient'].iloc[0]).reshape(
            (2, 3))
        third_axis = np.cross(orientation[0], orientation[1])
        locs = df['ImagePositionPatient'].map(lambda p: np.dot(third_axis, p))
        sorted_index = np.argsort(locs)
        return df.iloc[sorted_index]

    FLOAT_TYPES = set([
        sitk.sitkFloat32, sitk.sitkFloat64, sitk.sitkVectorFloat32,
        sitk.sitkVectorFloat64
    ])

    for patient_id, df_patient in df.groupby('PatientID'):
        logger.info(patient_id)
        # Number the series chronologically by their acquisition time.
        sids, times = [], []
        for series_id, df_series in df_patient.groupby('SeriesInstanceUID'):
            sids.append(series_id)
            dts = df_series.apply(
                lambda row: DT(row.AcquisitionDate + row.AcquisitionTime),
                axis=1).tolist()
            if len(df_series) <= 2:
                times.append(dts[0])
            else:
                # Use the median timestamp to be robust against outliers.
                dts.sort()
                times.append(dts[len(dts) // 2])
        nums = np.argsort(np.argsort(times))
        series_id2series_number = dict(zip(sids, nums))

        for series_id, df_series in df_patient.groupby('SeriesInstanceUID'):
            logger.debug(series_id)
            output_filename = out_dir / patient_id / (prefix + '{:d}'.format(
                series_id2series_number[series_id] + offset) + ext)
            output_filename.parent.mkdir(parents=True, exist_ok=True)
            filenames = sort_dicom(df_series)['filepath'].tolist()
            reader = sitk.ImageSeriesReader()
            reader.SetFileNames(filenames)
            image = reader.Execute()
            # Downcast float64 volumes to float32 to halve the output size.
            if image.GetPixelID() == sitk.sitkFloat64 and dtype is None:
                f = sitk.CastImageFilter()
                f.SetOutputPixelType(sitk.sitkFloat32)
                image = f.Execute(image)
            writer = sitk.ImageFileWriter()
            # NOTE(review): in 'auto' mode the decision made for the first
            # written series is reused for every later one, because
            # 'compression' is overwritten here — confirm this is intended.
            if compression is None:
                compression = image.GetPixelID() not in FLOAT_TYPES
            writer.SetUseCompression(compression)
            writer.SetFileName(str(output_filename))
            writer.Execute(image)

    logger.info('End')
Beispiel #60
-1
def start(resultdir: str, configfile: str):
    """Create a timestamped result directory, set up logging, and run
    Experiment01, writing step data to experiment.csv.

    :param resultdir: base directory under which the experiment folder is created
    :param configfile: optional path to an experiment config file (may be None)
    """
    basepath = Path(resultdir)
    print(("storing results in {}".format(basepath)))

    now = datetime.now()
    timestamp = now.strftime("%Y_%m_%d-%H_%M_%S")
    experiment_path = basepath.joinpath(timestamp + "_" + STRATEGY)
    num = 2
    while experiment_path.exists():
        experiment_path = basepath.joinpath(timestamp + "_" + "v" + str(num))
        # Bug fix: 'num' was never incremented, so a second name collision
        # caused an infinite loop; count up until a free name is found.
        num += 1
    experiment_path.mkdir()

    MODULE_LOGGER_NAME = 'salma'
    # logging.config.fileConfig("experiment01.logging.conf")
    logging.basicConfig()
    logger = logging.getLogger(MODULE_LOGGER_NAME)
    logger.setLevel(logging.DEBUG)
    # All DEBUG-and-above records also go to a log file in the result dir.
    fh = FileHandler(str(experiment_path / "experiment.log"))
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)

    experiment = Experiment01(experiment_path, None if configfile is None else Path(configfile))
    experiment.initialize()
    runner = SingleProcessExperimentRunner()

    with experiment_path.joinpath("experiment.csv").open("w") as f:
        f.write(create_csv_header() + "\n")
        f.flush()
        experiment.step_listeners.append(create_step_logger(f))
        experiment.step_listeners.append(break_when_all_delivered)
        experiment.step_listeners.append(break_when_all_broken)
        # _, res, trial_infos = runner.run_trials(experiment, number_of_trials=1, max_steps=3000, max_retrials=0)
        experiment.run(max_steps=5000)
    experiment.world.printState()