Example #1
    def __init__(self):

        # Logger init
        logging.basicConfig(filename='/var/log/r2d2.log',
                            level=logging.DEBUG,
                            format='%(asctime)s %(levelname)7s: %(message)s')

        # Color the errors and warnings in red
        logging.addLevelName( logging.ERROR, "\033[91m%s\033[0m" % logging.getLevelName(logging.ERROR))
        logging.addLevelName( logging.WARNING, "\033[91m%s\033[0m" % logging.getLevelName(logging.WARNING))


        # EV3 init
        Ev3Dev.__init__(self)
        self.head        = Motor(port=Motor.PORT.A)
        self.right_wheel = Motor(port=Motor.PORT.B)
        self.left_wheel  = Motor(port=Motor.PORT.C)
        #print '%d%%' % self.get_battery_percentage(7945400)

        # "kill/kill -9" init
        signal.signal(signal.SIGTERM, self.signal_term_handler)
        signal.signal(signal.SIGINT, self.signal_int_handler)
        self.shutdown_flag = False

        self.rest_server = RESTServer(self)
        self.rest_server.start()
Example #2
def _set_log_level(log_level):
        """
        Helper method to change the log level depending on log_level

        :param log_level: integer or string (10 for DEBUG, 20 for INFO)

        :return: A message indicating success or not
        """
        # setting logging level:
        if log_level and str(log_level) in ["10", "20"]:
            # Setting the root logger to that level
            level = int(log_level)
            logging.getLogger(__name__).info("Setting logging level to " +
                                             logging.getLevelName(level))
            logging.getLogger().setLevel(level)
            out = "Logging successfully set to level " + logging.getLevelName(level)
            logging.getLogger(__name__).info(out)
            return out

        else:
            # notifying the user and keeping the current level
            logger = logging.getLogger(__name__)
            out = ("Logging level unknown! Should be 10 (DEBUG) or 20 (INFO). Keeping current level of " +
                   logging.getLevelName(logging.getLogger().getEffectiveLevel()))
            logger.warning(out)

            return out
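A quick usage sketch for the helper above (hypothetical call sites; it assumes the root logger has already been configured):

import logging

logging.basicConfig(level=logging.INFO)
print(_set_log_level(10))      # switches the root logger to DEBUG
print(_set_log_level("20"))    # the string form is accepted too, via str(log_level)
print(_set_log_level(42))      # unknown value: warns and keeps the current level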
Example #3
File: job.py Project: Stan-He/avocado
 def __start_job_logging(self):
     # Enable test logger
     fmt = ('%(asctime)s %(module)-16.16s L%(lineno)-.4d %('
            'levelname)-5.5s| %(message)s')
     test_handler = output.add_log_handler("avocado.test",
                                           logging.FileHandler,
                                           self.logfile, self.loglevel, fmt)
     root_logger = logging.getLogger()
     root_logger.addHandler(test_handler)
     root_logger.setLevel(self.loglevel)
     self.__logging_handlers[test_handler] = ["avocado.test", ""]
     # Add --store-logging-streams
     fmt = '%(asctime)s %(levelname)-5.5s| %(message)s'
     formatter = logging.Formatter(fmt=fmt, datefmt='%H:%M:%S')
     for name in getattr(self.args, "store_logging_stream", []):
         name = re.split(r'(?<!\\):', name, maxsplit=1)
         if len(name) == 1:
             name = name[0]
             level = logging.INFO
         else:
             level = (int(name[1]) if name[1].isdigit()
                      else logging.getLevelName(name[1].upper()))
             name = name[0]
         try:
             logfile = os.path.join(self.logdir, name + "." +
                                    logging.getLevelName(level))
             handler = output.add_log_handler(name, logging.FileHandler,
                                              logfile, level, formatter)
         except ValueError, details:
             self.log.error("Failed to set log for --store-logging-stream "
                            "%s:%s: %s.", name, level, details)
         else:
             self.__logging_handlers[handler] = [name]
Example #4
File: log.py Project: xww/nova-old
    def process(self, msg, kwargs):
        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']

        context = kwargs.pop('context', None)
        if not context:
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))

        # NOTE(hzyangtk): for unified log module
        if (context and context.to_dict().get('unified_log_id', None)
            and context.to_dict().get('unified_log_seq', None)
            and self.unified_logger):
            try:
                log_msg = msg
                log_level = inspect.stack()[1][3]
                if log_level == 'exception':
                    log_level = 'error'
                log_level_num = logging.getLevelName(log_level.upper())
                default_level_num = logging.getLevelName(
                                        CONF.unified_log_level.upper())
                if (isinstance(log_level_num, int)
                    and isinstance(default_level_num, int)
                    and log_level_num >= default_level_num):
                    arg = inspect.getargvalues(inspect.stack()[1][0])
                    args = arg.locals['args']
                    if len(args):
                        log_msg = log_msg % args[0]
                    self._record_unified_log(context, log_level, log_msg)
            except Exception, e:
                msg = 'Unified log raise Exception %s || %s' % (e, msg)
Example #5
def configure_logging():
    logging.addLevelName(
        logging.WARNING, Fore.MAGENTA + logging.getLevelName(logging.WARNING))
    logging.addLevelName(
        logging.ERROR, Fore.RED + logging.getLevelName(logging.ERROR))
    logging.basicConfig(level=loglevel(),
                        format="%(levelname)s: %(message)s")
Example #6
File: api.py Project: caux/Flexget
        def line_filter(line, fields):
            line = json.loads(line)

            if not line:
                return False

            for f, filter_str in fields.iteritems():
                if not filter_str or f not in line:
                    continue

                if f == 'levelname':
                    line_level = logging.getLevelName(line['levelname'])
                    try:
                        filter_level = int(filter_str)
                    except ValueError:
                        filter_level = logging.getLevelName(filter_str.upper())

                    if line_level < filter_level:
                        return False
                    else:
                        continue

                if filter_str.lower() not in line.get(f, '').lower():
                    return False
            return True
Example #7
def loglevel():
    """
    This function stores or sets the proper log level based on the database configuration
    """

    logger = logging.getLogger(__name__)

    for case in Switch(config(key="verbosity").lower()):
        # choices=["info", "debug", "warn", "critical"])
        if case('debug'):
            level = logging.DEBUG
            break
        if case('critical'):
            level = logging.CRITICAL
            break
        if case('warn'):
            level = logging.WARN
            break
        if case('info'):
            level = logging.INFO
            break
        if case():
            # Default to DEBUG log level
            level = logging.DEBUG

    # If logging level has changed, redefine in logger, database and send message
    if logging.getLevelName(logger.level).lower() != config(key="verbosity"):
        logger.setLevel(level)
        logger.info(msg="Logging level set to %s" % config(key="verbosity"))
        setconfig(key="verbosity", value=logging.getLevelName(logger.level).lower())
    else:
        logger.debug(msg="Log level didn't change from %s" % config(key="verbosity").lower())
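
A plain dictionary lookup is a common alternative to the Switch class used above; a sketch reusing the same config helper:

LEVELS = {'debug': logging.DEBUG, 'critical': logging.CRITICAL,
          'warn': logging.WARN, 'info': logging.INFO}
level = LEVELS.get(config(key="verbosity").lower(), logging.DEBUG)  # default to DEBUG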
Example #8
def setLogger(options, logBacklog):
    """set up a logfile

    :param options: parsed options
    """
    logger = logging.getLogger('main')
    loglevel = logging.getLevelName(options.debuglevel)
    logger.setLevel(loglevel)
    formatter = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s')

    # try until a working logging path is found
    for logpath in [options.logFile,
                    '/var/log/nagios3/nagiosmailer.log',
                    '/tmp/nagiosmailer.log', ]:
        try:
            file_logger = logging.handlers.RotatingFileHandler(logpath, maxBytes=100000, backupCount=5)
            file_logger.setLevel(loglevel)
            file_logger.setFormatter(formatter)
            logger.addHandler(file_logger)

            logBacklog.append(("DEBUG", "using %s for logging" % logpath))
            break
        except Exception as inst:
            logBacklog.append(("DEBUG", "not using %s for logpath (%s)" % (logpath, inst)))

    # log the things we couldn't log until now
    for (lvl, line) in logBacklog:
        logger.log(logging.getLevelName(lvl), line)

    return logger
Example #9
def saveLogfileSection():
    """
    Save stuff
    """
    section = 'logfile'

    cfg = SafeConfigParser()
    try:
        with codecs.open(autosub.CONFIGFILE, 'r', autosub.SYSENCODING) as f:
            cfg.readfp(f)
    except:
        #no config yet
        cfg = SafeConfigParser()
        pass

    if not cfg.has_section(section):
        cfg.add_section(section)

    cfg.set(section, "loglevel", logging.getLevelName(int(autosub.LOGLEVEL)).lower())
    cfg.set(section, "loglevelconsole", logging.getLevelName(int(autosub.LOGLEVELCONSOLE)).lower())
    cfg.set(section, "logsize", str(autosub.LOGSIZE))
    cfg.set(section, "lognum", str(autosub.LOGNUM))

    with open(autosub.CONFIGFILE, 'wb') as file:
        cfg.write(file)
Example #10
File: Logger.py Project: nathanolla/bcfg2
def setup_logging():
    """Setup logging for Bcfg2 software."""
    if hasattr(logging, "already_setup"):
        return

    level = default_log_level()
    params = []

    to_console = True
    if hasattr(Bcfg2.Options.setup, "daemon"):
        if Bcfg2.Options.setup.daemon:
            to_console = False
        # if a command can be daemonized, but hasn't been, then we
        # assume that they're running it in the foreground and thus
        # want some more output.
        clvl = min(level, logging.INFO)
    else:
        clvl = level
    if to_console:
        params.append("%s to console" % logging.getLevelName(clvl))
        add_console_handler(level=clvl)

    if hasattr(Bcfg2.Options.setup, "syslog") and Bcfg2.Options.setup.syslog:
        slvl = min(level, logging.INFO)
        params.append("%s to syslog" % logging.getLevelName(slvl))
        add_syslog_handler(level=slvl)

    if Bcfg2.Options.setup.logfile:
        params.append("%s to %s" % (logging.getLevelName(level), Bcfg2.Options.setup.logfile))
        add_file_handler(level=level)

    logging.root.setLevel(logging.DEBUG)
    logging.root.debug("Configured logging: %s" % "; ".join(params))
    logging.already_setup = True
Example #11
 def setLogLevel(self, level):
     level = logging.getLevelName(level)
     for logger in (girder.logger, cherrypy.log.access_log, cherrypy.log.error_log):
         logger.setLevel(level)
         for handler in logger.handlers:
             handler.setLevel(level)
     return logging.getLevelName(level)
Example #12
 def __common_test_expected(self, expect_level, emit_level, logger_name, log_file):
     if expect_level is not None:
         exp_levelno = logging.getLevelName(expect_level)
         emit_levelno = logging.getLevelName(emit_level)
         expect_level = max(exp_levelno, emit_levelno)
     self._lgfile_watchers[log_file].check_level_output(
         self, expect_level, logger_name)
Example #13
File: Logger.py Project: dikim33/bcfg2
def setup_logging(procname, to_console=True, to_syslog=True,
                  syslog_facility='daemon', level=0, to_file=None):
    """Setup logging for Bcfg2 software."""
    if hasattr(logging, 'already_setup'):
        return

    params = []

    if to_console:
        if to_console == True:
            to_console = logging.WARNING
        if level == 0:
            clvl = to_console
        else:
            clvl = min(to_console, level)
        params.append("%s to console" % logging.getLevelName(clvl))
        add_console_handler(clvl)
    if to_syslog:
        if level == 0:
            slvl = logging.INFO
        else:
            slvl = min(level, logging.INFO)
        params.append("%s to syslog" % logging.getLevelName(slvl))
        add_syslog_handler(procname, syslog_facility, level=slvl)
    if to_file is not None:
        params.append("%s to %s" % (logging.getLevelName(level), to_file))
        add_file_handler(to_file, level=level)

    logging.root.setLevel(logging.DEBUG)
    logging.root.debug("Configured logging: %s" % "; ".join(params))
    logging.already_setup = True
Example #14
File: core.py Project: ahmadshahwan/ipopo
    def log_level(io_handler, level=None, name=None):
        """
        Prints/Changes log level
        """
        # Get the logger
        logger = logging.getLogger(name)

        # Normalize the name
        if not name:
            name = "Root"

        if not level:
            # Level not given: print the logger level
            io_handler.write_line(
                "{0} log level: {1} (real: {2})",
                name,
                logging.getLevelName(logger.getEffectiveLevel()),
                logging.getLevelName(logger.level))
        else:
            # Set the logger level
            try:
                logger.setLevel(level.upper())
                io_handler.write_line("New level for {0}: {1}", name, level)

            except ValueError:
                io_handler.write_line("Invalid log level: {0}", level)
Example #15
    def _get_log_stmt(self, level, msg, *tags, **kwargs):
        msg = msg or ''

        kwargs[self.LEVEL] = logging.getLevelName(level)

        # append the optional constants defined on initialization
        kwargs.update(self._constants)

        # add message to the payload, substitute with the passed data
        kwargs[self.MESSAGE] = Template(msg).safe_substitute(kwargs)

        # caller location
        loc = self.get_caller_info()
        if loc:
            kwargs[self.LOCATION] = loc

        kwargs[self.TIME] = self.get_timestamp()

        if tags:
            kwargs[self.TAGS] = tags

        try:
            payload = self.to_json(kwargs)
        except (Exception) as ex:
            msg = 'LOGGER EXCEPTION "{0}" in {1}'.format(str(ex), loc)
            return json.dumps({
                'msg': msg,
                'level': logging.getLevelName(logging.ERROR)
            })

        return payload
Example #16
def setup_logging(loglevel=None, logfile=None, log_maxsize=None, log_maxfiles=None):
    if logfile:
        handler = logging.handlers.RotatingFileHandler(
            logfile,
            maxBytes=log_maxsize,
            backupCount=log_maxfiles,
        )
    else:
        handler = logging.StreamHandler()

    formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
    handler.setFormatter(formatter)

    logger = logging.getLogger()
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)

    if logfile:
        log.info("Logging to %s; maxBytes=%s; backupCount=%s; loglevel=%s",
                 logfile, log_maxsize, log_maxfiles,
                 logging.getLevelName(loglevel)
                 )
    else:
        log.info("Logging to stderr; loglevel=%s",
                 logging.getLevelName(loglevel)
                 )

    logger.setLevel(loglevel)
Example #17
File: spike_log.py Project: dynaryu/eqrm
def initialise_logging(log_file_path=None):
    """
    """
    
    # _setup and file_logging_level are module globals that this function assigns;
    # declare them so Python doesn't treat them as locals
    global _setup, file_logging_level

    if not _setup:
        # sanity check the logging levels, require console >= file
        if file_logging_level > console_logging_level:
            file_logging_level = console_logging_level

        # define a console handler which writes to sys.stdout
        console = logging.StreamHandler(sys.stdout)
        console.setLevel(console_logging_level)
        formatter = logging.Formatter('%(message)s')
        console.setFormatter(formatter)
        logging.getLogger('').addHandler(console)

        # Add the logging of exceptions, see Rosses code.
        #FIXME
        
        # tell the world how we are set up
        start_msg = ("Logfile is '%s' with logging level of %s, "
                     "console logging level is %s"
                     % (log_filename,
                        logging.getLevelName(file_logging_level),
                        logging.getLevelName(console_logging_level)))
        
        # mark module as *setup*
        _setup = True
Example #18
    def test_log_print(self):
        """
        Tests the "log" method
        """
        for cmd in ("debug", "info", "warn", "warning", "error"):
            # Log something
            self._run_command("log.{0} some text for {0}".format(cmd))

        # Get all logs
        logs = self.logger.get_log()

        # Basic filter: >= warning
        output = self._run_command("log.log")
        for entry in logs:
            if entry.level >= logging.WARNING:
                self.assertIn(entry.message, output)

        # Filter given
        for level in (logging.DEBUG, logging.INFO,
                      logging.WARNING, logging.ERROR):
            output = self._run_command(
                "log.log {0}".format(logging.getLevelName(level)))
            for entry in logs:
                if entry.level >= level:
                    self.assertIn(entry.message, output)

        # Test length filter, even when going beyond the log size
        for level in (logging.DEBUG, logging.INFO,
                      logging.WARNING, logging.ERROR):
            for i in range(1, len(logs) + 10):
                output = self._run_command(
                    "log.log {0} {1}".format(logging.getLevelName(level), i))
                for entry in logs[-i:]:
                    if entry.level >= level:
                        self.assertIn(entry.message, output)
Example #19
    def setup_logging(self, args):
        self.verbosity = args.verbosity
        self.logfile = args.logfile

        verbosity_level_name = logging.getLevelName(self.verbosity)

        logfile_level_name = logging.getLevelName(self.logfile)

        highest_level = min([self.logfile, self.verbosity])
        print "set log level to:", highest_level
        self.log.setLevel(highest_level)

        if self.logfile > 0 and self.logfilename:
            handler = logging.FileHandler(self.logfilename, mode='w', encoding="utf8")
#             handler.set_level(self.logfile)
            handler.level = self.logfile
            handler.setFormatter(self.LOG_FORMATTER)
            self.log.addHandler(handler)

        if self.verbosity > 0:
            handler = logging.StreamHandler()
#             handler.set_level(self.verbosity)
            handler.level = self.verbosity
            handler.setFormatter(self.LOG_FORMATTER)
            self.log.addHandler(handler)

        self.log.debug(" ".join(sys.argv))

        verbosity_level_name = logging.getLevelName(self.verbosity)
        self.log.info("Verbosity log level: %s" % verbosity_level_name)

        logfile_level_name = logging.getLevelName(self.logfile)
        self.log.info("logfile log level: %s" % logfile_level_name)
Example #20
    def __init__(self,setupObj):
        debug_log_filename = setupObj.args.debug_log_filename
        debug_log_level    = setupObj.args.debug_log_level
        debug_log_format   = setupObj.args.debug_log_format


        self.debugLogger = logging.getLogger("debug_log")
        self.debugLogger.setLevel(logging.DEBUG)
 
        # create console handler and set level to info
        handler = logging.FileHandler(debug_log_filename)
        handler.setLevel(logging.getLevelName(debug_log_level))
        formatter = logging.Formatter(debug_log_format)
        handler.setFormatter(formatter)
        self.debugLogger.addHandler(handler)


        records_log_filename =  setupObj.args.records_log_filename
        records_log_level    = setupObj.args.records_log_level
        records_log_format   = setupObj.args.records_log_format

        self.recordsLogger = logging.getLogger("records_log")
        self.recordsLogger.setLevel(logging.DEBUG)
        # create console handler and set level to info
        handler = logging.FileHandler(records_log_filename)
        handler.setLevel(logging.getLevelName(records_log_level))
        formatter = logging.Formatter(records_log_format)
        handler.setFormatter(formatter)
        self.recordsLogger.addHandler(handler)
Example #21
def setupConfigAndLogging(environment):
    global config

    try: 
        config = __import__("config." + environment , fromlist=[environment])
    except ImportError:
        sys.exit("Unknown environment. Please create config/" + environment + \
                 ".py") 

    handler = logging.FileHandler(config.log_file)
    handler.setFormatter(logging.Formatter(config.log_format))
    
    errorHandler = logging.FileHandler(config.out_err_log)
    errorHandler.setFormatter(logging.Formatter(config.log_format))

    root_logger = logging.getLogger("")
    root_logger.setLevel(logging.getLevelName(config.global_log_level))
    root_logger.addHandler(handler)
    root_logger.addHandler(errorHandler)

    for log_path in config.log_levels:
        logger = logging.getLogger(log_path)
        logger_level = config.log_levels[log_path]
        logger.setLevel(logging.getLevelName(logger_level))
        logger.propagate = 0
        logger.addHandler(handler)
Example #22
    def __init__(self, widget: tk.Text, level=logging.NOTSET):
        self.widget = widget
        super().__init__(level)

        # Assign colours for each logging level
        for level, colour in LVL_COLOURS.items():
            widget.tag_config(
                logging.getLevelName(level),
                foreground=colour,
                # For multi-line messages, indent this much.
                lmargin2=30,
            )
        widget.tag_config(
            logging.getLevelName(logging.CRITICAL),
            background='red',
        )
        # If multi-line messages contain carriage returns, lmargin2 doesn't
        # work. Add an additional tag for that.
        widget.tag_config(
            'INDENT',
            lmargin1=30,
            lmargin2=30,
        )

        widget['state'] = "disabled"
Example #23
def save_logfile_section():
    """
    Save stuff
    """
    section = 'logfile'

    cfg = SafeConfigParser()
    try:
        with codecs.open(autosubliminal.CONFIGFILE, 'r', autosubliminal.SYSENCODING) as f:
            cfg.readfp(f)
    except:
        # No config yet
        cfg = SafeConfigParser()
        pass

    if not cfg.has_section(section):
        cfg.add_section(section)

    cfg.set(section, "loglevel", logging.getLevelName(int(autosubliminal.LOGLEVEL)).lower())
    cfg.set(section, "lognum", str(autosubliminal.LOGNUM))
    cfg.set(section, "logsize", str(autosubliminal.LOGSIZE))
    cfg.set(section, "loglevelconsole", logging.getLevelName(int(autosubliminal.LOGLEVELCONSOLE)).lower())
    cfg.set(section, "loghttpaccess", str(autosubliminal.LOGHTTPACCESS))
    cfg.set(section, "logreversed", str(autosubliminal.LOGREVERSED))

    with open(autosubliminal.CONFIGFILE, 'wb') as file:
        cfg.write(file)
Example #24
File: bioio.py Project: benedictpaten/toil
def setLogLevel(level):
    level = level.upper()
    if level == "OFF": level = "CRITICAL"
    # Note that getLevelName works in both directions, numeric to textual and textual to numeric
    numericLevel = logging.getLevelName(level)
    assert logging.getLevelName(numericLevel) == level
    rootLogger.setLevel(numericLevel)
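
The note in the comment is the key trick here: getLevelName maps numbers to names, maps registered names back to numbers, and returns a 'Level %s' string for anything unregistered (a minimal sketch):

import logging

logging.getLevelName(logging.WARNING)  # -> 'WARNING'
logging.getLevelName('WARNING')        # -> 30
logging.getLevelName(35)               # -> 'Level 35'

This is why the assert in setLogLevel round-trips the value before applying it.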
Example #25
    def __getLogger(klass):
        if klass.klassLogger is not None:
            return klass.klassLogger
        else:
            formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
            klassLogger = logging.getLogger(appConfig.logger.name)

            #file handler
            if "file_log" in appConfig.logger:
                level = logging.getLevelName(appConfig.logger.file_log.level)
                klassLogger.setLevel(level)

                logPath = os.path.join(appConfig.root,'log',appConfig.logger.file_log.filename)
                hdlr = logging.FileHandler(logPath)

                hdlr.setFormatter(formatter)

                klassLogger.addHandler(hdlr)

            #std handler
            if "stdout_log" in appConfig.logger:
                ch = logging.StreamHandler(sys.stdout)

                level = logging.getLevelName(appConfig.logger.stdout_log.level)
                ch.setLevel(level)

                ch.setFormatter(formatter)

                klassLogger.addHandler(ch)

            klass.klassLogger = klassLogger
            return klass.klassLogger
Example #26
def install_std_handler():
    global handler
    if handler:
        logger.removeHandler(handler)

    settings = sublime.load_settings("SublimeLinter.sublime-settings")
    level = settings.get('debug', False)

    if level is False:
        level = DEBUG_FALSE_LEVEL
        formatter = TaskNumberFormatter(
            fmt="SublimeLinter: {LEVELNAME}{message}",
            style='{')
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
    else:
        if level is True:
            level = DEBUG_TRUE_LEVEL
        else:
            level = logging.getLevelName(level.upper())

        formatter = TaskNumberFormatter(
            fmt="SublimeLinter: {TASK_NUMBER}{filename}:{lineno}: {LEVELNAME}{message}",
            style='{')
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)

    handler.setLevel(level)
    logger.addHandler(handler)
    logger.setLevel(min(ERROR_PANEL_LEVEL, STATUS_BAR_LEVEL, level))
    logger.info(
        'Logging installed; log level {}'.format(logging.getLevelName(level))
    )
Example #27
File: agent.py Project: romana/core
def delete_all_rules_for_policy(iptables_rules, policy_name, tenants):
    """
    Specify the policy name, such as 'foo' and a list of tenants.
    This will delete all the rules that refer to anything related
    to this rule, such as 'ROMANA-P-foo_', 'ROMANA-P-foo-IN_' for each tenant.

    """

    # Some dirty logs. No need to run all these loops unless the logging level is DEBUG
    if logging.getLevelName(logging.getLogger().getEffectiveLevel()) == 'DEBUG':
        logging.debug("In delete_all_rules_for_policy")
        for i, line in enumerate(iptables_rules):
            logging.debug("Current rules --> line %3d : %s" % (i,line))

    full_names = []

    full_names += [ 'ROMANA-P-%s%s_' % (policy_name, p)
                        for p in [ "", "-IN", "-OUT" ] ]

    logging.debug("In delete_all_rules_for_policy -> deleteing policy chains %s" % full_names)

    # Only transcribe those lines that don't mention any of the chains
    # related to the policy.
    clean_rules = [ r for r in iptables_rules if not
                            any([ p in r for p in full_names ]) ]

    # Some dirty logs. No need to run all these loops unless the logging level is DEBUG
    if logging.getLevelName(logging.getLogger().getEffectiveLevel()) == 'DEBUG':
        logging.debug("In delete_all_rules_for_policy")
        for i, line in enumerate(clean_rules):
            logging.debug("Clean rules --> line %3d : %s" % (i,line))


    return clean_rules
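
The name comparison against 'DEBUG' above works, but the stdlib offers a more direct guard with effectively the same result (a sketch):

if logging.getLogger().isEnabledFor(logging.DEBUG):
    logging.debug("In delete_all_rules_for_policy")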
Example #28
def _install_std_handler(level=False):
    global handler
    if handler:
        logger.removeHandler(handler)

    if level is False:
        level = DEBUG_FALSE_LEVEL
        formatter = TaskNumberFormatter(
            fmt="SublimeLinter: {LEVELNAME}{message}",
            style='{')
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
    else:
        if level is True:
            level = DEBUG_TRUE_LEVEL
        else:
            level = logging.getLevelName(level.upper())

        formatter = TaskNumberFormatter(
            fmt="SublimeLinter: {LOC:<22} {LEVELNAME}{message}",
            style='{')
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)

    handler.setLevel(level)
    logger.addHandler(handler)
    logger.setLevel(min(ERROR_PANEL_LEVEL, level))
    logger.info(
        'Logging installed; log level {}'.format(logging.getLevelName(level))
    )
Example #29
def mrf_log_init(level = install.log_level):
    #logging.basicConfig(filename=install.logdir+install.mrflog,level=level)
    formatter = logging.Formatter(fmt='%(asctime)s] %(levelname)s %(filename)s.%(lineno)d - %(message)s')# , datefmt='%Y-%m-%d,%H:%M:%S')
    #formatter = MyFormatter(fmt='%(asctime)s.%(msecs)03d] %(levelname)s %(filename)s.%(lineno)d - %(message)s', datefmt='%Y-%m-%d,%H:%M:%S')
    mrflog = logging.getLogger(install.logger_name)
    hdlr = logging.FileHandler(install.logdir+install.mrflog)
    hdlr.setFormatter(formatter)

    mrflog.addHandler(hdlr)

    #ch = logging.StreamHandler()
    #ch.setFormatter(formatter)
    #ch.setLevel(level)
    #mrflog.addHandler(ch)

    # Color the warning and error level names
    logging.addLevelName( logging.WARNING, "\033[1;31m%s\033[1;0m" % logging.getLevelName(logging.WARNING))
    logging.addLevelName( logging.ERROR, "\033[1;41m%s\033[1;0m" % logging.getLevelName(logging.ERROR))

    mrflog.setLevel(level)
    return mrflog
Example #30
def configure_logging():
    """Configure root logger"""
    logger = logging.getLogger()

    logger.setLevel(logging.DEBUG)

    # Level name colored differently (both console and file)
    logging.addLevelName(logging.WARNING, "\x1b[0;33m%s\x1b[0m" % logging.getLevelName(logging.WARNING))
    logging.addLevelName(logging.ERROR, "\x1b[0;31m%s\x1b[0m" % logging.getLevelName(logging.ERROR))

    # Configure console logging
    console_log_handler = logging.StreamHandler()
    console_log_handler.setLevel(logging.INFO)
    # All console messages are the same color (except with colored level names)
    console_formatter = logging.Formatter("\x1b[0;32m%(levelname)s" "\t%(message)s\x1b[0m")
    console_log_handler.setFormatter(console_formatter)
    logger.addHandler(console_log_handler)

    # Configure log file
    hf.clean_dir(conf.log_dir)
    log_file = os.path.join(conf.log_dir, "stacktrain.log")
    file_log_handler = logging.FileHandler(log_file)
    file_log_handler.setLevel(logging.DEBUG)
    file_formatter = logging.Formatter(
        "%(process)s %(asctime)s.%(msecs)03d" " %(name)s %(levelname)s %(message)s", datefmt="%H:%M:%S"
    )
    file_log_handler.setFormatter(file_formatter)
    logger.addHandler(file_log_handler)

    logger.debug("Root logger configured.")
Example #31
File: app.py Project: cullen-beta/stm32pio
    settings.endArray()
    settings.endGroup()

    engine = QQmlApplicationEngine(parent=app)

    qmlRegisterType(ProjectListItem, 'ProjectListItem', 1, 0,
                    'ProjectListItem')
    qmlRegisterType(Settings, 'Settings', 1, 0, 'Settings')

    projects_model = ProjectsList(parent=engine)
    boards = []
    boards_model = QStringListModel(parent=engine)

    engine.rootContext().setContextProperty(
        'Logging', {
            logging.getLevelName(logging.CRITICAL): logging.CRITICAL,
            logging.getLevelName(logging.ERROR): logging.ERROR,
            logging.getLevelName(logging.WARNING): logging.WARNING,
            logging.getLevelName(logging.INFO): logging.INFO,
            logging.getLevelName(logging.DEBUG): logging.DEBUG,
            logging.getLevelName(logging.NOTSET): logging.NOTSET
        })
    engine.rootContext().setContextProperty('projectsModel', projects_model)
    engine.rootContext().setContextProperty('boardsModel', boards_model)
    engine.rootContext().setContextProperty('appSettings', settings)

    engine.load(QUrl.fromLocalFile('stm32pio-gui/main.qml'))

    main_window = engine.rootObjects()[0]

    # Getting PlatformIO boards can take long time when the PlatformIO cache is outdated but it is important to have
Example #32
# coding:utf8
import logging
import os
import random
import sys

import zxing  # import the zxing barcode-parsing package
from PIL import Image
import cv2

logger = logging.getLogger(__name__)  # module logger

if not logger.handlers:
    logging.basicConfig(level=logging.INFO)

DEBUG = (logging.getLevelName(logger.getEffectiveLevel()) == 'DEBUG')  # flag for debug tracing


# Generate a temporary file in the current directory to work around Java path issues
def ocr_qrcode_zxing(filename):
    img = Image.open(filename)
    ran = int(random.random() * 100000)  # random suffix for the temporary file name
    img.save('%s%s.jpg' % (os.path.basename(filename).split('.')[0], ran))
    zx = zxing.BarCodeReader()  # instantiate the zxing QR-code reader
    data = ''
    zxdata = zx.decode('%s%s.jpg' %
                       (os.path.basename(filename).split('.')[0], ran))  # decode the image

    # Remove the temporary file
    os.remove('%s%s.jpg' % (os.path.basename(filename).split('.')[0], ran))
Example #33
def _(loglevel: str) -> int:
    return logging.getLevelName(loglevel.upper())  # type: ignore
Example #34
def level_name(loglevel: int) -> str:
    """Convert log level to number."""
    return cast(str, logging.getLevelName(loglevel))
Example #35
def main(args=None):
    args = arg_parser().parse_args(args)
    if args.verbosity == 1:
        level = logging.getLevelName('INFO')
    elif args.verbosity >= 2:
        level = logging.getLevelName('DEBUG')
    else:
        level = logging.getLevelName('WARNING')
    logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        level=level)
    logger = logging.getLogger(__name__)
    try:
        img_fns = io.glob_nii(args.img_dir)
        mask_fns = io.glob_nii(args.mask_dir)
        if len(img_fns) != len(mask_fns) or len(img_fns) == 0:
            raise NormalizationError(
                'Image directory ({}) and mask directory ({}) must contain the same '
                '(positive) number of images!'.format(args.img_dir,
                                                      args.mask_dir))

        logger.info('Normalizing the images according to RAVEL')
        Z, _ = ravel.ravel_normalize(
            args.img_dir,
            args.mask_dir,
            args.contrast,
            do_whitestripe=args.no_whitestripe,
            b=args.num_unwanted_factors,
            membership_thresh=args.control_membership_threshold,
            do_registration=args.no_registration,
            segmentation_smoothness=args.segmentation_smoothness,
            use_fcm=not args.use_atropos)

        V = ravel.image_matrix(img_fns, args.contrast, masks=mask_fns)
        V_norm = ravel.ravel_correction(V, Z)
        normalized = ravel.image_matrix_to_images(V_norm, img_fns)

        # save the normalized images to disk
        output_dir = os.getcwd(
        ) if args.output_dir is None else args.output_dir
        out_fns = []
        for fn in img_fns:
            _, base, ext = io.split_filename(fn)
            out_fns.append(os.path.join(output_dir, base + '_ravel' + ext))
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        for norm, out_fn in zip(normalized, out_fns):
            norm.to_filename(out_fn)

        if args.plot_hist:
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', category=FutureWarning)
                from intensity_normalization.plot.hist import all_hists
                import matplotlib.pyplot as plt
            ax = all_hists(output_dir, args.mask_dir)
            ax.set_title('RAVEL')
            plt.savefig(os.path.join(output_dir, 'hist.png'))

        return 0
    except Exception as e:
        logger.exception(e)
        return 1
Example #36
def main():

    #    pdb.set_trace()

    args = Args()
    args.parse_args()

    logger = logging.getLogger('Browse_Process')
    level = logging.getLevelName(args.log_level)
    logger.setLevel(level)
    logFileHandle = logging.FileHandler(pds_log + 'Process.log')
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s, %(message)s')
    logFileHandle.setFormatter(formatter)
    logger.addHandler(logFileHandle)

    RQ_main = RedisQueue('Browse_ReadyQueue')
    RQ_lock = RedisLock(lock_obj)
    RQ_lock.add({RQ_main.id_name: '1'})

    PDSinfoDICT = json.load(open(pds_info, 'r'))

    pds_session, pds_engine = db_connect(pds_db)
    upc_session, upc_engine = db_connect(upc_db)

    tid = get_tid('fullimageurl', upc_session)

    while int(RQ_main.QueueSize()) > 0 and RQ_lock.available(RQ_main.id_name):
        item = literal_eval(RQ_main.QueueGet().decode("utf-8"))
        inputfile = item[0]
        fid = item[1]
        archive = item[2]
        if os.path.isfile(inputfile):
            logger.info('Starting Process: %s', inputfile)
            finalpath = makedir(inputfile)

            recipeOBJ = Recipe()
            recip_json = recipeOBJ.getRecipeJSON(archive)
            recipeOBJ.AddJsonFile(recip_json, 'reduced')
            infile = workarea + os.path.splitext(
                os.path.basename(inputfile))[0] + '.Binput.cub'
            outfile = workarea + os.path.splitext(
                os.path.basename(inputfile))[0] + '.Boutput.cub'
            status = 'success'
            for item in recipeOBJ.getProcesses():
                if status == 'error':
                    logger.error("Error processing %s", inputfile)
                    break
                elif status == 'success':
                    processOBJ = Process()
                    processOBJ.ProcessFromRecipe(item, recipeOBJ.getRecipe())

                    if '2isis' in item:
                        processOBJ.updateParameter('from_', inputfile)
                        processOBJ.updateParameter('to', outfile)
                    elif item == 'spiceinit':
                        processOBJ.updateParameter('from_', infile)
                    elif item == 'cubeatt':
                        label = pvl.load(infile)
                        bands = PDSinfoDICT[archive]['bandorder']
                        query_bands = label['IsisCube']['BandBin'][
                            PDSinfoDICT[archive]['bandbinQuery']]
                        # Create a set from the list / single value
                        try:
                            query_band_set = set(query_bands)
                        except:
                            query_band_set = set([query_bands])

                        # Iterate through 'bands' and grab the first value that is present in the
                        #  set defined by 'bandbinquery' -- if not present, default to 1
                        exband = next(
                            (band for band in bands if band in query_band_set),
                            1)

                        band_infile = infile + '+' + str(exband)
                        processOBJ.updateParameter('from_', band_infile)
                        processOBJ.updateParameter('to', outfile)

                    elif item == 'ctxevenodd':
                        label = pvl.load(infile)
                        SS = label['IsisCube']['Instrument']['SpatialSumming']
                        if SS != 1:
                            break
                        else:
                            processOBJ.updateParameter('from_', infile)
                            processOBJ.updateParameter('to', outfile)

                    elif item == 'reduce':
                        label = pvl.load(infile)
                        Nline = label['IsisCube']['Core']['Dimensions'][
                            'Lines']
                        Nsample = label['IsisCube']['Core']['Dimensions'][
                            'Samples']
                        Nline = int(Nline)
                        Nsample = int(Nsample)
                        Sfactor = scaleFactor(Nline, Nsample, recip_json)
                        processOBJ.updateParameter('lscale', Sfactor)
                        processOBJ.updateParameter('sscale', Sfactor)
                        processOBJ.updateParameter('from_', infile)
                        processOBJ.updateParameter('to', outfile)

                    elif item == 'isis2std':
                        final_outfile = finalpath + '/' + os.path.splitext(
                            os.path.basename(inputfile))[0] + '.browse.jpg'
                        processOBJ.updateParameter('from_', infile)
                        processOBJ.updateParameter('to', final_outfile)

                    else:
                        processOBJ.updateParameter('from_', infile)
                        processOBJ.updateParameter('to', outfile)

                    for k, v in processOBJ.getProcess().items():
                        func = getattr(isis, k)
                        try:
                            func(**v)
                            logger.info('Process %s :: Success', k)
                            if os.path.isfile(outfile):
                                if '.cub' in outfile:
                                    os.rename(outfile, infile)
                            status = 'success'
                            if '2isis' in item:
                                isisSerial = getISISid(infile)
                        except ProcessError as e:
                            print(e)
                            logger.error('Process %s :: Error', k)
                            status = 'error'
            if status == 'success':
                DB_addURL(upc_session, isisSerial, final_outfile, tid)
                os.remove(infile)
                logger.info('Browse Process Success: %s', inputfile)
                AddProcessDB(pds_session, fid, 't')
        else:
            logger.error('File %s Not Found', inputfile)

    upc_session.close()
    pds_session.close()
    upc_engine.dispose()
    pds_engine.dispose()
Example #37
def _setup_logging_from_conf(project, version):
    log_root = getLogger(None).logger
    for handler in log_root.handlers:
        log_root.removeHandler(handler)

    if CONF.use_syslog:
        facility = _find_facility_from_conf()
        if CONF.use_syslog_rfc_format:
            syslog = RFCSysLogHandler(address='/dev/log', facility=facility)
        else:
            syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                    facility=facility)
        log_root.addHandler(syslog)

    logpath = _get_log_file_path()
    if logpath:
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)

    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)

    elif not logpath:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)

    if CONF.publish_errors:
        try:
            handler = importutils.import_object(
                "vnfsvc.openstack.common.log_handler.PublishErrorsHandler",
                logging.ERROR)
        except ImportError:
            handler = importutils.import_object(
                "oslo_messaging.notify.log_handler.PublishErrorsHandler",
                logging.ERROR)
        log_root.addHandler(handler)

    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently.  This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(
                logging.Formatter(fmt=CONF.log_format, datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(
                ContextFormatter(project=project,
                                 version=version,
                                 datefmt=datefmt))

    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)

    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        logger = logging.getLogger(mod)
        # NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
        # to integer code.
        if sys.version_info < (2, 7):
            level = logging.getLevelName(level_name)
            logger.setLevel(level)
        else:
            logger.setLevel(level_name)
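
As the NOTE suggests, on Python 2.7 and 3.x Logger.setLevel accepts the textual name directly and converts it internally, so the explicit getLevelName conversion above is only needed on 2.6:

logging.getLogger('myapp').setLevel('WARNING')  # accepted on 2.7+/3.x (hypothetical logger name)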
Example #38
        log.addHandler(file_handler)
        log.addHandler(syslog_handler)

        app.run(host=settings.config.prometheus_host,
                port=settings.config.prometheus_port,
                debug=False,
                threaded=True)

    else:
        logger.info("Integrated Prometheus exporter is disabled")


if __name__ == '__main__':

    settings.init()
    logger_level = logging.getLevelName(settings.config.logger_level)

    # setup syslog handler to help diagnostics
    logger = logging.getLogger('rbd-target-gw')
    logger.setLevel(logging.DEBUG)

    # syslog (systemctl/journalctl messages)
    syslog_handler = logging.handlers.SysLogHandler(address='/dev/log')
    syslog_handler.setLevel(logging.INFO)
    syslog_format = logging.Formatter("%(message)s")
    syslog_handler.setFormatter(syslog_format)

    # file target - more verbose logging for diagnostics
    file_handler = RotatingFileHandler(
        '/var/log/rbd-target-gw/rbd-target-gw.log',
        maxBytes=5242880,
Example #39
 def setValue(level, value):
     settings.setValue("log/showlevel/%s" % logging.getLevelName(level),
                       int(value))
Example #40
import logging
import StringIO
import csv
import json
import sys
import time
import subprocess
import yaml
import argparse

__version__ = '0.1'

# color logging
logging.addLevelName(
    logging.ERROR,
    "\033[1;31m%s\033[1;0m" % logging.getLevelName(logging.ERROR))
logging.addLevelName(
    logging.INFO,
    "\033[1;34m%s\033[1;0m" % logging.getLevelName(logging.INFO))
logging.addLevelName(
    logging.WARNING,
    "\033[1;33m%s\033[1;0m" % logging.getLevelName(logging.WARNING))


# log level
logging.basicConfig(level=logging.INFO)
MAIN_LOGGER = logging.getLogger('main')
TELNET_LOGGER = logging.getLogger('telnet')
MAIN_LOGGER.setLevel('INFO')
TELNET_LOGGER.setLevel('INFO')
Example #41
def register():
    #icons
    global icons_dict
    icons_dict = iconsLib.new()
    icons_dir = os.path.join(os.path.dirname(__file__), "icons")
    for icon in os.listdir(icons_dir):
        name, ext = os.path.splitext(icon)
        icons_dict.load(name, os.path.join(icons_dir, icon), 'IMAGE')

    #operators
    prefs.register()
    geoscene.register()

    for menu in menus:
        try:
            bpy.utils.register_class(menu)
        except ValueError as e:
            logger.warning(
                '{} is already registered, now unregister and retry... '.
                format(menu))
            bpy.utils.unregister_class(menu)
            bpy.utils.register_class(menu)

    bpy.utils.register_class(BGIS_OT_logs)

    if BASEMAPS:
        view3d_mapviewer.register()
    if IMPORT_GEORASTER:
        io_import_georaster.register()
    if IMPORT_SHP:
        io_import_shp.register()
    if EXPORT_SHP:
        io_export_shp.register()
    if IMPORT_OSM:
        io_import_osm.register()
    if IMPORT_ASC:
        io_import_asc.register()
    if DELAUNAY:
        mesh_delaunay_voronoi.register()
    if DROP:
        object_drop.register()
    if GET_SRTM:
        io_get_srtm.register()
    if CAM_GEOPHOTO:
        add_camera_exif.register()
    if CAM_GEOREF:
        add_camera_georef.register()
    if TERRAIN_NODES:
        nodes_terrain_analysis_builder.register()
    if TERRAIN_RECLASS:
        nodes_terrain_analysis_reclassify.register()
    if EARTH_SPHERE:
        mesh_earth_sphere.register()

    #menus
    bpy.types.VIEW3D_MT_editor_menus.append(add_gis_menu)

    #shortcuts
    if not bpy.app.background:  #no ui when running as background
        wm = bpy.context.window_manager
        kc = wm.keyconfigs.active
        if '3D View' in kc.keymaps:
            km = kc.keymaps['3D View']
            if BASEMAPS:
                kmi = km.keymap_items.new(idname='view3d.map_start',
                                          type='NUMPAD_ASTERIX',
                                          value='PRESS')

    #Setup prefs
    preferences = bpy.context.preferences.addons[__package__].preferences
    #>>logger
    #logger = logging.getLogger(__name__)
    logger.setLevel(logging.getLevelName(
        preferences.logLevel))  #will affect all child logger
    #>>core settings
    cfg = getSettings()
    cfg['proj_engine'] = preferences.projEngine
    cfg['img_engine'] = preferences.imgEngine
    setSettings(cfg)
Example #42
import logging

from flask import Flask
from flask_restful import Api, Resource
from flask_cors import CORS

from utils.config import CONFIG
import mock

app = Flask(__name__)
api = Api(app)
CORS(app)

log_level = logging.getLevelName(CONFIG['log_level'])
logging.basicConfig(level=log_level)


class Main(Resource):
    def get(self):
        logging.info('GET function called')
        return 'get done'

    def post(self):
        logging.info('POST function called')
        return 'post done'


api.add_resource(Main, '/')

if CONFIG['enable_worker_api']:
    api.add_resource(mock.WorkerMock, '/worker/<string:function>')
Example #43
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Health check will returns 0 when service is working properly."""

import logging
from urllib import request
import os
import sys

LOG_LEVEL = logging.getLevelName(os.environ.get('LOG_LEVEL', 'INFO'))
logging.basicConfig(level=LOG_LEVEL)
logger = logging.getLogger(__name__)

API_PORT = os.environ.get('MONASCA_CONTAINER_LOG_API_PORT', '5607')
url = "http://localhost:" + API_PORT + "/healthcheck"


def main():
    """Send health check request to health check endpoint of log API."""
    logger.debug('Send health check request to %s', url)
    try:
        request.urlopen(url=url)
    except Exception as ex:
        logger.error('Exception during request handling: ' + repr(ex))
        sys.exit(1)
Example #44
 def getValue(level, default):
     return int(
         settings.value("log/showlevel/%s" % logging.getLevelName(level),
                        default))
Example #45
 def get_level(self):
     """Returns the current log level."""
     return logging.getLevelName(self.backend.level)
Example #46
TERRAIN_NODES = True
TERRAIN_RECLASS = True
BASEMAPS = True
DROP = True
EARTH_SPHERE = True

import os, sys, tempfile

import logging
#temporarily set the log level; it will be overridden when reading addon prefs
#logsFormat = "%(levelname)s:%(name)s:%(lineno)d:%(message)s"
logsFormat = '{levelname}:{name}:{lineno}:{message}'
logsFileName = 'bgis.log'
try:
    logsFilePath = os.path.join(os.path.dirname(__file__), logsFileName)
    logging.basicConfig(level=logging.getLevelName('DEBUG'),
                        format=logsFormat,
                        style='{',
                        filename=logsFilePath,
                        filemode='w')
except PermissionError:
    #logsFilePath = os.path.join(bpy.app.tempdir, logsFileName)
    logsFilePath = os.path.join(tempfile.gettempdir(), logsFileName)
    logging.basicConfig(level=logging.getLevelName('DEBUG'),
                        format=logsFormat,
                        style='{',
                        filename=logsFilePath,
                        filemode='w')
logger = logging.getLogger(__name__)

Example #47
import datetime
import logging
import os
import sys

from brainscore import score_model

from base_models.test_models import cornet_s_brainmodel, resnet_brainmodel, alexnet_brainmodel, mobilenet_brainmodel, \
    hmax_brainmodel
from benchmark.database import create_connection, store_score

logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout,
                    level=logging.getLevelName('DEBUG'),
                    format='%(asctime)-15s %(levelname)s:%(name)s:%(message)s')
for disable_logger in [
        's3transfer', 'botocore', 'boto3', 'urllib3', 'peewee', 'PIL'
]:
    logging.getLogger(disable_logger).setLevel(logging.WARNING)


def run_benchmark(benchmark_identifier, model_name):
    print(
        f'>>>>>Start running model {model_name} on benchmark {benchmark_identifier}'
    )
    # model = brain_translated_pool[model_name]
    model = get_model(model_name)
    score = score_model(model_identifier=model.identifier,
                        model=model,
                        benchmark_identifier=benchmark_identifier)
    return score, model.identifier
Example #48
def _validate_by_service_async(service, config_set, path, content, ctx):
    """Validates a config with an external service.

  Validation results will be stored in the validation context.

  Args:
    service (service_config_pb2.Service): service to be validated against.
    config_set (str): config set being validated.
    path (str): path of the config file being validated.
    content (str): byte-form of the content of the file being validated.
    ctx (validation.Context): context in which validation messages
      will be stored.
  """
    try:
        metadata = yield services.get_metadata_async(service.id)
    except services.DynamicMetadataError as ex:
        logging.error('Could not load dynamic metadata for %s: %s', service.id,
                      ex)
        return

    assert metadata and metadata.validation
    url = metadata.validation.url
    if not url:
        return

    match = False
    for p in metadata.validation.patterns:
        # TODO(nodir): optimize if necessary.
        if (validation.compile_pattern(p.config_set)(config_set)
                and validation.compile_pattern(p.path)(path)):
            match = True
            break
    if not match:
        return

    res = None

    def report_error(text):
        text = ('Error during external validation: %s\n'
                'url: %s\n'
                'config_set: %s\n'
                'path: %s\n'
                'response: %r') % (text, url, config_set, path, res)
        logging.error(text)
        ctx.critical('%s', text)

    try:
        req = {
            'config_set': config_set,
            'path': path,
            'content': base64.b64encode(content),
        }
        res = yield services.call_service_async(service,
                                                url,
                                                method='POST',
                                                payload=req)
    except net.Error as ex:
        report_error('Net error: %s' % ex)
        return

    try:
        for msg in res.get('messages', []):
            if not isinstance(msg, dict):
                report_error('invalid response: message is not a dict: %r' %
                             msg)
                continue
            severity = msg.get('severity') or 'INFO'
            # validation library for Go services sends severity as an integer
            # corresponding to Python's logging severity level.
            if severity in (logging.DEBUG, logging.INFO, logging.WARNING,
                            logging.ERROR, logging.CRITICAL):
                severity = logging.getLevelName(severity)
            if (severity not in service_config_pb2.ValidationResponseMessage.
                    Severity.keys()):
                report_error(
                    'invalid response: unexpected message severity: %r' %
                    severity)
                continue
            # It is safe because we've validated |severity|.
            func = getattr(ctx, severity.lower())
            func('%s', msg.get('text') or '')
    except Exception as ex:
        report_error(ex)
Example #49
def main():
    parser = argparse.ArgumentParser(prog='gvm-pyshell',
                                     description=help_text,
                                     formatter_class=RawTextHelpFormatter,
                                     add_help=False,
                                     epilog="""
usage: gvm-pyshell [-h] [--version] [connection_type] ...
   or: gvm-pyshell connection_type --help""")
    subparsers = parser.add_subparsers(metavar='[connection_type]')
    subparsers.required = True
    subparsers.dest = 'connection_type'

    parser.add_argument('-h',
                        '--help',
                        action='help',
                        help='Show this help message and exit.')

    parent_parser = argparse.ArgumentParser(add_help=False)

    parent_parser.add_argument(
        '-c',
        '--config',
        nargs='?',
        const='~/.config/gvm-tools.conf',
        help='Configuration file path. Default: ~/.config/gvm-tools.conf')
    args_before, remaining_args = parent_parser.parse_known_args()

    defaults = {'gmp_username': '', 'gmp_password': ''}

    # Retrieve data from config file
    if args_before.config:
        try:
            # SafeConfigParser is a deprecated alias removed in Python 3.12.
            config = configparser.ConfigParser()
            path = os.path.expanduser(args_before.config)
            config.read(path)
            defaults = dict(config.items('Auth'))
        except Exception as e:
            print(str(e))

    parent_parser.set_defaults(**defaults)

    parent_parser.add_argument(
        '--timeout',
        required=False,
        default=60,
        type=int,
        help='Wait <seconds> for response or if value -1, then wait '
        'continuously. Default: 60')
    parent_parser.add_argument(
        '--log',
        nargs='?',
        dest='loglevel',
        const='INFO',
        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
        help='Activates logging. Default level: INFO.')
    parent_parser.add_argument('-i',
                               '--interactive',
                               action='store_true',
                               default=False,
                               help='Start an interactive Python shell.')
    parent_parser.add_argument('--gmp-username', help='GMP username.')
    parent_parser.add_argument('--gmp-password', help='GMP password.')
    parent_parser.add_argument(
        'script', nargs='*', help='Preload gmp script. Example: myscript.gmp.')

    parser_ssh = subparsers.add_parser(
        'ssh',
        help='Use SSH connection for gmp service.',
        parents=[parent_parser])
    parser_ssh.add_argument('--hostname',
                            required=True,
                            help='Hostname or IP-Address.')
    parser_ssh.add_argument('--port',
                            required=False,
                            default=22,
                            help='Port. Default: 22.')
    parser_ssh.add_argument('--ssh-user',
                            default='gmp',
                            help='SSH Username. Default: gmp.')

    parser_tls = subparsers.add_parser(
        'tls',
        help='Use TLS secured connection for gmp service.',
        parents=[parent_parser])
    parser_tls.add_argument('--hostname',
                            required=True,
                            help='Hostname or IP-Address.')
    parser_tls.add_argument('--port',
                            required=False,
                            default=9390,
                            help='Port. Default: 9390.')

    parser_socket = subparsers.add_parser(
        'socket',
        help='Use UNIX-Socket connection for gmp service.',
        parents=[parent_parser])
    parser_socket.add_argument(
        '--sockpath',
        nargs='?',
        default='/usr/local/var/run/gvmd.sock',
        help='UNIX-Socket path. Default: /usr/local/var/run/gvmd.sock.')

    parser.add_argument(
        '-V',
        '--version',
        action='version',
        version='%(prog)s {version}'.format(version=__version__),
        help='Show program\'s version number and exit')

    global args
    args = parser.parse_args(remaining_args)

    # Sets the logging
    if args.loglevel is not None:
        level = logging.getLevelName(args.loglevel)
        logging.basicConfig(filename='gvm-pyshell.log', level=level)

    # If timeout value is -1, then the socket has no timeout for this session
    if args.timeout == -1:
        args.timeout = None

    # Open the right connection. SSH at last for default
    global gmp
    if 'socket' in args.connection_type:
        try:
            gmp = UnixSocketConnection(sockpath=args.sockpath,
                                       shell_mode=True,
                                       timeout=args.timeout)
        except OSError as e:
            print('{0}: {1}'.format(e, args.sockpath))
            sys.exit(1)

    elif 'tls' in args.connection_type:
        try:
            gmp = TLSConnection(hostname=args.hostname,
                                port=args.port,
                                timeout=args.timeout,
                                shell_mode=True)
        except OSError as e:
            print('{0}: Host: {1} Port: {2}'.format(e, args.hostname,
                                                    args.port))
            sys.exit(1)
    else:
        try:
            gmp = SSHConnection(hostname=args.hostname,
                                port=args.port,
                                timeout=args.timeout,
                                ssh_user=args.ssh_user,
                                ssh_password='',
                                shell_mode=True)
        except Exception as e:
            print('{0}: Host: {1} Port: {2}'.format(e, args.hostname,
                                                    args.port))
            sys.exit(1)

    # Ask for login credentials if none are given
    if not args.gmp_username:
        while True:
            args.gmp_username = input('Enter username: ')
            if args.gmp_username:
                break

    if not args.gmp_password:
        args.gmp_password = getpass.getpass('Enter password for ' +
                                            args.gmp_username + ': ')

    try:
        gmp.authenticate(args.gmp_username, args.gmp_password)
    except Exception as e:
        print('Please check your credentials!')
        print(e)
        sys.exit(1)

    with_script = args.script and len(args.script) > 0
    no_script_no_interactive = not args.interactive and not with_script
    script_and_interactive = args.interactive and with_script
    only_interactive = not with_script and args.interactive
    only_script = not args.interactive and with_script

    if no_script_no_interactive:
        enterInteractiveMode()

    if only_interactive:
        enterInteractiveMode()

    if script_and_interactive:
        load(args.script[0])
        enterInteractiveMode()

    if only_script:
        load(args.script[0])

    gmp.close()
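The --log handling above works because argparse's choices list already restricts the value to a registered level name, so logging.getLevelName(args.loglevel) returns an integer. Without that guard an unknown name comes back as the string 'Level <name>'. A hedged defensive variant (parse_log_level is an illustrative name, not part of gvm-pyshell):

import logging

def parse_log_level(name, default=logging.INFO):
    # getLevelName('INFO') -> 20, but getLevelName('NONSENSE') -> the
    # string 'Level NONSENSE', so type-check before trusting the result.
    level = logging.getLevelName(str(name).upper())
    return level if isinstance(level, int) else default

assert parse_log_level('debug') == logging.DEBUG
assert parse_log_level('nonsense') == logging.INFO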
Example #50
def setup_file(level, file):
    file_handler = logging.handlers.WatchedFileHandler(file)
    file_handler.set_name(FILE_HANDLER)
    file_handler.setLevel(logging.getLevelName(level.upper()))
    file_handler.setFormatter(_formatter)
    _register_handler(file_handler)
Example #51
    def init(self,
             init_logger=False,
             load_default_config=True,
             load_config=True):
        """Initializes experimenter.

        :param bool init_logger: If True, initializes loggers
        :param bool load_default_config: If false, does not load standard configuration.
        :param bool load_config: If true, loads configuration specified on a command line
        """
        # Parse command line arguments
        parser = argparse.ArgumentParser()
        parser.add_argument(
            'action',
            type=str,
            help='Action to perform. Valid actions: "print-config", "run", '
                 '"build" and "analyze-plan".')
        parser.add_argument('--config',
                            required=False,
                            type=str,
                            help='Configuration file (json) of an experiment. '
                                 'Will override values from default configuration.')
        parser.add_argument('--plan',
                            required=False,
                            type=str,
                            help='Pre-built plan of an experiment (json). '
                                 'If action is "build", a file name to write plan to. '
                                 'If action is "run", a file name to read plan from.')
        parser.add_argument('--progress_file', '--progress-file',
                            required=False, type=str, default=None,
                            help='A JSON file that experimenter will be updating on '
                                 'its progress. If not present, no progress info '
                                 'will be available. Put it somewhere in /dev/shm.')
        parser.add_argument(
            '-P',
            action='append',
            required=False,
            default=[],
            help='Parameters that override parameters in configuration file. '
                 'For instance, -Pexp.phase=2. Values must be json parsable '
                 '(json.loads()).')
        parser.add_argument(
            '-V',
            action='append',
            required=False,
            default=[],
            help='Variables that override variables in configuration file in '
                 'section "variables". These variables are used to generate '
                 'different combinations of experiments. For instance: '
                 '-Vexp.framework=\'["tensorflow", "caffe2"]\'. '
                 'Values must be json parsable (json.loads()).')
        parser.add_argument(
            '--log_level',
            '--log-level',
            required=False,
            default='info',
            help='Python logging level. Valid values: "critical", "error", '
                 '"warning", "info" and "debug".')
        parser.add_argument('--discard_default_config',
                            '--discard-default-config',
                            required=False,
                            default=False,
                            action='store_true',
                            help='Do not load default configuration.')
        parser.add_argument(
            '--no_validation',
            '--no-validation',
            required=False,
            default=False,
            action='store_true',
            help='Do not perform config validation before running benchmarks.')
        parser.add_argument(
            '-E',
            action='append',
            required=False,
            default=[],
            help='Extensions to add. Can be useful to quickly customize '
                 'experiments. Must be a valid json parsable array element '
                 'for the "extensions" array.')
        args = parser.parse_args()

        log_level = logging.getLevelName(args.log_level.upper())
        self.action = args.action
        self.config_file = args.config
        self.plan_file = args.plan
        self.validation = not args.no_validation
        self.__progress_file = args.progress_file

        # Initialize logger
        if init_logger:
            logging.debug("Initializing logger to level %s", args.log_level)
            root = logging.getLogger()
            root.setLevel(log_level)
            handler = logging.StreamHandler(sys.stdout)
            handler.setLevel(log_level)
            root.addHandler(handler)

        logging.debug("Parsing parameters on a command line")
        DictUtils.add(self.params,
                      args.P,
                      pattern='(.+?(?=[=]))=(.+)',
                      must_match=True)
        logging.debug("Parsing variables on a command line")
        DictUtils.add(self.variables,
                      args.V,
                      pattern='(.+?(?=[=]))=(.+)',
                      must_match=True)

        # Load default configuration
        if load_default_config and not args.discard_default_config:
            logging.debug("Loading default configuration")
            _, self.config, self.param_info = ConfigurationLoader.load(
                os.path.join(os.path.dirname(__file__), 'configs'))

        # Load configurations specified on a command line
        if load_config:
            logging.debug("Loading user configuration")
            self.load_configuration()

        # Add extensions from command line
        DictUtils.ensure_exists(self.config, 'extensions', [])
        if len(args.E) > 0:
            logging.debug("Parsing extensions on a command line")
        for extension in args.E:
            try:
                ext = json.loads(extension)
                logging.debug('Found extension: %s', str(ext))
                self.config['extensions'].append(ext)
            except Exception as err:
                logging.warning("Found non-json parsable extension: %s",
                                extension)
                raise err
Example #52
def get_level_name(level):
    # _logging is presumably "import logging as _logging" in the elided header.
    return _logging.getLevelName(level)
Example #53
import logging
import os


def get_env(env_var, default_val):
    return os.environ.get(env_var, default_val)


# Accept the configured level name only if it is one of the standard names;
# otherwise fall back to 'INFO'.
_VALID_LEVELS = [logging.getLevelName(lvl) for lvl in
                 (logging.CRITICAL, logging.ERROR, logging.WARNING,
                  logging.INFO, logging.DEBUG)]
LOG_LEVEL_CONFIG = get_env('LOG_LEVEL_CONFIG', 'INFO')
if LOG_LEVEL_CONFIG not in _VALID_LEVELS:
    LOG_LEVEL_CONFIG = logging.getLevelName(logging.INFO)
LOGGING_CONFIG = {
    'version': 1,
    'formatters': {
        'json-fmt': {
            'datefmt': '%Y-%m-%d %H:%M:%S',
            'format': '{"time": "%(asctime)s.%(msecs)03d", "level": "%(levelname)s", %(message)s, '
                      '"module": "%(module)s", "function": "%(funcName)s", '
                      '"thread": "%(thread)d", "process": "%(process)d"}'
        }
    },
    'handlers': {
        'json-log-console': {
            'class': 'logging.StreamHandler',
            'level': LOG_LEVEL_CONFIG,
            'formatter': 'json-fmt'
        }
    },
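The LOGGING_CONFIG dict above is cut off before any loggers/root section. Assuming it feeds logging.config.dictConfig(), a minimal hedged completion could wire the json-log-console handler to the root logger; the 'root' section below is an assumption, not recovered from the original:

import logging
import logging.config

# Assumed completion of the truncated dict above.
LOGGING_CONFIG['root'] = {
    'level': LOG_LEVEL_CONFIG,
    'handlers': ['json-log-console'],
}
logging.config.dictConfig(LOGGING_CONFIG)

# The json-fmt formatter splices %(message)s into a JSON object, so the
# message itself must be a JSON fragment such as '"msg": "..."'.
logging.getLogger(__name__).info('"msg": "logger configured"')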
Example #54
    level = 0

    if loglevel == 'INFO':
        level = logging.INFO
    elif loglevel == 'WARNING':
        level = logging.WARNING
    elif loglevel == "ERROR":
        level = logging.ERROR
    elif loglevel == "CRITICAL":
        level = logging.CRITICAL
    elif loglevel == 'DEBUG':
        level = logging.DEBUG

    logging.basicConfig(format='%(levelname)s:%(message)s', level=level)
    rootlogger = logging.getLogger(name=None)
    print("loglevel set at: " + logging.getLevelName(rootlogger.level))

    logging.info("starting with following variables:")
    logging.info("IP Caster: " + ip_caster)
    logging.info("RTCM port: " + str(rtcm_port))
    logging.info("Mountpoint: " + mountpoint)
    logging.info("User caster: " + user_caster)
    logging.info("Password caster: " + pass_caster)
    logging.info("User MQTT: " + user_mqtt)
    logging.info("Password MQTT: " + pass_mqtt)
    logging.info("IP MQTT: " + ip_mqtt)
    logging.info("Port MQTT: " + str(mqtt_port))

    ##### CONNECT TO MQTT SERVER
    broker_address = str(ip_mqtt)
    client = mqtt.Client("rtk")  #create new instance
Example #55
###########################################################################
import logging
from copy import deepcopy
from logging import config
from aiida.common import setup

# Custom logging level, intended specifically for informative log messages
# reported during WorkChains and Workflows. We want the level between INFO(20)
# and WARNING(30) such that it will be logged for the default loglevel, however
# the value 25 is already reserved for SUBWARNING by the multiprocessing module.
LOG_LEVEL_REPORT = 23
logging.addLevelName(LOG_LEVEL_REPORT, 'REPORT')

# Convenience dictionary of available log level names and their log level integer
LOG_LEVELS = {
    logging.getLevelName(logging.NOTSET): logging.NOTSET,
    logging.getLevelName(logging.DEBUG): logging.DEBUG,
    logging.getLevelName(logging.INFO): logging.INFO,
    logging.getLevelName(LOG_LEVEL_REPORT): LOG_LEVEL_REPORT,
    logging.getLevelName(logging.WARNING): logging.WARNING,
    logging.getLevelName(logging.ERROR): logging.ERROR,
    logging.getLevelName(logging.CRITICAL): logging.CRITICAL,
}

# The AiiDA logger
aiidalogger = logging.getLogger('aiida')


# A logging filter that can be used to disable logging
class NotInTestingFilter(logging.Filter):
    def filter(self, record):
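Given the custom REPORT level registered above, emitting at that level typically goes through Logger.log() guarded by isEnabledFor(). A hedged sketch (the report() helper is illustrative and not necessarily AiiDA's actual API):

import logging

LOG_LEVEL_REPORT = 23
logging.addLevelName(LOG_LEVEL_REPORT, 'REPORT')

def report(logger, msg, *args, **kwargs):
    # Same cheap short-circuit the stdlib helpers (info, warning, ...) use.
    if logger.isEnabledFor(LOG_LEVEL_REPORT):
        logger.log(LOG_LEVEL_REPORT, msg, *args, **kwargs)

logging.basicConfig(level=LOG_LEVEL_REPORT)
report(logging.getLogger('aiida.demo'), 'work chain step %d finished', 3)
assert logging.getLevelName(LOG_LEVEL_REPORT) == 'REPORT'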
Example #56
    def updateLogLevel(self, context):
        logger = logging.getLogger(PKG)
        logger.setLevel(logging.getLevelName(self.logLevel))
Example #57
    def initialize(self, *args, **kwargs):
        """Load configuration settings."""
        super().initialize(*args, **kwargs)
        self.load_config_file(self.config_file)
        # hook up tornado logging
        if self.debug:
            self.log_level = logging.DEBUG
        tornado.options.options.logging = logging.getLevelName(self.log_level)
        tornado.log.enable_pretty_logging()
        self.log = tornado.log.app_log

        self.init_pycurl()

        # initialize kubernetes config
        if self.builder_required:
            try:
                kubernetes.config.load_incluster_config()
            except kubernetes.config.ConfigException:
                kubernetes.config.load_kube_config()
            self.tornado_settings[
                "kubernetes_client"] = kubernetes.client.CoreV1Api()

        # times 2 for log + build threads
        self.build_pool = ThreadPoolExecutor(self.concurrent_build_limit * 2)

        jinja_options = dict(autoescape=True)
        jinja_env = Environment(loader=FileSystemLoader(TEMPLATE_PATH),
                                **jinja_options)
        if self.use_registry and self.builder_required:
            registry = DockerRegistry(self.docker_auth_host,
                                      self.docker_token_url,
                                      self.docker_registry_host)
        else:
            registry = None

        self.launcher = Launcher(
            parent=self,
            hub_url=self.hub_url,
            hub_api_token=self.hub_api_token,
        )

        self.tornado_settings.update({
            "docker_push_secret": self.docker_push_secret,
            "docker_image_prefix": self.docker_image_prefix,
            "static_path": os.path.join(os.path.dirname(__file__), "static"),
            "github_auth_token": self.github_auth_token,
            "debug": self.debug,
            'hub_url': self.hub_url,
            'hub_api_token': self.hub_api_token,
            'launcher': self.launcher,
            'appendix': self.appendix,
            "build_namespace": self.build_namespace,
            "builder_image_spec": self.builder_image_spec,
            'build_node_selector': self.build_node_selector,
            'build_pool': self.build_pool,
            'per_repo_quota': self.per_repo_quota,
            'repo_providers': self.repo_providers,
            'use_registry': self.use_registry,
            'registry': registry,
            'traitlets_config': self.config,
            'google_analytics_code': self.google_analytics_code,
            'google_analytics_domain': self.google_analytics_domain,
            'jinja2_env': jinja_env,
            'build_memory_limit': self.build_memory_limit,
            'build_docker_host': self.build_docker_host,
            'base_url': self.base_url,
            'static_url_prefix': url_path_join(self.base_url, 'static/'),
        })

        handlers = [
            (r'/metrics', MetricsHandler),
            (r"/build/([^/]+)/(.+)", BuildHandler),
            (r"/v2/([^/]+)/(.+)", ParameterizedMainHandler),
            (r"/repo/([^/]+)/([^/]+)(/.*)?", LegacyRedirectHandler),
            # for backward-compatible mybinder.org badge URLs
            # /assets/images/badge.svg
            (r'/assets/(images/badge\.svg)', tornado.web.StaticFileHandler, {
                'path': self.tornado_settings['static_path']
            }),
            # /badge.svg
            (r'/(badge\.svg)', tornado.web.StaticFileHandler, {
                'path':
                os.path.join(self.tornado_settings['static_path'], 'images')
            }),
            # /favicon_XXX.ico
            (r'/(favicon\_fail\.ico)', tornado.web.StaticFileHandler, {
                'path':
                os.path.join(self.tornado_settings['static_path'], 'images')
            }),
            (r'/(favicon\_success\.ico)', tornado.web.StaticFileHandler, {
                'path':
                os.path.join(self.tornado_settings['static_path'], 'images')
            }),
            (r'/(favicon\_building\.ico)', tornado.web.StaticFileHandler, {
                'path':
                os.path.join(self.tornado_settings['static_path'], 'images')
            }),
            (r'/', MainHandler),
            (r'.*', Custom404),
        ]
        handlers = self.add_url_prefix(self.base_url, handlers)
        self.tornado_app = tornado.web.Application(handlers,
                                                   **self.tornado_settings)
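The tornado wiring above hinges on one conversion: tornado.options stores the logging option as a level name, while the traitlet holds an integer, so getLevelName() bridges the two before enable_pretty_logging() is called. A hedged standalone sketch of that bridge (enable_tornado_logging is an illustrative name):

import logging
import tornado.log
import tornado.options

def enable_tornado_logging(log_level=logging.INFO):
    # tornado's 'logging' option takes a name like 'INFO', not the int 20.
    tornado.options.options.logging = logging.getLevelName(log_level)
    tornado.log.enable_pretty_logging()
    return tornado.log.app_log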
Example #58
    SETTINGS = config['settings']

    # Templates for incident displaying
    acknowledgement_tmpl_d = "{message}\n\n###### {ack_time} by {author}\n\n______\n"
    templates = config.get('templates')
    if templates:
        acknowledgement_tmpl = templates.get('acknowledgement',
                                             acknowledgement_tmpl_d)
        investigating_tmpl = templates.get('investigating')
        resolving_tmpl = templates.get('resolving')
    else:
        acknowledgement_tmpl = acknowledgement_tmpl_d
        # Define these too so later references don't raise NameError when
        # the config has no "templates" section.
        investigating_tmpl = resolving_tmpl = None

    exit_status = 0
    # Set Logging
    log_level = logging.getLevelName(SETTINGS['log_level'])
    log_level_requests = logging.getLevelName(SETTINGS['log_level_requests'])
    logging.basicConfig(
        level=log_level,
        format='%(asctime)s %(levelname)s: (%(threadName)s) %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S %Z')
    logging.getLogger("requests").setLevel(log_level_requests)
    logging.info('Zabbix Cachet v.{} started'.format(__version__))
    inc_update_t = threading.Thread()
    event = threading.Event()
    try:
        zapi = Zabbix(ZABBIX['server'], ZABBIX['user'], ZABBIX['pass'],
                      ZABBIX['https-verify'])
        cachet = Cachet(CACHET['server'], CACHET['token'],
                        CACHET['https-verify'])
        logging.info('Zabbix ver: {}. Cachet ver: {}'.format(
Example #59
def pt_sample(step,
              n_chains,
              n_samples=100000,
              start=None,
              swap_interval=(100, 300),
              beta_tune_interval=10000,
              n_workers_posterior=1,
              homepath='',
              progressbar=True,
              buffer_size=5000,
              buffer_thinning=1,
              model=None,
              rm_flag=False,
              resample=False,
              keep_tmp=False,
              record_worker_chains=False):
    """
    Parallel Tempering algorithm

    (adaptive) Metropolis sampling over n_jobs of MC chains.
    Half (floor) of these are sampling at beta = 1 (the posterior).
    The other half of the MC chains are tempered linearly down to
    beta = 1e-6. Randomly, the states of chains are swapped based on
    the Metropolis-Hastings acceptance criterion to the power of the
    differences in beta of the involved chains.
    The samples are written to disk only by the master process. Once
    the specified number of samples is reached sampling is stopped.

    Parameters
    ----------
    step : :class:`beat.sampler.Metropolis`
        sampler object
    n_chains : int
        number of Markov Chains to use
    n_samples : int
        number of samples in the result trace, if reached sampling stops
    start : list
        list of start points (dicts), one per chain; if given, overrides
        the step's population
    swap_interval : tuple
        interval for uniform random integer that determines the length
        of each MarkovChain on each worker. The chain end values of workers
        are proposed for swapping state and are written in the final trace
    beta_tune_interval : int
        Evaluate acceptance rate of chain swaps and tune betas similar
        to proposal step tuning
    n_workers_posterior : int
        number of workers that sample from the posterior distribution at beta=1
    homepath : string
        Result_folder for storing stages, will be created if not existing
    progressbar : bool
        Flag for displaying a progress bar
    buffer_size : int
        this is the number of samples after which the buffer is written to disk
        or if the chain end is reached
    buffer_thinning : int
        every nth sample of the buffer is written to disk,
        default: 1 (no thinning)
    model : :class:`pymc3.Model`
        (optional if in `with` context) has to contain deterministic
        variable name defined under step.likelihood_name' that contains the
        model likelihood
    rm_flag : bool
        If True existing stage result folders are being deleted prior to
        sampling.
    resample : bool
        If True all the Markov Chains are starting sampling at the testvalue
    keep_tmp : bool
        If True the execution directory (under '/tmp/') is not being deleted
        after process finishes
    record_worker_chains : bool
        If True worker chain samples are written to disc using the specified
        backend trace objects (during sampler initialization).
        Very useful for debugging purposes. MUST be False for runs on
        distributed computing systems!
    """
    if n_chains < 2:
        raise ValueError(
            'Parallel Tempering requires at least 2 Markov Chains!')

    if start is not None:
        if len(start) != step.n_chains:
            raise TypeError('Argument `start` should contain one dict per '
                            'chain (step.n_chains)')
        else:
            step.population = start

    sampler_args = [
        step, n_samples, swap_interval, beta_tune_interval,
        n_workers_posterior, homepath, progressbar, buffer_size,
        buffer_thinning, resample, rm_flag, record_worker_chains
    ]

    project_dir = os.path.dirname(homepath)
    loglevel = getLevelName(logger.getEffectiveLevel()).lower()

    distributed.run_mpi_sampler(sampler_name='pt',
                                model=model,
                                sampler_args=sampler_args,
                                keep_tmp=keep_tmp,
                                n_jobs=n_chains,
                                loglevel=loglevel,
                                project_dir=project_dir)
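The loglevel handling at the end of this example is a useful round-trip: the parent serializes its effective level as a lowercase name for the MPI workers, and the worker maps it back to an integer. A hedged sketch of both directions (helper names are illustrative):

import logging

def effective_level_name(logger):
    # Parent side: 10 -> 'DEBUG' -> 'debug', safe to pass on a command line.
    return logging.getLevelName(logger.getEffectiveLevel()).lower()

def restore_level(name):
    # Worker side: map the name back to its integer value.
    return logging.getLevelName(name.upper())

demo = logging.getLogger('beat.demo')
demo.setLevel(logging.DEBUG)
assert restore_level(effective_level_name(demo)) == logging.DEBUG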
Example #60
'''
Return a redirect location set in the environment.
'''

import json
import logging
import os

log_level = os.environ.get('LOG_LEVEL', 'INFO')
logging.root.setLevel(logging.getLevelName(log_level))
_logger = logging.getLogger(__name__)

REDIRECT_LOCATION = os.environ.get('REDIRECT_LOCATION')
STATUS_CODE = int(os.environ.get('STATUS_CODE'))


def _get_location(redirect_location, path, parameters):
    '''Return the full location path'''
    location = redirect_location + path
    if parameters:
        location += '?' + '&'.join(parameters)
    return location


def _get_path_from_event(event):
    '''Get path from event'''
    return event.get('path')


def _get_query_parameters_from_event(event):
    '''Get query parameters from event'''