Example #1
def __setup_advanced_logging() -> None:
    """Sets up advanced logging over mail and Discord
    """
    if config.getboolean("logging", "enable_mail_logging"):
        mailcfg = dict(config.items("mail_logging"))
        mailhost = (mailcfg["mailhost"], mailcfg["mailport"])
        toaddrs = mailcfg["toaddrs"].split(",")
        credentials = (mailcfg["username"], mailcfg["password"])
        eh = SMTPHandler(mailhost=mailhost,
                         fromaddr=mailcfg["fromaddr"],
                         toaddrs=toaddrs,
                         subject=mailcfg["subject"],
                         credentials=credentials,
                         secure=(),
                         timeout=config.getint("mail_logging",
                                               "timeout"))
        eh.setFormatter(formatter)
        eh.setLevel(logging.WARNING)
        logger.addHandler(eh)

    if config.getboolean("logging", "enable_discord_logging"):
        avatar_url = config["discord_logging"]["avatar_url"]
        avatar_url = avatar_url if avatar_url else None
        dh = DiscordHandler(config["discord_logging"]["username"],
                            config["discord_logging"]["webhook_url"],
                            avatar_url)
        dh.setFormatter(formatter)
        dh.setLevel(logging.WARNING)
        logger.addHandler(dh)
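
A hedged sketch of the INI layout this setup reads; the section and option names come from the calls above, while every value is a placeholder rather than a real deployment setting:

import configparser

config = configparser.ConfigParser()
config.read_string("""
[logging]
enable_mail_logging = true
enable_discord_logging = false

[mail_logging]
mailhost = smtp.example.com
mailport = 587
fromaddr = bot@example.com
toaddrs = ops@example.com,dev@example.com
username = bot@example.com
password = secret
subject = Bot warning
timeout = 10

[discord_logging]
username = bot
webhook_url = https://discord.com/api/webhooks/...
avatar_url =
""")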
Example #2
    def __init__(self):
        if config.getboolean('MAIN', 'DownloadNLTKResources'):
            download_nltk_resources()
        colorama.init()

        self.bearer = config.get('CONNECTION', 'Bearer')
        self.timeout = config.getfloat('CONNECTION', 'Timeout')
        self.show_next_info = config.getboolean('MAIN', 'ShowNextShowInfo')
        self.exit_if_offline = config.getboolean('MAIN', 'ExitIfShowOffline')
        self.show_bearer_info = config.getboolean('MAIN', 'ShowBearerInfo')
        self.headers = {
            'User-Agent': 'Android/1.40.0',
            'x-hq-client': 'Android/1.40.0',
            'x-hq-country': 'US',
            'x-hq-lang': 'en',
            'x-hq-timezone': 'America/New_York',
            'Authorization': f'Bearer {self.bearer}'
        }

        self.session = requests.Session()
        self.session.headers.update(self.headers)

        init_root_logger()
        self.logger = logging.getLogger(__name__)

        # Find local UTC offset
        now = time.time()
        self.local_utc_offset = datetime.fromtimestamp(
            now) - datetime.utcfromtimestamp(now)

        self.validate_bearer()
        self.logger.info('HackQ-Trivia initialized.\n',
                         extra={'pre': colorama.Fore.GREEN})
Example #3
def main():
    logging_config = get_logging_config("logging_config.yaml")
    set_up_logging(logging_config)

    config = configparser.ConfigParser()
    config.read("config.ini")

    alarm_duration = config.getint("Alarmmonitor", "hdmi_cec_device_on_time")
    polling_interval = config.getint("Alarmmonitor", "polling_interval")
    send_errors = config.getboolean("Alarmmonitor", "send_errors")
    send_starts = config.getboolean("Alarmmonitor", "send_starts")
    show_infos = config.getboolean("blaulichtSMS Einsatzmonitor", "show_infos")

    blaulichtsms_controller = BlaulichtSmsController(
        config["blaulichtSMS Einsatzmonitor"]["customer_id"],
        config["blaulichtSMS Einsatzmonitor"]["username"],
        config["blaulichtSMS Einsatzmonitor"]["password"],
        alarm_duration=alarm_duration,
        show_infos=show_infos)
    mail_sender = AlarmMonitorMailSender()
    hdmi_cec_controller = get_cec_controller(config, send_errors, mail_sender)
    browser_controller = ChromiumBrowserController(
        blaulichtsms_controller.get_session())
    alarm_monitor = AlarmMonitor(polling_interval, send_errors, send_starts,
                                 blaulichtsms_controller, hdmi_cec_controller,
                                 browser_controller, mail_sender)
    alarm_monitor.run()
Example #4
    def __init__(self):
        HackQ.download_nltk_resources()
        colorama.init()

        self.bearer = config.get("CONNECTION", "BEARER")
        self.timeout = config.getfloat("CONNECTION", "Timeout")
        self.show_next_info = config.getboolean("MAIN", "ShowNextShowInfo")
        self.exit_if_offline = config.getboolean("MAIN", "ExitIfShowOffline")
        self.show_bearer_info = config.getboolean("MAIN", "ShowBearerInfo")
        self.headers = {
            "User-Agent": "Android/1.40.0",
            "x-hq-client": "Android/1.40.0",
            "x-hq-country": "US",
            "x-hq-lang": "en",
            "x-hq-timezone": "America/New_York",
            "Authorization": f"Bearer {self.bearer}",
            "Connection": "close"
        }

        self.session = requests.Session()
        self.session.headers.update(self.headers)

        self.init_root_logger()
        self.logger = logging.getLogger(__name__)

        # Find local UTC offset
        now = time.time()
        self.local_utc_offset = datetime.fromtimestamp(
            now) - datetime.utcfromtimestamp(now)

        self.validate_bearer()
        self.logger.info("HackQ-Trivia initialized.\n",
                         extra={"pre": colorama.Fore.GREEN})
Example #5
    def __init__(self, config_file, tvdb):
        config = configparser.ConfigParser()
        config.read(config_file)
        log_file = config.get('main', 'log.config', fallback=None)
        if log_file:
            import yaml
            with open(log_file) as f:
                log_config = yaml.safe_load(f)
                logging.config.dictConfig(log_config)

        template = config.get(
            'directories',
            'out.pattern',
            fallback=
            '${series}/Season ${season}/${series} - S${season|zpad}E${episode|zpad} - ${episode_name}.${ext}'
        )
        self.template = template

        self.wtv_in_dir = config.get('directories', 'tv.in')
        self.tv_pattern = config.get('directories', 'tv.pattern')
        self.com_in_dir = config.get('directories', 'commercial.in')
        self.srt_in_dir = config.get('directories', 'srt.in')
        self.temp_dir = config.get('directories', 'temp.dir')
        self.out_dir = config.get('directories', 'out.dir')
        self.delete_source = config.getboolean('directories',
                                               'delete.source.files',
                                               fallback=True)

        self.convert_config = convert_config_from_config_section(
            config, 'transcode')

        if config.has_section('ffmpeg'):
            logger.error('You are using an outdated configuration')
            raise Exception('You are using an outdated configuration')

        self.debug = config.getboolean('main', 'debug', fallback=False)

        self.ccextractor_exe = config.get('ccextractor',
                                          'executable',
                                          fallback=ccextractor())
        self.ccextractor_run = config.getboolean('ccextractor',
                                                 'run.if.missing',
                                                 fallback=False)

        self.comskip_exe = config.get('comskip', 'executable', fallback=None)
        self.comskip_run = config.getboolean('comskip',
                                             'run.if.missing',
                                             fallback=comskip())
        self.comskip_ini = config.get('comskip', 'comskip.ini', fallback=None)

        db_file = config.get('main', 'database.file', fallback='db.sqlite')

        self.wtvdb = WtvDb(db_file)
        self.tvdb = tvdb

        if self.convert_config.include_subtitles:
            logger.warning(
                'Include Subtitles is True. This usually does not work with TV captions.'
            )
Example #6
def mirror(config):
    # Load the filter plugins so the loading doesn't happen in the fast path
    filter_project_plugins()
    filter_release_plugins()

    # Always reference those classes here with the fully qualified name to
    # allow them being patched by mock libraries!
    master = bandersnatch.master.Master(
        config.get("mirror", "master"), config.getfloat("mirror", "timeout")
    )

    # The `json` boolean is a new optional option in 2.1.2 - support configs
    # that lack it, but display an error saying that this will become a hard
    # error in the not too distant future
    try:
        json_save = config.getboolean("mirror", "json")
    except configparser.NoOptionError:
        logger.error(
            "Please update your config to include a json "
            + "boolean in the [mirror] section. Setting to False"
        )
        json_save = False

    try:
        root_uri = config.get("mirror", "root_uri")
    except configparser.NoOptionError:
        root_uri = None

    try:
        digest_name = config.get("mirror", "digest_name")
    except configparser.NoOptionError:
        digest_name = "sha256"
    if digest_name not in ("md5", "sha256"):
        raise ValueError(
            f"Supplied digest_name {digest_name} is not supported! Please "
            + "update digest_name to one of ('sha256', 'md5') in the [mirror] "
            + "section."
        )

    mirror = bandersnatch.mirror.Mirror(
        config.get("mirror", "directory"),
        master,
        stop_on_error=config.getboolean("mirror", "stop-on-error"),
        workers=config.getint("mirror", "workers"),
        hash_index=config.getboolean("mirror", "hash-index"),
        json_save=json_save,
        root_uri=root_uri,
        digest_name=digest_name,
        keep_index_versions=config.getint("mirror", "keep_index_versions", fallback=0),
    )

    changed_packages = mirror.synchronize()
    logger.info("{} packages had changes".format(len(changed_packages)))
    for package_name, changes in changed_packages.items():
        logger.debug(f"{package_name} added: {changes}")
Example #7
def getOptres(tsn=None):
    try:
        return config.getboolean('_tivo_' + tsn, 'optres')
    except Exception:
        try:
            return config.getboolean(get_section(tsn), 'optres')
        except Exception:
            try:
                return config.getboolean('Server', 'optres')
            except Exception:
                return False
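
On Python 3 the same cascading lookup can be written without nested try/except blocks; a sketch that assumes the `config` object and `get_section` helper from the snippet above:

def getOptres(tsn=None):
    # Walk from the most specific section to the most general one.
    for section in ('_tivo_' + (tsn or ''), get_section(tsn), 'Server'):
        if config.has_section(section) and config.has_option(section, 'optres'):
            try:
                return config.getboolean(section, 'optres')
            except ValueError:  # option present but not a parseable boolean
                continue
    return False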
Example #8
def main():
    def setlogger(conf_file=None):
        if conf_file:
            return logging.config.fileConfig(conf_file)

        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s %(levelname)s %(message)s',
                            datefmt='%Y/%m/%d %H:%M:%S')

    def loadmodules(path=None):
        from . import mmhandler    # load default modules

        if path:
            mmplugin.load(path)

        for name, action in mmplugin.iteractions():
            logging.info('[plugin] [action] [%s] <%s.%s> loaded',
                         name, action.__module__, action.__name__)

    def getsslctx():
        crt = config.get('outgoing', 'ssl_crt')
        key = config.get('outgoing', 'ssl_key')
        return (util.abspath(crt), util.abspath(key)) if crt and key else None

    def parseargs():
        ap = argparse.ArgumentParser()
        ap.add_argument('-l', '--logging-config', type=util.abspath,
                        help='configuration file for the logging')
        ap.add_argument('config', type=util.abspath,
                        help='configuration file for the Mattermost client')
        return ap.parse_args()

    args = parseargs()

    config.read(args.config)
    setlogger(args.logging_config)
    loadmodules(config.get('plugin', 'path'))

    sslctx = getsslctx()

    app = flask.Flask(__name__)
    app.config['JSON_AS_ASCII'] = False
    app.register_blueprint(bp, url_prefix=config.get('outgoing', 'path'))

    if config.getboolean('mm', 'daemon'):
        util.daemonize()

    sched = Scheduler()
    sched.start()

    app.run(host=config.get('outgoing', 'host'),
            port=config.getint('outgoing', 'port'),
            debug=config.getboolean('mm', 'debug'),
            ssl_context=sslctx, use_reloader=False, threaded=True)
Example #10
def getOptres(tsn=None):
    try:
        return config.getboolean("_tivo_" + tsn, "optres")
    except Exception:
        try:
            return config.getboolean(get_section(tsn), "optres")
        except Exception:
            try:
                return config.getboolean("Server", "optres")
            except Exception:
                return False
Example #11
def mirror(config):
    # Always reference those classes here with the fully qualified name to
    # allow them being patched by mock libraries!
    master = bandersnatch.master.Master(
        config.get('mirror', 'master'),
        float(config.get('mirror', 'timeout')))
    mirror = bandersnatch.mirror.Mirror(
        config.get('mirror', 'directory'), master,
        stop_on_error=config.getboolean('mirror', 'stop-on-error'),
        workers=config.getint('mirror', 'workers'),
        delete_packages=config.getboolean('mirror', 'delete-packages'))
    mirror.synchronize()
Example #12
    def __init__(self, log):
        self.logger = log
        config = ConfigParser.ConfigParser()
        config.read("./email.config")

        self.livDeviceName = config.get('EMAIL', 'liv_device_name')
        self.eAlarm = config.getboolean('EMAIL', 'email_alarm_active')
        self.eReport = config.getboolean('EMAIL', 'email_report_active')
        self.eReportTime = config.getint('EMAIL', 'email_report_time')
        self.eServer = config.get('EMAIL', 'email_server')
        self.eFrom = config.get('EMAIL', 'from')
        self.eFromPassword = config.get('EMAIL', 'from_password')
        self.eToList = config.get('EMAIL', 'toList')
        self.minuteCounter = 0
Example #13
def init_root_logger() -> None:
    import os

    class LogFilterColor(logging.Filter):
        def filter(self, record):
            if "hackq" not in record.name and "__main__" not in record.name:
                return None

            if not hasattr(record, "pre"):
                record.pre = ""
                record.post = ""
            elif not hasattr(record, "post"):
                record.post = colorama.Style.RESET_ALL

            return record

    log_filename = config.get("LOGGING", "File")
    script_dir = os.path.dirname(os.path.abspath(__file__))
    if not os.path.isabs(log_filename):
        log_filename = os.path.join(script_dir, log_filename)

    inc_filenames = config.getboolean("LOGGING", "IncrementFileNames")
    # check if name contains format string placeholder
    if inc_filenames and log_filename.format(0) == log_filename:
        inc_filenames = False
    if inc_filenames:
        log_filename = next_available_name(log_filename)

    with open(os.path.join(script_dir,
                           "logging_config.json")) as log_conf_file:
        log_conf_dict = json.load(log_conf_file)
        log_conf_dict["handlers"]["fileHandler"]["filename"] = log_filename
        log_conf_dict["filters"]["LogFilterColor"]["()"] = LogFilterColor

        logging.config.dictConfig(log_conf_dict)
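
`next_available_name` is not shown on this page; a plausible sketch, given that the code above treats the log filename as a format string with one placeholder:

import os

def next_available_name(name_format: str) -> str:
    # Hypothetical helper: return the first expansion of the format string
    # (e.g. "hackq-{}.log") that does not yet exist on disk.
    index = 0
    while os.path.exists(name_format.format(index)):
        index += 1
    return name_format.format(index)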
Example #14
def parse_config_file(filename):
    config = ConfigParser.ConfigParser()
    config.read(filename)

    try:
        config.items('loggers')
        # We have at least the loggers section so we can set logging config
        logging.config.fileConfig(filename)
    except ConfigParser.NoSectionError:
        log.info('No section loggers in %s' % filename)

    try:
        items = dict(config.items('sqla_taskq')).keys()
    except ConfigParser.NoSectionError:
        log.info('No section sqla_taskq in %s' % filename)
        return None

    dic = {}
    if 'sqla_url' in items:
        dic['sqla_url'] = config.get('sqla_taskq', 'sqla_url')

    if 'kill' in items:
        dic['kill'] = config.getboolean('sqla_taskq', 'kill')
    else:
        dic['kill'] = False

    if 'timeout' in items:
        dic['timeout'] = config.getint('sqla_taskq', 'timeout')
    else:
        dic['timeout'] = 60

    return dic
Example #15
def init_root_logger() -> None:
    import os

    class LogFilterColor(logging.Filter):
        def filter(self, record):
            if 'hackq' not in record.name and '__main__' not in record.name:
                return None

            if not hasattr(record, 'pre'):
                record.pre = ''
                record.post = ''
            elif not hasattr(record, 'post'):
                record.post = colorama.Style.RESET_ALL

            return record

    log_filename = config.get('LOGGING', 'File')
    script_dir = os.path.dirname(os.path.abspath(__file__))
    if not os.path.isabs(log_filename):
        log_filename = os.path.join(script_dir, log_filename)

    inc_filenames = config.getboolean('LOGGING', 'IncrementFileNames')
    # check if name contains format string placeholder
    if inc_filenames and log_filename.format(0) == log_filename:
        inc_filenames = False
    if inc_filenames:
        log_filename = next_available_name(log_filename)

    with open(os.path.join(script_dir,
                           'logging_config.json')) as log_conf_file:
        log_conf_dict = json.load(log_conf_file)
        log_conf_dict['handlers']['fileHandler']['filename'] = log_filename
        log_conf_dict['filters']['LogFilterColor']['()'] = LogFilterColor

        logging.config.dictConfig(log_conf_dict)
Example #16
def start():
    """Start the logging according to the configuration."""
    filename = os.path.expanduser(config.get("logging", "config"))
    debug = config.getboolean("logging", "debug")

    if os.path.exists(filename):
        # Configuration taken from file
        configure_from_file(filename, debug)
        # Reload config on SIGHUP (UNIX only)
        if hasattr(signal, 'SIGHUP'):

            def handler(signum, frame):
                configure_from_file(filename, debug)

            signal.signal(signal.SIGHUP, handler)
    else:
        # Default configuration, standard output
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(logging.Formatter("%(message)s"))
        LOGGER.addHandler(handler)
        if debug:
            LOGGER.setLevel(logging.DEBUG)
            LOGGER.debug(
                "Logging configuration file '%s' not found, using stdout." %
                filename)
Example #17
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.safe_load(f))
    cache_dir = utils.get_cache_dir(config)
    os.makedirs(cache_dir, exist_ok=True)
    shutil.copyfile(os.path.expanduser(os.path.expandvars(config.get('cache', 'category'))), os.path.join(cache_dir, 'category'))
    category = utils.get_category(config)
    category_index = dict([(name, i) for i, name in enumerate(category)])
    datasets = config.get('cache', 'datasets').split()
    for phase in args.phase:
        path = os.path.join(cache_dir, phase) + '.pkl'
        logging.info('save cache file: ' + path)
        data = []
        for dataset in datasets:
            logging.info('load %s dataset' % dataset)
            module, func = dataset.rsplit('.', 1)
            module = importlib.import_module(module)
            func = getattr(module, func)
            data += func(config, path, category_index)
        if config.getboolean('cache', 'shuffle'):
            random.shuffle(data)
        with open(path, 'wb') as f:
            pickle.dump(data, f)
    logging.info('%s data are saved into %s' % (str(args.phase), cache_dir))
Example #19
def main():
    args = parse_args()

    logging.config.fileConfig(args.config[0])

    config = MyConfigParser(allow_no_value=True)
    config.read(args.config[0])

    update_config(config, args)

    signal.signal(signal.SIGTERM, sigterm_handler)

    if config.getboolean('general', 'daemon'):
        daemonize(config.get('general', 'logfile'))

    write_pidfile(config.get('general', 'pidfile'))

    try:
        run_workers(config)
    except (KeyboardInterrupt, SystemExit):
        sys.exit(2)
    except Exception as e:
        logger.exception("%s %s", type(e).__name__, str(e))
        sys.exit(1)

    logger.info("Leave main")
    return 0
Example #20
def getShares(tsn=''):
    shares = [(section, Bdict(config.items(section)))
              for section in config.sections()
              if not (section.startswith(special_section_prefixes)
                      or section in special_section_names)
             ]

    tsnsect = '_tivo_' + tsn
    if config.has_section(tsnsect) and config.has_option(tsnsect, 'shares'):
        # clean up leading and trailing spaces & make sure ref is valid
        tsnshares = []
        for x in config.get(tsnsect, 'shares').split(','):
            y = x.strip()
            if config.has_section(y):
                tsnshares.append((y, Bdict(config.items(y))))
        shares = tsnshares

    shares.sort()

    if not config.getboolean('Server', 'nosettings', fallback=False):
        shares.append(('Settings', {'type': 'settings'}))
    if get_server('tivo_mak') and get_togo('path'):
        shares.append(('ToGo', {'type': 'togo'}))

    return shares
Example #21
def set_up_checks(config):

    check_section = [s for s in config.sections() if s.startswith('check.')]
    for section in check_section:
        name = section[len('check.'):]
        # legacy method to determine the check name from the section header
        class_name = name
        # if there is an explicit class, use that one with higher priority
        if 'class' in config[section]:
            class_name = config[section]['class']
        enabled = config.getboolean(section, 'enabled', fallback=False)

        if not enabled:
            _logger.debug('Skipping disabled check %s', name)
            continue

        _logger.info('Configuring check %s with class %s', name, class_name)
        try:
            klass = globals()[class_name]
        except KeyError:
            _logger.error('Cannot create check named %s: Class does not exist',
                          class_name)
            sys.exit(2)

        check = klass.create(name, config[section])
        if not isinstance(check, Check):
            _logger.exception('Check %s is not a correct Check instance',
                              check)
            sys.exit(2)
        _logger.debug('Created check instance %s', check)
        _checks.append(check)

    if not _checks:
        _logger.error('No checks enabled')
        sys.exit(2)
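
For context, a minimal check satisfying the contract this loader relies on; the class below is illustrative only and assumes the `Check` base class from the surrounding project:

class ExampleCheck(Check):
    """Illustrative only: shows the create() factory set_up_checks expects."""

    @classmethod
    def create(cls, name, section):
        # section is the configparser SectionProxy for [check.ExampleCheck]
        return cls(name, section.getint('threshold', fallback=10))

    def __init__(self, name, threshold):
        self.name = name
        self.threshold = threshold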
Example #22
def set_up_logger_syslog_handler(config):
    handlers = {}
    formatters = {}
    if config.has_section('Logging'):
        if config.has_option('Logging', 'loghost') and \
            config.has_option('Logging', 'logport'):
            log_host = config.get('Logging', 'loghost')
            log_port = int(config.get('Logging', 'logport'))
            log_address = (log_host, log_port)
            formatters['syslog_formatter'] = {
                'format': '%(asctime)s %(name)s: %(levelname)s %(message)s',
                'datefmt': '%b %e %H:%M:%S',
            }
            socktype = socket.SOCK_DGRAM
            if config.has_option('Logging', 'logtcp'):
                if config.getboolean('Logging', 'logtcp'):
                    socktype = socket.SOCK_STREAM
                else:
                    socktype = socket.SOCK_DGRAM
            facility = logging.handlers.SysLogHandler.LOG_USER
            if config.has_option('Logging', 'logfacility'):
                try:
                    facility = logging.handlers.SysLogHandler.facility_names[
                        config.get('Logging', 'logfacility')]
                except KeyError:
                    raise Exception('Invalid "logfacility" value of "%s"' %
                        config.get('Logging', 'logfacility'))
            handlers['syslog'] = {
                'class': 'logging.handlers.SysLogHandler',
                'formatter': 'syslog_formatter',
                'address': log_address,
                'facility': facility,
                'socktype': socktype,
            }
    return handlers, formatters
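
A usage sketch showing how the returned pieces can be fed to logging.config.dictConfig; attaching the handlers to the root logger is an assumption about the caller, not part of the original code:

handlers, formatters = set_up_logger_syslog_handler(config)
logging.config.dictConfig({
    'version': 1,
    'formatters': formatters,
    'handlers': handlers,
    # route everything through whichever handlers were configured
    'root': {'handlers': list(handlers), 'level': 'INFO'},
})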
Example #23
def load_modules():
    analysis_modules = []
    for section in config:
        if "analysis_module_" in section:
            if not config.getboolean(section, "enabled"):
                continue

            module_name = config.get(section, "module")
            try:
                _module = importlib.import_module(module_name)
            except Exception as e:
                log.error("Unable to import module {0}: {1}".format(module_name, str(e)))
                continue

            class_name = config.get(section, "class")
            try:
                module_class = getattr(_module, class_name)
            except Exception as e:
                log.error("Unable to load module class {0}: {1}".format(module_class, str(e)))
                continue

            try:
                analysis_module = module_class(str(section))
            except Exception as e:
                log.error("Unable to load analysis module {0}: {1}".format(section, str(e)))
                continue

            analysis_modules.append(analysis_module)

    return analysis_modules
Example #24
    def isenabled(sect, data=None):
        data = {} if data is None else data  # avoid a shared mutable default
        if not config.has_section(sect):
            return False

        if not config.getboolean(sect, 'enable'):
            return False

        try:
            per = config.get(sect, 'percentage')             # allow '0'
            if per and int(per) < random.randint(1, 100):
                return False

            time_ = config.get(sect, 'time')
            if time_ and not util.time_in(time_):
                return False

            if 'source' in data:
                pattern = config.get(sect, 'source_pattern')
                data['source_match'] = re.search(pattern, data['source'])
                if not data['source_match']:
                    return False

            if 'message' in data:
                pattern = config.get(sect, 'pattern')
                data['match'] = re.search(pattern, data['message'])
                if not data['match']:
                    return False
        except Exception:
            logging.exception('[%s] %s', sect, data)
            return False

        return True
Example #26
def handle_search_failure(search, group_by, exception=None):
    if not config.getboolean('smtp', 'enabled'):
        return

    header = '\r\n'.join([
        'From: splunk_detect@localhost',
        'To: {0}'.format(config.get('smtp', 'recipients')),
        'Subject: Splunk Search Failure'
    ])

    message = '{0}\r\n\r\n{1}'.format(
        header,
        "The following splunk search failed.\r\n\r\n{0}".format(search))

    if exception is not None:
        message += "\r\n\r\nThe following exception was thrown.\r\n\r\n{0}".format(
            traceback.format_exc())
    else:
        message += "\r\n\r\nThe splunk server returned an HTTP error code."

    try:
        server = smtplib.SMTP(config.get('smtp', 'server'))
        server.set_debuglevel(1)
        logging.warning("sending email to {0}".format(
            config.get('smtp', 'recipients')))
        server.sendmail('splunk_detect@localhost',
                        config.get('smtp', 'recipients').split(','), message)
        server.quit()
    except Exception as e:
        logging.error("unable to send email: {0}".format(str(e)))
Example #27
    def __init__(self, srcf, destf, config):
        """Get an NNTPSucka with two NNTPClient objects representing the
        source and destination."""
        self.log = logging.getLogger("NNTPSucka")
        self.src = srcf()
        self.dest = destf()

        self.reqQueue = Queue.Queue(1000)
        self.doneQueue = Queue.Queue(1000)

        # Figure out the maximum number of articles per group
        self.maxArticles = config.getint("misc", "maxArticles")
        self.log.debug("Max articles is configured as %d" % (self.maxArticles))

        # NewsDB setup
        self.db = NewsDB(config.get("misc", "newsdb"))
        self.db.setShouldMarkArticles(
            config.getboolean("misc", "shouldMarkArticles"))

        # Initialize stats
        self.stats = Stats()

        self.workers = [
            Worker(srcf, destf, self.reqQueue, self.doneQueue)
            for x in range(config.getint("misc", "workers"))
        ]
Example #28
 def __init__(self, args, config):
     self.args = args
     self.config = config
     self.model_dir = utils.get_model_dir(config)
     self.cache_dir = utils.get_cache_dir(config)
     self.category = utils.get_category(config, self.cache_dir)
     self.anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
     logging.info('use cache directory ' + self.cache_dir)
     logging.info('tensorboard --logdir ' + self.model_dir)
     if args.delete:
         logging.warning('delete model directory: ' + self.model_dir)
         shutil.rmtree(self.model_dir, ignore_errors=True)
     os.makedirs(self.model_dir, exist_ok=True)
     with open(self.model_dir + '.ini', 'w') as f:
         config.write(f)
     self.saver = utils.train.Saver(self.model_dir,
                                    config.getint('save', 'keep'))
     self.timer_save = utils.train.Timer(config.getfloat('save', 'secs'),
                                         False)
     try:
         self.timer_eval = utils.train.Timer(
             eval(config.get('eval', 'secs')),
             config.getboolean('eval', 'first'))
     except configparser.NoOptionError:
         self.timer_eval = lambda: False
     self.summary_worker = SummaryWorker(self)
     self.summary_worker.start()
Example #29
def run():
    import sys
    if len(sys.argv) < 2:
        filename = "/etc/keyserver-ng/config.ini"
    else:
        filename = sys.argv[1]
    config = configparser.ConfigParser()
    config.read(filename)
    log_cfg = config["logging"]
    LOGGING["handlers"]["file"]["level"] = log_cfg["level"]
    LOGGING["handlers"]["file"]["filename"] = log_cfg["file"]
    LOGGING["handlers"]["file"]["maxBytes"] = int(log_cfg["rotate_bytes"])
    LOGGING["handlers"]["console"]["level"] = log_cfg["console_level"]
    if not config.getboolean("logging", "log_console"):
        LOGGING["loggers"][""]["handlers"] = ["file"]

    logging.config.dictConfig(LOGGING)

    db_module = importlib.import_module(config["database"]["module"])
    db = db_module.DB(**config["database"])
    server = hkp.Server(db)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(
            server.start(loop,
                         host=config["keyserver"]["listen_addr"],
                         port=config["keyserver"]["listen_port"],
                         )
    )
    loop.run_forever()
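
The module-level LOGGING dict mutated above is not shown on this page; a hedged skeleton with only the keys the code touches, all values being placeholders:

LOGGING = {
    "version": 1,
    "handlers": {
        "file": {
            "class": "logging.handlers.RotatingFileHandler",
            "level": "INFO",            # overwritten from log_cfg["level"]
            "filename": "keyserver.log",  # overwritten from log_cfg["file"]
            "maxBytes": 1048576,        # overwritten from rotate_bytes
        },
        "console": {
            "class": "logging.StreamHandler",
            "level": "INFO",            # overwritten from console_level
        },
    },
    "loggers": {
        "": {"handlers": ["file", "console"], "level": "DEBUG"},
    },
}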
Example #30
def load_configuration(configuration_file):
    config = ConfigParser.ConfigParser()
    config.read(configuration_file)

    section = 'news'
    source_screen_names = config.get(section, 'source_screen_names').split(',')
    polling_interval = int(config.get(section, 'polling_interval'))

    last_names = unicode(config.get(section, 'last_names'), 'utf-8').split(',')
    first_names = unicode(config.get(section, 'first_names'), 'utf-8').split(',')

    if len(last_names) != len(first_names):
        raise Exception(u"invalid name parameter length.")

    names = []
    for index in xrange(len(last_names)):
        names.append((last_names[index], first_names[index]))

    consumer_key = config.get(section, 'consumer_key')
    consumer_secret = config.get(section, 'consumer_secret')
    access_key = config.get(section, 'access_key')
    access_secret = config.get(section, 'access_secret')

    dry_run = config.getboolean(section, 'dry_run')

    return (source_screen_names, polling_interval, names,
            consumer_key, consumer_secret, access_key, access_secret, dry_run)
Example #31
    def enabled(self, sect):
        if not config.has_section(sect):
            return False

        if not config.getboolean(sect, 'enable'):
            return False

        try:
            per = config.get(sect, 'percentage')             # allow '0'
            if per and int(per) < random.randint(1, 100):
                return False

            time_ = config.get(sect, 'time')
            if time_ and not util.time_in(time_):
                return False

            if 'user_name' in self.data:
                pattern = config.get(sect, 'user_pattern')
                self.data['user_match'] = re.search(pattern,
                                                    self.data['user_name'])
                if not self.data['user_match']:
                    return False

            if 'text' in self.data:
                pattern = config.get(sect, 'pattern')
                self.data['match'] = re.search(pattern, self.data['text'])
                if not self.data['match']:
                    return False
        except Exception:
            logging.exception('[%s] %s', sect, self.data)
            return False

        return True
Example #32
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.safe_load(f))
    cache_dir = utils.get_cache_dir(config)
    os.makedirs(cache_dir, exist_ok=True)
    mappers, _ = utils.get_dataset_mappers(config)
    for phase in args.phase:
        path = os.path.join(cache_dir, phase) + '.pkl'
        logging.info('save cache file: ' + path)
        data = []
        for dataset in mappers:
            logging.info('load %s dataset' % dataset)
            module, func = dataset.rsplit('.', 1)
            module = importlib.import_module(module)
            func = getattr(module, func)
            data += func(config, path, mappers[dataset])
        if config.getboolean('cache', 'shuffle'):
            random.shuffle(data)
        with open(path, 'wb') as f:
            pickle.dump(data, f)
    logging.info('%s data are saved into %s' % (str(args.phase), cache_dir))
Example #33
def _main(config):
    setup_logging(config)

    fp = config.get('ircbot', 'channel_config')
    if fp:
        fp = os.path.expanduser(fp)
        if not os.path.exists(fp):
            raise Exception("Unable to read layout config file at %s" % fp)
    else:
        raise Exception("Channel Config must be specified in config file.")

    channel_config = ChannelConfig(yaml.safe_load(open(fp)))

    bot = GerritBot(channel_config.channels,
                    config.get('ircbot', 'nick'),
                    config.get('ircbot', 'pass'),
                    config.get('ircbot', 'server'),
                    config.getint('ircbot', 'port'),
                    config.getboolean('ircbot', 'force_ssl'),
                    config.get('ircbot', 'server_password'))
    g = Gerrit(bot,
               channel_config,
               config.get('gerrit', 'host'),
               config.get('gerrit', 'user'),
               config.getint('gerrit', 'port'),
               config.get('gerrit', 'key'))
    g.start()
    bot.start()
Example #34
    def _makeConfig(self):
        import datetime

        config = deployUtil.vdsmImport("config").config

        if not os.path.exists(VDSM_CONF):
            logging.debug("makeConfig: generating conf.")
            lines = []
            lines.append("# Auto-generated by vds_bootstrap at:" + str(datetime.datetime.now()) + "\n")
            lines.append("\n")

            lines.append("[vars]\n")  # Adding ts for the coming scripts.
            lines.append("trust_store_path = " + config.get("vars", "trust_store_path") + "\n")
            lines.append("ssl = " + config.get("vars", "ssl") + "\n")

            if config.getboolean("vars", "fake_kvm_support"):
                lines.append("fake_kvm_support = true\n")

            lines.append("\n")

            # Adding mgt port for the coming scripts.
            lines.append("[addresses]\n")
            lines.append("management_port = " + config.get("addresses", "management_port") + "\n")

            logging.debug("makeConfig: writing the following to " + VDSM_CONF)
            logging.debug(lines)
            fd, tmpName = tempfile.mkstemp()
            f = os.fdopen(fd, "w")
            f.writelines(lines)
            f.close()
            os.chmod(tmpName, 0o644)
            shutil.move(tmpName, VDSM_CONF)
        else:
            self.message = "Basic configuration found, skipping this step"
            logging.debug(self.message)
Example #35
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.safe_load(f))
    cache_dir = utils.get_cache_dir(config)
    os.makedirs(cache_dir, exist_ok=True)
    shutil.copyfile(
        os.path.expanduser(os.path.expandvars(config.get('cache',
                                                         'category'))),
        os.path.join(cache_dir, 'category'))
    category = utils.get_category(config)
    category_index = dict([(name, i) for i, name in enumerate(category)])
    datasets = config.get('cache', 'datasets').split()
    for phase in args.phase:
        path = os.path.join(cache_dir, phase) + '.pkl'
        logging.info('save cache file: ' + path)
        data = []
        for dataset in datasets:
            logging.info('load %s dataset' % dataset)
            module, func = dataset.rsplit('.', 1)
            module = importlib.import_module(module)
            func = getattr(module, func)
            data += func(config, path, category_index)
        if config.getboolean('cache', 'shuffle'):
            random.shuffle(data)
        with open(path, 'wb') as f:
            pickle.dump(data, f)
    logging.info('%s data are saved into %s' % (str(args.phase), cache_dir))
Example #36
def luigid(argv=sys.argv[1:]):
    import luigi.server
    import luigi.process
    import luigi.configuration
    parser = argparse.ArgumentParser(description=u'Central luigi server')
    parser.add_argument(u'--background',
                        help=u'Run in background mode',
                        action='store_true')
    parser.add_argument(u'--pidfile', help=u'Write pidfile')
    parser.add_argument(u'--logdir', help=u'log directory')
    parser.add_argument(u'--state-path', help=u'Pickled state file')
    parser.add_argument(u'--address', help=u'Listening interface')
    parser.add_argument(u'--unix-socket', help=u'Unix socket path')
    parser.add_argument(u'--port', default=8082, help=u'Listening port')

    opts = parser.parse_args(argv)

    if opts.state_path:
        config = luigi.configuration.get_config()
        config.set('scheduler', 'state_path', opts.state_path)

    if opts.background:
        # daemonize sets up logging to spooled log files
        logging.getLogger().setLevel(logging.INFO)
        luigi.process.daemonize(luigi.server.run,
                                api_port=opts.port,
                                address=opts.address,
                                pidfile=opts.pidfile,
                                logdir=opts.logdir,
                                unix_socket=opts.unix_socket)
    else:
        if opts.logdir:
            logging.basicConfig(level=logging.INFO,
                                format=luigi.process.get_log_format(),
                                filename=os.path.join(opts.logdir,
                                                      "luigi-server.log"))
        else:
            config = luigi.configuration.get_config()
            logging_conf = None
            if not config.getboolean('core', 'no_configure_logging', False):
                logging_conf = config.get('core', 'logging_conf_file', None)
                if logging_conf is not None and not os.path.exists(
                        logging_conf):
                    raise Exception(
                        "Error: Unable to locate specified logging configuration file!"
                    )
            if logging_conf is not None:
                print(
                    "Configuring logging from file: {0}".format(logging_conf))
                logging.config.fileConfig(logging_conf)
            else:
                print(
                    "Defaulting to basic logging; consider specifying logging_conf_file in luigi.cfg."
                )
                logging.basicConfig(level=logging.INFO,
                                    format=luigi.process.get_log_format())
        luigi.server.run(api_port=opts.port,
                         address=opts.address,
                         unix_socket=opts.unix_socket)
Example #37
def _main(config):
    setup_logging(config)

    fp = config.get('ircbot', 'channel_config')
    if fp:
        fp = os.path.expanduser(fp)
        if not os.path.exists(fp):
            raise Exception("Unable to read layout config file at %s" % fp)
    else:
        raise Exception("Channel Config must be specified in config file.")

    try:
        channel_config = ChannelConfig(yaml.safe_load(open(fp)))
    except Exception:
        log = logging.getLogger('gerritbot')
        log.exception("Syntax error in chanel config file")
        raise

    bot = GerritBot(channel_config.channels,
                    config.get('ircbot', 'nick'),
                    config.get('ircbot', 'pass'),
                    config.get('ircbot', 'server'),
                    config.getint('ircbot', 'port'),
                    config.getboolean('ircbot', 'force_ssl'),
                    config.get('ircbot', 'server_password'))
    if config.has_option('ircbot', 'use_mqtt'):
        use_mqtt = config.getboolean('ircbot', 'use_mqtt')
    else:
        use_mqtt = False

    if use_mqtt:
        g = GerritMQTT(bot,
                       channel_config,
                       config.get('mqtt', 'host'),
                       config.get('mqtt', 'base_topic'),
                       config.getint('mqtt', 'port'),
                       config.getboolean('mqtt', 'websocket'))
    else:
        g = Gerrit(bot,
                   channel_config,
                   config.get('gerrit', 'host'),
                   config.get('gerrit', 'user'),
                   config.getint('gerrit', 'port'),
                   config.get('gerrit', 'key'))
    g.start()
    bot.start()
Example #39
def read_logging_config(file, section, status):
    pathname = '/home/chia/plot_manager/' + file
    config.read(pathname)
    if status == "logging":
        current_status = config.getboolean(section, status)
    else:
        current_status = config.get(section, status)
    return current_status
Example #40
def read_logging_config(file, section, status):
    pathname = '/root/coin_monitor/' + file
    config.read(pathname)
    if status == "logging":
        current_status = config.getboolean(section, status)
    else:
        current_status = config.get(section, status)
    return current_status
Example #41
    def __init__(self, log):
        self.logger = log
        config = ConfigParser.ConfigParser()
        config.read("./twitter.config")

        self.livDeviceName = config.get('TWITTER', 'liv_device_name')
        self.alarmActive = config.getboolean('TWITTER', 'twitter_alarm_active')
        self.reportActive = config.getboolean('TWITTER',
                                              'twitter_report_active')
        self.reportTime = config.getint('TWITTER', 'twitter_report_time')

        self.accessToken = config.get('TWITTER', 'access_token')
        self.accessTokenSecret = config.get('TWITTER', 'access_token_secret')
        self.consumerKey = config.get('TWITTER', 'consumer_key')
        self.consumerSecret = config.get('TWITTER', 'consumer_secret')

        self.minuteCounter = 0
Example #42
def read_logging_config(file, section, status):
    pathname = main_path + file
    print(pathname)
    config.read(pathname)
    if status == "logging":
        current_status = config.getboolean(section, status)
    else:
        current_status = config.get(section, status)
    return current_status
Example #43
async def post_titles(request):
    config = next(iter(request.app["config"]), None)
    data = await request.post()
    assembly_url = data.get("assembly_url")
    ensemble = []
    if assembly_url and config and config.getboolean(
            "assembly", "enable_load", fallback=False):
        if not Presenter.validation["url"].match(assembly_url):
            raise web.HTTPUnauthorized(reason="User requested invalid URL.")

        try:
            async with request.app["client"].get(assembly_url,
                                                 trace_request_ctx={
                                                     "log_name": "app.client"
                                                 }) as response:

                if response.status != 200:
                    raise web.HTTPUnauthorized(reason=response.reason)

                text = await (response.text())
                try:
                    assembly = Assembly.loads(text)
                    ensemble = assembly.get("ensemble")
                except Exception as e:
                    request.app["log"].error(e)
                    raise web.HTTPUnauthorized(reason="Invalid data.")

        except (
                aiohttp.ClientResponseError,
                aiohttp.ClientConnectionError,
                aiohttp.ClientPayloadError,
                asyncio.TimeoutError,
        ) as e:
            request.app["log"].error(e)

        try:
            clone = next(i for i in reversed(ensemble)
                         if isinstance(i, Narrator))
            narrator = bluemonday78.story.build_narrator(
                id=None, memories=clone.memories, _states=clone._states)
            ensemble.remove(clone)
            ensemble.append(narrator)
        except Exception:
            ensemble = None

    if not ensemble:
        narrator = bluemonday78.story.build_narrator()
        ensemble = bluemonday78.story.ensemble(narrator)
    else:
        request.app["log"].info("Load successful from assembly")

    presenter = Presenter(None, ensemble)
    presenter.log.debug(narrator)
    request.app["sessions"][narrator.id] = presenter
    request.app["log"].info("session: {0.id.hex}".format(narrator))
    raise web.HTTPFound("/{0.id.hex}".format(narrator))
Example #44
def getIsExternal(tsn):
    tsnsect = "_tivo_" + tsn
    if tsnsect in config.sections():
        if config.has_option(tsnsect, "external"):
            try:
                return config.getboolean(tsnsect, "external")
            except ValueError:
                pass

    return False
Example #45
def average_precision(config, tp, num, dtype=np.float64):
    fp = ~tp
    tp = np.cumsum(tp)
    fp = np.cumsum(fp)
    if num > 0:
        rec = tp / num
    else:
        rec = np.zeros(len(tp), dtype=dtype)
    prec = tp / np.maximum(tp + fp, np.finfo(dtype).eps)
    return voc_ap(rec, prec, config.getboolean('eval', 'metric07'))
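
A toy run of the cumulative precision/recall computation above, assuming three detections (the first and third are true positives) against num = 2 ground-truth boxes; `voc_ap` is defined elsewhere in the project:

import numpy as np

tp = np.array([True, False, True])
fp = ~tp
tp_cum, fp_cum = np.cumsum(tp), np.cumsum(fp)  # [1 1 2], [0 1 1]
rec = tp_cum / 2                               # [0.5 0.5 1.0]
prec = tp_cum / np.maximum(tp_cum + fp_cum, np.finfo(np.float64).eps)
# prec == [1.0, 0.5, 0.667]; voc_ap(rec, prec, ...) then integrates the curve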
Example #47
def getIsExternal(tsn):
    tsnsect = '_tivo_' + tsn
    if tsnsect in config.sections():
        if config.has_option(tsnsect, 'external'):
            try:
                return config.getboolean(tsnsect, 'external')
            except ValueError:
                pass

    return False
Example #48
def connect(config, echo=False):
    """Connect to a given Clusto datastore.

    Accepts a config object with (at least) a DSN string

    e.g. mysql://user:pass@dbhost/clustodb
    e.g. sqlite:///somefile.db

    @param config: the config object
    """

    dsn = config.get('clusto', 'dsn')
    if dsn.startswith('http'):
        SESSION.clusto_api = True
    else:
        SESSION.configure(bind=create_engine(dsn,
                                             echo=echo,
                                             poolclass=SingletonThreadPool,
                                             pool_recycle=600
                                             ))

    SESSION.clusto_version = None

    if config.has_option('clusto', 'versioning'):
        SESSION.clusto_versioning_enabled = config.getboolean('clusto', 'versioning')
    else:
        SESSION.clusto_versioning_enabled = False

    # Set the log level from config, default is WARNING
    if config.has_option('clusto', 'loglevel'):
        rootlog = logging.getLogger()
        level = logging.getLevelName(config.get('clusto', 'loglevel'))
        rootlog.setLevel(level)

    # Setup audit logging to a file
    if config.has_option('clusto', 'auditlog'):
        auditlog = logging.getLogger('clusto.audit')
        auditlog.propagate = False
        auditlog.setLevel(logging.INFO)
        logfile = config.get('clusto', 'auditlog')
        handler = logging.handlers.WatchedFileHandler(logfile)
        handler.setFormatter(logging.Formatter(
            '%(asctime)s %(name)s %(levelname)s %(message)s',
            '%Y-%m-%d %H:%M:%S'
        ))
        auditlog.addHandler(handler)

    try:
        memcache_servers = config.get('clusto', 'memcached').split(',')
#       Memcache should only be imported if we're actually using it, yes?
        import memcache
        logging.info('Memcache server list: %s' % config.get('clusto', 'memcached'))
        SESSION.memcache = memcache.Client(memcache_servers, debug=0)
    except Exception:
        SESSION.memcache = None
Example #49
    def handle(self):
        for action in config.getlist('mm', 'actions'):
            sect = ':'.join(('action', action))
            if not self.enabled(sect):
                continue

            fallthrough = config.getboolean(sect, 'fallthrough')
            result = self.action(sect)
            if (not fallthrough) and (result is not None):
                break

        return self
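
`config.getlist` is not a standard ConfigParser method; in these snippets it comes from a project-specific wrapper. On a plain ConfigParser the same accessor can be generated by registering a converter (Python 3.5+); a sketch:

import configparser

config = configparser.ConfigParser(
    converters={'list': lambda v: [s.strip() for s in v.split(',') if s.strip()]})
config.read_string("[mm]\nactions = greet, quote\n")
print(config.getlist('mm', 'actions'))  # ['greet', 'quote']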
Example #50
def connect(config, echo=False):
    """Connect to a given Clusto datastore.

    Accepts a config object with (at least) a DSN string

    e.g. mysql://user:pass@dbhost/clustodb
    e.g. sqlite:///somefile.db

    @param config: the config object
    """

    dsn = config.get('clusto', 'dsn')
    if dsn.startswith('http'):
        SESSION.clusto_api = True
    else:
        SESSION.configure(bind=create_engine(
            dsn, echo=echo, poolclass=SingletonThreadPool, pool_recycle=600))

    SESSION.clusto_version = None

    if config.has_option('clusto', 'versioning'):
        SESSION.clusto_versioning_enabled = config.getboolean(
            'clusto', 'versioning')
    else:
        SESSION.clusto_versioning_enabled = False

    # Set the log level from config, default is WARNING
    if config.has_option('clusto', 'loglevel'):
        rootlog = logging.getLogger()
        level = logging.getLevelName(config.get('clusto', 'loglevel'))
        rootlog.setLevel(level)

    # Setup audit logging to a file
    if config.has_option('clusto', 'auditlog'):
        auditlog = logging.getLogger('clusto.audit')
        auditlog.propagate = False
        auditlog.setLevel(logging.INFO)
        logfile = config.get('clusto', 'auditlog')
        handler = logging.handlers.WatchedFileHandler(logfile)
        handler.setFormatter(
            logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s',
                              '%Y-%m-%d %H:%M:%S'))
        auditlog.addHandler(handler)

    try:
        memcache_servers = config.get('clusto', 'memcached').split(',')
        #       Memcache should only be imported if we're actually using it, yes?
        import memcache
        logging.info('Memcache server list: %s' %
                     config.get('clusto', 'memcached'))
        SESSION.memcache = memcache.Client(memcache_servers, debug=0)
    except Exception:
        SESSION.memcache = None
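
A hypothetical [clusto] section exercising every option connect() reads; all values below are placeholders, not real deployment settings:

import configparser

config = configparser.ConfigParser()
config.read_string("""
[clusto]
dsn = sqlite:///clusto.db
versioning = false
loglevel = WARNING
auditlog = /var/log/clusto-audit.log
memcached = 127.0.0.1:11211
""")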
Example #51
def filter_visible(config, iou, yx_min, yx_max, prob):
    prob_cls, cls = torch.max(prob, -1)
    if config.getboolean('detect', 'fix'):
        mask = (iou * prob_cls) > config.getfloat('detect', 'threshold_cls')
    else:
        mask = iou > config.getfloat('detect', 'threshold')
    iou, prob_cls, cls = (t[mask].view(-1) for t in (iou, prob_cls, cls))
    _mask = torch.unsqueeze(mask, -1).repeat(1, 2)  # expand mask to (n, 2); workaround for PyTorch masked indexing
    yx_min, yx_max = (t[_mask].view(-1, 2) for t in (yx_min, yx_max))
    num = prob.size(-1)
    _mask = torch.unsqueeze(mask, -1).repeat(1, num)  # expand mask to (n, num); workaround for PyTorch masked indexing
    prob = prob[_mask].view(-1, num)
    return iou, yx_min, yx_max, prob, prob_cls, cls
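
# Usage sketch (assumed): filter_visible() reads [detect] fix/threshold/
# threshold_cls from the config and takes flat per-box tensors with
# matching first dimensions. Values below are invented.
import configparser

import torch

_config = configparser.ConfigParser()
_config.read_string('[detect]\nfix = 0\nthreshold = 0.5\nthreshold_cls = 0.1\n')
_iou = torch.rand(8)
_yx_min = torch.rand(8, 2)
_yx_max = _yx_min + torch.rand(8, 2)
_prob = torch.rand(8, 20)
filter_visible(_config, _iou, _yx_min, _yx_max, _prob)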
Example #52
def show_main(wxbot):
    global main_frame
    groups = parse_group(wxbot, encoding)
    templates = parse_template()
    window.geometry('1080x680+200+50')
    main_frame = MainFrame(window,
                           wxbot,
                           groups,
                           templates,
                           send_period=config.getint('basic', 'send.period'),
                           thumbnail=config.getboolean('basic',
                                                       'send.thumbnail'))
    main_frame.grid()
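
# Hypothetical [basic] section for show_main (option names from the
# code, values invented):
#
# [basic]
# send.period = 30
# send.thumbnail = true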
Example #53
    def __init__(self, config, config_group):
        self.command = config.get(config_group, 'command', None)
        args = config.getlist(config_group, 'args', '')
        self.verbose = config.verbose
        self.command_object = get_command_object(self.command)

        parser = argparse.ArgumentParser()
        self.command_object.add_arguments(parser)
        self.args, _ = parser.parse_known_args(args=args)
        self.args.verbose = self.verbose
        self.delay = int(config.get(config_group, 'delay', 0))
        self.args.delay = self.delay
        self.enabled = config.getboolean(config_group, 'enabled', True)
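
# Sketch (assumption, not the project's actual class): the config used
# above is not a stock ConfigParser -- it takes positional defaults and
# offers getlist() plus a verbose attribute. A minimal stand-in:
class ConfigSketch:
    def __init__(self, parser, verbose=False):
        self.parser = parser  # a configparser.ConfigParser
        self.verbose = verbose

    def get(self, section, option, default=None):
        return self.parser.get(section, option, fallback=default)

    def getboolean(self, section, option, default=None):
        return self.parser.getboolean(section, option, fallback=default)

    def getlist(self, section, option, default=''):
        # split rule assumed: whitespace-separated tokens
        return self.get(section, option, default).split()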
Example #54
def main():
    def setlogger(conf_file=None):
        if conf_file:
            return logging.config.fileConfig(conf_file)

        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s %(levelname)s %(message)s',
                            datefmt='%Y/%m/%d %H:%M:%S')

    def loadmodules(path=None):
        from . import irchandler    # load default modules

        if path:
            ircplugin.load(path)

        for name, action in ircplugin.iteractions():
            logging.info('[plugin] [action] [%s] <%s.%s> loaded',
                         name, action.__module__, action.__name__)
        for name, command in ircplugin.itercommands():
            logging.info('[plugin] [command] [%s] <%s.%s> loaded',
                         name, command.__module__, command.__name__)
        for name, events in ircplugin.iterevents():
            for evt in events:
                logging.info('[plugin] [event] [%s] <%s.%s> loaded',
                             name, evt.__module__, evt.__name__)

    ap = argparse.ArgumentParser()
    ap.add_argument('-l', '--logging-config', type=util.abspath,
                    help='configuration file for the logging')
    ap.add_argument('config', type=util.abspath,
                    help='configuration file for the IRC client')
    args = ap.parse_args()

    config.read(args.config)
    setlogger(args.logging_config)
    loadmodules(config.get('plugin', 'path'))

    bot = IRCBot()

    def q(*args, **kwargs):
        bot.die(config.get('irc', 'quit_message'))
        raise SystemExit()

    for sig in (signal.SIGINT, signal.SIGTERM):
        signal.signal(sig, q)

    if config.getboolean('irc', 'daemon'):
        util.daemonize()

    with exceptlog('main', bot.start) as run:
        run()
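
# Hypothetical config.ini for the bot above (section/option names from
# the code, values invented):
#
# [plugin]
# path = ~/.ircbot/plugins
#
# [irc]
# daemon = false
# quit_message = bye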
Example #55
    def __init__(self, config_path):
        # Setup config with defaults.
        config = configparser.ConfigParser()
        config['myapp'] = {}
        config['myapp']['fullscreen'] = 'false'
        config['myapp']['timeout'] = '60'

        # Update config from file.
        with open(config_path, 'r', encoding='utf-8') as config_file:
            config.read_file(config_file)

        self.connection = config.get('myapp', 'connection')
        self.is_fullscreen = config.getboolean('myapp', 'fullscreen')
        self.timeout = config.getint('myapp', 'timeout')
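
# Usage sketch (class name and file contents assumed): only "connection"
# must appear in the file; "fullscreen" and "timeout" fall back to the
# defaults set above.
#
# with open('myapp.ini', 'w', encoding='utf-8') as f:
#     f.write('[myapp]\nconnection = localhost:5432\nfullscreen = true\n')
# app = MyApp('myapp.ini')  # app.timeout == 60, app.is_fullscreen is True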
Example #56
    def virtExplorer(self, rnum):
        """
            Check the VT/SVM compatibility
        """
        self.test = False
        self.vt_svm = None
        self.res = ''
        self.message = ''
        self.rc = True

        if self.rc:
            if deployUtil.virtEnabledInCpuAndBios():
                self.vt_svm = "OK"
                self.message = "Server supports virtualization"
            else:
                # We can't use the regular vdsm.config module here because
                # vdsm-python might not be installed yet.
                config = ConfigParser.ConfigParser()
                config.read(VDSM_CONF)

                try:
                    fake_kvm = config.getboolean('vars', 'fake_kvm_support')
                except Exception:
                    fake_kvm = False

                if fake_kvm:
                    self.vt_svm = "OK"
                    self.message = "Server uses the fake kvm virtualization"
                else:
                    self.vt_svm = "FAIL"
                    self.message = "Server does not support virtualization"
                    self.rc = False

            if "GenuineIntel" == deployUtil.cpuVendorID():
                self.res = "Intel"
            elif "AuthenticAMD" == deployUtil.cpuVendorID():
                self.res = "AMD"
            elif "IBM_POWER" == deployUtil.cpuVendorID():
                self.res = "IBM"
            else:
                self.res = "Unknown"
                logging.error("Unable to get CPU Vendor ID")

        if self.vt_svm is None:
            self.vt_svm = "NA"

        self._xmlOutput('VT_SVM', self.vt_svm, "processor", self.res,
                        self.message, self.test)
        return self.rc
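
# Hypothetical VDSM_CONF content exercising the fake_kvm branch above
# (option name from the code, value invented):
#
# [vars]
# fake_kvm_support = true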
Example #57
def start():
    """Start the logging according to the configuration."""
    if os.path.exists(FILENAME):
        # Configuration taken from file
        logging.config.fileConfig(FILENAME)
    else:
        # Default configuration, standard output
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(logging.Formatter("%(message)s"))
        LOGGER.addHandler(handler)

    if config.getboolean("logging", "debug"):
        LOGGER.setLevel(logging.DEBUG)
        for handler in LOGGER.handlers:
            handler.setLevel(logging.DEBUG)
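
# Hypothetical config for the debug switch above (section/option names
# from the code):
#
# [logging]
# debug = true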
Example #58
def postprocess(config, iou, yx_min, yx_max, prob):
    iou, yx_min, yx_max, prob, prob_cls, cls = filter_visible(config, iou, yx_min, yx_max, prob)
    keep = pybenchmark.profile('nms')(utils.postprocess.nms)(iou, yx_min, yx_max, config.getfloat('detect', 'overlap'))
    if keep:
        keep = utils.ensure_device(torch.LongTensor(keep))
        iou, yx_min, yx_max, prob, prob_cls, cls = (t[keep] for t in (iou, yx_min, yx_max, prob, prob_cls, cls))
        if config.getboolean('detect', 'fix'):
            score = torch.unsqueeze(iou, -1) * prob
            mask = score > config.getfloat('detect', 'threshold_cls')
            indices, cls = torch.unbind(mask.nonzero(), -1)
            yx_min, yx_max = (t[indices] for t in (yx_min, yx_max))
            score = score[mask]
        else:
            score = iou
        return iou, yx_min, yx_max, cls, score
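
# Usage note (assumed): postprocess() chains filter_visible() with NMS;
# it implicitly returns None when nothing survives the filters, so
# callers should check the result:
#
# out = postprocess(config, iou, yx_min, yx_max, prob)
# if out is not None:
#     iou, yx_min, yx_max, cls, score = out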

def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.safe_load(f))
    cache_dir = utils.get_cache_dir(config)
    category = utils.get_category(config, cache_dir)
    draw_bbox = utils.visualize.DrawBBox(config, category)
    batch_size = args.rows * args.cols
    paths = [os.path.join(cache_dir, phase + '.pkl') for phase in args.phase]
    dataset = utils.data.Dataset(
        utils.data.load_pickles(paths),
        transform=transform.augmentation.get_transform(config, config.get('transform', 'augmentation').split()),
        shuffle=config.getboolean('data', 'shuffle'),
    )
    logging.info('num_examples=%d' % len(dataset))
    try:
        workers = config.getint('data', 'workers')
    except configparser.NoOptionError:
        workers = multiprocessing.cpu_count()
    collate_fn = utils.data.Collate(
        utils.train.load_sizes(config),
        config.getint('data', 'maintain'),
        resize=transform.parse_transform(config, config.get('transform', 'resize_train')),
        transform_image=transform.get_transform(config, config.get('transform', 'image_train').split()),
    )
    loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=workers, collate_fn=collate_fn)
    for data in loader:
        keys = 'path, size, image, yx_min, yx_max, cls'.split(', ')
        path, size, image, yx_min, yx_max, cls = (
            t.numpy() if hasattr(t, 'numpy') else t
            for t in (data[key] for key in keys))
        fig, axes = plt.subplots(args.rows, args.cols)
        axes = axes.flat if batch_size > 1 else [axes]
        for ax, path, size, image, yx_min, yx_max, cls in zip(axes, path, size, image, yx_min, yx_max, cls):
            logging.info(path + ': ' + 'x'.join(map(str, size)))
            size = yx_max - yx_min
            target = np.logical_and(*[np.squeeze(a, -1) > 0 for a in np.split(size, size.shape[-1], -1)])
            yx_min, yx_max, cls = (a[target] for a in (yx_min, yx_max, cls))
            image = draw_bbox(image, yx_min.astype(int), yx_max.astype(int), cls)
            ax.imshow(image)
            ax.set_title('%d objects' % np.sum(target))
            ax.set_xticks([])
            ax.set_yticks([])
        fig.tight_layout()
        mng = plt.get_current_fig_manager()
        mng.resize(*mng.window.maxsize())
        plt.show()