def parse_args(self, args_str):
    """Parse command-line arguments, seeding defaults from a config file.

    Reads the MONITORING section of the ini file given by -c/--config_file
    (falling back to _DEF_SMGR_CFG_FILE) into self.MonitoringCfg, then uses
    those values as argparse defaults when parsing the remaining arguments.

    :param args_str: argument list to parse (as for parse_known_args).
    :return: argparse.Namespace of the parsed arguments.
    """
    # Source any specified config/ini file
    # Turn off help, so we print all options in response to -h
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument(
        "-c", "--config_file",
        help="Specify config file with the parameter values.",
        metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str)
    if args.config_file:
        config_file = args.config_file
    else:
        config_file = _DEF_SMGR_CFG_FILE
    config = ConfigParser.SafeConfigParser()
    config.read([config_file])
    # Build the section dict once instead of re-creating it per key.
    monitoring_items = dict(config.items("MONITORING"))
    for key, value in monitoring_items.items():
        if key in self.MonitoringCfg:
            self.MonitoringCfg[key] = value
        else:
            self.log(self.DEBUG,
                     "Configuration set for invalid parameter: %s" % key)
    # Fixed typo in the log message: "read form" -> "read from".
    self.log(self.DEBUG,
             "Arguments read from monitoring config file %s"
             % self.MonitoringCfg)
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        # parents=[conf_parser],
        # print script description with -h/--help
        description=__doc__,
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.set_defaults(**self.MonitoringCfg)
    self._collectors_ip = self.MonitoringCfg['collectors']
    return parser.parse_args(remaining_argv)
def configure(self, config):
    """Instantiate the object graph described by *config*.

    Entries carrying a "singleton" level are created level by level
    (lowest first) via the factory and removed from *config*; the first
    remaining entry flagged "main" is created and returned.

    :param config: dict of {key: option-dict}; mutated (singleton entries
        are popped).
    :return: the instantiated "main" object.
    :raises AssertionError: if no entry is marked "main".
    """
    factory = unlock.UnlockFactory()
    level = 0
    max_level = 0
    while level <= max_level:
        pop_keys = set()
        for key, value in config.items():
            if "singleton" not in value:
                continue
            level_value = value["singleton"]
            # Grow the scan horizon as deeper levels are discovered.
            if level_value > max_level:
                max_level = level_value
            if level_value <= level:
                assert "name" in value
                factory.create_singleton(value["name"], key, config)
                pop_keys.add(key)
        for key in pop_keys:
            config.pop(key)
        level += 1
    # BUG FIX: previously unbound when no "main" entry existed, so the
    # final assert raised NameError instead of AssertionError.
    unlock_instance = None
    for key, value in config.items():
        if "main" in value:
            unlock_instance = factory.create(key, config)
            break
    assert unlock_instance
    return unlock_instance
def __init__(self, config, args):
    """Initialize API client, database connection, models and cache.

    :param config: ConfigParser-like object with 'server' and 'client'
        sections (further sections are consumed by create_models /
        init_cache).
    :param args: parsed command-line arguments, overlaid on the file
        config by update_config().
    """
    db_class = config.get('server', 'class')
    # Layer per-backend defaults (if any) under the file's [server] values.
    if db_class in self.server_defaults:
        self.server_config.update(self.server_defaults[db_class])
    self.server_config.update(config.items('server'))
    self.client_config.update(config.items('client'))
    # update config from args
    self.update_config(args)
    # create client
    # 'key' is removed so it is not logged or passed as a plain option.
    api_key = self.client_config.pop('key')
    logger.info("Create client %s", self.client_config)
    self.client = MyApiClient(api_key, self.client_config)
    # log connection config w/o password
    safe_config = dict(self.server_config)
    safe_config.pop('passwd', None)
    safe_config.pop('password', None)
    logger.info("Connect server %s", safe_config)
    # create database connection
    # 'class' must name an attribute of the peewee module (a Database
    # subclass); remaining server options become its constructor kwargs.
    db_class = peewee.__dict__.get(self.server_config.pop('class'))
    # Optional SQL statement executed once right after connecting.
    self.db_init = self.server_config.pop('init', '').strip(' \'"')
    # Accept either 'db' or 'database' as the database-name option.
    self.db_name = self.server_config.pop('db', None)
    if not self.db_name and self.server_config.get('database'):
        self.db_name = self.server_config.pop('database')
    self.database = db_class(self.db_name, **self.server_config)
    if self.db_init:
        self.database.execute_sql(self.db_init)
    # create model class
    self.create_models(config)
    # create cache model
    self.init_cache(config)
def getShares(tsn=''):
    """Return the (name, settings) share list, honoring per-TiVo overrides."""
    special_prefixes = ('_tivo_', 'logger_', 'handler_', 'formatter_')
    special_names = ('Server', 'loggers', 'handlers', 'formatters')
    shares = []
    for section in config.sections():
        if section.startswith(special_prefixes) or section in special_names:
            continue
        shares.append((section, Bdict(config.items(section))))
    tsnsect = '_tivo_' + tsn
    if config.has_section(tsnsect) and config.has_option(tsnsect, 'shares'):
        # clean up leading and trailing spaces & make sure ref is valid
        overrides = []
        for raw_name in config.get(tsnsect, 'shares').split(','):
            name = raw_name.strip()
            if config.has_section(name):
                overrides.append((name, Bdict(config.items(name))))
        shares = overrides
    shares.sort()
    if get_server('nosettings', 'false').lower() in ('false', 'no', 'off'):
        shares.append(('Settings', {'type': 'settings'}))
    if get_server('tivo_mak') and get_server('togo_path'):
        shares.append(('ToGo', {'type': 'togo'}))
    return shares
def create_models(self, config):
    """Build model classes from the 'table:<name>' sections of *config*.

    Optional 'filter:<name>' and 'index:<name>' sections supply filters
    and index schemas for each model.
    """
    self.models = collections.OrderedDict()
    self.main_model = None
    for section in config.sections():
        if not section.startswith('table:'):
            continue
        table_schema = config.items(section)
        table, name = section.split(':', 2)
        filter_section = 'filter:' + name
        filters = (config.items(filter_section)
                   if config.has_section(filter_section) else [])
        index_section = 'index:' + name
        index_schema = (config.items(index_section)
                        if config.has_section(index_section) else [])
        self.init_model(name, table_schema, index_schema, filters)
    # Order models by the size of their '__iter__' table option, if present.
    self.sorted_models = sorted(
        self.models.values(),
        key=lambda model: len(model._meta.table_options.get('__iter__', [])))
    if self.client_config['resume'] and not self.main_model:
        raise ValueError('Main model is required for resume mode')
    if not self.main_model:
        logger.warning('Main model is not defined in config')
def getShares(tsn=""):
    """Return the (name, settings) share list, honoring per-TiVo overrides.

    :param tsn: TiVo serial number; a '_tivo_<tsn>' section may restrict
        the share list via its 'shares' option.
    :return: sorted list of (section_name, settings_dict) tuples, plus the
        synthetic 'Settings' and 'ToGo' entries when enabled.
    """
    # str.startswith accepts a tuple of prefixes, so the four chained
    # calls collapse into one (same behavior, fewer branches).
    shares = [
        (section, dict(config.items(section)))
        for section in config.sections()
        if not (
            section.startswith(("_tivo_", "logger_", "handler_", "formatter_"))
            or section in ("Server", "loggers", "handlers", "formatters")
        )
    ]

    tsnsect = "_tivo_" + tsn
    if config.has_section(tsnsect) and config.has_option(tsnsect, "shares"):
        # clean up leading and trailing spaces & make sure ref is valid
        tsnshares = []
        for x in config.get(tsnsect, "shares").split(","):
            y = x.strip()
            if config.has_section(y):
                tsnshares.append((y, dict(config.items(y))))
        shares = tsnshares

    shares.sort()

    if get_server("nosettings", "false").lower() in ["false", "no", "off"]:
        shares.append(("Settings", {"type": "settings"}))

    if get_server("tivo_mak") and get_server("togo_path"):
        shares.append(("ToGo", {"type": "togo"}))

    return shares
def getShares(tsn=''):
    """Return the (name, settings) share list, with platform extras.

    Adds 'VRD' on Windows and a bundled 'Desktop' share when running as a
    frozen (PyInstaller) executable.
    """
    hidden_prefixes = ('_tivo_', 'logger_', 'handler_', 'formatter_')
    hidden_names = ('Server', 'loggers', 'handlers', 'formatters')
    shares = [(section, Bdict(config.items(section)))
              for section in config.sections()
              if not section.startswith(hidden_prefixes)
              and section not in hidden_names]
    tsnsect = '_tivo_' + tsn
    if config.has_section(tsnsect) and config.has_option(tsnsect, 'shares'):
        # clean up leading and trailing spaces & make sure ref is valid
        overrides = []
        for raw in config.get(tsnsect, 'shares').split(','):
            name = raw.strip()
            if config.has_section(name):
                overrides.append((name, Bdict(config.items(name))))
        shares = overrides
    shares.sort()
    if get_server('nosettings', 'false').lower() in ('false', 'no', 'off'):
        shares.append(('Settings', {'type': 'settings'}))
    if get_server('tivo_mak') and get_server('togo_path'):
        shares.append(('ToGo', {'type': 'togo'}))
    if sys.platform == 'win32':
        shares.append(('VRD', {'type': 'vrd'}))
    if getattr(sys, 'frozen', False):
        desktop_path = os.path.join(sys._MEIPASS, 'plugins', 'desktop',
                                    'content')
        shares.append(('Desktop', {'type': 'desktop', 'path': desktop_path}))
    return shares
def jigsaw_parser(self, config):
    """Translate the jigsaw ConfigParser sections into a nested dict.

    Board values are coerced to float and grid values to int; the other
    sections keep their raw string values.  Sections are read in a fixed
    order; a missing section aborts the remaining ones but keeps whatever
    was parsed before it (same behavior as the original loop version).

    :param config: ConfigParser instance, or falsy to get defaults only.
    :return: dict with (up to) keys 'main', 'db', 'img', 'board', 'grid'.
    """
    app_config = {"main": {"start": "false"}}
    if config:
        try:
            app_config["main"] = dict(config.items('Jigsaw_Main'))
            app_config["db"] = dict(config.items('Jigsaw_DB'))
            app_config["img"] = dict(config.items('Jigsaw_Image'))
            app_config["board"] = {k: float(v)
                                   for k, v in config.items('Jigsaw_Board')}
            app_config["grid"] = {k: int(v)
                                  for k, v in config.items('Jigsaw_Grid')}
        except ConfigParser.NoSectionError:
            # logging.warn is deprecated; logging.warning is the API.
            logging.warning("[jigsawpp]: No Section exception. Might be OK!")
    return app_config
def parse_config_file(filename):
    """Parse sqla_taskq settings (and optional logging config) from a file.

    If the file has a [loggers] section, logging is configured from it.

    :param filename: path to the ini file.
    :return: dict with keys 'sqla_url' (only when present), 'kill'
        (default False) and 'timeout' (default 60), or None when the file
        has no [sqla_taskq] section.
    """
    config = ConfigParser.ConfigParser()
    config.read(filename)
    try:
        config.items('loggers')
        # We have at least the loggers section so we can set logging config
        logging.config.fileConfig(filename)
    except ConfigParser.NoSectionError:
        log.info('No section loggers in %s' % filename)

    try:
        # dict(...) suffices for the membership tests below; the previous
        # dict(...).keys() indirection was redundant.
        items = dict(config.items('sqla_taskq'))
    except ConfigParser.NoSectionError:
        log.info('No section sqla_taskq in %s' % filename)
        return None
    dic = {}
    if 'sqla_url' in items:
        dic['sqla_url'] = config.get('sqla_taskq', 'sqla_url')
    # Optional flags fall back to safe defaults when absent.
    if 'kill' in items:
        dic['kill'] = config.getboolean('sqla_taskq', 'kill')
    else:
        dic['kill'] = False
    if 'timeout' in items:
        dic['timeout'] = config.getint('sqla_taskq', 'timeout')
    else:
        dic['timeout'] = 60
    return dic
def getShares(tsn=''):
    """List available shares as (name, settings) pairs.

    A '_tivo_<tsn>' section with a 'shares' option replaces the default
    list with the named sections only.
    """
    def is_share(section):
        # Internal/bookkeeping sections never become shares.
        if section.startswith(('_tivo_', 'logger_', 'handler_', 'formatter_')):
            return False
        return section not in ('Server', 'loggers', 'handlers', 'formatters')

    shares = [(section, Bdict(config.items(section)))
              for section in config.sections() if is_share(section)]
    tsnsect = '_tivo_' + tsn
    if config.has_section(tsnsect) and config.has_option(tsnsect, 'shares'):
        # clean up leading and trailing spaces & make sure ref is valid
        selected = []
        for entry in config.get(tsnsect, 'shares').split(','):
            entry = entry.strip()
            if config.has_section(entry):
                selected.append((entry, Bdict(config.items(entry))))
        shares = selected
    shares.sort()
    if get_server('nosettings', 'false').lower() in ('false', 'no', 'off'):
        shares.append(('Settings', {'type': 'settings'}))
    if get_server('tivo_mak') and get_server('togo_path'):
        shares.append(('ToGo', {'type': 'togo'}))
    return shares
def getShares(tsn=''):
    """List available shares as (name, settings) pairs.

    Honors per-TiVo 'shares' overrides and the Server 'nosettings' flag.
    """
    shares = []
    for section in config.sections():
        if section.startswith(special_section_prefixes):
            continue
        if section in special_section_names:
            continue
        shares.append((section, Bdict(config.items(section))))
    tsnsect = '_tivo_' + tsn
    if config.has_section(tsnsect) and config.has_option(tsnsect, 'shares'):
        # clean up leading and trailing spaces & make sure ref is valid
        selected = []
        for raw in config.get(tsnsect, 'shares').split(','):
            name = raw.strip()
            if config.has_section(name):
                selected.append((name, Bdict(config.items(name))))
        shares = selected
    shares.sort()
    if not config.getboolean('Server', 'nosettings', fallback=False):
        shares.append(('Settings', {'type': 'settings'}))
    if get_server('tivo_mak') and get_togo('path'):
        shares.append(('ToGo', {'type': 'togo'}))
    return shares
def __init__(self, config, args):
    """Initialize API client, database connection, models and cache.

    :param config: ConfigParser-like object with 'server' and 'client'
        sections (further sections are consumed by create_models /
        init_cache).
    :param args: parsed command-line arguments, overlaid on the file
        config by update_config().
    """
    db_class = config.get('server', 'class')
    # Layer per-backend defaults (if any) under the file's [server] values.
    if db_class in self.server_defaults:
        self.server_config.update(self.server_defaults[db_class])
    self.server_config.update(config.items('server'))
    self.client_config.update(config.items('client'))
    # update config from args
    self.update_config(args)
    # create client
    # 'key' is removed so it is not logged or passed as a plain option.
    api_key = self.client_config.pop('key')
    logger.info("Create client %s", self.client_config)
    self.client = MyApiClient(api_key, self.client_config)
    # log connection config w/o password
    safe_config = dict(self.server_config)
    safe_config.pop('passwd', None)
    safe_config.pop('password', None)
    logger.info("Connect server %s", safe_config)
    # create database connection
    # 'class' must name an attribute of the peewee module (a Database
    # subclass); remaining server options become its constructor kwargs.
    db_class = peewee.__dict__.get(self.server_config.pop('class'))
    # Optional SQL statement executed once right after connecting.
    self.db_init = self.server_config.pop('init', '').strip(' \'"')
    # Accept either 'db' or 'database' as the database-name option.
    self.db_name = self.server_config.pop('db', None)
    # Keep-alive interval; popped so it is not passed to the constructor.
    self.db_ping = self.server_config.pop('ping', 0)
    if not self.db_name and self.server_config.get('database'):
        self.db_name = self.server_config.pop('database')
    self.database = db_class(self.db_name, **self.server_config)
    if self.db_init:
        self.database.execute_sql(self.db_init)
    # create model class
    self.create_models(config)
    # create cache model
    self.init_cache(config)
def load_config(conf_file):
    """Loads redfish.conf file

    Loads and parsers the system conf file into config global var
    Loads json schemas into schemas_dict global var
    Established a connection with OneView and sets in as ov_conn
    global var

    Args:
        conf_file: string with the conf file name

    Returns:
        None

    Exception:
        OneViewRedfishResourceNotFoundError:
            - if conf file not found
            - if any of the schemas files are not found
            - if the schema directory is not found
        OneViewRedFishResourceNotAccessibleError:
            - if can't access schema's directory
        HPOneViewException:
            - if fails to connect to oneview
    """
    config = load_conf(conf_file)
    globals()['config'] = config

    # Config file read set global vars
    # Setting ov_config
    ov_config = dict(config.items('oneview_config'))
    ov_config['credentials'] = dict(config.items('credentials'))
    ov_config['api_version'] = int(ov_config['api_version'])
    globals()['ov_config'] = ov_config

    # Setting schemas_dict
    schemas = dict(config.items('schemas'))
    globals()['schemas'] = schemas

    registries = dict(config.items('registry'))

    # Reads DeliveryRetry* settings into their own globals.
    load_event_service_info()

    # Load schemas | Store schemas | Connect to OneView
    try:
        ov_client = OneViewClient(ov_config)
        globals()['ov_client'] = ov_client

        registry_dict = load_registry(
            config['redfish']['registry_dir'], registries)
        globals()['registry_dict'] = registry_dict

        store_schemas(config['redfish']['schema_dir'])
    except OneViewRedfishResourceNotFoundError as e:
        # NOTE(review): despite the docstring, load failures are re-raised
        # wrapped in OneViewRedfishError, not propagated as-is.
        raise OneViewRedfishError(
            'Failed to load schemas or registries: {}'.format(e))
    except Exception as e:
        raise OneViewRedfishError(
            'Failed to connect to OneView: {}'.format(e))
def get_config(service_conf, section=''):
    """Read *service_conf* and return [common] options overlaid with *section*."""
    parser = ConfigParser.ConfigParser()
    parser.read(service_conf)
    merged = {}
    if parser.has_section('common'):
        merged = dict(parser.items('common'))
    if section and parser.has_section(section):
        merged.update(parser.items(section))
    return merged
def get_config(section=''):
    """Load conf/service.conf, returning [common] overlaid with *section*."""
    parser = ConfigParser.ConfigParser()
    conf_path = os.path.join(work_dir, 'conf/service.conf')
    parser.read(conf_path)
    merged = {}
    if parser.has_section('common'):
        merged = dict(parser.items('common'))
    if section and parser.has_section(section):
        merged.update(parser.items(section))
    return merged
def parse_args(self, args_str, section):
    """Parse arguments, seeding defaults from a section of the config file.

    Reads the MONITORING or INVENTORY section (per *section*) of the ini
    file given by -c/--config_file (falling back to _DEF_SMGR_CFG_FILE)
    into the matching Cfg dict, then uses those values as argparse
    defaults when parsing the remaining arguments.

    :param args_str: argument list to parse.
    :param section: "MONITORING" or "INVENTORY".
    :return: argparse.Namespace of the parsed arguments.
    """
    # Source any specified config/ini file
    # Turn off help, so we print all options in response to -h
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument(
        "-c", "--config_file",
        help="Specify config file with the parameter values.",
        metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str)
    if args.config_file:
        config_file = args.config_file
    else:
        config_file = _DEF_SMGR_CFG_FILE
    config = ConfigParser.SafeConfigParser()
    config.read([config_file])
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        # parents=[conf_parser],
        # print script description with -h/--help
        description=__doc__,
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )

    def _apply_section_defaults(section_name, cfg_dict, label):
        # Overlay file values onto known keys; log unknown keys.
        # (De-duplicates the two previously copy-pasted branches.)
        file_items = dict(config.items(section_name))
        for key, value in file_items.items():
            if key in cfg_dict:
                cfg_dict[key] = value
            else:
                self._smgr_log.log(
                    self._smgr_log.DEBUG,
                    "Configuration set for invalid parameter: %s" % key)
        self._smgr_log.log(
            self._smgr_log.DEBUG,
            "Arguments read from %s config file %s" % (label, cfg_dict))
        parser.set_defaults(**cfg_dict)

    if section == "MONITORING":
        _apply_section_defaults("MONITORING", self.MonitoringCfg,
                                "monitoring")
    elif section == "INVENTORY":
        _apply_section_defaults("INVENTORY", self.InventoryCfg,
                                "inventory")
    return parser.parse_args(remaining_argv)
def main():
    """Entry point: configure logging, build the scheduler and run monitors.

    Reads 'sample.ini' for the monitor list ([monitor] keys) and each
    monitor's settings ([monitor_<name>]), optionally expanding a named
    [schedule_<name>] section into a timedelta 'every' kwarg.
    """
    socket.setdefaulttimeout(30)
    logging.config.fileConfig('sample.ini')
    log = logging.getLogger('pygios')
    config = ConfigParser()
    config.read('sample.ini')
    monitor = [
        i.strip() for i in config.get('monitor', 'keys').split(',')
        if i.strip()
    ]
    log.info("Loaded configuration: %d monitor%s.", len(monitor),
             '' if len(monitor) == 1 else 's')
    log.debug("Preparing scheduler.")
    scheduler = Scheduler()
    log.debug("Preparing logger task.")
    tp = AsyncQueue()
    lq = AsyncQueue()
    scheduler.add(Tee(input=tp))
    scheduler.add(TransitionPipe(input=lq, output=tp))
    for name in monitor:
        log.debug("Configuring %s.", name)
        kw = dict(config.items('monitor_' + name))
        plugin = get_dotted_object(kw.pop('plugin'))
        if 'schedule' in kw:
            schedule = kw.pop('schedule')
            kw['every'] = timedelta(
                **dict([(i, int(j))
                        for i, j in config.items('schedule_' + schedule)]))
        # BUG FIX: dict.iterkeys() is Python-2-only and crashes on
        # Python 3; iterating the dict directly works on both (values are
        # mutated, keys are not, so this is safe during iteration).
        for key in kw:
            if key in plugin.kwargs:
                kw[key] = plugin.kwargs[key](kw[key])
        log.info("%s(%r)", name, kw)
        scheduler.add(plugin(name, lq, **kw))
    scheduler.run()
def __init__(self, config, state_dict, cmap=None):
    """Prepare a graphviz digraph visualizing *state_dict* tensors.

    :param config: ConfigParser with 'digraph_node_attr',
        'digraph_graph_attr' and 'graph' sections.
    :param state_dict: mapping of parameter name -> tensor.
    :param cmap: matplotlib colormap name (None selects the default).
    """
    self.dot = graphviz.Digraph(
        node_attr=dict(config.items('digraph_node_attr')),
        graph_attr=dict(config.items('digraph_graph_attr')))
    self.dot.format = config.get('graph', 'format')
    self.state_dict = state_dict
    # Reverse map: tensor storage pointer -> parameter name.
    # Assumes tensors expose a _cdata attribute -- TODO confirm backend.
    self.var_name = {t._cdata: k for k, t in state_dict.items()}
    self.seen = set()
    self.drawn = set()
    self.cm = matplotlib.cm.get_cmap(cmap)
    # NOTE(review): eval() on a config value executes arbitrary code;
    # only acceptable if the config file is fully trusted.
    self.metric = eval(config.get('graph', 'metric'))
    # Precompute the metric range for color normalization.
    metrics = [self.metric(t.numpy()) for t in state_dict.values()]
    self.minmax = [min(metrics), max(metrics)]
def parse_config_for_client_initiated(args):
    """
    Parse connector configuration and generate a result in dictionary
    format which includes integration names and the required info for
    pulling out data
    """
    connectors = {}
    try:
        config = SpecialConfigParser()
        if not os.path.isfile(args.ini):
            # The config file does not yet exist, so generate it.
            # generate_ini_file(args)
            raise ConfigError("Error: unable to open ini file: %r" %
                              args.ini)
        config.read(args.ini)
        for section in config.sections():
            if section == 'converters':
                # Each converter option registers a named dynamic converter.
                for name, filter_str in config.items('converters'):
                    DynamicConverter(name, filter_str)
            elif section == 'oomnitza' or config.has_option(
                    section, 'enable') and config.getboolean(
                        section, 'enable'):
                # [oomnitza] must come first: all other connectors depend
                # on it being initialized before them.
                if not connectors and section != 'oomnitza':
                    raise ConfigError(
                        "Error: [oomnitza] must be the first section in the ini file."
                    )
                cfg = init_connector_from_configuration(
                    section, config.items(section), args)
                connectors[section] = cfg
            else:
                LOG.debug("Skipping connector '%s' as it is not enabled.",
                          section)
                pass
    except IOError:
        raise ConfigError("Could not open config file.")

    # At least one connector beyond [oomnitza] itself must be enabled.
    if len(connectors) <= 1:
        raise ConfigError("No connectors have been enabled.")
    if args.show_mappings:
        # Diagnostic mode: print field mappings and exit the process.
        for name, connector in connectors.items():
            if name == 'oomnitza':
                continue
            print(connector["__connector__"].section, "Mappings")
            pprint.pprint(connector["__connector__"].field_mappings)
        exit(0)
    return connectors
def ini_to_dict(ini_str):
    """Parse an INI string into a dict of {section: {key: value}}.

    Keys of a section literally named 'default' are merged into the top
    level.  Inline '#' comments and surrounding single quotes are
    stripped from values; option-name case is preserved.

    :param ini_str: the ini document as a string.
    :return: nested dict of the parsed options.
    """
    import re
    try:
        # BUG FIX: SafeConfigParser was removed in Python 3.12; the
        # ConfigParser class is its drop-in replacement (SafeConfigParser
        # had been a deprecated alias since 3.2).
        from configparser import ConfigParser as IniParser
    except ImportError:
        from ConfigParser import SafeConfigParser as IniParser
    config, conf_dict = IniParser(), {}
    config.optionxform = str  # keep option names case-sensitive
    try:
        config.read_string(ini_str)
    except AttributeError:
        # Python 2 parsers have no read_string(); fall back to readfp.
        from io import StringIO
        config.readfp(StringIO(ini_str))
    for sect in config.sections():
        if 'default' != sect:
            conf_dict[sect] = tmp_dict = {}
        else:
            tmp_dict = conf_dict
        for key, val in config.items(sect):
            # Keep everything before the first '#' (inline comment).
            match_1 = re.search('([^#]*)', val)
            tmp_dict[key] = match_1.groups()[0].strip().strip("'")
    return conf_dict
def init_logger(config=None):
    """Load logging config file and setup logging. Only needs to be called once per process."""
    global log
    if log:
        log.warning("logging already initialized")
    if config is None:
        config = rhsm.config.get_config_parser()
    default_log_level = config.get('logging', 'default_log_level')
    # BUG FIX: the list previously contained 'NOSET' (a typo); 'NOTSET' is
    # the actual logging level name.  With the typo, a configured value of
    # 'NOTSET' was wrongly rejected while 'NOSET' passed validation and
    # then raised AttributeError at getattr(logging, 'NOSET') below.
    if default_log_level not in [
            'CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'
    ]:
        # This is not a valid logging level, set to INFO
        print("Invalid Log Level: {lvl}, setting to INFO.".format(
            lvl=default_log_level), file=sys.stderr)
        print(
            "Use: subscription-manager config --logging.default_log_level=<Log Level> to set the default_log_level.",
            file=sys.stderr)
        default_log_level = 'INFO'

    pending_error_messages = []
    # Attach our handlers and the default level to each namespace we own.
    for root_namespace in ROOT_NAMESPACES:
        logger = logging.getLogger(root_namespace)
        rhsm_handler, error = _get_default_rhsm_log_handler()
        if error:
            pending_error_messages.append(error)
        logger.addHandler(rhsm_handler)
        logger.addHandler(_get_default_subman_debug_handler())
        logger.setLevel(getattr(logging, default_log_level.strip()))

    # Apply per-logger level overrides from the [logging] section.
    for logger_name, logging_level in config.items('logging'):
        logger_name = logger_name.strip()
        if logger_name.split('.')[0] not in ROOT_NAMESPACES:
            # Don't allow our logging configuration to mess with loggers
            # outside the namespaces we claim as ours
            # Also ignore other more general configuration options like
            # default_log_level
            continue
        logger = logging.getLogger(logger_name)
        logger.setLevel(getattr(logging, logging_level.strip()))

    if not log:
        log = logging.getLogger(__name__)

    # RHBZ#1782910 Previously to handle the case of not being able to create
    # the /var/log/rhsm/rhsm.log file to log to, we were logging an error level message
    # by doing `logging.error("Our error message here"). Doing that at a point where
    # there is nothing configured for the root logger causes a StreamHandler to be added to
    # the root logger. That then caused all dnf python logging to be written to stderr.
    # To be able to output the log messages which happen during set up, we queue the errors
    # and log them using our resulting logger which has been setup (after adding our own
    # StreamHandler but not to the root logger).
    for error_message in pending_error_messages:
        log.error(error_message)
def parse_cfg_with_sections(stream):
    """Return as dict of dict of ...

    The stream is parsed as JSON when possible, otherwise as an INI
    document whose sections become the top-level keys.

    Experimental (documented but not implemented here): ConfigParser
    sections separated by dots could become nested sub-sections, e.g.
    [foo.bar] baz = 42  <=>  {"foo": {"bar": {"baz": 42}}}.
    """
    content = stream.read()
    # Try JSON first; a JSON document is returned as-is.
    try:
        # Simplified: json.loads(NativeIO(content).read()) just
        # round-tripped the string through a buffer for nothing.
        return json.loads(content)
    except ValueError:
        pass  # Not JSON; fall through to INI parsing.
    # The previous bare `except: raise` wrapper was a no-op and has been
    # removed; parser errors still propagate unchanged.
    config = ConfigParser()  # strict=False?
    config.optionxform = str  # preserve option-name case
    config.readfp(NativeIO(content))
    result = dict()
    for sec in config.sections():
        result[sec] = dict(config.items(sec))
    return result
def __init__(self, config_file):
    """Initialize the monitor thread from *config_file*.

    Configures logging, loads the [moniteur] settings, then loads the
    notifier configuration file referenced by the 'notifier_config'
    setting and builds the Notifier.

    :param config_file: path to the main ini file.
    :raises Exception: if the notifier configuration file is unreadable.
    """
    # print as a function works on both Python 2 and 3 (the original
    # py2-only print statement failed to even compile under Python 3).
    print("Using config file '%s'" % config_file)

    # init python logging
    logging.config.fileConfig(config_file)

    # create a logger
    self._log = logging.getLogger("moniteur")

    # Load the config file
    config = ConfigParser.SafeConfigParser()
    config.read(config_file)

    # store settings
    self.settings = dict(config.items("moniteur"))
    self._log.info("Settings: %s" % self.settings)

    # Load the notifier configuration
    notifier_file = self.settings['notifier_config']
    self._log.debug("Loading notifier definition from '%s'" % notifier_file)
    if not os.access(notifier_file, os.R_OK):
        m = "Unable to load tests configuration file: '%s'" % notifier_file
        self._log.error(m)
        raise Exception(m)

    notifier_config = ConfigParser.SafeConfigParser()
    notifier_config.read(notifier_file)
    self._notifier = Notifier(self.settings, notifier_config)

    # Init thread
    threading.Thread.__init__(self)
def load_settings(path):
    """Return the [app:main] settings from *path* and configure logging from it."""
    here = os.path.abspath(os.path.dirname(path))
    # 'here' is exposed to the ini file for interpolation.
    parser = compat.ConfigParser(defaults={'here': here})
    parser.read(path)
    app_settings = dict(parser.items('app:main'))
    logging.config.fileConfig(path)
    return app_settings
def reset():
    """Re-read the config files, rebuilding the module-level state."""
    global bin_paths
    global config
    global configs_found
    global tivos_found
    bin_paths = {}
    config = ConfigParser.ConfigParser()
    configs_found = config.read(config_files)
    if not configs_found:
        print ('WARNING: pyTivo.conf does not exist.\n' +
               'Assuming default values.')
        configs_found = config_files[-1:]
    # Register each real per-TiVo section (SD/HD/4K are format defaults).
    for section in config.sections():
        if not section.startswith('_tivo_'):
            continue
        tsn = section[6:]
        if tsn.upper() not in ('SD', 'HD', '4K'):
            tivos_found = True
            tivos[tsn] = Bdict(config.items(section))
    # Make sure the baseline sections always exist.
    for required in ('Server', '_tivo_SD', '_tivo_HD', '_tivo_4K'):
        if not config.has_section(required):
            config.add_section(required)
def __setup_advanced_logging() -> None:
    """Sets up advanced logging over mail and Discord
    """
    if config.getboolean("logging", "enable_mail_logging"):
        mail_settings = dict(config.items("mail_logging"))
        mail_handler = SMTPHandler(
            mailhost=(mail_settings["mailhost"], mail_settings["mailport"]),
            fromaddr=mail_settings["fromaddr"],
            toaddrs=mail_settings["toaddrs"].split(","),
            subject=mail_settings["subject"],
            credentials=(mail_settings["username"],
                         mail_settings["password"]),
            secure=(),
            timeout=config.getint("mail_logging", "timeout"))
        mail_handler.setFormatter(formatter)
        mail_handler.setLevel(logging.WARNING)
        logger.addHandler(mail_handler)

    if config.getboolean("logging", "enable_discord_logging"):
        # An empty avatar_url is normalized to None.
        webhook_avatar = config["discord_logging"]["avatar_url"] or None
        discord_handler = DiscordHandler(
            config["discord_logging"]["username"],
            config["discord_logging"]["webhook_url"],
            webhook_avatar)
        discord_handler.setFormatter(formatter)
        discord_handler.setLevel(logging.WARNING)
        logger.addHandler(discord_handler)
def load_attrid_config(cfg_path=None):
    """Parse the [id_map] section of the config file at *cfg_path*.

    Each option maps an attr id to "name[,ratio[,ip]]".

    :param cfg_path: path of the config file.
    :return: (id_map, id_with_ratio, id_with_ip) where id_map maps
        name -> attr id, id_with_ratio maps attr id -> float ratio and
        id_with_ip maps attr id -> ip string.
    """
    id_map = {}
    id_with_ip = {}
    id_with_ratio = {}
    config = ConfigParser.ConfigParser()
    # BUG FIX: the file handle from open() was never closed (leak);
    # close it explicitly once the parser has consumed it.
    cfg_file = open(cfg_path)
    try:
        config.readfp(skip_leading_wsp(cfg_file))
    finally:
        cfg_file.close()
    ip_pattern = re.compile(r'^(\d+\.){3}\d+$')
    num_pattern = re.compile(r'^[\d\.]+$')
    for attr_id, raw in config.items('id_map'):
        attr_id = attr_id.strip()
        val = raw.split(',')
        id_map[val[0].strip()] = attr_id
        # NOTE: the ratio is matched unstripped on purpose (a value with
        # surrounding spaces does not match, same as the original).
        if len(val) >= 2 and num_pattern.match(val[1]):
            id_with_ratio[attr_id] = float(val[1].strip())
        if len(val) > 2:
            # print() works on Python 2 and 3 (was a py2-only statement;
            # under py2 this now prints a tuple repr -- acceptable for a
            # debug trace, and required for py3 compatibility).
            print(attr_id, "->", val[0].strip(), "->", val[1].strip(),
                  "->", val[2].strip())
            if ip_pattern.match(val[2]):
                id_with_ip[attr_id] = val[2].strip()
    return id_map, id_with_ratio, id_with_ip
def load_config(self, configpath, pkg=False):
    """
    Use ConfigParser module to load config sections

    :param pkg: file is inside the package (configpath is the relative
        path inside the package)
    :param configpath:
    :return:
    """
    if not pkg and not os.path.exists(configpath):
        LOG.error("Configuration file not found (%s)" % configpath)
        from errno import ENOENT
        raise OSError(ENOENT)
    config = SafeConfigParser(allow_no_value=True)
    if pkg:
        with pkgr.resource_stream(__name__, configpath) as conf:
            config.readfp(conf)
    else:
        config.read(configpath)
    LOG.debug(config)
    for section in config.sections():
        # Guard clause: only sections matching an attribute are applied.
        if not hasattr(self, section):
            LOG.warning("Unknow config section %s" % section)
            continue
        section_cfg = format_dict(dict(config.items(section)))
        getattr(self, section).config.update(section_cfg)
        LOG.debug("%s config updated" % section)
        LOG.debug("%s.%s : %s" % (self.__class__.__name__, section,
                                  getattr(self, section)))
def parse_connector_config_for_cloud_initiated(connector_name, extra_cfg,
                                               args):
    """
    Read and init the specific connector by its given `section` identifier
    """
    try:
        config = ConfigParser()
        if not os.path.isfile(args.ini):
            # The config file does not yet exist, so generate it.
            # generate_ini_file(args)
            raise ConfigError("Error: unable to open ini file: %r" %
                              args.ini)
        config.read(args.ini)

        # That is possible that there will be no content within the .ini file - especially for the cloud installation.
        # So, init the connector config without the data from ini file
        try:
            configuration = config.items(connector_name)
        except NoSectionError:
            configuration = []

        initialized_connector_configuration = \
            init_connector_from_configuration(
                connector_name, configuration, args, extra_cfg=extra_cfg)

        # ensure the instance of the connector class initiated for the cloud based connector has its own copy of OomnitzaConnector and not sharing it
        # among other threads
        initialized_connector_configuration[
            '__connector__'].OomnitzaConnector = deepcopy(
                initialized_connector_configuration['__connector__'].
                OomnitzaConnector)

        return initialized_connector_configuration
    except IOError:
        raise ConfigError("Could not open config file.")
def create_models(self, config):
    """Instantiate a model for every 'table:<name>' section of *config*."""
    self.models = dict()
    table_sections = [s for s in config.sections()
                      if s.startswith('table:')]
    for section in table_sections:
        schema = config.items(section)
        table, model_name = section.split(':', 2)
        self.init_model(model_name, schema)
def __parseCfgOptions(self):
    """Read the core config plus config.d fragments into the registry and
    initialize logging from the combined configuration.

    :raises ConfigNoFile: if no usable configuration file was read.
    """
    # Is there a configuration available?
    configDir = self.get('core.config')
    configFiles = self.__getCfgFiles(os.path.join(configDir, "config.d"))
    # The main 'config' file is read first so fragments can override it.
    configFiles.insert(0, os.path.join(configDir, "config"))

    config = ConfigParser.RawConfigParser()
    filesRead = config.read(configFiles)

    # Bail out if there's no configuration file
    if not filesRead:
        raise ConfigNoFile("No usable configuration file (%s/config) found!" % configDir)

    # Walk thru core configuration values and push them into the registry
    for section in config.sections():
        if not section in self.__registry:
            self.__registry[section] = {}
        self.__registry[section].update(config.items(section))

    # Initialize the logging module on the fly
    # fileConfig() wants a file-like object, so the parsed configuration
    # is round-tripped through a StringIO buffer.
    try:
        tmp = StringIO()
        config.write(tmp)
        tmp2 = StringIO(tmp.getvalue())
        logging.config.fileConfig(tmp2)
    except ConfigParser.NoSectionError:
        # No logging sections configured; fall back to basic stderr logging.
        logging.basicConfig(level=logging.ERROR, format='%(asctime)s (%(levelname)s): %(message)s')
def create_tools():
    """Build the bot and its message handler from CLI args and an ini file.

    Parses -b/--bot, -d/--config-dir-path and -l/--logging-path, loads the
    logging configuration, then reads <config-dir>/<bot>.ini for the
    token and handler settings.

    :return: (CustomTeleBot, MessageHandler) tuple.
    """
    parser = ArgumentParser()
    parser.add_argument("-b", "--bot", required=True,
                        help="The name of bot that will be run")
    parser.add_argument("-d", "--config-dir-path",
                        help="The config dir path")
    parser.add_argument("-l", "--logging-path", required=True,
                        help="The name of logging configuration file")
    args = parser.parse_args()
    logging.config.fileConfig(args.logging_path)
    full_bot_path = join(args.config_dir_path, args.bot) + ".ini"
    config = ConfigParser()
    # The previous `except Exception as exc: raise exc` wrapper was a
    # no-op (and truncated tracebacks on Python 2); errors now simply
    # propagate unchanged.
    config.read(full_bot_path)
    token = config.get("main", "token")
    handler_name = config.get('main', "handler")
    handler_settings = dict(config.items("handler"))
    msghandler = import_module(handler_name)
    return (CustomTeleBot(token),
            msghandler.msghandler.MessageHandler(**handler_settings))
def load_config(cls, config_path=None):
    """ Loads Walkoff configuration from JSON file

    Truthy top-level values override the matching upper-cased class
    attribute; the SQLAlchemy URI is (re)derived afterwards either way.

    Args:
        config_path (str): Optional path to the config. Defaults to the
            CONFIG_PATH class variable.
    """
    if config_path:
        cls.CONFIG_PATH = config_path
    if cls.CONFIG_PATH:
        try:
            if isfile(cls.CONFIG_PATH):
                with open(cls.CONFIG_PATH) as config_file:
                    # json.load avoids the intermediate read() string.
                    config = json.load(config_file)
                for key, value in config.items():
                    # Only truthy values override class attributes.
                    if value:
                        setattr(cls, key.upper(), value)
            else:
                logger.warning('Config path {} is not a file.'.format(
                    cls.CONFIG_PATH))
        except (IOError, OSError, ValueError):
            logger.warning('Could not read config file.', exc_info=True)

    cls.SQLALCHEMY_DATABASE_URI = format_db_path(cls.WALKOFF_DB_TYPE,
                                                 cls.DB_PATH,
                                                 'WALKOFF_DB_USERNAME',
                                                 'WALKOFF_DB_PASSWORD',
                                                 cls.WALKOFF_DB_HOST)
def load_event_service_info():
    """Loads Event Service information

    Loads DeliveryRetryAttempts and DeliveryRetryIntervalSeconds from
    CONFIG file and store it in a global var.

    Exceptions:
        OneViewRedfishError: DeliveryRetryAttempts and
        DeliveryRetryIntervalSeconds must be integers greater than zero.
    """
    config = globals()['config']
    event_service = dict(config.items("event_service"))
    try:
        delivery_retry_attempts = int(
            event_service["DeliveryRetryAttempts"])
        delivery_retry_interval = int(
            event_service["DeliveryRetryIntervalSeconds"])
        # Kept inside the try block to preserve original exception flow.
        if delivery_retry_attempts <= 0 or delivery_retry_interval <= 0:
            raise OneViewRedfishError(
                "DeliveryRetryAttempts and DeliveryRetryIntervalSeconds must"
                " be an integer greater than zero.")
    except ValueError:
        raise OneViewRedfishError(
            "DeliveryRetryAttempts and DeliveryRetryIntervalSeconds "
            "must be valid integers.")
    globals()['delivery_retry_attempts'] = delivery_retry_attempts
    globals()['delivery_retry_interval'] = delivery_retry_interval
def add_time_2_log_filename(config):
    """Append a timestamp to every 'filename' value in *config*, in place.

    Recurses into nested dict values so arbitrarily deep logging configs
    are handled.

    NOTE(review): the strftime format "%Y-%d-%m-%s" is year-DAY-month plus
    a platform-specific "%s" (epoch seconds on glibc); it may have been
    intended as "%Y-%m-%d-%S" -- confirm before changing, since existing
    log files already carry the current format.

    :param config: dict (possibly nested) of logging options; mutated.
    """
    for key, value in config.items():
        if key == 'filename':
            config[key] = value + "." + time.strftime("%Y-%d-%m-%s")
            print('log file name: %s' % config[key])
        elif isinstance(value, dict):
            # isinstance also covers dict subclasses (OrderedDict etc.),
            # which the previous `type(v) is dict` check skipped.
            add_time_2_log_filename(value)
def parse_config(self, arguments=None):
    """Merge CLI arguments, config file and environment into one dict.

    For every variable declared in self.PARAMETERS, an environment
    variable named SECTION_VAR overrides the config-file value.

    :param arguments: argument list for the argparse parser (None = argv).
    :return: dict of {section: {var: value}}.
    :raises ValueError: when a required variable is missing everywhere.
    """
    finalconfig = {}
    args = self.parser.parse_args(arguments)
    config = configparser.SafeConfigParser()
    try:
        with open(args.config) as fdconfig:
            config.readfp(fdconfig)
    except Exception as e:
        msg = "Ignoring configuration file '%s'"
        self.logger.warn(msg % (args.config))
        # Create empty sections so config.items(section) below succeeds.
        for section in self.PARAMETERS.keys():
            config.add_section(section)
    else:
        self.logger.info("Read configuration file '%s'" % args.config)
    for section in self.PARAMETERS.keys():
        cfgsection = dict(config.items(section))
        # BUG FIX: dict.iteritems() is Python-2-only; .items() works on
        # both Python 2 and 3.
        for var, required in self.PARAMETERS[section].items():
            # build env variables like IRONIC_URL
            envparameter = section.upper() + '_' + var.upper()
            # Membership test replaces the previous bare `except: pass`,
            # which silently swallowed every error, not just KeyError.
            if envparameter in os.environ:
                cfgsection[var] = os.environ[envparameter]
                msg = "Reading env variable '%s'" % envparameter
                self.logger.debug(msg)
            if required and not var in cfgsection:
                msg = "Variable '%s.%s' not defined and it is required!"
                msg = msg % (section, var)
                self.logger.error(msg)
                raise ValueError(msg)
        finalconfig[section] = cfgsection
    self.args = args
    return finalconfig
def write_config_wpa(config):
    """Write a wpa_supplicant-style config file.

    *config* maps option names to scalar values, or to a list of dicts for
    brace-delimited groups (e.g. ``network={...}``); the 'location' key
    holds the output path and is not emitted as an option.

    Args:
        config: dict describing the file contents; mutated never.
    """
    printer("Creating: {0}".format(config['location']))
    group = "={"
    delim = "="
    # FIX: ("ssid") is just the string "ssid", so `key in quoted_fields`
    # performed a SUBSTRING test (e.g. "id" matched).  A one-element tuple
    # gives the intended exact-membership check.
    quoted_fields = ("ssid",)
    with open(config['location'], 'w') as outfile:
        for key, value in config.items():
            if isinstance(value, list):
                # Each list entry becomes a group: key={ ... }
                for entry in value:
                    outfile.write('{0}{2}\n'.format(key, delim, group))
                    for subkey, subval in entry.items():
                        quotes = '"' if subkey in quoted_fields else ''
                        outfile.write(' {1}{0}{3}{2}{3}\n'.format(
                            delim, subkey, subval, quotes))
                    outfile.write('}\n')
            elif not key == 'location':
                if value:
                    quotes = '"' if key in quoted_fields else ''
                    outfile.write('{1}{0}{3}{2}{3}\n'.format(
                        delim, key, value, quotes))
                else:
                    # Bare flag option with no value.
                    outfile.write('{0}\n'.format(key))
def reset():
    """Re-read pyTivo configuration from disk, resetting module state."""
    global bin_paths
    global config
    global configs_found
    global tivos_found

    bin_paths = {}
    config = ConfigParser.ConfigParser()
    configs_found = config.read(config_files)
    if not configs_found:
        print('WARNING: pyTivo.conf does not exist.\n' +
              'Assuming default values.')
        configs_found = config_files[-1:]

    # Sections named _tivo_<TSN> describe individual TiVo boxes, except
    # the reserved quality sections (_tivo_SD/_tivo_HD/_tivo_4K).
    for section in config.sections():
        if not section.startswith('_tivo_'):
            continue
        tsn = section[6:]
        if tsn.upper() in ('SD', 'HD', '4K'):
            continue
        tivos_found = True
        tivos[tsn] = Bdict(config.items(section))

    # Guarantee the well-known sections exist so later lookups don't fail.
    for required in ('Server', '_tivo_SD', '_tivo_HD', '_tivo_4K'):
        if not config.has_section(required):
            config.add_section(required)
def get_config(config_file=None, server=None):
    """Read one server section of a pyredstone config file into a dict.

    Args:
        config_file: path to the ini file; defaults to the standard
            minecraft location.
        server: section name; when None the first section found is used.

    Returns:
        dict of option name -> value for the chosen section.

    Raises:
        IOError: if the config file does not exist.
        SyntaxError: if no sections exist, the requested section is
            missing, or the file cannot be parsed.
    """
    logger.info("Getting config from config file %s" % config_file)
    if config_file is None:
        config_file = '/home/minecraft/minecraft/pyredstone.cfg'
    if not os.path.exists(config_file):
        raise IOError("Could not open config file")

    config = ConfigParser.ConfigParser()
    config.read(config_file)

    if server is None:
        try:
            sections = config.sections()
            logger.debug(sections)
            if len(sections) < 1:
                raise SyntaxError("No sections found in config file")
            elif len(sections) > 1:
                logger.warning("More than one server found, no server specified. Using first server.")
            server = sections[0]
        except ConfigParser.Error:
            logger.exception("Could not get sections")
    if not config.has_section(server):
        raise SyntaxError("Server section '%s' of config file does not exist. Cannot continue." % (server, ))

    # Dump every option of the chosen section into a plain dict.
    data = {}
    try:
        for option, value in config.items(server):
            data[option] = value
        logger.info("Config data: %s" % str(data))
    except ConfigParser.Error:
        raise SyntaxError("Config file is improperly formated")
    return data
def init_logger(): """Load logging config file and setup logging. Only needs to be called once per process.""" global log if log: log.warning("logging already initialized") config = rhsm.config.initConfig() default_log_level = config.get('logging', 'default_log_level') for root_namespace in ROOT_NAMESPACES: logger = logging.getLogger(root_namespace) logger.addHandler(_get_default_rhsm_log_handler()) logger.addHandler(_get_default_subman_debug_handler()) logger.setLevel(getattr(logging, default_log_level.strip())) for logger_name, logging_level in config.items('logging'): logger_name = logger_name.strip() if logger_name.split('.')[0] not in ROOT_NAMESPACES: # Don't allow our logging configuration to mess with loggers # outside the namespaces we claim as ours # Also ignore other more general configuration options like # default_log_level continue logger = logging.getLogger(logger_name) logger.setLevel(getattr(logging, logging_level.strip())) if not log: log = logging.getLogger(__name__)
def parse_cfg(cfg_file):
    """Parse a test configuration file.

    The sections 'Test', 'Dataset' and 'Flann' feed fixed option dicts;
    every other section is treated as a descriptor definition.

    Args:
        cfg_file: path to the ini-style configuration file.

    Returns:
        Tuple (test_parameters, data_args, descriptors, flann_args) where
        descriptors is a list of [section_name, options_dict] pairs.
    """
    test_parameters = dict()
    data_args = dict()
    descriptors = []
    flann_args = dict()
    with open(cfg_file) as cfg:
        config = ConfigParser.RawConfigParser()
        config.readfp(cfg)
        for section in config.sections():
            items = config.items(section)
            if section == 'Test':
                for name, value in items:
                    if name == 'batch_size':
                        # The literal string 'None' means "unbatched".
                        test_parameters[name] = None if value == 'None' else int(value)
                    else:
                        test_parameters[name] = value
            elif section == 'Dataset':
                for name, value in items:
                    if name == 'classes':
                        data_args[name] = value.split(',')
                    elif name in ('trainsize', 'testsize', 'no_classes'):
                        data_args[name] = int(value)
                    else:
                        data_args[name] = value
            elif section == 'Flann':
                for name, value in items:
                    flann_args[name] = int(value) if name in ('k', 'checks', 'trees') else value
            else:
                # Any other section describes one descriptor.
                options = dict()
                for name, value in items:
                    if name == 'alpha':
                        options[name] = float(value)
                    elif name == 'verbose':
                        options[name] = value == 'True'
                    else:
                        options[name] = value
                descriptors.append([section, options])
    return test_parameters, data_args, descriptors, flann_args
def load_settings(path):
    """Load the 'app:main' settings of an ini file and configure logging.

    Args:
        path: path to a paste-style ini file.

    Returns:
        dict of settings, post-processed by load_doc_sets.
    """
    config_dir = os.path.abspath(os.path.dirname(path))
    # 'here' lets the ini file reference paths relative to itself.
    parser = compat.ConfigParser(defaults={'here': config_dir})
    parser.read(path)
    logging.config.fileConfig(path)
    return load_doc_sets(dict(parser.items('app:main')))
def __init__(self, config, section):
    """Expose every option of *section* as an attribute on this object.

    Each (key, value) pair is passed through _parse_options before being
    set; the complete mapping is also kept as ``self.as_dictionary``.
    """
    parsed = {}
    for raw_key, raw_value in config.items(section):
        key, value = _parse_options(raw_key, raw_value)
        parsed[key] = value
    for key, value in parsed.items():
        setattr(self, key, value)
    self.as_dictionary = parsed
def create_models(self, config):
    """Build model classes from 'table:<name>' config sections.

    For each [table:<name>] section the table schema is read, together
    with an optional matching [index:<name>] section, and handed to
    init_model.  Populates self.models / self.sorted_models and relies on
    init_model to set self.main_model.

    Args:
        config: ConfigParser-style object with sections()/items().

    Raises:
        ValueError: in resume mode when no main model was defined.
    """
    self.models = dict()
    self.main_model = None
    for section in config.sections():
        if not section.startswith('table:'):
            continue
        table_schema = config.items(section)
        # FIX: split(':', 2) permits up to THREE parts, so a section such
        # as 'table:a:b' crashed the two-tuple unpack; maxsplit=1 always
        # yields exactly two parts (the name keeps any extra colons).
        _, name = section.split(':', 1)
        index_section = 'index:' + name
        index_schema = []
        if config.has_section(index_section):
            index_schema = config.items(index_section)
        self.init_model(name, table_schema, index_schema)
    self.sorted_models = sorted(
        self.models.values(),
        key=lambda m: len(m._meta.table_options.get('__iter__', [])))
    if self.client_config['resume'] and not self.main_model:
        raise ValueError('Main model is required for resume mode')
def updateConfigMap(configInFile, config):
    """Overlay values from *configInFile* onto *config* in place.

    Only keys already present in both mappings are considered.  Scalar-ish
    values (ints, tuples, lists, strings) are replaced wholesale; any
    other value is assumed to be dict-like and merged via .update().
    """
    replaceable = [types.IntType, types.TupleType, types.ListType] + list(types.StringTypes)
    for key, value in config.items():
        if not configInFile.has_key(key):
            continue
        if type(value) in replaceable:
            config[key] = configInFile[key]
        else:
            value.update(configInFile[key])
def __init__(self, config, args):
    """Initialise API client and database connection from config and args.

    Args:
        config: ConfigParser-style object with 'client' and 'server'
            sections (plus table sections consumed by create_models).
        args: parsed command-line arguments; may override config values.
    """
    self.client_config.update(config.items('client'))
    self.server_config.update(config.items('server'))
    # update config from args
    self.update_config(args)

    # create client -- the API key is popped first so it never reaches
    # the log line below
    api_key = self.client_config.pop('key')
    logger.info("Create client %s", self.client_config)
    self.client = MyApiClient(api_key, self.client_config)

    # create database connection; the password is popped temporarily so
    # it is not written to the log, then restored.
    # FIX: pop with a default -- a config without a 'passwd' option used
    # to raise KeyError here (the sibling __init__ already tolerates it).
    passwd = self.server_config.pop('passwd', None)
    logger.info("Connect server %s", self.server_config)
    if passwd:
        self.server_config['passwd'] = passwd
    db_class = peewee.__dict__.get(self.server_config.pop('class'))
    self.db_name = self.server_config.pop('db')
    self.db_table = self.server_config.pop('db_table')
    self.database = db_class(self.db_name, **self.server_config)

    # create model class
    self.create_models(config)
def main(): """Main()""" args = parse_args() config = ConfigParser() config.read(args.config_file) setup_logging(config, args.config_item, args.process_num) logger = logging.getLogger("trollduction") # Create a new Trollduction instance, initialised with the config cfg = dict(config.items(args.config_item)) cfg["config_item"] = args.config_item cfg["config_file"] = args.config_file cfg["process_num"] = args.process_num if "timezone" in cfg: print "Setting timezone to %s" % cfg["timezone"] os.environ["TZ"] = cfg["timezone"] time.tzset() else: print "No timezone given, defaulting to UTC timezone." os.environ["TZ"] = "UTC" time.tzset() if "template" in cfg["product_config_file"]: print "Template file given as trollstalker product config, " \ "aborting!" sys.exit() trd = Trollduction(cfg) def shutdown(*args): logger.info("l2processor shutting down") del args trd.shutdown() logging.shutdown() signal.signal(signal.SIGTERM, shutdown) # Run Trollduction try: trd.run_single() except KeyboardInterrupt: logging.shutdown() except: logger.exception("Trollduction died!") trd.shutdown() os._exit(os.EX_SOFTWARE) print "Thank you for using pytroll/l2processor!" \ "See you soon on pytroll.org."
def get_config(section):
    """Return the options of *section* from ./conf/config.ini as a dict.

    Best-effort: on any read/parse error the problem is logged and an
    empty dict is returned instead of raising.
    """
    logging_conf()
    config = ConfigParser.ConfigParser()
    base_dir = os.path.dirname(os.path.realpath(__file__))
    service_conf = os.path.join(base_dir, './conf/config.ini')
    try:
        config.read(service_conf)
        return dict(config.items(section))
    except Exception as e:
        logging.error('load config file error, %s', e)
        return {}
def _parse_routes(config): for route, groups in config.items('routes'): assert route.startswith('/'), """ Route must start with a '/' """ groups = groups.strip().decode('utf-8') if groups == '*': yield route, groups else: yield route, [g.strip() for g in groups.split(',')]
def main():
    """Fetch every URL listed in the [urls] config section.

    Reads config.ini (optionally overridden by config_custom.ini), sets
    up mail, and processes each URL into a template file in downloads/.
    """
    import ConfigParser
    config = ConfigParser.SafeConfigParser()
    config.read(["config.ini", "config_custom.ini"])
    configure_mail(config)

    template_dir = "downloads"
    if not os.path.exists(template_dir):
        os.mkdir(template_dir)

    for key, value in config.items("urls"):
        template = os.path.join(template_dir, "%s-template.html" % key)
        process(value, template)
def run_app(config, descending=False):
    """Run the complaints-to-MySQL worker until it finishes.

    Args:
        config: ConfigParser-style object with 'general', 'client' and
            'mysql' sections.
        descending: when True the client is switched to descending mode.

    Returns:
        0 on a clean exit; otherwise the process terminates via sys.exit
        (2 for interrupt, 1 for an unexpected error).
    """
    setup_watchdog(config.get('general', 'watchdog'))
    client_opts = config.items('client')
    mysql_opts = config.items('mysql')
    if descending:
        logger.info("Start in descending mode")
        client_opts.append(('descending', 1))
    worker = ComplaintsToMySQL(client_opts, mysql_opts)
    worker.watchdog = Watchdog
    try:
        worker.run()
    except (KeyboardInterrupt, SystemExit):
        sys.exit(2)
    except Exception as exc:
        logger.exception("%s %s", type(exc).__name__, str(exc))
        # Push the watchdog counter to the brink so supervision notices.
        Watchdog.counter = Watchdog.timeout - 1
        sys.exit(1)
    logger.info("Leave worker")
    return 0
def _load_config_file(self):
    """Load the configuration file into memory, returning the content.

    Dispatches to the JSON loader for '.json'-suffixed paths and the YAML
    loader otherwise, then normalises top-level keys to Title-Case before
    flattening.

    Returns:
        flatdict.FlatDict wrapping the normalised configuration.
    """
    LOGGER.info('Loading configuration from %s', self._file_path)
    if self._file_path.endswith('json'):
        config = self._load_json_config()
    else:
        config = self._load_yaml_config()
    # Snapshot the items first because the dict is mutated in the loop.
    # FIX: was an identity list-comprehension; list() is the idiomatic copy.
    for key, value in list(config.items()):
        if key.title() != key:
            # NOTE(review): if both 'foo' and 'Foo' exist, 'foo' silently
            # overwrites 'Foo' here -- confirm that is acceptable.
            config[key.title()] = value
            del config[key]
    return flatdict.FlatDict(config)
def init_config(overrides=None):
    """Resolve configuration overrides into a plain dict.

    Precedence: the explicit argument, then the BOB_CONF environment
    variable.  '-' or a falsy value means "no overrides".  A dict is
    returned unchanged; a string is treated as the path of an ini file
    whose 'bobb:main' section is loaded.

    Returns:
        dict of configuration values (empty when no overrides are given).

    Raises:
        Exception: if the given config path does not exist.
    """
    if overrides == '-':
        overrides = None
    overrides = overrides or os.environ.get('BOB_CONF', None)
    if isinstance(overrides, dict):
        return overrides
    if not overrides:
        return {}
    logger.info('loading config overrides from "%s"', overrides)
    if not os.path.exists(overrides):
        raise Exception('Config %s not found' % overrides)
    parser = ConfigParser.RawConfigParser()
    parser.read(overrides)
    return dict(parser.items('bobb:main'))