def __init__(self, cfg, transformer_manager):
    """Build a sink from its config dict.

    :param cfg: sink configuration; requires 'name' and a non-empty
        'publishers' list, 'transformers' is optional.
    :param transformer_manager: manager used to resolve transformer plugins.
    :raises PipelineException: if 'name' is missing or no publisher is given.
    """
    self.cfg = cfg

    # Only the 'name' lookup can raise KeyError; keep the try minimal.
    try:
        self.name = cfg['name']
    except KeyError as err:
        raise PipelineException(
            "Required field %s not specified" % err.args[0], cfg)

    # It's legal to have no transformer specified
    self.transformer_cfg = cfg.get('transformers') or []

    if not cfg.get('publishers'):
        raise PipelineException("No publisher specified", cfg)

    self.publishers = []
    for p in cfg['publishers']:
        if '://' not in p:
            # Support old format without URL
            p = p + "://"
        try:
            self.publishers.append(publisher.get_publisher(p,
                                                           self.NAMESPACE))
        except Exception:
            # Best-effort: a broken publisher must not kill the pipeline.
            LOG.exception(_("Unable to load publisher %s"), p)

    # Direct boolean expression instead of `True if ... else False`.
    self.multi_publish = len(self.publishers) > 1
    self.transformers = self._setup_transformers(cfg, transformer_manager)
def __init__(self, cfg, transformer_manager):
    """Build a sink from its config dict.

    :param cfg: sink configuration; requires 'name' and a non-empty
        'publishers' list, 'transformers' is optional.
    :param transformer_manager: manager used to resolve transformer plugins.
    :raises PipelineException: if 'name' is missing or no publisher is given.
    """
    self.cfg = cfg

    # Narrowed try: `cfg.get('transformers')` can never raise KeyError,
    # so only the mandatory 'name' lookup belongs in the try block.
    try:
        self.name = cfg['name']
    except KeyError as err:
        raise PipelineException(
            "Required field %s not specified" % err.args[0], cfg)

    # It's legal to have no transformer specified
    self.transformer_cfg = cfg.get('transformers') or []

    if not cfg.get('publishers'):
        raise PipelineException("No publisher specified", cfg)

    self.publishers = []
    for p in cfg['publishers']:
        if '://' not in p:
            # Support old format without URL
            p = p + "://"
        try:
            self.publishers.append(publisher.get_publisher(p,
                                                           self.NAMESPACE))
        except Exception:
            # Best-effort: a broken publisher must not kill the pipeline.
            LOG.exception(_("Unable to load publisher %s"), p)

    self.transformers = self._setup_transformers(cfg, transformer_manager)
def __init__(self, cfg):
    """Validate and capture a polling source definition.

    Wraps base-class SourceExceptions into PollingExceptions so the
    caller sees a single exception type, then validates the required
    'meters' list, the positive integer 'interval', and the optional
    'resources'/'discovery' lists.
    """
    try:
        super(PollingSource, self).__init__(cfg)
    except agent.SourceException as err:
        # Re-raise with the polling-specific exception type.
        raise PollingException(err.msg, cfg)
    try:
        self.meters = cfg['meters']
    except KeyError:
        raise PollingException("Missing meters value", cfg)
    # NOTE: clause order matters — int() raises ValueError for a bad
    # string, while a missing key raises KeyError; each gets its own
    # distinct error message.
    try:
        self.interval = int(cfg['interval'])
    except ValueError:
        raise PollingException("Invalid interval value", cfg)
    except KeyError:
        raise PollingException("Missing interval value", cfg)
    if self.interval <= 0:
        raise PollingException("Interval value should > 0", cfg)
    # `or []` normalizes both a missing key and an explicit null.
    self.resources = cfg.get('resources') or []
    if not isinstance(self.resources, list):
        raise PollingException("Resources should be a list", cfg)
    self.discovery = cfg.get('discovery') or []
    if not isinstance(self.discovery, list):
        raise PollingException("Discovery should be a list", cfg)
    try:
        self.check_source_filtering(self.meters, 'meters')
    except agent.SourceException as err:
        raise PollingException(err.msg, cfg)
def readConfig(self):
    """Load collector settings from /etc/broadviewcollector.conf.

    Populates publisher names, plugin search path, handler names and
    the network ip/port. Exits the process if the mandatory options
    cannot be read.
    """
    LOG.info("broadview_collector: readConfig")
    try:
        cfg = ConfigParser.ConfigParser()
        cfg.read("/etc/broadviewcollector.conf")
        x = cfg.get("plugins", "publishers")
        self._publisherNames = [y.strip() for y in x.split(',')]
        LOG.info("publishers {}".format(self._publisherNames))
        self._searchpath = []
        try:
            x = cfg.get("plugins", "searchpath")
            self._searchpath = [y.strip() for y in x.split(',')]
        except Exception:
            # Optional setting: fall back to the default search path
            # below. Was a bare `except:`, which would also swallow
            # SystemExit/KeyboardInterrupt.
            LOG.info("plugin searchpath missing or malformed")
        # `not self._searchpath` already covers the empty-list case;
        # the redundant len()==0 test is gone.
        if not self._searchpath:
            self._searchpath = ["broadview_collector.plugins"]
        else:
            self._searchpath.append("broadview_collector.plugins")
        LOG.info("plugin searchpath {}".format(self._searchpath))
        x = cfg.get("plugins", "handlers")
        self._handlerNames = [y.strip() for y in x.split(',')]
        LOG.info("plugin handlers {}".format(self._handlerNames))
        self._ip_address = cfg.get("network", "ip_address")
        self._port = int(cfg.get("network", "port"))
    except Exception:
        # Mandatory configuration could not be read; nothing useful
        # can run without it.
        LOG.error("Unable to open or read /etc/broadviewcollector.conf")
        exit()
def _load_config():
    """Populate PATRON_* version metadata from the optional 'release' file.

    Idempotent: the module-level `loaded` flag makes repeat calls no-ops.
    Missing file is silently ignored; parse errors are logged.
    """
    # Don't load in global context, since we can't assume
    # these modules are accessible when distutils uses
    # this module
    import ConfigParser

    from oslo_config import cfg

    import logging

    global loaded, PATRON_VENDOR, PATRON_PRODUCT, PATRON_PACKAGE
    if loaded:
        return
    loaded = True

    cfgfile = cfg.CONF.find_file("release")
    if cfgfile is None:
        return

    try:
        # Named `parser` (not `cfg`) so the oslo_config module import
        # is not shadowed by the ConfigParser instance.
        parser = ConfigParser.RawConfigParser()
        parser.read(cfgfile)

        if parser.has_option("Patron", "vendor"):
            PATRON_VENDOR = parser.get("Patron", "vendor")

        if parser.has_option("Patron", "product"):
            PATRON_PRODUCT = parser.get("Patron", "product")

        if parser.has_option("Patron", "package"):
            PATRON_PACKAGE = parser.get("Patron", "package")
    except Exception as ex:
        LOG = logging.getLogger(__name__)
        LOG.error(_LE("Failed to load %(cfgfile)s: %(ex)s"),
                  {"cfgfile": cfgfile, "ex": ex})
def _load_config(self, cfg):
    """Merge the on-disk JSON config into `cfg` and sanitize etcd URLs.

    :param cfg: dict of options; keys already present win over the
        file's values. 'etcd_address' may be a comma-separated string
        or a list; each entry is normalized to http(s)://host/v3 form.
    :return: the same `cfg` dict, mutated in place.
    """
    # Try to load configuration file
    try:
        # `with` guarantees the handle is closed even if json.loads
        # raises (the original leaked the handle on a parse error).
        with open(cfg['config_path'] or '/etc/vitastor/vitastor.conf') as f:
            conf = json.loads(f.read())
        for k in conf:
            # Keep any value already supplied in cfg; fill gaps from file.
            cfg[k] = cfg.get(k, conf[k])
    except Exception:
        # Best-effort: a missing/unreadable config file is not fatal.
        # (Was a bare `except:`, which also swallowed SystemExit.)
        pass
    if isinstance(cfg['etcd_address'], str):
        cfg['etcd_address'] = cfg['etcd_address'].split(',')
    # Sanitize etcd URLs
    for i, etcd_url in enumerate(cfg['etcd_address']):
        ssl = False
        if etcd_url.lower().startswith('http://'):
            etcd_url = etcd_url[7:]
        elif etcd_url.lower().startswith('https://'):
            etcd_url = etcd_url[8:]
            ssl = True
        if etcd_url.find('/') < 0:
            # No path component: default to the etcd v3 API prefix.
            etcd_url += '/v3'
        if ssl:
            etcd_url = 'https://' + etcd_url
        else:
            etcd_url = 'http://' + etcd_url
        cfg['etcd_address'][i] = etcd_url
    return cfg
def __init__(self, conf, cfg, publisher_manager):
    """Build a sink from its config dict.

    :param conf: service configuration object.
    :param cfg: sink configuration; requires 'name' and a non-empty
        'publishers' list.
    :param publisher_manager: resolves publisher URLs to instances.
    :raises PipelineException: if 'name' is missing or no publisher given.
    """
    self.conf = conf
    self.cfg = cfg

    try:
        self.name = cfg['name']
    except KeyError as err:
        raise PipelineException(
            "Required field %s not specified" % err.args[0], cfg)

    if not cfg.get('publishers'):
        raise PipelineException("No publisher specified", cfg)

    self.publishers = []
    for p in cfg['publishers']:
        if '://' not in p:
            # Support old format without URL
            p = p + "://"
        try:
            self.publishers.append(publisher_manager.get(p))
        except Exception:
            # Best-effort: a broken publisher must not kill the sink.
            LOG.error("Unable to load publisher %s", p, exc_info=True)

    # Direct boolean expression instead of `True if ... else False`.
    self.multi_publish = len(self.publishers) > 1
def __init__(self, cfg):
    """Base source: record the raw config, its name and its sink list."""
    self.cfg = cfg
    try:
        self.name = cfg["name"]
    except KeyError as err:
        raise PipelineException(
            "Required field %s not specified" % err.args[0], cfg)
    # Sinks are optional at this level; validated later against the
    # sink definitions.
    self.sinks = cfg.get("sinks")
def __init__(self, cfg):
    """Base source: record the raw config, its name and its sink list."""
    self.cfg = cfg
    try:
        self.name = cfg['name']
        self.sinks = cfg.get('sinks')
    except KeyError as missing:
        detail = "Required field %s not specified" % missing.args[0]
        raise PipelineException(detail, cfg)
def __init__(self, cfg):
    """Sample source: meters, polling interval and resource lists."""
    super(SampleSource, self).__init__(cfg)

    def _as_list(value, label):
        # Normalize missing/None to [] and insist on a list type.
        value = value or []
        if not isinstance(value, list):
            raise PipelineException("%s should be a list" % label, cfg)
        return value

    # Support 'counters' for backward compatibility
    self.meters = cfg.get('meters', cfg.get('counters'))
    try:
        self.interval = int(cfg.get('interval', 600))
    except ValueError:
        raise PipelineException("Invalid interval value", cfg)
    if self.interval <= 0:
        raise PipelineException("Interval value should > 0", cfg)
    self.resources = _as_list(cfg.get('resources'), 'Resources')
    self.discovery = _as_list(cfg.get('discovery'), 'Discovery')
    self.check_source_filtering(self.meters, 'meters')
def __init__(self, cfg):
    """Sample source: required meters, polling interval, resource lists."""
    super(SampleSource, self).__init__(cfg)
    try:
        self.meters = cfg['meters']
    except KeyError:
        raise PipelineException("Missing meters value", cfg)
    raw_interval = cfg.get('interval', 600)
    try:
        self.interval = int(raw_interval)
    except ValueError:
        raise PipelineException("Invalid interval value", cfg)
    if not self.interval > 0:
        raise PipelineException("Interval value should > 0", cfg)
    resources = cfg.get('resources') or []
    if not isinstance(resources, list):
        raise PipelineException("Resources should be a list", cfg)
    self.resources = resources
    discovery = cfg.get('discovery') or []
    if not isinstance(discovery, list):
        raise PipelineException("Discovery should be a list", cfg)
    self.discovery = discovery
    self.check_source_filtering(self.meters, 'meters')
def _load_config():
    """Populate NOVA_* version metadata from the optional 'release' file.

    Idempotent: the module-level `loaded` flag makes repeat calls no-ops.
    Missing file is silently ignored; parse errors are logged.
    """
    # Don't load in global context, since we can't assume
    # these modules are accessible when distutils uses
    # this module
    import configparser

    from oslo_config import cfg

    from oslo_log import log as logging

    global loaded, NOVA_VENDOR, NOVA_PRODUCT, NOVA_PACKAGE, NOVA_SUPPORT
    if loaded:
        return
    loaded = True

    cfgfile = cfg.CONF.find_file("release")
    if cfgfile is None:
        return

    try:
        # Named `parser` (not `cfg`) so the oslo_config module import
        # is not shadowed by the RawConfigParser instance.
        parser = configparser.RawConfigParser()
        parser.read(cfgfile)

        if parser.has_option("Nova", "vendor"):
            NOVA_VENDOR = parser.get("Nova", "vendor")

        if parser.has_option("Nova", "product"):
            NOVA_PRODUCT = parser.get("Nova", "product")

        if parser.has_option("Nova", "package"):
            NOVA_PACKAGE = parser.get("Nova", "package")

        if parser.has_option("Nova", "support"):
            NOVA_SUPPORT = parser.get("Nova", "support")
    except Exception as ex:
        LOG = logging.getLogger(__name__)
        LOG.error("Failed to load %(cfgfile)s: %(ex)s",
                  {'cfgfile': cfgfile, 'ex': ex})
def _load_config():
    """Populate MASAKARI_* version metadata from the optional 'release' file.

    Idempotent: the module-level `loaded` flag makes repeat calls no-ops.
    Missing file is silently ignored; parse errors are logged.
    """
    # Don't load in global context, since we can't assume
    # these modules are accessible when distutils uses
    # this module
    from six.moves import configparser

    from oslo_config import cfg

    from oslo_log import log as logging

    global loaded, MASAKARI_VENDOR, MASAKARI_PRODUCT, MASAKARI_PACKAGE
    if loaded:
        return
    loaded = True

    cfgfile = cfg.CONF.find_file("release")
    if cfgfile is None:
        return

    try:
        # Named `parser` (not `cfg`) so the oslo_config module import
        # is not shadowed by the RawConfigParser instance.
        parser = configparser.RawConfigParser()
        parser.read(cfgfile)

        if parser.has_option("Masakari", "vendor"):
            MASAKARI_VENDOR = parser.get("Masakari", "vendor")

        if parser.has_option("Masakari", "product"):
            MASAKARI_PRODUCT = parser.get("Masakari", "product")

        if parser.has_option("Masakari", "package"):
            MASAKARI_PACKAGE = parser.get("Masakari", "package")
    except Exception as ex:
        LOG = logging.getLogger(__name__)
        LOG.error("Failed to load %(cfgfile)s: %(ex)s",
                  {'cfgfile': cfgfile, 'ex': ex})
def __init__(self, cfg):
    """Sample source requiring an explicit 'interval'.

    The nested try is deliberate: the inner handler maps a
    non-numeric interval to "Invalid interval value", while the
    OUTER KeyError handler catches a missing 'interval' key (from
    cfg['interval']) and reports it as a missing required field.
    """
    super(SampleSource, self).__init__(cfg)
    try:
        try:
            self.interval = int(cfg['interval'])
        except ValueError:
            raise PipelineException("Invalid interval value", cfg)
        # Support 'counters' for backward compatibility
        self.meters = cfg.get('meters', cfg.get('counters'))
        self.sinks = cfg.get('sinks')
    except KeyError as err:
        raise PipelineException(
            "Required field %s not specified" % err.args[0], cfg)
    if self.interval <= 0:
        raise PipelineException("Interval value should > 0", cfg)
    # `or []` normalizes both a missing key and an explicit null.
    self.resources = cfg.get('resources') or []
    if not isinstance(self.resources, list):
        raise PipelineException("Resources should be a list", cfg)
    self.discovery = cfg.get('discovery') or []
    if not isinstance(self.discovery, list):
        raise PipelineException("Discovery should be a list", cfg)
    self.check_source_filtering(self.meters, 'meters')
def __init__(self, conf, cfg_file):
    """Setup the polling according to config.

    The configuration is supported as follows:

    {"sources": [{"name": source_1,
                  "interval": interval_time,
                  "meters" : ["meter_1", "meter_2"],
                  "resources": ["resource_uri1", "resource_uri2"],
                 },
                 {"name": source_2,
                  "interval": interval_time,
                  "meters" : ["meter_3"],
                 },
                ]}
    }

    The interval determines the cadence of sample polling

    Valid meter format is '*', '!meter_name', or 'meter_name'.
    '*' is wildcard symbol means any meters; '!meter_name' means
    "meter_name" will be excluded; 'meter_name' means 'meter_name'
    will be included.

    Valid meters definition is all "included meter names", all
    "excluded meter names", wildcard and "excluded meter names", or
    only wildcard.

    The resources is list of URI indicating the resources from where
    the meters should be polled. It's optional and it's up to the
    specific pollster to decide how to use it.
    """
    super(PollingManager, self).__init__(conf)
    # Prefer the dedicated polling config; fall back to the (deprecated)
    # pipeline config when the polling file is absent or unreadable.
    try:
        cfg = self.load_config(cfg_file)
    except (TypeError, IOError):
        LOG.warning(
            'Using the pipeline configuration for polling '
            'is deprecated. %s should '
            'be used instead.', cfg_file)
        cfg = self.load_config(conf.pipeline_cfg_file)
    self.sources = []
    if 'sources' not in cfg:
        raise PollingException("sources required", cfg)
    for s in cfg.get('sources'):
        self.sources.append(PollingSource(s))
def __init__(self, conf, cfg_file):
    """Setup the polling according to config.

    The configuration is supported as follows:

    {"sources": [{"name": source_1,
                  "interval": interval_time,
                  "meters" : ["meter_1", "meter_2"],
                  "resources": ["resource_uri1", "resource_uri2"],
                 },
                 {"name": source_2,
                  "interval": interval_time,
                  "meters" : ["meter_3"],
                 },
                ]}
    }

    The interval determines the cadence of sample polling

    Valid meter format is '*', '!meter_name', or 'meter_name'.
    '*' is wildcard symbol means any meters; '!meter_name' means
    "meter_name" will be excluded; 'meter_name' means 'meter_name'
    will be included.

    Valid meters definition is all "included meter names", all
    "excluded meter names", wildcard and "excluded meter names", or
    only wildcard.

    The resources is list of URI indicating the resources from where
    the meters should be polled. It's optional and it's up to the
    specific pollster to decide how to use it.
    """
    super(PollingManager, self).__init__(conf)
    # Prefer the dedicated polling config; fall back to the pipeline
    # config when the polling file is absent or unreadable.
    try:
        cfg = self.load_config(cfg_file)
    except (TypeError, IOError):
        LOG.warning(_LW('Unable to locate polling configuration, falling '
                        'back to pipeline configuration.'))
        cfg = self.load_config(conf.pipeline_cfg_file)
    self.sources = []
    if 'sources' not in cfg:
        raise PollingException("sources required", cfg)
    for s in cfg.get('sources'):
        self.sources.append(PollingSource(s))
def _check_rsync(self):
    """Verify the rsync daemon, its config file and the ironic section.

    Sets self.rsync_dir from the ironic section's 'path' option;
    raises a specific exception for each missing prerequisite.
    """
    config_path = "/etc/rsyncd.conf"
    section = 'ironic_rsync'
    # Guard-clause style: fail fast on each prerequisite in turn.
    if not utils._pid_of('rsync'):
        raise exception.RsyncProcessNotFound()
    if not os.path.exists(config_path):
        raise exception.RsyncConfigNotFound(path=config_path)
    cfg = utils.read_config(config_path)
    if section not in cfg.sections():
        raise exception.RsyncIronicSectionNotFound(section=section)
    self.rsync_dir = cfg.get(section, 'path')
def __init__(self, cfg):
    """Setup the polling according to config.

    The configuration is the sources half of the Pipeline Config.
    """
    self.sources = []
    # De Morgan form of `not (a and b)`: either half missing is fatal.
    if "sources" not in cfg or "sinks" not in cfg:
        raise PipelineException("Both sources & sinks are required", cfg)
    LOG.info(_LI("detected decoupled pipeline config format"))
    seen = set()
    for src_cfg in cfg.get("sources", []):
        src_name = src_cfg.get("name")
        if src_name in seen:
            raise PipelineException(
                "Duplicated source names: %s" % src_name, self)
        seen.add(src_name)
        self.sources.append(SampleSource(src_cfg))
    seen.clear()
def __init__(self, cfg):
    """Setup the polling according to config.

    The configuration is the sources half of the Pipeline Config.
    """
    self.sources = []
    if not ('sources' in cfg and 'sinks' in cfg):
        raise PipelineException("Both sources & sinks are required", cfg)
    LOG.info(_('detected decoupled pipeline config format'))
    names_so_far = set()
    for source_cfg in cfg.get('sources', []):
        source_name = source_cfg.get('name')
        if source_name in names_so_far:
            raise PipelineException(
                "Duplicated source names: %s" % source_name, self)
        else:
            names_so_far.add(source_name)
            self.sources.append(SampleSource(source_cfg))
    names_so_far.clear()
def __init__(self, conf, cfg_info):
    """Setup the polling according to config.

    The configuration is the sources half of the Pipeline Config.
    """
    super(PollingManager, self).__init__(conf)
    cfg = self.load_config(cfg_info)
    self.sources = []
    # Both halves of the decoupled config must be present.
    if not ('sources' in cfg and 'sinks' in cfg):
        raise PipelineException("Both sources & sinks are required", cfg)
    LOG.info(_LI('detected decoupled pipeline config format'))
    seen = set()
    for src_cfg in cfg.get('sources'):
        src_name = src_cfg.get('name')
        if src_name in seen:
            raise PipelineException(
                "Duplicated source names: %s" % src_name, self)
        seen.add(src_name)
        self.sources.append(SampleSource(src_cfg))
    seen.clear()
def __init__(self, conf, cfg_file):
    """Setup the pipelines according to config.

    The configuration is supported as follows:

    Decoupled: the source and sink configuration are separately
    specified before being linked together. This allows source-
    specific configuration, such as meter handling, to be kept
    focused only on the fine-grained source while avoiding the
    necessity for wide duplication of sink-related config.

    The configuration is provided in the form of separate lists
    of dictionaries defining sources and sinks, for example:

    {"sources": [{"name": source_1,
                  "meters" : ["meter_1", "meter_2"],
                  "sinks" : ["sink_1", "sink_2"]
                 },
                 {"name": source_2,
                  "meters" : ["meter_3"],
                  "sinks" : ["sink_2"]
                 },
                ],
     "sinks": [{"name": sink_1,
                "publishers": ["publisher_1", "publisher_2"]
               },
               {"name": sink_2,
                "publishers": ["publisher_3"]
               },
              ]
    }

    Valid meter format is '*', '!meter_name', or 'meter_name'.
    '*' is wildcard symbol means any meters; '!meter_name' means
    "meter_name" will be excluded; 'meter_name' means 'meter_name'
    will be included.

    Valid meters definition is all "included meter names", all
    "excluded meter names", wildcard and "excluded meter names", or
    only wildcard.

    Publisher's name is plugin name in setup.cfg
    """
    super(PipelineManager, self).__init__(conf)
    cfg = self.load_config(cfg_file)
    self.pipelines = []
    if not ('sources' in cfg and 'sinks' in cfg):
        raise PipelineException("Both sources & sinks are required", cfg)
    publisher_manager = PublisherManager(self.conf, self.pm_type)
    # Phase 1: build sources, rejecting duplicate names.
    unique_names = set()
    sources = []
    for s in cfg.get('sources'):
        name = s.get('name')
        if name in unique_names:
            raise PipelineException("Duplicated source names: %s" % name,
                                    self)
        else:
            unique_names.add(name)
            sources.append(self.pm_source(s))
    unique_names.clear()
    # Phase 2: build sinks by name, rejecting duplicate names.
    sinks = {}
    for s in cfg.get('sinks'):
        name = s.get('name')
        if name in unique_names:
            raise PipelineException("Duplicated sink names: %s" % name,
                                    self)
        else:
            unique_names.add(name)
            sinks[s['name']] = self.pm_sink(self.conf, s, publisher_manager)
    unique_names.clear()
    # Phase 3: link each source to its sinks, producing one pipeline
    # per (source, sink) pair; pipeline names must also be unique.
    for source in sources:
        source.check_sinks(sinks)
        for target in source.sinks:
            pipe = self.pm_pipeline(self.conf, source, sinks[target])
            if pipe.name in unique_names:
                raise PipelineException(
                    "Duplicate pipeline name: %s. Ensure pipeline"
                    " names are unique. (name is the source and sink"
                    " names combined)" % pipe.name, cfg)
            else:
                unique_names.add(pipe.name)
                self.pipelines.append(pipe)
    unique_names.clear()
def __init__(self, conf, cfg_file, transformer_manager, p_type=SAMPLE_TYPE):
    """Setup the pipelines according to config.

    The configuration is supported as follows:

    Decoupled: the source and sink configuration are separately
    specified before being linked together. This allows source-
    specific configuration, such as meter handling, to be kept
    focused only on the fine-grained source while avoiding the
    necessity for wide duplication of sink-related config.

    The configuration is provided in the form of separate lists
    of dictionaries defining sources and sinks, for example:

    {"sources": [{"name": source_1,
                  "meters" : ["meter_1", "meter_2"],
                  "sinks" : ["sink_1", "sink_2"]
                 },
                 {"name": source_2,
                  "meters" : ["meter_3"],
                  "sinks" : ["sink_2"]
                 },
                ],
     "sinks": [{"name": sink_1,
                "transformers": [
                       {"name": "Transformer_1",
                        "parameters": {"p1": "value"}},
                       {"name": "Transformer_2",
                        "parameters": {"p1": "value"}},
                ],
                "publishers": ["publisher_1", "publisher_2"]
               },
               {"name": sink_2,
                "publishers": ["publisher_3"]
               },
              ]
    }

    Valid meter format is '*', '!meter_name', or 'meter_name'.
    '*' is wildcard symbol means any meters; '!meter_name' means
    "meter_name" will be excluded; 'meter_name' means 'meter_name'
    will be included.

    Valid meters definition is all "included meter names", all
    "excluded meter names", wildcard and "excluded meter names", or
    only wildcard.

    Transformer's name is plugin name in setup.cfg.

    Publisher's name is plugin name in setup.cfg
    """
    super(PipelineManager, self).__init__(conf)
    cfg = self.load_config(cfg_file)
    self.pipelines = []
    if not ('sources' in cfg and 'sinks' in cfg):
        raise PipelineException("Both sources & sinks are required", cfg)
    LOG.info(_LI('detected decoupled pipeline config format'))
    publisher_manager = PublisherManager(self.conf, p_type['name'])
    # Phase 1: build sources, rejecting duplicate names.
    unique_names = set()
    sources = []
    for s in cfg.get('sources'):
        name = s.get('name')
        if name in unique_names:
            raise PipelineException("Duplicated source names: %s" % name,
                                    self)
        else:
            unique_names.add(name)
            sources.append(p_type['source'](s))
    unique_names.clear()
    # Phase 2: build sinks by name, rejecting duplicate names.
    sinks = {}
    for s in cfg.get('sinks'):
        name = s.get('name')
        if name in unique_names:
            raise PipelineException("Duplicated sink names: %s" % name,
                                    self)
        else:
            unique_names.add(name)
            sinks[s['name']] = p_type['sink'](self.conf, s,
                                              transformer_manager,
                                              publisher_manager)
    unique_names.clear()
    # Phase 3: link each source to its sinks, producing one pipeline
    # per (source, sink) pair; pipeline names must also be unique.
    for source in sources:
        source.check_sinks(sinks)
        for target in source.sinks:
            pipe = p_type['pipeline'](self.conf, source, sinks[target])
            if pipe.name in unique_names:
                raise PipelineException(
                    "Duplicate pipeline name: %s. Ensure pipeline"
                    " names are unique. (name is the source and sink"
                    " names combined)" % pipe.name, cfg)
            else:
                unique_names.add(pipe.name)
                self.pipelines.append(pipe)
    unique_names.clear()
def __init__(self, cfg):
    """Event source: capture the event filters and validate them."""
    super(EventSource, self).__init__(cfg)
    event_filters = cfg.get('events')
    self.events = event_filters
    self.check_source_filtering(event_filters, 'events')
def __init__(self, cfg, transformer_manager, p_type=SAMPLE_TYPE):
    """Setup the pipelines according to config.

    The configuration is supported as follows:

    Decoupled: the source and sink configuration are separately
    specified before being linked together. This allows source-
    specific configuration, such as resource discovery, to be kept
    focused only on the fine-grained source while avoiding the
    necessity for wide duplication of sink-related config.

    The configuration is provided in the form of separate lists
    of dictionaries defining sources and sinks, for example:

    {"sources": [{"name": source_1,
                  "interval": interval_time,
                  "meters" : ["meter_1", "meter_2"],
                  "resources": ["resource_uri1", "resource_uri2"],
                  "sinks" : ["sink_1", "sink_2"]
                 },
                 {"name": source_2,
                  "interval": interval_time,
                  "meters" : ["meter_3"],
                  "sinks" : ["sink_2"]
                 },
                ],
     "sinks": [{"name": sink_1,
                "transformers": [
                       {"name": "Transformer_1",
                        "parameters": {"p1": "value"}},
                       {"name": "Transformer_2",
                        "parameters": {"p1": "value"}},
                ],
                "publishers": ["publisher_1", "publisher_2"]
               },
               {"name": sink_2,
                "publishers": ["publisher_3"]
               },
              ]
    }

    The interval determines the cadence of sample injection into
    the pipeline where samples are produced under the direct control
    of an agent, i.e. via a polling cycle as opposed to incoming
    notifications.

    Valid meter format is '*', '!meter_name', or 'meter_name'.
    '*' is wildcard symbol means any meters; '!meter_name' means
    "meter_name" will be excluded; 'meter_name' means 'meter_name'
    will be included. The 'meter_name" is Sample name field. For
    meter names with variable like "instance:m1.tiny", it's
    "instance:*".

    Valid meters definition is all "included meter names", all
    "excluded meter names", wildcard and "excluded meter names", or
    only wildcard.

    The resources is list of URI indicating the resources from where
    the meters should be polled. It's optional and it's up to the
    specific pollster to decide how to use it.

    Transformer's name is plugin name in setup.cfg.

    Publisher's name is plugin name in setup.cfg
    """
    self.pipelines = []
    if not ('sources' in cfg and 'sinks' in cfg):
        raise PipelineException("Both sources & sinks are required", cfg)
    LOG.info(_('detected decoupled pipeline config format'))
    # Phase 1: build sources, rejecting duplicate names.
    unique_names = set()
    sources = []
    for s in cfg.get('sources', []):
        name = s.get('name')
        if name in unique_names:
            raise PipelineException("Duplicated source names: %s" % name,
                                    self)
        else:
            unique_names.add(name)
            sources.append(p_type['source'](s))
    unique_names.clear()
    # Phase 2: build sinks by name, rejecting duplicate names.
    sinks = {}
    for s in cfg.get('sinks', []):
        name = s.get('name')
        if name in unique_names:
            raise PipelineException("Duplicated sink names: %s" % name,
                                    self)
        else:
            unique_names.add(name)
            sinks[s['name']] = p_type['sink'](s, transformer_manager)
    unique_names.clear()
    # Phase 3: link each source to its sinks, producing one pipeline
    # per (source, sink) pair; pipeline names must also be unique.
    for source in sources:
        source.check_sinks(sinks)
        for target in source.sinks:
            pipe = p_type['pipeline'](source, sinks[target])
            if pipe.name in unique_names:
                raise PipelineException(
                    "Duplicate pipeline name: %s. Ensure pipeline"
                    " names are unique. (name is the source and sink"
                    " names combined)" % pipe.name, cfg)
            else:
                unique_names.add(pipe.name)
                self.pipelines.append(pipe)
    unique_names.clear()