Example #1
    def add_write_callback(self, callback, **kwargs):
        """
        Register a write callback with collectd. kwargs will be passed to
        collectd.register_write. The callback will be called by collectd
        with a collectd.Values object as the only argument.
        """
        collectd.register_write(callback, **kwargs)
def aws_init():
    """
    Collectd callback entry used to initialize plugin
    """
    config = ConfigHelper()
    flusher = Flusher(config_helper=config, dataset_resolver=get_dataset_resolver())
    collectd.register_write(aws_write, data=flusher)
    _LOGGER.info('Initialization finished successfully.')
def redis_queue_init():
    import threading

    d = {"host": REDIS_HOST, "port": REDIS_PORT, "queue": REDIS_QUEUE, "hotqueue": None, "lock": threading.Lock()}

    create_queue(d)

    collectd.register_write(redis_write, data=d)
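
All of these init functions hand a callback to collectd.register_write. For reference, a minimal sketch of what such a write callback receives (assuming it runs inside collectd's embedded Python interpreter; the function name is illustrative):

import collectd

def example_write(vl, data=None):
    # vl is a collectd.Values object; data is whatever object was passed
    # via collectd.register_write(..., data=...), shared across all calls.
    for value in vl.values:
        collectd.info('%s/%s-%s on %s: %s'
                      % (vl.plugin, vl.type, vl.type_instance, vl.host, value))

collectd.register_write(example_write)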
Example #4
def maasi_init():
    d = {
        'server_uri': server_uri,
        'interval': interval,
    }
    logging.info('initiating maasi daemon')
    collectd.register_write(maasi_collect, data=d)
    collectd.register_read(maasi_send)
def handle_init():
    global types
    types = write_common.parse_types_db(config['TypesDB'], plugin_name)

    # If we made it this far, go ahead and register the write plugin
    collectd.register_write(handle_write)
    collectd.info(
        '%s: Initialized and registered write handler.' % plugin_name)
Example #6
    def add_write_callback(self, callback, **kwargs):
        """Register a write callback with collectd.

        kwargs will be passed to collectd.register_write.
        The callback will be called by collectd with a
        collectd.Values object as the only argument.
        """

        collectd.register_write(callback, **kwargs)
Example #7
def aws_init():
    """
    Collectd callback entry used to initialize plugin
    """
    try:
        config = ConfigHelper()
        flusher = Flusher(config_helper=config, dataset_resolver=get_dataset_resolver())
        collectd.register_write(aws_write, data=flusher)
        _LOGGER.info('Initialization finished successfully.')
    except Exception as e:
        _LOGGER.error("Cannot initialize plugin. Cause: " + str(e) + "\n" + traceback.format_exc())
def influxdb_init():
    import threading

    try:
        influxdb_parse_types_file(config["types_db"])
    except Exception:
        msg = "%s: ERROR: Unable to open TypesDB file: %s." % (plugin_name, config["types_db"])
        raise Exception(msg)

    d = {"lock": threading.Lock(), "last_flush_time": get_time(), "series": {}}

    collectd.register_write(influxdb_write, data=d)
def handle_init():
    global types
    types = write_common.parse_types_db(config['TypesDB'], plugin_name)

    # global connection
    # connection = Connection("%s/%s@%s:%s" % (
    #     config['User'], config['Password'], config['Host'], config['Port']))

    # If we made it this far, go ahead and register the write plugin
    collectd.register_write(handle_write)
    collectd.info('%s: Initialized and registered write handler.' %
                  plugin_name)
Example #10
def initialize(data=None):
    """
    Create the statsd client object that will be used by the statsd_write
    function to send stats to statsd.

    This object will be shared between collectd threads, but because we are
    not using pipelines, the statsd object is thread safe.
    """
    data['stats'] = statsd.StatsClient(host=data['conf']['host'],
                                       port=int(data['conf']['port']),
                                       prefix=data['conf']['prefix'])
    collectd.register_write(statsd_write, data=data)
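
The data dict that initialize() receives must already contain a 'conf' entry when the init callback fires. A minimal sketch (entirely hypothetical, not part of the original plugin) of a companion config callback that could populate it before registration:

import collectd

SHARED = {'conf': {}}

def statsd_config(conf, data):
    # Hypothetical companion to initialize() above: copy the <Plugin>
    # config children into the shared dict before the init callback runs.
    for kv in conf.children:
        data['conf'][kv.key.lower()] = kv.values[0]

collectd.register_config(statsd_config, SHARED)
collectd.register_init(initialize, SHARED)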
def plugin_config(conf):
    """
    :param conf:
      https://collectd.org/documentation/manpages/collectd-python.5.shtml#config

    Parse the config object for config parameters:
      ProcessInfo: true or false, whether or not to collect process
        information. Default is true.
      Notifications: true or false, whether or not to emit notifications
      if Notifications is true:
        URL: where to POST the notifications to
        Token: what auth to send along
        Timeout: timeout for the POST
        NotifyLevel: what is the lowest level of notification to emit.
          Default is to only emit notifications generated by this plugin
    """

    DOGSTATSD_INSTANCE.config.configure_callback(conf)

    for kv in conf.children:
        if kv.key == 'Notifications':
            if kv.values[0]:
                log("sending collectd notifications")
                collectd.register_notification(receive_notifications)
        elif kv.key == 'ProcessInfo':
            global PROCESS_INFO
            PROCESS_INFO = kv.values[0]
        elif kv.key == 'DPM':
            global DPM
            DPM = kv.values[0]
            if DPM:
                collectd.register_write(receive_datapoint)
        elif kv.key == 'URL':
            global POST_URL
            POST_URL = kv.values[0]
        elif kv.key == 'Token':
            global API_TOKEN
            API_TOKEN = kv.values[0]
        elif kv.key == 'Timeout':
            global TIMEOUT
            TIMEOUT = int(kv.values[0])
        elif kv.key == 'Interval':
            global INTERVAL
            INTERVAL = int(kv.values[0])
        elif kv.key == 'NotifyLevel':
            global NOTIFY_LEVEL
            if string.lower(kv.values[0]) == "okay":
                NOTIFY_LEVEL = 4
            elif string.lower(kv.values[0]) == "warning":
                NOTIFY_LEVEL = 2
            elif string.lower(kv.values[0]) == "failure":
                NOTIFY_LEVEL = 1
def stomp_config(c):
    global config

    for child in c.children:
        if child.key == 'host':
            config['host'] = child.values[0]
        elif child.key == 'port':
            config['port'] = int(child.values[0])
        elif child.key == 'destination':
            config['destination'] = child.values[0]

    # register writer
    collectd.register_write(stomp_write)
Example #13
def initialize(data=None):
    """
    Create the statsd client object that will be used by the statsd_write
    function to send stats to statsd.

    This object will be shared between collectd threads, but because we are
    not using pipelines, the statsd object is thread safe.
    """
    data['stats'] = statsd.StatsClient(
        host=data['conf']['host'],
        port=int(data['conf']['port']),
        prefix=data['conf']['prefix']
    )
    collectd.register_write(statsd_write, data=data)
    def kairosdb_init(self):
        # Param validation has to happen here, exceptions thrown in kairosdb_config
        # do not prevent the plugin from loading.
        if not self.uri:
            raise Exception('KairosDBURI not defined')

        if not self.tags_map and not self.add_host_tag:
            raise Exception('Tags not defined')

        split = self.uri.strip('/').split(':')
        # collectd.info(repr(split))
        if len(split) != 3 and len(split) != 2:
            raise Exception(
                'KairosDBURI must be in the format of <protocol>://<host>[:<port>]'
            )

        # validate protocol and set default ports
        self.protocol = split[0]
        if self.protocol == 'http':
            port = 80
        elif self.protocol == 'https':
            port = 443
        elif self.protocol == 'telnet':
            port = 4242
        else:
            raise Exception(
                'Invalid protocol specified. Must be either "http", "https" or "telnet"'
            )

        host = split[1].strip('/')

        if len(split) == 3:
            port = int(split[2])

        collectd.info('Initializing kairosdb_writer client in %s mode.' %
                      self.protocol.upper())

        d = {
            'host': host,
            'port': port,
            'lowercase_metric_names': self.lowercase_metric_names,
            'conn': None,
            'lock': threading.Lock(),
            'values': {},
            'last_connect_time': 0
        }

        self.kairosdb_connect(d)

        collectd.register_write(self.kairosdb_write, data=d)
def kairosdb_init():
    import threading
    global uri, tags_map, add_host_tag, protocol

    # Param validation has to happen here; exceptions thrown in kairosdb_config
    # do not prevent the plugin from loading.
    if not uri:
        raise Exception('KairosDBURI not defined')

    if not tags_map and not add_host_tag:
        raise Exception('Tags not defined')

    split = uri.strip('/').split(':')
    # collectd.info(repr(split))
    if len(split) != 3 and len(split) != 2:
        raise Exception('KairosDBURI must be in the format of <protocol>://<host>[:<port>]')

    # validate protocol and set default ports
    protocol = split[0]
    if protocol == 'http':
        port = 80
    elif protocol == 'https':
        port = 443
    elif protocol == 'telnet':
        port = 4242
    else:
        raise Exception('Invalid protocol specified. Must be either "http", "https" or "telnet"')

    host = split[1].strip('/')

    if len(split) == 3:
        port = int(split[2])

    collectd.info('Initializing kairosdb_writer client in %s mode.' % protocol.upper())

    d = {
        'host': host,
        'port': port,
        'lowercase_metric_names': lowercase_metric_names,
        'conn': None,
        'lock': threading.Lock(),
        'values': {},
        'last_connect_time': 0
    }

    kairosdb_connect(d)

    collectd.register_write(kairosdb_write, data=d)
def redis_queue_init():
    import threading

    d = {
        'host': REDIS_HOST,
        'port': REDIS_PORT,
        'queue': REDIS_QUEUE,
        'hotqueue': None,
        'lock': threading.Lock(),
    }

    create_queue(d)

    collectd.register_write(redis_write, data=d)
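
The redis, kairosdb, and carbon examples all stash a threading.Lock in the data dict because collectd may invoke write callbacks from several threads at once. A minimal sketch (names hypothetical) of how such a lock typically guards the shared buffer:

import threading
import collectd

def buffered_write(vl, data):
    # collectd can call this from multiple threads; guard shared state.
    with data['lock']:
        data['values'].setdefault(vl.plugin, []).extend(vl.values)

def buffered_init():
    d = {'lock': threading.Lock(), 'values': {}}
    collectd.register_write(buffered_write, data=d)

collectd.register_init(buffered_init)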
Example #17
def oci_init():
    """collectd initialization callback method"""
    global _configuration, _data_reporter
    logger = None
    try:
        while not _configuration:
            time.sleep(1)
        data_processor = DataProcessor(configuration=_configuration)
        collectd.register_write(oci_write, data=data_processor)
        _data_reporter = DataReporter(_configuration, data_processor)
        logger = oci_logger.get_logger(__name__, _configuration.logging_level)
        logger.info('Initialization completed successfully.')
    except Exception as e:
        message = ("Cannot initialize plugin. Cause: " + str(e) + "\n" +
                   traceback.format_exc())
        # logger is still None if the failure happened before it was created
        if logger is not None:
            logger.error(message)
        else:
            collectd.error(message)
Example #18
    def register(cls):
        assert collectd is not None

        LOG.info("Register plugin: %s", cls)

        log_handler = CollectdLogHandler(collectd=collectd)
        log_handler.setFormatter(logging.Formatter(LOG_FORMAT))
        logging.getLogger('collectd_pandas').addHandler(log_handler)

        instance = cls()
        collectd.register_config(instance.configure)
        collectd.register_init(instance.initialize)
        collectd.register_write(instance.write)
        LOG.info("Plugin registered as: %r.", instance)
        return instance
def kairosdb_init():
    import threading

    d = {
        'host': host,
        'port': port,
        'lowercase_metric_names': lowercase_metric_names,
        'sock': None,
        'lock': threading.Lock(),
        'values': {},
        'last_connect_time': 0
    }

    kairosdb_connect(d)

    collectd.register_write(kairosdb_write, data=d)
Example #20
def kairosdb_init():
    import threading

    d = {
        'host': host,
        'port': port,
        'lowercase_metric_names': lowercase_metric_names,
        'sock': None,
        'lock': threading.Lock(),
        'values': {},
        'last_connect_time': 0
    }

    kairosdb_connect(d)

    collectd.register_write(kairosdb_write, data=d)
def carbon_init():
    import threading

    d = {
        'host': host,
        'port': port,
        'differentiate_values': differentiate_values,
        'differentiate_values_over_time': differentiate_values_over_time,
        'sock': None,
        'lock': threading.Lock(),
        'values': {},
        'last_connect_time': 0
    }

    carbon_connect(d)

    collectd.register_write(carbon_write, data=d)
Example #22
def carbon_init():
    global host, port, derive
    import threading

    d = {
        'host': host,
        'port': port,
        'derive': derive,
        'sock': None,
        'lock': threading.Lock(),
        'values': {},
        'last_connect_time': 0
    }

    carbon_connect(d)

    collectd.register_write(carbon_write, data=d)
Example #23
    def __init__(self, typeinfo):
        self.nameserver = "unknown"
        self.cluster = "none"
        self.ns = None
        self.ip = "0.0.0.0"
        self.publishTimeout = 600
        self.q = multiprocessing.Queue()
        self.qthread = None
        self.typesdb = "/usr/share/collectd/types.db"
        self.types = {}
        self.typeinfo = typeinfo
        self.cachedValues = {}

        collectd.register_config(self.config)
        collectd.register_init(self.init)
        collectd.register_write(self.write)
        collectd.register_shutdown(self.shutdown)
def carbon_init():
    global host, port, derive
    import threading

    d = {
        'host': host,
        'port': port,
        'derive': derive,
        'sock': None,
        'lock': threading.Lock(),
        'values': {},
        'last_connect_time': 0
    }

    carbon_connect(d)

    collectd.register_write(carbon_write, data=d)
def init():
    import threading
    data = {
        'lock': threading.Lock(),
        'conf': cfg,
        'last_flush_time': time.time(),
        'types': parse_types_file(cfg.get('TypesDB', '/usr/share/collectd/types.db')),
        'metrics': {}
    }

    for key in ('URL', 'Tenant'):
        if key not in cfg:
            collectd.error('{}: No {} key is present in config file'.format(plugin_name, key))
            return

    # can't register the write callback earlier because of threading import constraints
    collectd.register_write(write, data)
Example #26
def register_plugin(collectd):
    """Bind plugin hooks to collectd and viceversa."""
    config = Config.instance()

    # Set up logging
    log_handler = CollectdLogHandler(collectd=collectd)
    log_handler.cfg = config
    ROOT_LOGGER.addHandler(log_handler)
    ROOT_LOGGER.setLevel(logging.NOTSET)

    # Create the collectd plugin instance
    instance = Plugin(collectd=collectd, config=config)

    # Register plugin callbacks
    collectd.register_config(instance.config)
    collectd.register_shutdown(instance.shutdown)
    collectd.register_write(instance.write)
Example #27
    def __init__(self, typeinfo):
        self.nameserver = "unknown"
        self.cluster = "none"
        self.ns = None
        self.ip = "0.0.0.0"
        self.publishTimeout = 600
        self.q = multiprocessing.Queue()
        self.qthread = None
        self.typesdb = "/usr/share/collectd/types.db"
        self.types = {}
        self.typeinfo = typeinfo
        self.cachedValues = {}

        collectd.register_config(self.config)
        collectd.register_init(self.init)
        collectd.register_write(self.write)
        collectd.register_shutdown(self.shutdown)
Example #28
def librato_init():
    import threading

    try:
        librato_parse_types_file(config['types_db'])
    except Exception:
        msg = '%s: ERROR: Unable to open TypesDB file: %s.' % \
              (plugin_name, config['types_db'])
        raise Exception(msg)

    d = {
        'lock': threading.Lock(),
        'last_flush_time': get_time(),
        'gauges': [],
        'counters': []
    }

    collectd.register_write(librato_write, data=d)
def register_plugin(collectd):
    "Bind plugin hooks to collectd and viceversa"

    config = Config.instance()

    # Set up logging
    log_handler = CollectdLogHandler(collectd=collectd)
    log_handler.cfg = config
    ROOT_LOGGER.addHandler(log_handler)
    ROOT_LOGGER.setLevel(logging.NOTSET)

    # Create the collectd plugin instance
    instance = Plugin(collectd=collectd, config=config)

    # Register plugin callbacks
    collectd.register_config(instance.config)
    collectd.register_write(instance.write)
    collectd.register_shutdown(instance.shutdown)
Example #30
def sensu_init():
    """Prepare to send data to Sensu"""
    import threading

    try:
        sensu_parse_types_file(CONFIG['types_db'])
    except Exception:
        msg = '%s: ERROR: Unable to open TypesDB file: %s.' % \
              (PLUGIN_NAME, CONFIG['types_db'])
        raise Exception(msg)

    data_init = {
        'lock': threading.Lock(),
        'last_flush_time': get_time(),
        'output': [],
    }

    collectd.register_write(sensu_write, data=data_init)
    def config_cb(self, config, data=None):
        self.config = util.map_collectd_config(config)
        if "Module.config" in self.config:
            self._log("config_cb: {!r}".format(self.config))
        if "Module.init" in self.config:
            collectd.register_init(util.init_closure(self), name=self.__module__)
        if "Module.read" in self.config:
            collectd.register_read(util.read_closure(self), name=self.__module__)
        if "Module.write" in self.config:
            collectd.register_write(util.write_closure(self), name=self.__module__)
        if "Module.notification" in self.config:
            collectd.register_notification(util.notification_closure(self), name=self.__module__)
        if "Module.flush" in self.config:
            collectd.register_flush(util.flush_closure(self), name=self.__module__)
        if "Module.log" in self.config:
            collectd.register_log(util.log_closure(self), name=self.__module__)
        if "Module.shutdown" in self.config:
            collectd.register_shutdown(util.shutdown_closure(self), name=self.__module__)
def carbon_init():
    import threading

    d = {
        "host": host,
        "port": port,
        "differentiate_values": differentiate_values,
        "differentiate_values_over_time": differentiate_values_over_time,
        "lowercase_metric_names": lowercase_metric_names,
        "sock": None,
        "lock": threading.Lock(),
        "values": {},
        "last_connect_time": 0,
    }

    carbon_connect(d)

    collectd.register_write(carbon_write, data=d)
def librato_init():
    import threading

    try:
        librato_parse_types_file(config['types_db'])
    except Exception:
        msg = '%s: ERROR: Unable to open TypesDB file: %s.' % \
              (plugin_name, config['types_db'])
        raise Exception(msg)

    d = {
        'lock': threading.Lock(),
        'last_flush_time': get_time(),
        'gauges': [],
        'counters': []
    }

    collectd.register_write(librato_write, data=d)
Example #34
def sensu_init():
    """Prepare to send data to Sensu"""
    import threading

    try:
        sensu_parse_types_file(CONFIG['types_db'])
    except Exception:
        msg = '%s: ERROR: Unable to open TypesDB file: %s.' % \
              (PLUGIN_NAME, CONFIG['types_db'])
        raise Exception(msg)

    data_init = {
        'lock': threading.Lock(),
        'last_flush_time': get_time(),
        'output': [],
    }

    collectd.register_write(sensu_write, data=data_init)
Example #35
def carbon_init():
    import threading

    d = {
        'host': host,
        'port': port,
        'differentiate_values': differentiate_values,
        'differentiate_values_over_time': differentiate_values_over_time,
        'lowercase_metric_names': lowercase_metric_names,
        'sock': None,
        'lock': threading.Lock(),
        'values': {},
        'last_connect_time': 0
    }

    carbon_connect(d)

    collectd.register_write(carbon_write, data=d)
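
The carbon_write callback itself is not shown in these examples. For orientation, a sketch of the kind of line formatting it likely performs, using Graphite's plaintext protocol ("<metric.path> <value> <timestamp>\n"); the metric path layout below is an assumption, not the plugin's exact scheme:

def carbon_format_sketch(vl):
    # Build one plaintext-protocol line per value in the Values object.
    # Path layout is illustrative only.
    lines = []
    for value in vl.values:
        path = '%s.%s.%s' % (vl.host.replace('.', '_'), vl.plugin, vl.type)
        lines.append('%s %f %d\n' % (path, value, vl.time))
    return ''.join(lines)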
Example #36
    def init(self):
        auth_mode = self.conf.get('auth_mode', 'basic').lower()
        if auth_mode == 'keystone':
            auth_url = self.conf.get("auth_url", self.conf.get("authurl"))
            if auth_url is None:
                raise RuntimeError(
                    "Please specify `auth_url` for Keystone auth_mode")

            kwargs = {}

            for arg in ("auth_url", "username", "user_id", "project_id",
                        "project_name", "tenant_id", "tenant_name", "password",
                        "user_domain_id", "user_domain_name",
                        "project_domain_id", "project_domain_name"):
                if arg in self.conf:
                    kwargs[arg] = self.conf.get(arg)

            auth = identity.Password(**kwargs)
        elif auth_mode == "basic":
            auth = gnocchiclient.auth.GnocchiBasicPlugin(
                self.conf.get("user", "admin"), self.conf.get("endpoint"))
        elif auth_mode == "noauth":
            auth = gnocchiclient.auth.GnocchiNoAuthPlugin(
                self.conf.get("userid", "admin"),
                self.conf.get("projectid", "admin"),
                self.conf.get("roles", "admin"), self.conf.get("endpoint"))
        else:
            raise RuntimeError("Unknown auth_mode `%s'" % auth_mode)
        s = session.Session(auth=auth)
        self.g = client.Client(
            1,
            s,
            adapter_options=dict(interface=self.conf.get('interface'),
                                 region_name=self.conf.get('region_name'),
                                 endpoint_override=self.conf.get('endpoint')))

        self._resource_type = self.conf.get("resourcetype", "collectd")
        self.values = []
        self.batch_size = self.conf.get("batchsize", 10)

        collectd.register_write(self.write)
        collectd.register_flush(self.flush)
Example #37
def init_callback():

    global diskMetrics, networkMetrics, diskMetricsNames, networkMetricsNames, cpuMetricsNames, cpuMetrics, memoryMetrics, memoryMetricsNames, sysSpecsMetrics

    i = len(disks) * 2
    for x in range(0, i):
        diskMetrics.insert(x, 0)
    diskMetricsNames = ['disk_octets']

    y = len(networks) * 4
    for z in range(0, y):
        networkMetrics.insert(z, 0)
    networkMetricsNames = ['if_octets', 'if_packets']

    cpuMetricsNames = ['load', 'totalCpuTime', 'system', 'user', 'idle']
    cpuMetrics = [0, 0, 0, 0, 0]
    memoryMetricsNames = ['UsedPercantage', 'FreePercentage', 'used', 'free']
    memoryMetrics = [0, 0, 0, 0]
    sysSpecsMetrics = [0, 0, 0, 0]

    collectd.register_write(write_onos, data=None)
Example #38
def amqp_config(config):
    global AMQP_EXCHANGE, AMQP_HOST, AMQP_PORT
    global METRIC_PREFIX
    global SSL_CERT, SSL_KEY, SSL_CACERT
    global connection, channel
    
    for child in config.children:
        if child.key == 'TypesDB':
            for v in child.values:
                parse_types_file(v)
        elif child.key == 'AMQP_EXCHANGE':
            AMQP_EXCHANGE = child.values[0]
        elif child.key == 'AMQP_HOST':
            AMQP_HOST = child.values[0]
        elif child.key == 'METRIC_PREFIX':
            METRIC_PREFIX = child.values[0]
        elif child.key == 'SSL_CERT':
            SSL_CERT = child.values[0]
        elif child.key == 'SSL_KEY':
            SSL_KEY = child.values[0]
        elif child.key == 'SSL_CACERT':
            SSL_CACERT = child.values[0]

    if SSL_KEY is not None:
        connection = amqp.Connection(
            host=AMQP_HOST,
            ssl={
                'ca_certs': SSL_CACERT,
                'keyfile': SSL_KEY,
                'certfile': SSL_CERT,
                'cert_reqs': ssl.CERT_REQUIRED})
    else:
        connection = amqp.Connection(host=AMQP_HOST)
    
    channel = connection.channel()
    channel.exchange_declare(AMQP_EXCHANGE, type='topic', durable=True, auto_delete=False)
    collectd.register_write(amqp_write)
Example #39
    def config(cfg):
        # Handle legacy config (not multiple-endpoint capable)
        if not any([n.key == 'Endpoint' for n in cfg.children]):
            # Create fake intermediary Endpoint node
            cfg.children = (collectd.Config('Endpoint', cfg, ('default', ),
                                            cfg.children), )

        endpoints = []
        for node in cfg.children:
            if node.key == 'Endpoint':
                endpoint = WriteWarp10.config_endpoint(node)
                if endpoint:
                    if any(e['name'] == endpoint['name'] for e in endpoints):
                        collectd.warning('write_warp10 plugin: Duplicate '
                                         'endpoint: %s' % endpoint['name'])
                    else:
                        endpoints.append(endpoint)
            else:
                collectd.warning('write_warp10 plugin: Unknown config key: '
                                 '%s' % node.key)

        if endpoints:
            for e in endpoints:
                ww10 = WriteWarp10(e['url'], e['token'], e['flush_interval'],
                                   e['flush_retry_interval'], e['buffer_size'],
                                   e['default_labels'], e['rewrite_rules'],
                                   e['rewrite_limit'])
                collectd.info('write_warp10 plugin: register init write and '
                              'shutdown functions')
                collectd.register_init(ww10.init,
                                       name='write_warp10/%s' % e['name'])
                collectd.register_write(ww10.write,
                                        name='write_warp10/%s' % e['name'])
                collectd.register_shutdown(ww10.shutdown,
                                           name='write_warp10/%s' % e['name'])
        else:
            collectd.warning('write_warp10 plugin: No valid endpoints found')
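
write_warp10 passes a distinct name= to each register call, which is what lets several endpoints register the same bound methods side by side. A minimal sketch of the same pattern (endpoint names made up):

import collectd

def make_writer(tag):
    def _write(vl, data=None):
        collectd.info('[%s] got %s/%s' % (tag, vl.plugin, vl.type))
    return _write

# Distinct names let multiple instances of the same callback coexist.
for name in ('primary', 'backup'):
    collectd.register_write(make_writer(name), name='write_example/%s' % name)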
    def wallarm_init(self):
        for typedb_file in self.config['types_db']:
            try:
                self.wallarm_parse_types_file(typedb_file)
            except IOError as e:
                msg = "{0}: Unable to open TypesDB file '{1}': {2}.".format(
                    self.plugin_name,
                    typedb_file,
                    str(e)
                )
                self.log('warning', msg)

        if not len(self.types):
            msg = (
                "{0}: Didn't find any valid type in TypesDB files: {1}".format(
                    self.plugin_name,
                    self.config['types_db'],
                )
            )
            self.log('error', msg)
            raise ValueError(msg)

        self.last_try_time = self.get_time()
        self.last_flush_time = self.get_time()
        self.main_queue = Queue.Queue()
        self.send_queue = []
        self.measr_avg_size = self.config['measr_avg_size']
        self.update_queue_size()
        self.shutdown_event = threading.Event()
        self.send_lock = threading.Lock()

        self.send_thread = threading.Thread(target=self.send_watchdog)
        self.send_thread.start()

        collectd.register_write(self.wallarm_write)
        collectd.register_shutdown(self.shutdown_callback)
def writer_init():
    global sandbox, debug, apikey, secretkey, monitors

    # last values and timestamps must be shared across collectd threads
    # collectd will pass this shared object into write on all calls
    # all accessess to this data should be protected by threading.Lock

    shared_data = {'monitors': dict(), 'lock': threading.Lock()}

    if sandbox:
        Monitis.sandbox = True
        os.environ['MONITIS_SANDBOX_APIKEY'] = apikey
        os.environ['MONITIS_SANDBOX_SECRETKEY'] = secretkey
    else:
        Monitis.sandbox = False
        os.environ['MONITIS_APIKEY'] = apikey
        os.environ['MONITIS_SECRETKEY'] = secretkey

    if debug:
        Monitis.debug = True
    else:
        Monitis.debug = False

    for monitor_name in monitors.keys():
        monitors[monitor_name]['monitis'] = CustomMonitor.fetch(
            monitor_id=monitors[monitor_name]['id'])

    # hold lock while initializing shared_data
    with shared_data['lock']:
        for monitor_name in monitors.keys():
            shared_data['monitors'][monitor_name] = {
                'last_value': None,
                'last_time': None
            }

    collectd.register_write(write, data=shared_data)
                new_value = value

            sample.values[i] = new_value

        points.extend(sample.values)
        columns.extend(('host', 'type'))
        points.extend((sample.host, sample.type))

        if sample.plugin_instance:
            columns.append('plugin_instance')
            points.append(sample.plugin_instance)

        if sample.type_instance:
            columns.append('type_instance')
            points.append(sample.type_instance)

        data = {'name': sample.plugin,
                'columns': columns,
                'points': [points]}

        self._queues[identifier].put(data)
        self._flush()


db = InfluxDB()
collectd.register_config(db.config)
collectd.register_flush(db.flush)
collectd.register_init(db.init)
collectd.register_shutdown(db.shutdown)
collectd.register_write(db.write)
        """Collectd write callback"""
        # pylint: disable=broad-except
        # pass arguments to the writer
        try:
            self._writer.write(vl, data)
        except Exception as exc:
            if collectd is not None:
                collectd.error('Exception during write: %s' % exc)

    def shutdown(self):
        """Shutdown callback"""
        # pylint: disable=broad-except
        collectd.info("SHUTDOWN")
        try:
            self._writer.flush()
        except Exception as exc:
            if collectd is not None:
                collectd.error('Exception during shutdown: %s' % exc)


# The collectd plugin instance
# pylint: disable=invalid-name
instance = Plugin()
# pylint: enable=invalid-name

# Register plugin callbacks
collectd.register_init(instance.init)
collectd.register_config(instance.config)
collectd.register_write(instance.write)
collectd.register_shutdown(instance.shutdown)
    upload_thread = UploadThread()
    upload_thread.daemon = True
    upload_thread.start()
    collectd.info('cloudhealth - plugin configured successfully')


def write_func(values):
    perf_data_mapping = {'memory': MEMORY}
    type_filter = {'memory': ['percent']}
    type_instance_filter = {'memory': ['used']}
    if not (CONFIG.get('token') and REGION_NAME and AWS_ACCOUNT_ID
            and INSTANCE_ID):
        collectd.warning('cloudhealth - plugin not configured properly')
        return
    if values.plugin not in perf_data_mapping.keys():
        return
    if values.type in type_filter.get(values.plugin, []):
        if values.type_instance in type_instance_filter.get(values.plugin, []):
            update_min(perf_data_mapping[values.plugin], values.plugin,
                       values.type, values.type_instance, values.values[0])
            update_max(perf_data_mapping[values.plugin], values.plugin,
                       values.type, values.type_instance, values.values[0])
            update_avg(perf_data_mapping[values.plugin], values.plugin,
                       values.type, values.type_instance, values.values[0])
            update_values(perf_data_mapping[values.plugin])


if __name__ != '__main__':
    collectd.register_config(config_func)
    collectd.register_write(write_func)
Example #45
    return


def flush_cb(timeout, identifier, data=None):
    return

def log_cb(severity, message, data=None):
    return


## Register the call-back functions

data = "stub-string"         # placeholder
name = init_cb.__module__    # the default
interval = 10                # the default

collectd.register_config(config_cb, data, name)
collectd.register_init(init_cb, data, name)
collectd.register_shutdown(shutdown_cb, data, name)

collectd.register_read(read_cb, interval, data, name)
collectd.register_write(write_cb, data, name)
collectd.register_notification(notification_cb, data, name)

collectd.register_flush(flush_cb, data, name)
collectd.register_log(log_cb, data, name)

## Local Variables:
## mode: python
## End:
Example #46
    def notify(self, n):
        """Collectd notification callback"""
        # type='gauge',type_instance='link_status',plugin='ovs_events',plugin_instance='br0',
        # host='silv-vmytnyk-nos.ir.intel.com',time=1476441572.7450583,severity=4,
        # message='link state of "br0" interface has been changed to "UP"')
        collectd_event_severity_map = {
            collectd.NOTIF_FAILURE: 'CRITICAL',
            collectd.NOTIF_WARNING: 'WARNING',
            collectd.NOTIF_OKAY: 'NORMAL'
        }
        fault = Fault(self.get_event_id())
        fault.event_severity = collectd_event_severity_map[n.severity]
        fault.specific_problem = '{}-{}'.format(n.plugin_instance, n.type_instance)
        fault.alarm_condition = n.message
        self.event_send(fault)

    def shutdown(self):
        """Collectd shutdown callback"""
        # stop the timer
        self.stop_timer()

# The collectd plugin instance
plugin_instance = VESPlugin()

# Register plugin callbacks
collectd.register_config(plugin_instance.config)
collectd.register_init(plugin_instance.init)
collectd.register_write(plugin_instance.write)
collectd.register_notification(plugin_instance.notify)
collectd.register_shutdown(plugin_instance.shutdown)
Example #47
def plugin_config(conf):
    """
    :param conf:
      https://collectd.org/documentation/manpages/collectd-python.5.shtml
      #config

    Parse the config object for config parameters
    """

    global POST_URLS
    for kv in conf.children:
        if kv.key == 'ProcessInfo':
            global PROCESS_INFO
            PROCESS_INFO = kv.values[0]
        elif kv.key == 'Datapoints':
            global DATAPOINTS
            DATAPOINTS = kv.values[0]
        elif kv.key == 'Utilization':
            global UTILIZATION
            UTILIZATION = kv.values[0]
        elif kv.key == 'PerCoreCPUUtil':
            global PERCORECPUUTIL
            PERCORECPUUTIL = kv.values[0]
        elif kv.key == 'OverallCPUUtil':
            global OVERALLCPUUTIL
            OVERALLCPUUTIL = kv.values[0]
        elif kv.key == 'Verbose':
            global DEBUG
            DEBUG = kv.values[0]
            log('setting verbose to %s' % DEBUG)
        elif kv.key == 'URL':
            POST_URLS.extend(kv.values)
        elif kv.key == 'Token':
            global API_TOKENS
            API_TOKENS.extend(kv.values)
        elif kv.key == 'Timeout':
            global TIMEOUT
            TIMEOUT = int(kv.values[0])
        elif kv.key == 'Interval':
            global INTERVAL
            INTERVAL = int(kv.values[0])
        elif kv.key == 'NotifyLevel':
            global NOTIFY_LEVEL
            if string.lower(kv.values[0]) == "okay":
                NOTIFY_LEVEL = 4
            elif string.lower(kv.values[0]) == "warning":
                NOTIFY_LEVEL = 2
            elif string.lower(kv.values[0]) == "failure":
                NOTIFY_LEVEL = 1
        elif kv.key == 'EtcPath':
            global ETC_PATH
            ETC_PATH = kv.values[0].rstrip(os.pathsep).rstrip(os.sep)
            debug("Setting etc path to %s for os release detection" % ETC_PATH)
        elif kv.key == 'PersistencePath':
            global PERSISTENCE_PATH
            PERSISTENCE_PATH = kv.values[0]
            load_persistent_data()

    if not POST_URLS:
        POST_URLS = [DEFAULT_POST_URL]

    if API_TOKENS and len(POST_URLS) != len(API_TOKENS):
        log("You have specified a different number of Tokens than URLs, "
            "please fix this")
        sys.exit(0)

    collectd.register_write(write)

    if UTILIZATION:
        collectd.register_read(UTILIZATION_INSTANCE.read,
                               1,
                               name="utilization_reads")

    if OVERALLCPUUTIL is not True:
        log("Overall cpu utilization has been disabled via configuration")

    if PERCORECPUUTIL is True:
        log("Cpu utilization per core has been enabled via configuration")

    collectd.register_read(send, INTERVAL)
    get_aws_info()
        metrics_list = list(metrics(vl, config))
        ts = datetime.fromtimestamp(vl.time)
        data = []

        for i, v in enumerate(vl.values):
            fullname, unit, dims = metrics_list[i]
            name = fullname[:255]
            if len(name) < len(fullname):
                collectd.warning('Metric name was truncated for CloudWatch: {}'.format(fullname))

            data.append(dict(
                MetricName=name,
                Timestamp=ts,
                Value=v,
                Unit=unit,
                Dimensions=dims
            ))

        client.put_metric_data(Namespace=vl.plugin, MetricData=data)
    except Exception as e:
        collectd.error(str(e))

def plugin_init():
    collectd.info('Initializing write_cloudwatch')
    signal.signal(signal.SIGCHLD, signal.SIG_DFL)

config = Config()
collectd.register_config(plugin_config, config)
collectd.register_init(plugin_init)
collectd.register_write(plugin_write, config)
    parser.add_option("","--plugin-name", default="RabbitMQ")
    parser.add_option("-H", "--host", default="localhost",
        help="RabbitMQ hostname. Default localhost")
    parser.add_option("-P", "--port", default=55672,
        help="RabbitMQ Admin port. Default 55672.")
    parser.add_option("", "--username", default="guest",
        help="Username credential. Default guest.")
    parser.add_option("", "--password", default="guest",
        help="Password credential. Default guest.")
    parser.add_option("", "--vhost", default="/",
        help="Virtual host. Default /")

    opts, arg_files = parser.parse_args()

    CONFIGS = [{'plugin_name': opts.plugin_name, 'host': opts.host, 'admin_port': opts.port, 'vhost': opts.vhost, 'username': opts.username, 'password': opts.password}]
    co = RabbitMQ()
    co.check_run_multi_config()
    if co.debug_info:
        str_json = json.dumps(METRICS_RESULTS)
        print(str_json)


if not DEBUG:
    collectd_rabbitMQ = RabbitMQ()
    collectd.register_config(collectd_rabbitMQ.multi_config)
    collectd.register_read(collectd_rabbitMQ.check_run_multi_config)
    collectd.register_write(collectd_rabbitMQ.write)

if __name__ == '__main__':
    main()
Example #50
    if [vl.plugin,vl.type,vl.plugin_instance,vl.type_instance] in d[vl.host]:
      pass
    else:
      # add service
      d[vl.host].append([vl.plugin,vl.type,vl.plugin_instance,vl.type_instance])
      pluginname = vl.plugin + "/" + vl.type
      if len(vl.plugin_instance) != 0:
        pluginname = vl.plugin + "-" + vl.plugin_instance + "/" + vl.type
      if len(vl.type_instance) != 0:
        pluginname = pluginname + "-" + vl.type_instance
      sendCommand("-o SERVICE -a add -v \""+vl.host+";"+pluginname+";generic-service\"")
      sendCommand("-o SERVICE -a setparam -v \""+vl.host+";"+pluginname+";command;check_collectd\"")
      sendCommand("-o SERVICE -a setparam -v \""+vl.host+";"+pluginname+";normal_check_interval;1\"")
      sendCommand("-o SERVICE -a setparam -v \""+vl.host+";"+pluginname+";retry_check_interval;5\"")
      sendCommand("-o SERVICE -a setparam -v \""+vl.host+";"+pluginname+";check_period;24x7\"")
      sendCommand("-o SERVICE -a setparam -v \""+vl.host+";"+pluginname+";max_check_attempts;5\"")
      sendCommand("-o SERVICE -a setparam -v \""+vl.host+";"+pluginname+";active_checks_enabled;1\"")
      sendCommand("-o SERVICE -a setparam -v \""+vl.host+";"+pluginname+";notif_period;24x7\"")
      sendCommand("-o SERVICE -a setparam -v \""+vl.host+";"+pluginname+";args;!"+pluginname+"!\"")
      sendCommand("-o SERVICE -a setcg -v \""+vl.host+";"+pluginname+";Supervisors\"")

      # Regenerate the centreon configuration
      # TODO: currently we flood centreon by regenerating the configuration for each service we add.
      sendCommand("-a POLLERGENERATE -v 1")
      sendCommand("-a CFGMOVE -v 1")
      sendCommand("-a POLLERRELOAD -v 1")
      

collectd.register_init(init)
collectd.register_write(write)
Example #51
            else:
                collectd.warning('unrecognized ds_type {}'.format(ds_type))
                new_value = value

            sample.values[i] = new_value

        points.extend(sample.values)
        columns.extend(('host', 'type'))
        points.extend((sample.host, sample.type))

        if sample.plugin_instance:
            columns.append('plugin_instance')
            points.append(sample.plugin_instance)

        if sample.type_instance:
            columns.append('type_instance')
            points.append(sample.type_instance)

        data = {'name': sample.plugin, 'columns': columns, 'points': [points]}

        self._queues[identifier].put(data)
        self._flush()


db = InfluxDB()
collectd.register_config(db.config)
collectd.register_flush(db.flush)
collectd.register_init(db.init)
collectd.register_shutdown(db.shutdown)
collectd.register_write(db.write)
        if len(append_names) != len(value.values):
            collectd.error("len(ds_names) != len(value.values)")
            return

        msg = "".join([
            "%s %f %d host=%s%s\n" %
            (prefix + metric_name + postfix, metric_value, value.time,
             value.host, tags_append)
            for (postfix, metric_value) in zip(append_names, value.values)
        ])

        try:
            CONFIG['queue'].put(msg, block=False)
        except Exception as e:
            collectd.error("Failed to queue message: " + str(e))

    except Exception:
        collectd.error("Exception in write_callback: " +
                       traceback.format_exc())


if 'collectd' in globals().keys():
    collectd.register_config(configure_callback)
    collectd.register_write(write_callback)
    collectd.register_init(init_callback)
else:
    print "Not running under collectd"
    import sys
    sys.exit(1)
    try:
        tags_append = ' ' + CONFIG['tags_append']
    except KeyError:
        tags_append = ''

    append_names = [ '.' + append_name if append_name != 'value' else ''
                     for (append_name, _, _, _)
                     in collectd.get_dataset(value.type) ]

    if len(append_names) != len(value.values):
        collectd.error("len(ds_names) != len(value.values)")
        return
    
    msg = "".join([ "%s %f %d host=%s%s\n" % (prefix + metric_name + postfix, metric_value, value.time, value.host, tags_append)
                      for (postfix, metric_value)
                      in zip(append_names, value.values) ])
    try:
        CONFIG['queue'].put(msg, block=False)
    except Exception as e:
        collectd.error("Failed to queue message: " + str(e))

if 'collectd' in globals().keys():
    collectd.register_config(configure_callback)
    collectd.register_write(write_callback)
    collectd.register_init(init_callback)
else:
    print "Not running under collectd"
    import sys
    sys.exit(1)
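
Both snippets above bail out with sys.exit when the collectd module is absent. For local testing outside the daemon, one alternative (entirely hypothetical, not part of these plugins) is to install a stub collectd module before importing the plugin:

import sys
import types

# Hypothetical stand-in so a plugin module can be imported outside collectd,
# e.g. in unit tests; only the entry points used above are stubbed.
stub = types.ModuleType('collectd')
stub.register_config = lambda *args, **kwargs: None
stub.register_init = lambda *args, **kwargs: None
stub.register_write = lambda *args, **kwargs: None
stub.info = lambda msg: sys.stdout.write('INFO: %s\n' % msg)
stub.error = lambda msg: sys.stderr.write('ERROR: %s\n' % msg)
sys.modules['collectd'] = stub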
Example #54
    get_host_type()
    collectd.info('buddyinfo plugin: host of type: %s' % (host_type))
    collectd.info('buddyinfo initer: white list: %s' % (white_list))
    init_stats_cache()
    collectd.info('buddyinfo init: stats_cache: %s' % (stats_cache))


def reader(input_data=None):
    collect_buddyinfo()
    swap_current_cache()


def writer(metric, data=None):
    for i in metric.values:
        collectd.debug('%s (%s): %f' % (metric.plugin, metric.type, i))


def shutdown():
    collectd.info('buddyinfo plugin shutting down')


#== Callbacks ==#
if (os_name == 'Linux'):
    collectd.register_config(configer)
    collectd.register_init(initer)
    collectd.register_read(reader)
    collectd.register_write(writer)
    collectd.register_shutdown(shutdown)
else:
    collectd.warning('buddyinfo plugin currently works for Linux only')
Example #55
   collectd.info('buddyinfo plugin: configuring host: %s' % (host_name)) 

def initer():
   get_host_type()
   collectd.info('buddyinfo plugin: host of type: %s' % (host_type))
   collectd.info('buddyinfo initer: white list: %s ' % (white_list))
   init_stats_cache()
   collectd.info('buddyinfo init: stats_cache: %s ' % (stats_cache))

def reader(input_data=None):
   collect_buddyinfo()
   swap_current_cache()

def writer(metric, data=None):
   for i in metric.values:
      collectd.debug("%s (%s): %f" % (metric.plugin, metric.type, i))

def shutdown():
   collectd.info("buddyinfo plugin shutting down")

#== Callbacks ==#
if (os_name == 'Linux'):
   collectd.register_config(configer)
   collectd.register_init(initer)
   collectd.register_read(reader)
   collectd.register_write(writer)
   collectd.register_shutdown(shutdown)
else:
   collectd.warning('buddyinfo plugin currently works for Linux only')

"""
This is for debugging. Use with `collectd -fC <CONF>`.
"""

import collectd
import sys
import re


def cb_write(values):
    collectd.info('write_info: {}'.format(repr(values)))


collectd.register_write(cb_write)
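
To exercise this stub the way its docstring suggests, a collectd.conf fragment along these lines would load it (module name and path are placeholders), after which collectd can be run in the foreground with `collectd -fC <CONF>`:

<LoadPlugin python>
  Globals true
</LoadPlugin>

<Plugin python>
  ModulePath "/path/to/plugin/dir"
  LogTraces true
  Import "write_debug"
</Plugin>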
Example #57
        for host, values in itertools.groupby(to_flush,
                                              operator.attrgetter("host")):
            measures = {host: collections.defaultdict(list)}
            for value_obj in values:
                for i, value in enumerate(value_obj.values):
                    measures[host][self._serialize_identifier(
                        i, value_obj)].append({
                            "timestamp": v.time,
                            "value": value,
                        })
            try:
                self.g.metric.batch_resources_metrics_measures(
                    measures, create_metrics=True)
            except exceptions.BadRequest:
                # Create the resource and try again
                self.g.resource.create(
                    self._resource_type, {
                        "id": "collectd:" + host.replace("/", "_"),
                        "host": host,
                    })
                self.g.metric.batch_resources_metrics_measures(
                    measures, create_metrics=True)

        self.values = not_to_flush


g = Gnocchi()
collectd.register_config(g.config)
collectd.register_write(g.write)
collectd.register_flush(g.flush)
def plugin_config(conf):
    """
    :param conf:
      https://collectd.org/documentation/manpages/collectd-python.5.shtml
      #config

    Parse the config object for config parameters
    """

    DOGSTATSD_INSTANCE.config.configure_callback(conf)

    global POST_URLS
    for kv in conf.children:
        if kv.key == 'Notifications':
            if kv.values[0]:
                global NOTIFICATIONS
                NOTIFICATIONS = kv.values[0]
        elif kv.key == 'ProcessInfo':
            global PROCESS_INFO
            PROCESS_INFO = kv.values[0]
        elif kv.key == 'Datapoints':
            global DATAPOINTS
            DATAPOINTS = kv.values[0]
        elif kv.key == 'Utilization':
            global UTILIZATION
            UTILIZATION = kv.values[0]
        elif kv.key == 'DPM':
            global DPM
            DPM = kv.values[0]
        elif kv.key == 'Verbose':
            global DEBUG
            DEBUG = kv.values[0]
            log('setting verbose to %s' % DEBUG)
        elif kv.key == 'URL':
            POST_URLS.extend(kv.values)
        elif kv.key == 'Token':
            global API_TOKENS
            API_TOKENS.extend(kv.values)
        elif kv.key == 'Timeout':
            global TIMEOUT
            TIMEOUT = int(kv.values[0])
        elif kv.key == 'Interval':
            global INTERVAL
            INTERVAL = int(kv.values[0])
        elif kv.key == 'NotifyLevel':
            global NOTIFY_LEVEL
            if string.lower(kv.values[0]) == "okay":
                NOTIFY_LEVEL = 4
            elif string.lower(kv.values[0]) == "warning":
                NOTIFY_LEVEL = 2
            elif string.lower(kv.values[0]) == "failure":
                NOTIFY_LEVEL = 1

    if not POST_URLS:
        POST_URLS = [DEFAULT_POST_URL]

    if API_TOKENS and len(POST_URLS) != len(API_TOKENS):
        log("You have specified a different number of Tokens than URLs, "
            "please fix this")
        sys.exit(0)

    if NOTIFICATIONS:
        log("sending collectd notifications")
        collectd.register_notification(receive_notifications)
    else:
        collectd.register_notification(steal_host_from_notifications)

    collectd.register_write(write)

    if UTILIZATION:
        collectd.register_read(UTILIZATION_INSTANCE.read,
                               1,
                               name="utilization_reads")

    collectd.register_read(send, INTERVAL)
    set_aws_url(get_aws_info())
                for ii in range(0, len(tvector) - 1):
                    intervals.append(tvector[ii+1] - tvector[ii])
                self._log("tvector:  {!r}".format(tvector))
                self._log("intervals:{!r}".format(intervals))



## collectd call-back functions
##

def config_cb(config, data):
    cfg = util.map_collectd_config(config)
    data.config(cfg)


def write_cb(vl, data):
    data.track_metric(vl)


## Register the call-back functions

data = MetricWriteTracker()

collectd.register_write(write_cb, data)
collectd.register_config(config_cb, data)


## Local Variables:
## mode: python
## End: