Example #1
    def get_meta_data(self):
        if self.__meta_data is not None:
            return self.__meta_data

        uri = 'http://metadata.google.internal/computeMetadata/v1/?recursive=true'
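        # recursive=true returns the whole metadata tree as one JSON document;
        # the GCE metadata server requires the 'Metadata-Flavor: Google' header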
        try:
            s = httper.get(uri, headers={'Metadata-Flavor': 'Google'})
        except get_http_exceptions() as exp:
            self.logger.error(
                'Cannot get meta data for your GCE instance from %s. Error: %s. Exiting'
                % (uri, exp))
            raise
        raw_data = jsoner.loads(s)

        # We only keep the 'instance' part of the metadata tree
        self.__meta_data = raw_data['instance']

        # We only want the short names of the machine type and zone
        self.__meta_data['machineType'] = self.__meta_data[
            'machineType'].split('/')[-1]
        self.__meta_data['zone'] = self.__meta_data['zone'].split('/')[-1]

        del self.__meta_data['serviceAccounts']
        del self.__meta_data['virtualClock']
        del self.__meta_data['licenses']
        del self.__meta_data['disks']

        return self.__meta_data
Example #2
    def launch(self):
        logger = self.logger
        logger.debug('getRabbitMQStatus: start')

        if not self.is_in_group('rabbitmq'):
            self.set_not_eligible(
                'Please add the rabbitmq group to enable this collector.')
            return

        try:
            uri = self.get_parameter('uri')
            user = self.get_parameter('user')
            password = self.get_parameter('password')
            response = httper.get(uri, timeout=3, user=user, password=password)

        except get_http_exceptions() as e:
            self.set_error('Unable to get RabbitMQ status - HTTPError = %s' %
                           e)
            return False

        except Exception:
            self.set_error('Unable to get RabbitMQ status - Exception = %s' %
                           traceback.format_exc())
            return False

        try:
            status = jsoner.loads(response)
        except Exception as exp:
            self.set_error("Rabbitmq: parsing json: %s" % exp)
            return False

        return status
Example #3
    def get_data_sources_from_grafana(self):
        uri = '%s/api/datasources' % (self.uri)
        our_data_sources = {}
        try:
            api_return = httper.get(uri, headers=self.__get_headers())
            try:
                all_data_sources = jsoner.loads(api_return)
            except (ValueError, TypeError) as exp:
                self.logger.error(
                    'Cannot load json from grafana datasources: %s' % exp)
                return None
        except get_http_exceptions() as exp:
            self.logger.error('Cannot connect to grafana datasources: %s' %
                              exp)
            return None
        self.logger.debug("All data sources")
        self.logger.debug(str(all_data_sources))
        # Error message is a dict with just a key: message
        if isinstance(all_data_sources, dict):
            error_message = all_data_sources.get('message', '')
            if error_message:
                if error_message == 'Unauthorized':
                    self.logger.error(
                        'Your API key is not authorized to list data sources.')
                    return None
                self.logger.error('Unknown error from grafana API: %s' %
                                  error_message)
                return None

        # A data source will look like this:
        # [{u'name'        : u'SuperBla',
        #   u'database'    : u'',
        #   u'url'         : u'http://super:6768',
        #   u'basicAuth'   : False,
        #   u'jsonData'    : {},
        #   u'access'      : u'proxy',
        #   u'typeLogoUrl' : u'public/app/plugins/datasource/graphite/img/graphite_logo.png',
        #   u'orgId'       : 1,
        #   u'user'        : u'',
        #   u'password'    : u'',
        #   u'type'        : u'graphite',
        #   u'id'          : 1,
        #   u'isDefault'   : False}]
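        # Only keep graphite data sources whose name embeds a node uuid via the
        # '--opsbro--' marker: those are the ones created by insert_node_into_grafana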
        for data_source in all_data_sources:
            if data_source.get('type', '') != 'graphite':
                continue
            src_name = data_source.get('name', '')
            if '--opsbro--' in src_name:
                elts = src_name.split('--opsbro--')
                if len(elts) == 2:
                    nuuid = elts[1]
                    our_data_sources[nuuid] = data_source
        return our_data_sources
Example #4
 def remove_data_source(self, data_source_id):
     self.logger.info(
          'Cleaning data source %d from grafana because the node is no longer there'
         % data_source_id)
     uri = '%s/api/datasources/%d' % (self.uri, data_source_id)
     try:
         r = httper.delete(uri, headers=self.__get_headers())
         self.logger.debug("Result delete", r)
     except get_http_exceptions() as exp:
         self.logger.error('Cannot connect to grafana datasources: %s' %
                           exp)
         return
Example #5
    def get_meta_data(self):
        if self.__meta_data is not None:
            return self.__meta_data

        uri = 'http://169.254.169.254/metadata/v1.json'
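        # 169.254.169.254 is the link-local metadata address; /metadata/v1.json
        # returns all the droplet metadata as a single JSON document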
        try:
            s = httper.get(uri)
        except get_http_exceptions() as exp:
            self.logger.error(
                'Cannot get meta data for your DigitalOcean instance from %s. Error: %s. Exiting'
                % (uri, exp))
            raise
        self.__meta_data = jsoner.loads(s)
        return self.__meta_data
Example #6
    def get_conf(self):
        if self.conf is not None:
            return self.conf

        uri = 'http://169.254.42.42/conf?format=json'
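        # Scaleway exposes the instance configuration on the link-local
        # address 169.254.42.42; format=json asks for a JSON payload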
        try:
            s = httper.get(uri)
        except get_http_exceptions() as exp:
            self.logger.error(
                'Cannot get the conf for your Scaleway instance from %s. Error: %s. Exiting'
                % (uri, exp))
            raise
        self.conf = jsoner.loads(s)
        self.logger.info('Got Scaleway conf: %s' % self.conf)
        return self.conf
Example #7
    def get_meta_data(self):
        if self.__meta_data is not None:
            return self.__meta_data

        uri = 'http://169.254.169.254/metadata/instance?api-version=2017-08-01'
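        # The Azure Instance Metadata Service requires the 'Metadata: True'
        # header and an explicit api-version query parameter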
        try:
            s = httper.get(uri, headers={'Metadata': 'True'})
        except get_http_exceptions() as exp:
            self.logger.error(
                'Cannot get meta data for your Azure instance from %s. Error: %s. Exiting'
                % (uri, exp))
            raise
        raw_data = jsoner.loads(s)

        # We want to merge the compute and network structures into a flatter one
        self.__meta_data = raw_data['compute']
        first_network_interface = raw_data['network']['interface'][0]
        self.__meta_data.update(first_network_interface)

        return self.__meta_data
Example #8
    def launch_main(self):
        while not stopper.is_stop():
            self.enabled = self.get_parameter('enabled')
            if not self.enabled:
                time.sleep(1)
                continue
            self.export_uri = self.get_parameter('export_uri')
            self.customer_key = self.get_parameter('customer_key')
            self.inventory_number = self.get_parameter('inventory_number')
            if not self.customer_key:
                self.warning('You must have a customer key')
                time.sleep(1)
                continue

            syno_collector = collectormgr.collectors.get('synology', None)
            if syno_collector is None:
                self.logger.error('The synology collector is missing')
                time.sleep(1)
                continue

            results = syno_collector.get('results', None)
            if results is None:
                self.logger.warning('The synology collector did not run')
                time.sleep(1)
                continue

            try:
                r = httper.post(self.export_uri,
                                params={
                                    'uuid': gossiper.uuid,
                                    'customer_key': self.customer_key,
                                    'inventory_number': self.inventory_number,
                                    'results': results
                                },
                                headers={})
                self.logger.debug("Result insert", r)
            except get_http_exceptions() as exp:
                self.logger.error(
                    'Cannot connect to the export uri: %s' % exp)
            time.sleep(1)
Example #9
 def insert_node_into_grafana(self, nuuid):
     node = gossiper.get(nuuid)
     if node is None:
         return
     name = node['name']
     addr = node['addr']
     port = node['port']
     data_source_name = "%s--opsbro--%s" % (name, nuuid)
     entry = {
         "name": data_source_name,
         "type": "graphite",
         "url": "http://%s:%d" % (addr, port),
         "access": "proxy"
     }
     uri = '%s/api/datasources' % (self.uri)
     try:
         r = httper.post(uri, params=entry, headers=self.__get_headers())
         self.logger.debug("Result insert", r)
     except get_http_exceptions() as exp:
         self.logger.error('Cannot connect to grafana datasources: %s' %
                           exp)
         return
Example #10
 def do_render(targets, _from):
     response.content_type = 'application/json'
     
     if not targets:
         return abort(400, 'Invalid target')
     # Default past values, round at an hour
     now = int(time.time())
     pastraw = int(time.time()) - 86400
     past = divmod(pastraw, 3600)[0] * 3600
     
     found = False
     # Try -Xd
     m = re.match(r'-(\d*)d', _from, re.M | re.I)
     if m:
         found = True
         nbdays = int(m.group(1))
         pastraw = int(time.time()) - (nbdays * 86400)
         past = divmod(pastraw, 86400)[0] * 86400
     # Try -Xh
     m = re.match(r'-(\d*)h', _from, re.M | re.I)
     if m:
         found = True
         nbhours = int(m.group(1))
         pastraw = int(time.time()) - (nbhours * 3600)
         past = divmod(pastraw, 3600)[0] * 3600
     # Try -Xhours
     if not found:
         m = re.match(r'-(\d*)hours', _from, re.M | re.I)
         if m:
             found = True
             nbhours = int(m.group(1))
             pastraw = int(time.time()) - (nbhours * 3600)
             past = divmod(pastraw, 3600)[0] * 3600
     # Try -Xmin
     if not found:
         m = re.match(r'-(\d*)min', _from, re.M | re.I)
         if m:
             found = True
             nbminutes = int(m.group(1))
             pastraw = int(time.time()) - (nbminutes * 60)
             past = divmod(pastraw, 60)[0] * 60
     # absolute value maybe?
     if not found:
         m = re.match(r'(\d*)', _from, re.M | re.I)
         if m:
             found = True
             past = divmod(int(m.group(1)), 3600)[0] * 3600
     
     if not found:
         return abort(400, 'Invalid range')
     
     # Ok now got the good values
     res = []
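      # One entry per target, in the Graphite render JSON shape:
      # {"target": <name>, "datapoints": [(value, timestamp), ...]}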
     for target in targets:
         
         nuuid = gossiper.find_group_node('ts', target)
         n = None
         if nuuid:
             n = gossiper.get(nuuid)
         nname = ''
         if n:
             nname = n['name']
         self.logger.debug('HTTP ts: target %s is managed by %s(%s)' % (target, nname, nuuid))
          # Is it my job, or is the other node no longer there?
         if nuuid == gossiper.uuid or n is None:
             self.logger.debug('HTTP ts: /render, my job to manage %s' % target)
             
             # Maybe I am also the TS manager of these data? if so, get the TS backend data for this
             min_e = hour_e = day_e = None
             
              self.logger.debug('HTTP RENDER found TS %s' % tsmgr.tsb.data)
             min_e = tsmgr.tsb.data.get('min::%s' % target, None)
             hour_e = tsmgr.tsb.data.get('hour::%s' % target, None)
             day_e = tsmgr.tsb.data.get('day::%s' % target, None)
              self.logger.debug('HTTP TS RENDER, FOUND TS data %s %s %s' % (min_e, hour_e, day_e))
             
             # Get from the past, but start at the good hours offset
             t = past
             r = []
             
             while t < now:
                  # Maybe the time matches an hour we have in memory; if so, take the values from there
                 if hour_e and hour_e['hour'] == t:
                     self.logger.debug('HTTP TS RENDER match memory HOUR, take this value instead')
                     raw_values = hour_e['values'][:]  # copy instead of cherrypick, because it can move/append
                     for i in range(60):
                          # Get the value and the time
                         e = raw_values[i]
                         tt = t + 60 * i
                         r.append((e, tt))
                         if e:
                             self.logger.debug('GOT NOT NULL VALUE from RENDER MEMORY cache %s:%s' % (e, tt))
                  else:  # no memory match, go look in the KV store part
                     ukey = '%s::h%d' % (target, t)
                     raw64 = kvmgr.get_key(ukey)
                     if raw64 is None:
                         for i in range(60):
                             # Get the value and the time
                             tt = t + 60 * i
                             r.append((None, tt))
                     else:
                         raw = base64.b64decode(raw64)
                         v = pickle.loads(raw)
                         raw_values = v['values']
                         for i in range(60):
                              # Get the value and the time
                             e = raw_values[i]
                             tt = t + 60 * i
                             r.append((e, tt))
                 # Ok now the new hour :)
                 t += 3600
             # Now build the final thing
             res.append({"target": target, "datapoints": r})
          else:  # someone else's job, relay the question
             uri = 'http://%s:%s/render/?target=%s&from=%s' % (n['addr'], n['port'], target, _from)
             try:
                 self.logger.debug('TS: (get /render) relaying to %s: %s' % (n['name'], uri))
                 r = httper.get(uri)
                  self.logger.debug('TS: get /render found (%d)' % len(r))
                 v = jsoner.loads(r)
                 self.logger.debug("TS /render relay GOT RETURN", v, "AND RES", res)
                 res.extend(v)
                 self.logger.debug("TS /render res is now", res)
             except get_http_exceptions() as exp:
                 self.logger.debug('TS: /render relay error asking to %s: %s' % (n['name'], str(exp)))
                 continue
     
     self.logger.debug('TS RENDER FINALLY RETURN', res)
     return jsoner.dumps(res)
Example #11
 def launch(self):
     logger = self.logger
     
     if not self.is_in_group('nginx'):
         self.set_not_eligible('Please add the nginx group to enable this collector.')
         return
     
     logger.debug('getNginxStatus: start')
     
     logger.debug('getNginxStatus: config set')
     
     try:
         response = httper.get(self.get_parameter('uri'), timeout=3)
     except get_http_exceptions() as exp:
         self.set_error('Unable to get Nginx status - HTTPError = %s' % exp)
         return False
         
     logger.debug('getNginxStatus: urlopen success, start parsing')
     
     # Thanks to http://hostingfu.com/files/nginx/nginxstats.py for this code
     
     logger.debug('getNginxStatus: parsing connections')
     
     try:
         # Connections
         parsed = re.search(r'Active connections:\s+(\d+)', response)
         connections = int(parsed.group(1))
         
         logger.debug('getNginxStatus: parsed connections')
         logger.debug('getNginxStatus: parsing reqs')
         
         # Requests per second
         parsed = re.search(r'\s*(\d+)\s+(\d+)\s+(\d+)', response)
         
         if not parsed:
             logger.debug('getNginxStatus: could not parse response')
             return False
         
         requests = int(parsed.group(3))
         
         logger.debug('getNginxStatus: parsed reqs')
         
          if self.nginxRequestsStore is None or self.nginxRequestsStore < 0:
             logger.debug('getNginxStatus: no reqs so storing for first time')
             self.nginxRequestsStore = requests
             requestsPerSecond = 0
         else:
             logger.debug('getNginxStatus: reqs stored so calculating')
             logger.debug('getNginxStatus: self.nginxRequestsStore = %s', self.nginxRequestsStore)
             logger.debug('getNginxStatus: requests = %s', requests)
             
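              # Delta since the previous run over what is presumably a 60s
              # collection interval, to get requests per second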
             requestsPerSecond = float(requests - self.nginxRequestsStore) / 60
             logger.debug('getNginxStatus: requestsPerSecond = %s', requestsPerSecond)
             self.nginxRequestsStore = requests
         
          if connections is not None and requestsPerSecond is not None:
             logger.debug('getNginxStatus: returning with data')
             return {'connections': connections, 'reqPerSec': requestsPerSecond}
         else:
             logger.debug('getNginxStatus: returning without data')
             return False
     
     except Exception:
         self.set_error('Unable to get Nginx status - %s - Exception = %s' % (response, traceback.format_exc()))
         return False
Example #12
    def launch_collector_thread(self):
        last_collector_check = 0
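        # Remember the last run of the imrane collector so we only forward
        # results that are newer than the ones already sent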
        while not stopper.is_stop():
            collector_group = self.get_parameter('collector-group')
            collector_enabled = gossiper.is_in_group(collector_group)

            if not collector_enabled:
                self.logger.debug('IMRANE: this node is not in the collector group')
                time.sleep(1)
                continue
            self.logger.debug('IMRANE: collector loop')
            self.logger.debug('IMRANE: manage: %s' % self.queue)
            imrane_collector = None
            for collector in collectormgr.collectors.values():
                name = collector['name']
                if name == 'imrane':
                    imrane_collector = collector
                    break
            if imrane_collector is None:
                self.logger.error(
                    'IMRANE: cannot find the imrane collector, skipping this loop'
                )
                time.sleep(1)
                continue

            # Maybe this collector did not run since we last look at it, if so, skip it
            last_check = imrane_collector['last_check']
            if last_check == last_collector_check:
                self.logger.debug(
                    'IMRANE: the collector did not run since the last loop, skipping this turn'
                )
                time.sleep(1)
                continue
            last_collector_check = last_check

            results = imrane_collector['results']
            self.logger.info('IMRANE: collector result: %s' % results)

            our_node = gossiper.get(gossiper.uuid)
            our_node_name = our_node['name']

            agregator_group = self.get_parameter('agregator-group')
            agregator_nodes = gossiper.find_group_nodes(agregator_group)
            if len(agregator_nodes) == 0:
                self.logger.error(
                    'IMRANE ERROR: there are no agregator nodes, skipping data sending'
                )
                time.sleep(1)
                continue

            agregator_node_uuid = random.choice(agregator_nodes)
            agregator_node = gossiper.get(agregator_node_uuid)
            if agregator_node is None:  # oops: thread race bug
                time.sleep(1)
                continue

            address = agregator_node['addr']
            port = agregator_node['port']
            display_name = agregator_node['display_name']
            self.logger.info('IMRANE: did choose %s (%s:%s) for sending' %
                             (display_name, address, port))

            uri = 'http://%s:%s/imrane' % (address, port)
            try:
                r = httper.post(
                    uri,
                    params={
                        'results': results,
                        'from': our_node_name
                    },
                    headers={'Content-Type': 'application/json;charset=UTF-8'})
                self.logger.debug("Result insert", r)
            except get_http_exceptions() as exp:
                self.logger.error('Cannot connect to agregator: %s' % exp)

            # always sleep to not hammer the CPU
            time.sleep(1)
Example #13
 def launch(self, rule):
     
     mode = rule.get_mode()
     if mode is None:
         return
     
     matching_env = rule.get_first_matching_environnement()
     if matching_env is None:
         return
     
     # Now we can get our parameters
     parameters = matching_env.get_parameters()
     dest_directory = parameters.get('dest_directory')
     url = parameters.get('url')
     sha1 = parameters.get('sha1', '')
     md5 = parameters.get('md5', '')
     
     if not url:
         err = 'No url defined, cannot solve uri download'
         rule.add_error(err)
         rule.set_error()
         return
     
     if not dest_directory:
         err = 'No dest_directory defined, cannot solve uri download'
         rule.add_error(err)
         rule.set_error()
         return
     
     parsed_uri = urlparse(url)
     file_name = os.path.basename(parsed_uri.path)
     self.logger.debug("TRY DOWNLOADING %s => %s " % (url, file_name))
     
     # If we want to download in a directory
     if not os.path.exists(dest_directory):
         make_dir(dest_directory)
     self.logger.debug("MKDIR OK")
     
     dest_file = os.path.join(dest_directory, file_name)
     tmp_file = dest_file + '.tmp'
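      # Download into a .tmp file first and only move it into place once the
      # checksums (if any) have been verified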
     
      # If the file already exists, there is nothing to download: we are done
     if os.path.exists(dest_file):
         txt = 'The file at %s is already present at %s' % (url, dest_file)
         rule.add_compliance(txt)
         rule.set_compliant()
         return
     
     # If audit mode: we should exit now
     if mode == 'audit':
         err = 'The file %s is not present at %s' % (url, dest_file)
         rule.add_error(err)
         rule.set_error()
         return
     
      self.logger.debug('START DOWNLOAD', url)
     try:
         data = httper.get(url)
     except get_http_exceptions() as exp:
          err = 'ERROR: downloading the uri: %s did fail with the error: %s' % (url, exp)
         rule.add_error(err)
         rule.set_error()
         return
     self.logger.debug("DOWNLOADED", len(data))
     
     if sha1:
         sha1_hash = hashlib.sha1(data).hexdigest()
         if sha1 != sha1_hash:
             err = 'ERROR: the file %s sha1 hash %s did not match defined one: %s' % (url, sha1_hash, sha1)
             rule.add_error(err)
             rule.set_error()
             return
     
     if md5:
         md5_hash = hashlib.md5(data).hexdigest()
         if md5 != md5_hash:
             err = 'ERROR: the file %s md5 hash %s did not match defined one: %s' % (url, md5_hash, md5)
             rule.add_error(err)
             rule.set_error()
             return
     
     self.logger.debug("WRITING FILE")
     try:
         with open(tmp_file, 'wb') as f:
             f.write(data)
     except Exception as exp:
         err = 'ERROR: cannot save the file %s: %s' % (tmp_file, exp)
         rule.add_error(err)
         rule.set_error()
         return
     
     self.logger.debug("MOVING FILE")
     try:
         shutil.move(tmp_file, dest_file)
     except Exception as exp:
         err = 'ERROR: cannot save the file %s: %s' % (dest_file, exp)
         rule.add_error(err)
         rule.set_error()
         return
     self.logger.debug("SAVED TO", dest_file)
     
     # spawn post commands if there are some
     is_ok = rule.launch_post_commands(matching_env)
     if not is_ok:
         return
     
     # We did do the job
      txt = 'The file at %s was downloaded to %s' % (url, dest_file)
     rule.add_fix(txt)
     rule.set_fixed()
     return
Example #14
    def launch(self):

        if not self.is_in_group('apache'):
            self.set_not_eligible(
                'Please add the apache group to enable this collector.')
            return

        logger = self.logger
        logger.debug('getApacheStatus: start')
        '''
                    passwordMgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
                    passwordMgr.add_password(None, self.config['apacheStatusUrl'], self.config['apacheStatusUser'],
                                             self.config['apacheStatusPass'])

                    handler = urllib2.HTTPBasicAuthHandler(passwordMgr)

                    # create "opener" (OpenerDirector instance)
                    opener = urllib2.build_opener(handler)

                    # use the opener to fetch a URL
                    opener.open(self.config['apacheStatusUrl'])

                    # Install the opener.
                    # Now all calls to urllib2.urlopen use our opener.
                    urllib2.install_opener(opener)
        '''
        try:
            uri = 'http://%s/server-status/?auto' % self.get_parameter(
                'hostname')
            user = self.get_parameter('user')
            password = self.get_parameter('password')
            response = httper.get(uri, timeout=3, user=user, password=password)
        except get_http_exceptions() as exp:
            stack = traceback.format_exc()
            self.log = stack
            self.set_error('Unable to get Apache status - Exception = %s' %
                           exp)
            return False

        logger.debug('getApacheStatus: urlopen success, start parsing')
        # Split out each line
        lines = response.split('\n')

        # Loop over each line and get the values
        apacheStatus = {}

        logger.debug('getApacheStatus: parsing, loop')

        # Loop through and extract the numerical values
        for line in lines:
            values = line.split(': ')
            try:
                apacheStatus[str(values[0])] = values[1]
            except IndexError:
                break

        logger.debug('getApacheStatus: parsed')

        res = {}

        try:
            if apacheStatus['Total Accesses'] != False:
                logger.debug('getApacheStatus: processing total accesses')
                totalAccesses = float(apacheStatus['Total Accesses'])
                if self.apacheTotalAccesses is None or self.apacheTotalAccesses <= 0 or totalAccesses <= 0:
                    res['req/s'] = 0.0
                    self.apacheTotalAccesses = totalAccesses
                    logger.debug(
                        'getApacheStatus: no cached total accesses (or totalAccesses == 0), so storing for first time / resetting stored value'
                    )
                else:
                    logger.debug(
                        'getApacheStatus: cached data exists, so calculating per sec metrics'
                    )
                    res['req/s'] = (totalAccesses -
                                    self.apacheTotalAccesses) / 60
                    self.apacheTotalAccesses = totalAccesses
            else:
                self.set_error(
                    'getApacheStatus: Total Accesses not present in mod_status output. Is ExtendedStatus enabled?'
                )
        except (IndexError, KeyError):
            self.set_error(
                'getApacheStatus: IndexError - Total Accesses not present in mod_status output. Is ExtendedStatus enabled?'
            )

        try:
            if apacheStatus['BusyWorkers'] != False and apacheStatus[
                    'IdleWorkers'] != False:
                res['busy_workers'] = int(apacheStatus['BusyWorkers'])
                res['idle_workers'] = int(apacheStatus['IdleWorkers'])
            else:
                self.set_error(
                    'getApacheStatus: BusyWorkers/IdleWorkers not present in mod_status output. Is the URL correct (must have ?auto at the end)?'
                )
        except (IndexError, KeyError):
            self.set_error(
                'getApacheStatus: IndexError - BusyWorkers/IdleWorkers not present in mod_status output. Is the URL correct (must have ?auto at the end)?'
            )

        return res