Example #1
    def launch(self):
        logger = self.logger
        logger.debug('getRabbitMQStatus: start')

        if not self.is_in_group('rabbitmq'):
            self.set_not_eligible(
                'Please add the rabbitmq group to enable this collector.')
            return

        try:
            uri = self.get_parameter('uri')
            user = self.get_parameter('user')
            password = self.get_parameter('password')
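            # Assumption: the 'uri' parameter points at the RabbitMQ management API overview endpoint (typically something like http://localhost:15672/api/overview)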
            response = httper.get(uri, timeout=3, user=user, password=password)

        except get_http_exceptions() as e:
            self.set_error('Unable to get RabbitMQ status - HTTPError = %s' %
                           e)
            return False

        except Exception:
            self.set_error('Unable to get RabbitMQ status - Exception = %s' %
                           traceback.format_exc())
            return False

        try:
            status = jsoner.loads(response)
        except Exception as exp:
            self.set_error("Rabbitmq: parsing json: %s" % exp)
            return False

        return status
Example #2
    def get_meta_data(self):
        if self.__meta_data is not None:
            return self.__meta_data

        uri = 'http://metadata.google.internal/computeMetadata/v1/?recursive=true'
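        # The GCE metadata server requires the 'Metadata-Flavor: Google' header,
        # and ?recursive=true returns the whole metadata tree as a single JSON document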
        try:
            s = httper.get(uri, headers={'Metadata-Flavor': 'Google'})
        except get_http_exceptions() as exp:
            self.logger.error(
                'Cannot get meta data for your GCE instance from %s. Error: %s. Exiting'
                % (uri, exp))
            raise
        raw_data = jsoner.loads(s)

        # We only need the 'instance' part of the metadata tree
        self.__meta_data = raw_data['instance']

        # we want only the short name of the machine type
        self.__meta_data['machineType'] = self.__meta_data[
            'machineType'].split('/')[-1]
        self.__meta_data['zone'] = self.__meta_data['zone'].split('/')[-1]

        del self.__meta_data['serviceAccounts']
        del self.__meta_data['virtualClock']
        del self.__meta_data['licenses']
        del self.__meta_data['disks']

        return self.__meta_data
Example #3
    def get_data_sources_from_grafana(self):
        uri = '%s/api/datasources' % (self.uri)
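        # /api/datasources is the Grafana HTTP API endpoint that lists the configured data sources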
        our_data_sources = {}
        try:
            api_return = httper.get(uri, headers=self.__get_headers())
            try:
                all_data_sources = jsoner.loads(api_return)
            except (ValueError, TypeError) as exp:
                self.logger.error(
                    'Cannot load json from grafana datasources: %s' % exp)
                return None
        except get_http_exceptions() as exp:
            self.logger.error('Cannot connect to grafana datasources: %s' %
                              exp)
            return None
        self.logger.debug("All data sources")
        self.logger.debug(str(all_data_sources))
        # Error message is a dict with just a key: message
        if isinstance(all_data_sources, dict):
            error_message = all_data_sources.get('message', '')
            if error_message:
                if error_message == 'Unauthorized':
                    self.logger.error(
                        'Your API key is not authorized to list data sources.')
                    return None
                self.logger.error('Unknown error from grafana API: %s' %
                                  error_message)
                return None

        # A data source will look like this:
        # [{u'name': u'SuperBla',
        #   u'database': u'',
        #   u'url': u'http://super:6768',
        #   u'basicAuth': False,
        #   u'jsonData': {},
        #   u'access': u'proxy',
        #   u'typeLogoUrl': u'public/app/plugins/datasource/graphite/img/graphite_logo.png',
        #   u'orgId': 1,
        #   u'user': u'',
        #   u'password': u'',
        #   u'type': u'graphite',
        #   u'id': 1,
        #   u'isDefault': False}]
        for data_source in all_data_sources:
            if data_source.get('type', '') != 'graphite':
                continue
            src_name = data_source.get('name', '')
            if '--opsbro--' in src_name:
                elts = src_name.split('--opsbro--')
                if len(elts) == 2:
                    nuuid = elts[1]
                    our_data_sources[nuuid] = data_source
        return our_data_sources
Example #4
    def get_meta_data(self):
        if self.__meta_data is not None:
            return self.__meta_data
        # OK we will query all meta data we want
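        # 100.100.100.200 is the Alibaba Cloud (Aliyun) instance metadata service endpoint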
        keys = ('region-id', 'zone-id', 'instance-id', 'image-id',
                'private-ipv4', 'public-ipv4')
        self.__meta_data = {}
        for k in keys:
            uri = 'http://100.100.100.200/latest/meta-data/%s' % k
            # Note: each call is quite fast, not a problem to get them all at daemon startup
            v = httper.get(uri)
            self.__meta_data[k] = v

        return self.__meta_data
Example #5
    def get_meta_data(self):
        if self.__meta_data is not None:
            return self.__meta_data

        uri = 'http://169.254.169.254/metadata/v1.json'
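        # (DigitalOcean's metadata API: v1.json returns all droplet metadata as a single JSON document)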
        try:
            s = httper.get(uri)
        except get_http_exceptions() as exp:
            self.logger.error(
                'Cannot get meta data for your DigitalOcean instance from %s. Error: %s. Exiting'
                % (uri, exp))
            raise
        self.__meta_data = jsoner.loads(s)
        return self.__meta_data
Example #6
    def get_conf(self):
        if self.conf is not None:
            return self.conf

        uri = 'http://169.254.42.42/conf?format=json'
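        # (169.254.42.42 is the Scaleway instance metadata endpoint; format=json asks for a JSON document)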
        try:
            s = httper.get(uri)
        except get_http_exceptions() as exp:
            self.logger.error(
                'Cannot get the conf for your Scaleway instance from %s. Error: %s. Exiting'
                % (uri, exp))
            raise
        self.conf = jsoner.loads(s)
        self.logger.info('Get scaleway conf: %s' % self.conf)
        return self.conf
Example #7
    def get_meta_data(self):
        if self.__meta_data is not None:
            return self.__meta_data

        uri = 'http://169.254.169.254/metadata/instance?api-version=2017-08-01'
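        # The Azure Instance Metadata Service (IMDS) requires the 'Metadata' header and an explicit api-version query parameter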
        try:
            s = httper.get(uri, headers={'Metadata': 'True'})
        except get_http_exceptions() as exp:
            self.logger.error(
                'Cannot get meta data for your Azure instance from %s. Error: %s. Exiting'
                % (uri, exp))
            raise
        raw_data = jsoner.loads(s)

        # We want to merge the structure into a more flatten one between compute and network
        self.__meta_data = raw_data['compute']
        first_network_interface = raw_data['network']['interface'][0]
        self.__meta_data.update(first_network_interface)

        return self.__meta_data
Example #8
 def get_meta_data(self):
     if self.__meta_data is not None:
         return self.__meta_data
     # OK we will query all meta data we want
     keys = ('ami-id', 'hostname', 'instance-id', 'instance-type', 'local-hostname', 'local-ipv4', 'placement/availability-zone', 'profile', 'public-hostname',
             'public-ipv4', 'reservation-id', 'security-groups')
     self.__meta_data = {}
     for k in keys:
         # For placement, only take the availability-zone
         uri_k = k
         if '/' in k:
             k = k.split('/')[-1]
         uri = 'http://169.254.169.254/latest/meta-data/%s' % uri_k
         # Note: each call is quite fast, not a problem to get them all at daemon startup
         v = httper.get(uri)
         self.__meta_data[k] = v
     
     # Note that the region is the availability-zone minus the last character (e.g. 'eu-west-1a' -> 'eu-west-1')
     self.__meta_data['region'] = self.__meta_data['availability-zone'][:-1]
     return self.__meta_data
Example #9
 def do_render(targets, _from):
     response.content_type = 'application/json'
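     # This endpoint mimics Graphite's /render JSON API: it returns a list of
     # {"target": <name>, "datapoints": [(value, timestamp), ...]} entries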
     
     if not targets:
         return abort(400, 'Invalid target')
     # Default past values, round at an hour
     now = int(time.time())
     pastraw = int(time.time()) - 86400
     past = divmod(pastraw, 3600)[0] * 3600
     
     found = False
     # Try -Xd
     m = re.match(r'-(\d*)d', _from, re.M | re.I)
     if m:
         found = True
         nbdays = int(m.group(1))
         pastraw = int(time.time()) - (nbdays * 86400)
         past = divmod(pastraw, 86400)[0] * 86400
     # Try -Xh
     m = re.match(r'-(\d*)h', _from, re.M | re.I)
     if m:
         found = True
         nbhours = int(m.group(1))
         pastraw = int(time.time()) - (nbhours * 3600)
         past = divmod(pastraw, 3600)[0] * 3600
     # Try -Xhours
     if not found:
         m = re.match(r'-(\d*)hours', _from, re.M | re.I)
         if m:
             found = True
             nbhours = int(m.group(1))
             pastraw = int(time.time()) - (nbhours * 3600)
             past = divmod(pastraw, 3600)[0] * 3600
     # Try -Xmin
     if not found:
         m = re.match(r'-(\d*)min', _from, re.M | re.I)
         if m:
             found = True
             nbminutes = int(m.group(1))
             pastraw = int(time.time()) - (nbminutes * 60)
             past = divmod(pastraw, 60)[0] * 60
     # absolute value maybe?
     if not found:
         m = re.match(r'(\d*)', _from, re.M | re.I)
         if m:
             found = True
             past = divmod(int(m.group(1)), 3600)[0] * 3600
     
     if not found:
         return abort(400, 'Invalid range')
     
     # Ok now got the good values
     res = []
     for target in targets:
         
         nuuid = gossiper.find_group_node('ts', target)
         n = None
         if nuuid:
             n = gossiper.get(nuuid)
         nname = ''
         if n:
             nname = n['name']
         self.logger.debug('HTTP ts: target %s is managed by %s(%s)' % (target, nname, nuuid))
         # Is it me, or is the other node no longer there?
         if nuuid == gossiper.uuid or n is None:
             self.logger.debug('HTTP ts: /render, my job to manage %s' % target)
             
             # Maybe I am also the TS manager of these data? if so, get the TS backend data for this
             min_e = hour_e = day_e = None
             
             self.logger.debug('HTTP RENDER found TS %s' % tsmgr.tsb.data)
             min_e = tsmgr.tsb.data.get('min::%s' % target, None)
             hour_e = tsmgr.tsb.data.get('hour::%s' % target, None)
             day_e = tsmgr.tsb.data.get('day::%s' % target, None)
             self.logger.debug('HTTP TS RENDER, FOUND TS data %s %s %s' % (min_e, hour_e, day_e))
             
             # Get from the past, but start at the good hours offset
             t = past
             r = []
             
             while t < now:
                 # Maybe the time matches an hour we have in memory; if so, take it from there
                 if hour_e and hour_e['hour'] == t:
                     self.logger.debug('HTTP TS RENDER match memory HOUR, take this value instead')
                     raw_values = hour_e['values'][:]  # copy instead of cherrypick, because it can move/append
                     for i in range(60):
                         # Get the value and the time
                         e = raw_values[i]
                         tt = t + 60 * i
                         r.append((e, tt))
                         if e:
                             self.logger.debug('GOT NOT NULL VALUE from RENDER MEMORY cache %s:%s' % (e, tt))
                 else:  # no memory match, go look in the KV store
                     ukey = '%s::h%d' % (target, t)
                     raw64 = kvmgr.get_key(ukey)
                     if raw64 is None:
                         for i in range(60):
                             # Get the value and the time
                             tt = t + 60 * i
                             r.append((None, tt))
                     else:
                         raw = base64.b64decode(raw64)
                         v = pickle.loads(raw)
                         raw_values = v['values']
                         for i in range(60):
                             # Get the value and the time
                             e = raw_values[i]
                             tt = t + 60 * i
                             r.append((e, tt))
                 # Ok now the new hour :)
                 t += 3600
             # Now build the final thing
             res.append({"target": target, "datapoints": r})
         else:  # someone else job, rely the question
             uri = 'http://%s:%s/render/?target=%s&from=%s' % (n['addr'], n['port'], target, _from)
             try:
                 self.logger.debug('TS: (get /render) relaying to %s: %s' % (n['name'], uri))
                 r = httper.get(uri)
                 self.logger.debug('TS: get /render found (%d)' % len(r))
                 v = jsoner.loads(r)
                 self.logger.debug("TS /render relay GOT RETURN", v, "AND RES", res)
                 res.extend(v)
                 self.logger.debug("TS /render res is now", res)
             except get_http_exceptions() as exp:
                 self.logger.debug('TS: /render relay error asking to %s: %s' % (n['name'], str(exp)))
                 continue
     
     self.logger.debug('TS RENDER FINALLY RETURN', res)
     return jsoner.dumps(res)
Example #10
 def launch(self):
     logger = self.logger
     
     if not self.is_in_group('nginx'):
         self.set_not_eligible('Please add the nginx group to enable this collector.')
         return
     
     logger.debug('getNginxStatus: start')
     
     logger.debug('getNginxStatus: config set')
     
     try:
         response = httper.get(self.get_parameter('uri'), timeout=3)
     except get_http_exceptions() as exp:
         self.set_error('Unable to get Nginx status - HTTPError = %s' % exp)
         return False
         
     logger.debug('getNginxStatus: urlopen success, start parsing')
     
     # Thanks to http://hostingfu.com/files/nginx/nginxstats.py for this code
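     # Typical stub_status output (assuming the uri points at an nginx stub_status page):
     #   Active connections: 291
     #   server accepts handled requests
     #    16630948 16630948 31070465
     #   Reading: 6 Writing: 179 Waiting: 106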
     
     logger.debug('getNginxStatus: parsing connections')
     
     try:
         # Connections
         parsed = re.search(r'Active connections:\s+(\d+)', response)
         connections = int(parsed.group(1))
         
         logger.debug('getNginxStatus: parsed connections')
         logger.debug('getNginxStatus: parsing reqs')
         
         # Requests per second
         parsed = re.search(r'\s*(\d+)\s+(\d+)\s+(\d+)', response)
         
         if not parsed:
             logger.debug('getNginxStatus: could not parse response')
             return False
         
         requests = int(parsed.group(3))
         
         logger.debug('getNginxStatus: parsed reqs')
         
         if self.nginxRequestsStore is None or self.nginxRequestsStore < 0:
             logger.debug('getNginxStatus: no reqs so storing for first time')
             self.nginxRequestsStore = requests
             requestsPerSecond = 0
         else:
             logger.debug('getNginxStatus: reqs stored so calculating')
             logger.debug('getNginxStatus: self.nginxRequestsStore = %s', self.nginxRequestsStore)
             logger.debug('getNginxStatus: requests = %s', requests)
             
             requestsPerSecond = float(requests - self.nginxRequestsStore) / 60
             logger.debug('getNginxStatus: requestsPerSecond = %s', requestsPerSecond)
             self.nginxRequestsStore = requests
         
         if connections is not None and requestsPerSecond is not None:
             logger.debug('getNginxStatus: returning with data')
             return {'connections': connections, 'reqPerSec': requestsPerSecond}
         else:
             logger.debug('getNginxStatus: returning without data')
             return False
     
     except Exception:
         self.set_error('Unable to get Nginx status - %s - Exception = %s' % (response, traceback.format_exc()))
         return False
Example #11
 def launch(self, rule):
     
     mode = rule.get_mode()
     if mode is None:
         return
     
     matching_env = rule.get_first_matching_environnement()
     if matching_env is None:
         return
     
     # Now we can get our parameters
     parameters = matching_env.get_parameters()
     dest_directory = parameters.get('dest_directory')
     url = parameters.get('url')
     sha1 = parameters.get('sha1', '')
     md5 = parameters.get('md5', '')
     
     if not url:
         err = 'No url defined, cannot solve uri download'
         rule.add_error(err)
         rule.set_error()
         return
     
     if not dest_directory:
         err = 'No dest_directory defined, cannot solve uri download'
         rule.add_error(err)
         rule.set_error()
         return
     
     parsed_uri = urlparse(url)
     file_name = os.path.basename(parsed_uri.path)
     self.logger.debug("TRY DOWNLOADING %s => %s " % (url, file_name))
     
     # If we want to download in a directory
     if not os.path.exists(dest_directory):
         make_dir(dest_directory)
     self.logger.debug("MKDIR OK")
     
     dest_file = os.path.join(dest_directory, file_name)
     tmp_file = dest_file + '.tmp'
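     # The download is written to a .tmp file first and then moved into place, so a partially downloaded file never appears at the final path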
     
     # If the file already exists, there is nothing to download; we are already compliant
     if os.path.exists(dest_file):
         txt = 'The file at %s is already present at %s' % (url, dest_file)
         rule.add_compliance(txt)
         rule.set_compliant()
         return
     
     # If audit mode: we should exit now
     if mode == 'audit':
         err = 'The file %s is not present at %s' % (url, dest_file)
         rule.add_error(err)
         rule.set_error()
         return
     
     self.logger.debug('START DOWNLOAD', url)
     try:
         data = httper.get(url)
     except get_http_exceptions() as exp:
         err = 'ERROR: downloading the uri: %s did fail with the error: %s' % (url, exp)
         rule.add_error(err)
         rule.set_error()
         return
     self.logger.debug("DOWNLOADED", len(data))
     
     if sha1:
         sha1_hash = hashlib.sha1(data).hexdigest()
         if sha1 != sha1_hash:
             err = 'ERROR: the file %s sha1 hash %s did not match defined one: %s' % (url, sha1_hash, sha1)
             rule.add_error(err)
             rule.set_error()
             return
     
     if md5:
         md5_hash = hashlib.md5(data).hexdigest()
         if md5 != md5_hash:
             err = 'ERROR: the file %s md5 hash %s did not match defined one: %s' % (url, md5_hash, md5)
             rule.add_error(err)
             rule.set_error()
             return
     
     self.logger.debug("WRITING FILE")
     try:
         with open(tmp_file, 'wb') as f:
             f.write(data)
     except Exception as exp:
         err = 'ERROR: cannot save the file %s: %s' % (tmp_file, exp)
         rule.add_error(err)
         rule.set_error()
         return
     
     self.logger.debug("MOVING FILE")
     try:
         shutil.move(tmp_file, dest_file)
     except Exception as exp:
         err = 'ERROR: cannot move the file to %s: %s' % (dest_file, exp)
         rule.add_error(err)
         rule.set_error()
         return
     self.logger.debug("SAVED TO", dest_file)
     
     # spawn post commands if there are some
     is_ok = rule.launch_post_commands(matching_env)
     if not is_ok:
         return
     
     # We did do the job
     txt = 'The file at %s was downloaded to %s' % (url, dest_file)
     rule.add_fix(txt)
     rule.set_fixed()
     return
Example #12
# -*- coding: utf-8 -*-
import sys


sys.path.insert(0, '.')

uri = 'https://binaries.cockroachdb.com/cockroach-v2.0.0.linux-amd64.tgz'
from opsbro.httpclient import get_http_exceptions, httper
r = httper.get(uri)
print('Result: %s' % r)
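A minimal sketch of the same smoke test with the guarded error-handling pattern used by the other examples (assuming the same opsbro.httpclient module is on the path):

# -*- coding: utf-8 -*-
import sys

sys.path.insert(0, '.')

from opsbro.httpclient import get_http_exceptions, httper

uri = 'https://binaries.cockroachdb.com/cockroach-v2.0.0.linux-amd64.tgz'
try:
    # Download the archive; httper.get returns the response body
    r = httper.get(uri)
except get_http_exceptions() as exp:
    print('Download failed: %s' % exp)
    sys.exit(1)
print('Result: %d bytes' % len(r))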
Example #13
    def launch(self):

        if not self.is_in_group('apache'):
            self.set_not_eligible(
                'Please add the apache group to enable this collector.')
            return

        logger = self.logger
        logger.debug('getApacheStatus: start')
        '''
                    passwordMgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
                    passwordMgr.add_password(None, self.config['apacheStatusUrl'], self.config['apacheStatusUser'],
                                             self.config['apacheStatusPass'])

                    handler = urllib2.HTTPBasicAuthHandler(passwordMgr)

                    # create "opener" (OpenerDirector instance)
                    opener = urllib2.build_opener(handler)

                    # use the opener to fetch a URL
                    opener.open(self.config['apacheStatusUrl'])

                    # Install the opener.
                    # Now all calls to urllib2.urlopen use our opener.
                    urllib2.install_opener(opener)
        '''
        try:
            uri = 'http://%s/server-status/?auto' % self.get_parameter(
                'hostname')
            user = self.get_parameter('user')
            password = self.get_parameter('password')
            response = httper.get(uri, timeout=3, user=user, password=password)
        except get_http_exceptions() as exp:
            stack = traceback.format_exc()
            self.log = stack
            self.set_error('Unable to get Apache status - Exception = %s' %
                           exp)
            return False

        logger.debug('getApacheStatus: urlopen success, start parsing')
        # Split out each line
        lines = response.split('\n')
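        # With ?auto the body is machine readable, one 'Key: value' pair per line, e.g.:
        #   Total Accesses: 845
        #   BusyWorkers: 2
        #   IdleWorkers: 8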

        # Loop over each line and get the values
        apacheStatus = {}

        logger.debug('getApacheStatus: parsing, loop')

        # Loop through and extract the numerical values
        for line in lines:
            values = line.split(': ')
            try:
                apacheStatus[str(values[0])] = values[1]
            except IndexError:
                break

        logger.debug('getApacheStatus: parsed')

        res = {}

        try:
            if apacheStatus['Total Accesses'] != False:
                logger.debug('getApacheStatus: processing total accesses')
                totalAccesses = float(apacheStatus['Total Accesses'])
                if self.apacheTotalAccesses is None or self.apacheTotalAccesses <= 0 or totalAccesses <= 0:
                    res['req/s'] = 0.0
                    self.apacheTotalAccesses = totalAccesses
                    logger.debug(
                        'getApacheStatus: no cached total accesses (or totalAccesses == 0), so storing for first time / resetting stored value'
                    )
                else:
                    logger.debug(
                        'getApacheStatus: cached data exists, so calculating per sec metrics'
                    )
                    res['req/s'] = (totalAccesses -
                                    self.apacheTotalAccesses) / 60
                    self.apacheTotalAccesses = totalAccesses
            else:
                self.set_error(
                    'getApacheStatus: Total Accesses not present in mod_status output. Is ExtendedStatus enabled?'
                )
        except (IndexError, KeyError):
            self.set_error(
                'getApacheStatus: IndexError - Total Accesses not present in mod_status output. Is ExtendedStatus enabled?'
            )

        try:
            if apacheStatus['BusyWorkers'] != False and apacheStatus[
                    'IdleWorkers'] != False:
                res['busy_workers'] = int(apacheStatus['BusyWorkers'])
                res['idle_workers'] = int(apacheStatus['IdleWorkers'])
            else:
                self.set_error(
                    'getApacheStatus: BusyWorkers/IdleWorkers not present in mod_status output. Is the URL correct (must have ?auto at the end)?'
                )
        except (IndexError, KeyError):
            self.set_error(
                'getApacheStatus: IndexError - BusyWorkers/IdleWorkers not present in mod_status output. Is the URL correct (must have ?auto at the end)?'
            )

        return res