Example #1
 async def update_emote_data(self):
     """Fetch Twitch and FrakerFaceZ emote mappings."""
     with open(os.path.join(cur_dir, 'assets',
                            'emotes_twitch_global.json')) as f:
         twitch_global = json.loads(f.read())['emotes']
     with open(
             os.path.join(cur_dir, 'assets',
                          'emotes_twitch_subscriber.json')) as f:
         twitch_sub = json.loads(f.read())
     twitch_subscriber = {
         e: {
             'description': '\u200b',
             'image_id': twitch_sub[e],
             'first_seen': None
         }
         for e in twitch_sub
     }
     self.emotes['twitch'] = {**twitch_global, **twitch_subscriber}
     with open(os.path.join(cur_dir, 'assets', 'emotes_ffz.json')) as f:
         self.emotes['ffz'] = json.loads(f.read())
     with open(os.path.join(cur_dir, 'assets', 'emotes_bttv.json')) as f:
         raw_json = json.loads(f.read())
         bttv_v1 = {n: 'https:' + raw_json[n] for n in raw_json}
     with open(os.path.join(cur_dir, 'assets', 'emotes_bttv_2.json')) as f:
         raw_json2 = json.loads(f.read())
         bttv_v2 = {
             n:
             'https://cdn.betterttv.net/emote/' + str(raw_json2[n]) + '/1x'
             for n in raw_json2
         }
     self.emotes['bttv'] = {**bttv_v1, **bttv_v2}
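A note on the merge above: with {**twitch_global, **twitch_subscriber}, keys
from the right-hand mapping win on collision, so a subscriber emote overrides a
global one of the same name. A minimal sketch (emote names and IDs made up):

    merged = {**{'Kappa': 25}, **{'Kappa': 99, 'PogChamp': 88}}
    assert merged == {'Kappa': 99, 'PogChamp': 88}  # right-hand mapping wins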
Example #2
 def get_node_stats(self, base_url):
     url = urlparse.urljoin(base_url, 'nodes')
     stats = []
     try:
         stats = json.loads(urllib2.urlopen(url).read())
     except urllib2.URLError, e:
         raise Exception('Cannot open RabbitMQ API url: %s %s' % (url, str(e)))
Example #3
    def parse_json(cls, raw, tags=None):
        if tags is None:
            tags = []
        parsed = json.loads(raw)
        metric_base = 'nginx'
        output = []
        all_keys = parsed.keys()

        tagged_keys = [('caches', 'cache'), ('server_zones', 'server_zone'),
                       ('upstreams', 'upstream')]

        # Process the special keys that should turn into tags instead of
        # getting concatenated to the metric name
        for key, tag_name in tagged_keys:
            metric_name = '%s.%s' % (metric_base, tag_name)
            for tag_val, data in parsed.get(key, {}).iteritems():
                tag = '%s:%s' % (tag_name, tag_val)
                output.extend(cls._flatten_json(metric_name, data, tags + [tag]))

        # Process the rest of the keys
        rest = set(all_keys) - set([k for k, _ in tagged_keys])
        for key in rest:
            metric_name = '%s.%s' % (metric_base, key)
            output.extend(cls._flatten_json(metric_name, parsed[key], tags))

        return output
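The snippet assumes a cls._flatten_json helper that is not shown. A hypothetical
stand-in, to illustrate what the call sites above expect (the real helper may
differ): walk the parsed dict and emit one (metric_name, value, tags) tuple per
numeric leaf.

    def _flatten_json(metric_base, val, tags):
        # Toy stand-in for cls._flatten_json; recurses into dicts and keeps
        # only numeric leaves as (name, value, tags) tuples.
        output = []
        if isinstance(val, dict):
            for name, child in val.items():
                output.extend(
                    _flatten_json('%s.%s' % (metric_base, name), child, tags))
        elif isinstance(val, (int, float)):
            output.append((metric_base, val, tags))
        return output

    data = {'requests': 10, 'responses': {'2xx': 9, '5xx': 1}}
    print(_flatten_json('nginx.server_zone', data, ['server_zone:zone1']))
    # [('nginx.server_zone.requests', 10, ['server_zone:zone1']), ...]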
Example #4
    def dump(self):
        """Returns a dictionnary of all beans and attributes
        
        keys are bean's names
        values are bean's attributes in json format

        ex: 

        {"org.apache.cassandra.db:instance=1826959904,type=DynamicEndpointSnitch":
            {"UpdateInterval":100,
            "Scores":{},
            "SubsnitchClassName":"org.apache.cassandra.locator.SimpleSnitch",
            "BadnessThreshold":0.1,
            "ResetInterval":600000},
        "org.apache.cassandra.db:columnfamily=NodeIdInfo,keyspace=system,type=ColumnFamilies":
            {"LiveSSTableCount":0,
            "LiveDiskSpaceUsed":0,
            "MinimumCompactionThreshold":4,
            "WriteCount":0,
            "RowCacheKeysToSave":2147483647,
            "RecentWriteLatencyHistogramMicros":[0,0,0,0,0,0,0,0,0,0]}
        }

        """

        self._jmx.sendline("dump")
        self._wait_prompt()
        content = self._jmx.before.replace('dump', '').strip()
        jsonvar = json.loads(content)
        return jsonvar
Example #5
 def post(self):
     try:
         payload = json.loads(zlib.decompress(self.request.body))
     except Exception:
         #log.exception("Error parsing the agent's POST request body")
         return
     agent_update(payload)
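For context, the handler above expects a zlib-compressed JSON body. A round-trip
sketch of a well-formed request body (the payload key is made up):

    import json
    import zlib

    body = zlib.compress(json.dumps({'agent_version': '1.0'}).encode('utf-8'))
    payload = json.loads(zlib.decompress(body))
    assert payload == {'agent_version': '1.0'}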
Example #6
    def check(self, logger, agentConfig):

        if 'rabbitMQStatusUrl' not in agentConfig or \
           'rabbitMQUser' not in agentConfig or \
           'rabbitMQPass' not in agentConfig or \
            agentConfig['rabbitMQStatusUrl'] == 'http://www.example.com:55672/json':
            return False

        try:
            logger.debug('getRabbitMQStatus: attempting authentication setup')
            manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
            manager.add_password(None, agentConfig['rabbitMQStatusUrl'], agentConfig['rabbitMQUser'], agentConfig['rabbitMQPass'])
            handler = urllib2.HTTPBasicAuthHandler(manager)
            opener = urllib2.build_opener(handler)
            urllib2.install_opener(opener)

            logger.debug('getRabbitMQStatus: attempting urlopen')
            req = urllib2.Request(agentConfig['rabbitMQStatusUrl'], None, headers(agentConfig))

            # Do the request, log any errors
            request = urllib2.urlopen(req)
            response = request.read()

            return json.loads(response)
        except Exception:
            logger.exception('Unable to get RabbitMQ status')
            return False
Example #7
 def post(self):
     try:
         body = json.loads(self.request.body)
         series = body['series']
     except Exception:
         return
     update(series)
Example #8
    def parse_json(cls, raw, tags=None):
        if tags is None:
            tags = []
        parsed = json.loads(raw)
        metric_base = 'nginx'
        output = []
        all_keys = parsed.keys()

        tagged_keys = [('caches', 'cache'), ('server_zones', 'server_zone'),
                       ('upstreams', 'upstream')]

        # Process the special keys that should turn into tags instead of
        # getting concatenated to the metric name
        for key, tag_name in tagged_keys:
            metric_name = '%s.%s' % (metric_base, tag_name)
            for tag_val, data in parsed.get(key, {}).iteritems():
                tag = '%s:%s' % (tag_name, tag_val)
                output.extend(
                    cls._flatten_json(metric_name, data, tags + [tag]))

        # Process the rest of the keys
        rest = set(all_keys) - set([k for k, _ in tagged_keys])
        for key in rest:
            metric_name = '%s.%s' % (metric_base, key)
            output.extend(cls._flatten_json(metric_name, parsed[key], tags))

        return output
Example #9
 def post(self):
     try:
         payload = json.loads(zlib.decompress(self.request.body))
     except Exception:
         #log.exception("Error parsing the agent's POST request body")
         return
     agent_update(payload)
Example #10
 def post(self):
     try:
         body = json.loads(self.request.body)
         series = body['series']
     except Exception:
         return
     update(series)
Example #11
 def get_node_stats(self, base_url):
     url = urlparse.urljoin(base_url, 'nodes')
     stats = []
     try:
         stats = json.loads(urllib2.urlopen(url).read())
     except urllib2.URLError, e:
         self.log.info('Cannot open RabbitMQ API url: %s', url)
Example #12
 async def meme(self, ctx, *, pre_text: str):
     """Generate a meme!
     Usage: meme [top text] [bottom text]"""
     char_table = {
         '-': '--',
         '_': '__',
         '?': '~q',
         '%': '~p',
         '#': '~h',  # TODO: make
         '/': '~s',
         '"': "''",
         '\n': ' '
     }
     for key in char_table:
         pre_text = pre_text.replace(key, char_table[key])
     pre_text = pre_text.replace('    ', '__bottom__')
     pre_text = pre_text.replace(' ', '-')
     if '__bottom__' in pre_text:
         segments = pre_text.split('__bottom__')
     else:
         segments = textwrap.wrap(pre_text, width=int(len(pre_text) / 2))
     with async_timeout.timeout(10):
         async with self.bot.cog_http.get(
                 'https://memegen.link/api/templates/') as r:
             rtext = await r.text()
             templates = list(json.loads(rtext).values())
         rtemp = random.choice(templates)
         meme_url = rtemp + '/' + segments[0] + '/' + segments[1] + '.jpg'
         async with self.bot.cog_http.get(meme_url) as r:
             raw_image = await r.read()
     await ctx.send(file=discord.File(BytesIO(raw_image), 'meme.jpg'))
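The substitution pass above in isolation, using the same escape table the
snippet applies before building the memegen.link URL:

    char_table = {'-': '--', '_': '__', '?': '~q', '%': '~p', '#': '~h',
                  '/': '~s', '"': "''", '\n': ' '}
    text = 'top text? 50%'
    for key in char_table:
        text = text.replace(key, char_table[key])
    text = text.replace(' ', '-')
    print(text)  # top-text~q-50~p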
Example #13
    def check(self, logger, agentConfig):

        if 'rabbitmq_status_url' not in agentConfig or \
           'rabbitmq_user' not in agentConfig or \
           'rabbitmq_pass' not in agentConfig or \
            agentConfig['rabbitmq_status_url'] == 'http://www.example.com:55672/json':
            return False

        try:
            logger.debug('getRabbitMQStatus: attempting authentication setup')
            manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
            manager.add_password(None, agentConfig['rabbitmq_status_url'], agentConfig['rabbitmq_user'], agentConfig['rabbitmq_pass'])
            handler = urllib2.HTTPBasicAuthHandler(manager)
            opener = urllib2.build_opener(handler)
            urllib2.install_opener(opener)

            logger.debug('getRabbitMQStatus: attempting urlopen')
            req = urllib2.Request(agentConfig['rabbitmq_status_url'], None, headers(agentConfig))

            # Do the request, log any errors
            request = urllib2.urlopen(req)
            response = request.read()

            return json.loads(response)
        except Exception:
            logger.exception('Unable to get RabbitMQ status')
            return False
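These checks target Python 2's urllib2, which no longer exists in Python 3. A
rough Python 3 sketch of the same basic-auth setup, assuming the same config
keys and a JSON status endpoint:

    import json
    import urllib.request

    def get_rabbitmq_status(url, user, password):
        # Equivalent of the urllib2 HTTPPasswordMgr/BasicAuthHandler dance
        manager = urllib.request.HTTPPasswordMgrWithDefaultRealm()
        manager.add_password(None, url, user, password)
        handler = urllib.request.HTTPBasicAuthHandler(manager)
        opener = urllib.request.build_opener(handler)
        with opener.open(url) as response:
            return json.loads(response.read().decode('utf-8'))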
Example #14
    def _get_json(self, uri, params=None, multi=False):
        """Utility method to get and parse JSON streams."""
        if params:
            uri = "%s?%s" % (uri, urllib.urlencode(params))
        self.log.debug("Connecting to Docker API at: %s" % uri)
        req = urllib2.Request(uri, None)

        try:
            request = self.url_opener.open(req)
        except urllib2.URLError as e:
            if "Errno 13" in str(e):
                raise Exception(
                    "Unable to connect to socket. dd-agent user must be part of the 'docker' group"
                )
            raise

        response = request.read()
        response = response.replace(
            '\n', ''
        )  # Some Docker API versions occasionally send newlines in responses
        self.log.debug('Docker API response: %s', response)
        if multi and "}{" in response:  # docker api sometimes returns juxtaposed json dictionaries
            response = "[{0}]".format(response.replace("}{", "},{"))

        if not response:
            return []

        try:
            return json.loads(response)
        except Exception as e:
            self.log.error('Failed to parse Docker API response: %s', response)
            raise DockerJSONDecodeError
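The "}{" repair above in isolation: two JSON objects streamed back-to-back are
stitched into a JSON array before parsing. Note the string replace is a
heuristic; it would also fire on a literal "}{" inside a string value.

    import json

    response = '{"status": "start"}{"status": "die"}'
    if "}{" in response:
        response = "[{0}]".format(response.replace("}{", "},{"))
    print(json.loads(response))  # [{'status': 'start'}, {'status': 'die'}]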
Example #15
    def _get_json(self, uri, params=None, multi=False):
        """Utility method to get and parse JSON streams."""
        if params:
            uri = "%s?%s" % (uri, urllib.urlencode(params))
        self.log.debug("Connecting to Docker API at: %s" % uri)
        req = urllib2.Request(uri, None)

        try:
            request = self.url_opener.open(req)
        except urllib2.URLError as e:
            if "Errno 13" in str(e):
                raise Exception("Unable to connect to socket. dd-agent user must be part of the 'docker' group")
            raise

        response = request.read()
        response = response.replace('\n', '') # Some Docker API versions occasionally send newlines in responses
        self.log.debug('Docker API response: %s', response)
        if multi and "}{" in response: # docker api sometimes returns juxtaposed json dictionaries
            response = "[{0}]".format(response.replace("}{", "},{"))

        if not response:
            return []

        try:
            return json.loads(response)
        except Exception as e:
            self.log.error('Failed to parse Docker API response: %s', response)
            raise DockerJSONDecodeError
Example #16
 def post(self):
     try:
         body = json.loads(self.request.body)
         series = body['series']
     except Exception:
         #log.exception("Error parsing the POST request body")
         return
     update(series)
Example #17
 def post(self):
     try:
         body = json.loads(self.request.body)
         series = body['series']
     except Exception:
         #log.exception("Error parsing the POST request body")
         return
     update(series)
Example #18
 def read(self):
     """Re-read the datastore from disk, discarding changes."""
     if self.backend == 'json':
         with open(self.path, 'r') as storefile:
             self.store = json.loads(storefile.read())
     elif self.backend == 'bson':
         with open(self.path, 'rb') as storefile:
             self.store = bson.loads(storefile.read())
Example #19
    def _get_stats(self, url):
        "Hit a given URL and return the parsed json"
        self.log.debug('Fetching Couchbase stats at url: %s' % url)
        req = urllib2.Request(url, None, headers(self.agentConfig))

        # Do the request, log any errors
        request = urllib2.urlopen(req)
        response = request.read()
        return json.loads(response)
Example #20
    def _get_stats(self, url):
        "Hit a given URL and return the parsed json"
        self.log.debug('Fetching Couchbase stats at url: %s' % url)
        req = urllib2.Request(url, None, headers(self.agentConfig))

        # Do the request, log any errors
        request = urllib2.urlopen(req)
        response = request.read()
        return json.loads(response)
Example #21
 def _get_data(self, url, auth=None):
     """ Hit a given URL and return the parsed json
         `auth` is a tuple of (username, password) or None
     """
     req = urllib2.Request(url, None, headers(self.agentConfig))
     if auth:
         add_basic_auth(req, *auth)
     request = urllib2.urlopen(req)
     response = request.read()
     return json.loads(response)
Example #22
 def _get_data(self, url, auth=None):
     """ Hit a given URL and return the parsed json
         `auth` is a tuple of (username, password) or None
     """
     req = urllib2.Request(url, None, headers(self.agentConfig))
     if auth:
         add_basic_auth(req, *auth)
     request = urllib2.urlopen(req)
     response = request.read()
     return json.loads(response)
Example #23
    def _get_stats(self, url, instance):
        "Hit a given URL and return the parsed json"
        self.log.debug('Fetching Couchbase stats at url: %s' % url)
        req = urllib2.Request(url, None, headers(self.agentConfig))
        if 'user' in instance and 'password' in instance:
            add_basic_auth(req, instance['user'], instance['password'])

        # Do the request, log any errors
        request = urllib2.urlopen(req)
        response = request.read()
        return json.loads(response)
Example #24
 def getInitialUpdatedValue(self):
     """Get updated value
     """
     if self._repository.etcd is None:
         raise ValueError("No etcd available")
     if self._environ:
         path = self._environ.getEtcdPath(self._key)
     else:
         path = self._repository.environ.getEtcdPath(self._key)
     # Get value
     return json.loads(self._repository.etcd.read(path).value)
Example #25
    def _get_stats(self, agentConfig, url):
        "Hit a given URL and return the parsed json"
        try:
            req = urllib2.Request(url, None, headers(agentConfig))

            # Do the request, log any errors
            request = urllib2.urlopen(req)
            response = request.read()
            return json.loads(response)
        except Exception:
            self.logger.exception('Unable to get CouchDB statistics')
            return None
Example #26
    def _get_stats(self, agentConfig, url):
        "Hit a given URL and return the parsed json"
        try:
            req = urllib2.Request(url, None, headers(agentConfig))

            # Do the request, log any errors
            request = urllib2.urlopen(req)
            response = request.read()
            return json.loads(response)
        except Exception:
            self.logger.exception('Unable to get CouchDB statistics')
            return None
Example #27
    def _get_stats(self, url, instance):
        "Hit a given URL and return the parsed json"
        self.log.debug('Fetching Couchdb stats at url: %s' % url)
        req = urllib2.Request(url, None, headers(self.agentConfig))

        if 'user' in instance and 'password' in instance:
            add_basic_auth(req, instance['user'], instance['password'])

        # Do the request, log any errors
        request = urllib2.urlopen(req)
        response = request.read()
        return json.loads(response)
Example #28
 def __autoupdate__(self):
     """Auto update
     """
     # TODO: Support modified index
     initialized = False
     self.logger.debug("[%s] Auto update thread started", self.Type)
     while True:
         # Get etcd client
         client = None
         while True:
             if self._repository.etcd is None:
                 self.logger.error("[%s] Failed to watch config, no etcd client found, will retry in 30s", self.Type)
                 time.sleep(30)
                 continue
             client = self._repository.etcd
             break
         # Wait for the config
         # Get the read path
         if self._environ:
             path = self._environ.getEtcdPath(self._key)
         else:
             path = self._repository.environ.getEtcdPath(self._key)
         # Wait the config
         try:
             if not initialized:
                 # Not initialized
                 self.logger.debug("[%s] Watching config at path [%s]", self.Type, path)
                 if self.update(json.loads(client.read(path).value)):
                     initialized = True
             else:
                 # Initialized, just wait
                 self.logger.debug("[%s] Watching config at path [%s]", self.Type, path)
                 self.update(json.loads(client.read(path, wait=True).value))
         except (EtcdKeyNotFound, EtcdWatchTimedOut, EtcdEventIndexCleared):
             # A normal error
             time.sleep(10)
         except:
             # Error, wait 30s and continue watch
             self.logger.exception("[%s] Failed to watch etcd, will retry in 30s", self.Type)
             time.sleep(30)
Example #29
 def __init__(self, backend, path=None, join_path=True, commit_interval=3):
     self.dir = os.path.dirname(
         os.path.abspath(sys.modules['__main__'].core_file))
     self.backend = backend
     self.session = None
     self.commit_interval = commit_interval
     self.path = ''
     if path:
         if join_path:
             self.path = os.path.join(self.dir, path)
         else:
             self.path = path
     else:
         self.path = os.path.join(self.dir,
                                  'storage.' + self.exts[self.backend])
     self.store = {}
     if self.backend == 'json':
         try:
             with open(self.path, 'r') as storefile:
                 self.store = json.loads(storefile.read())
         except FileNotFoundError:
             print('Creating storage file...')
             with open(self.path, 'a') as f, open(
                     os.path.join(self.dir, 'assets',
                                  'emp_storage.json')) as df:
                 orig = df.read()
                 f.write(orig)
             self.store = json.loads(orig)
     elif self.backend == 'bson':
         try:
             with open(self.path, 'rb') as storefile:
                 self.store = bson.loads(storefile.read())
         except FileNotFoundError:
             print('Creating storage file...')
             with open(self.path, 'ab') as f, open(
                     os.path.join(self.dir, 'assets',
                                  'emp_storage.json')) as df:
                 orig = json.loads(df.read())
                 # bson.dumps expects a document, not raw JSON text
                 f.write(bson.dumps(orig))
             self.store = orig
Example #30
def import_main(options, args):
    if options.flavor == 'friendfeed':
        i = ingest.Ingester(persistence.MongoPersistence())
        flavor_object = friendfeed.FriendFeedInput()
        if options.file is not None:
            result = json.loads(open(options.file, "rb+").read())
        else:
            f = ff.FriendFeedImport(config.friendfeed['username'], config.friendfeed['password'])
            result = f.get_all_home_entries()
        flavor_object.data = result
        i.ingest(flavor_object)
    else:
        print "Not implemented !"
Example #31
 def _parse(self, resp, content):
     """Parses a rabj response to get the envelope information
     """
     if resp['content-type'] == 'application/json':
         try:
             envelope = json.loads(content)
             if envelope['status']['code'] == 200:
                 return envelope
             else:
                 error = envelope['error']
                 raise RabjError(error['code'], error['class'], error['detail'], envelope)
         except ValueError, e:
             _log.warn("Decode error %s in content %s", e, content)
             raise RabjError(resp.status, resp.reason, {'msg': e.message}, content)
Example #32
 def read(self, rawdata):
     # parse packets, throw out duplicates, forward to protocol
     packets = json.loads(rawdata)
     for key, encoding, data in packets:
         if self.lastReceived >= key:
             continue
         self.lastReceived = key
         if encoding == 1:
             # the python 2.6 json library decodes JSON strings to
             # unicode, while simplejson decodes JSON strings to
             # str. since, at this point, the data should have
             # been base64 encoded anyway, we can just cast
             # the data to a string and call it a day.
             data = base64.urlsafe_b64decode(str(data) + '==')
         self.protocol.dataReceived(data)
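On the str(data) + '==' trick above: base64 input must be a multiple of four
characters, and unconditionally appending the maximum two '=' of padding works
because CPython's decoder tolerates surplus padding in practice.

    import base64

    encoded = base64.urlsafe_b64encode(b'A').decode()  # 'QQ=='
    stripped = encoded.rstrip('=')                     # 'QQ' (padding lost in transit)
    print(base64.urlsafe_b64decode(stripped + '=='))   # b'A'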
Example #33
    def _get_stats(self, url, instance):
        "Hit a given URL and return the parsed json"
        self.log.debug('Fetching Couchbase stats at url: %s' % url)
        req = urllib2.Request(url, None, headers(self.agentConfig))
        if 'user' in instance and 'password' in instance:
            add_basic_auth(req, instance['user'], instance['password'])

        if instance['is_recent_python']:
            timeout = instance.get('timeout', DEFAULT_TIMEOUT)
            request = urllib2.urlopen(req, timeout=timeout)
        else:
            request = urllib2.urlopen(req)

        response = request.read()
        return json.loads(response)
Example #34
    def _get_stats(self, url, instance):
        "Hit a given URL and return the parsed json"
        self.log.debug('Fetching Couchbase stats at url: %s' % url)
        req = urllib2.Request(url, None, headers(self.agentConfig))
        if 'user' in instance and 'password' in instance:
            add_basic_auth(req, instance['user'], instance['password'])

        if instance['is_recent_python']:
            timeout = instance.get('timeout', DEFAULT_TIMEOUT)
            request = urllib2.urlopen(req, timeout=timeout)
        else:
            request = urllib2.urlopen(req)

        response = request.read()
        return json.loads(response)
Example #35
 def __parse_record(self, respJson):
     """ 解析 get_record 返回的 json 包
     """
     r = respJson["runrecord"]
     r["altitude"] = json.loads(r["altitude"])
     r["heartrate"] = json.loads(r["heartrate"])
     r["stepcontent"] = [[json.loads(y) for y in x] for x in json.loads(r["stepcontent"])]
     r["stepremark"] = json.loads(r["stepremark"])
     r["content"] = [json.loads(x) for x in r["content"].split("-")]
     return respJson
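The repeated json.loads calls above decode "JSON-in-JSON": fields whose values
are themselves JSON documents serialized as strings. A toy illustration
mirroring the altitude field:

    import json

    raw = '{"runrecord": {"altitude": "[12.5, 13.0, 13.2]"}}'
    record = json.loads(raw)
    record['runrecord']['altitude'] = json.loads(record['runrecord']['altitude'])
    print(record['runrecord']['altitude'])  # [12.5, 13.0, 13.2]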
Example #36
    def _get_primary_addr(self, agentConfig, url, node_name):
        ''' Returns a list of primary interface addresses as seen by ES.
        Used in ES < 0.19
        '''
        req = urllib2.Request(url, None, headers(agentConfig))
        request = urllib2.urlopen(req)
        response = request.read()
        data = json.loads(response)

        if node_name in data['nodes']:
            node = data['nodes'][node_name]
            if 'network' in node\
            and 'primary_interface' in node['network']\
            and 'address' in node['network']['primary_interface']:
                return node['network']['primary_interface']['address']

        raise NodeNotFound()
Example #37
    def _get_primary_addr(self, agentConfig, url, node_name):
        ''' Returns a list of primary interface addresses as seen by ES.
        Used in ES < 0.19
        '''
        req = urllib2.Request(url, None, headers(agentConfig))
        request = urllib2.urlopen(req)
        response = request.read()
        data = json.loads(response)

        if node_name in data['nodes']:
            node = data['nodes'][node_name]
            if 'network' in node\
            and 'primary_interface' in node['network']\
            and 'address' in node['network']['primary_interface']:
                return node['network']['primary_interface']['address']

        raise NodeNotFound()
Example #38
    def _get_primary_addr(self, agentConfig, url, node_name):
        """ Returns a list of primary interface addresses as seen by ES.
        Used in ES < 0.19
        """
        req = urllib2.Request(url, None, headers(agentConfig))
        request = urllib2.urlopen(req)
        response = request.read()
        data = json.loads(response)

        if node_name in data["nodes"]:
            node = data["nodes"][node_name]
            if (
                "network" in node
                and "primary_interface" in node["network"]
                and "address" in node["network"]["primary_interface"]
            ):
                return node["network"]["primary_interface"]["address"]

        raise NodeNotFound()
Example #39
class DockerDaemonGauge(AgentCheck):
    def __init__(self, name, init_config, agentConfig, instances=None):
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)
        # Initialize a HTTP opener with Unix socket support
        socket_timeout = int(init_config.get('socket_timeout', 0)) \
                         or DEFAULT_SOCKET_TIMEOUT
        UnixHTTPConnection.socket_timeout = socket_timeout
        self.url_opener = urllib2.build_opener(UnixSocketHandler())

    def check(self, instance):
        start = datetime.now()
        self._get_json("%(url)s/containers/json" % instance)
        end = datetime.now()
        time_msec = (end - start).total_seconds() * 1000
        self.gauge('docker.daemon.response_time', time_msec)

    def _get_json(self, uri, params=None, multi=False):
        """Utility method to get and parse JSON streams."""
        if params:
            uri = "%s?%s" % (uri, urllib.urlencode(params))
        self.log.debug("Connecting to Docker API at: %s" % uri)
        req = urllib2.Request(uri, None)
        try:
            request = self.url_opener.open(req)
        except urllib2.URLError, e:
            if "Errno 13" in str(e):
                raise Exception("Unable to connect to socket. dd-agent user "
                                "must be part of the 'docker' group")
            raise
        response = request.read()
        # Some Docker API versions occasionally send newlines in responses
        response = response.replace('\n', '')
        self.log.debug('Docker API response: %s', response)
        # docker api sometimes returns juxtaposed json dictionaries
        if multi and "}{" in response:
            response = "[{0}]".format(response.replace("}{", "},{"))
        if not response:
            return []
        try:
            return json.loads(response)
        except Exception as e:
            self.log.error('Failed to parse Docker API response: %s', response)
            raise DockerJSONDecodeError
Example #40
    def _get_primary_addr(self, url, node_name, auth):
        """ Returns a list of primary interface addresses as seen by ES.
            Used in ES < 0.19
        """
        req = urllib2.Request(url, None, headers(self.agentConfig))
        # Load basic authentication configuration, if available.
        if auth:
            add_basic_auth(req, *auth)
        request = urllib2.urlopen(req)
        response = request.read()
        data = json.loads(response)

        if node_name in data['nodes']:
            node = data['nodes'][node_name]
            if 'network' in node\
            and 'primary_interface' in node['network']\
            and 'address' in node['network']['primary_interface']:
                return node['network']['primary_interface']['address']

        raise NodeNotFound()
Example #41
    def _get_primary_addr(self, url, node_name, auth):
        """ Returns a list of primary interface addresses as seen by ES.
            Used in ES < 0.19
        """
        req = urllib2.Request(url, None, headers(self.agentConfig))
        # Load basic authentication configuration, if available.
        if auth:
            add_basic_auth(req, *auth)
        request = urllib2.urlopen(req)
        response = request.read()
        data = json.loads(response)

        if node_name in data['nodes']:
            node = data['nodes'][node_name]
            if 'network' in node\
            and 'primary_interface' in node['network']\
            and 'address' in node['network']['primary_interface']:
                return node['network']['primary_interface']['address']

        raise NodeNotFound()
Example #42
 def post(self):
     try:
         payload = json.loads(zlib.decompress(self.request.body))
     except Exception:
         return
     agent_update(payload)
Example #43
        except urllib2.URLError, e:
            if "Errno 13" in str(e):
                raise Exception("Unable to connect to socket. sd-agent user must be part of the 'docker' group")
            raise

        response = request.read()
        response = response.replace('\n', '') # Some Docker API versions occasionally send newlines in responses
        self.log.debug('Docker API response: %s', response)
        if multi and "}{" in response: # docker api sometimes returns juxtaposed json dictionaries
            response = "[{0}]".format(response.replace("}{", "},{"))

        if not response:
            return []

        try:
            return json.loads(response)
        except Exception as e:
            self.log.error('Failed to parse Docker API response: %s', response)
            raise DockerJSONDecodeError

    # Cgroups

    def _find_cgroup_filename_pattern(self):
        if self._mountpoints:
            # We try with different cgroups so that it works even if only one is properly working
            for mountpoint in self._mountpoints.values():
                stat_file_path_lxc = os.path.join(mountpoint, "lxc")
                stat_file_path_docker = os.path.join(mountpoint, "docker")
                stat_file_path_coreos = os.path.join(mountpoint, "system.slice")

                if os.path.exists(stat_file_path_lxc):
Example #44
 def post(self):
     try:
         payload = json.loads(zlib.decompress(self.request.body))
     except Exception:
         return
     agent_update(payload)
Example #45
                )
            raise

        response = request.read()
        response = response.replace(
            '\n', ''
        )  # Some Docker API versions occasionally send newlines in responses
        self.log.debug('Docker API response: %s', response)
        if multi and "}{" in response:  # docker api sometimes returns juxtaposed json dictionaries
            response = "[{0}]".format(response.replace("}{", "},{"))

        if not response:
            return []

        try:
            return json.loads(response)
        except Exception as e:
            self.log.error('Failed to parse Docker API response: %s', response)
            raise DockerJSONDecodeError

    # Cgroups

    def _find_cgroup_filename_pattern(self):
        if self._mountpoints:
            # We try with different cgroups so that it works even if only one is properly working
            for mountpoint in self._mountpoints.values():
                stat_file_path_lxc = os.path.join(mountpoint, "lxc")
                stat_file_path_docker = os.path.join(mountpoint, "docker")
                stat_file_path_coreos = os.path.join(mountpoint,
                                                     "system.slice")
Example #46
 def _get_data(self, url):
     try:
         data = json.loads(urllib2.urlopen(url).read())
     except urllib2.URLError, e:
         raise Exception('Cannot open RabbitMQ API url: %s %s' % (url, str(e)))
Example #47
class Docker(AgentCheck):
    def __init__(self, name, init_config, agentConfig):
        AgentCheck.__init__(self, name, init_config, agentConfig)
        self._mountpoints = {}
        docker_root = init_config.get('docker_root', '/')
        for metric in CGROUP_METRICS:
            self._mountpoints[metric["cgroup"]] = self._find_cgroup(metric["cgroup"], docker_root)
        self._last_event_collection_ts = defaultdict(lambda: None)
        self.url_opener = urllib2.build_opener(UnixSocketHandler())
        self.should_get_size = True
        self._cgroup_filename_pattern = None

    def _find_cgroup_filename_pattern(self):
        if self._mountpoints:
            # We try with different cgroups so that it works even if only one is properly working
            for mountpoint in self._mountpoints.values():
                stat_file_path_lxc = os.path.join(mountpoint, "lxc")
                stat_file_path_docker = os.path.join(mountpoint, "docker")
                stat_file_path_coreos = os.path.join(mountpoint, "system.slice")

                if os.path.exists(stat_file_path_lxc):
                    return os.path.join('%(mountpoint)s/lxc/%(id)s/%(file)s')
                elif os.path.exists(stat_file_path_docker):
                    return os.path.join('%(mountpoint)s/docker/%(id)s/%(file)s')
                elif os.path.exists(stat_file_path_coreos):
                    return os.path.join('%(mountpoint)s/system.slice/docker-%(id)s.scope/%(file)s')

        raise Exception("Cannot find Docker cgroup directory. Be sure your system is supported.")

    def _get_cgroup_file(self, cgroup, container_id, filename):
        # This can't be initialized at startup because cgroups may not be mounted
        if not self._cgroup_filename_pattern:
            self._cgroup_filename_pattern = self._find_cgroup_filename_pattern()

        return self._cgroup_filename_pattern % (dict(
                    mountpoint=self._mountpoints[cgroup],
                    id=container_id,
                    file=filename,
                ))

    def check(self, instance):
        try:
            self._process_events(self._get_events(instance))
        except (socket.timeout, urllib2.URLError):
            self.warning('Timeout during socket connection. Events will be missing.')

        self._count_images(instance)
        containers = self._get_and_count_containers(instance)

        max_containers = instance.get('max_containers', DEFAULT_MAX_CONTAINERS)

        if not instance.get("exclude") or not instance.get("include"):
            if len(containers) > max_containers:
                self.warning("Too many containers to collect. Please refine the containers to collect"
                    "by editing the configuration file. Truncating to %s containers" % max_containers)
                containers = containers[:max_containers]

        collected_containers = 0
        for container in containers:
            container_tags = instance.get("tags", [])
            for name in container["Names"]:
                container_tags.append(self._make_tag("name", name.lstrip("/")))
            for key in DOCKER_TAGS:
                container_tags.append(self._make_tag(key, container[key]))

            # Check if the container is included/excluded via its tags
            if not self._is_container_included(instance, container_tags):
                continue

            for key, (dd_key, metric_type) in DOCKER_METRICS.items():
                if key in container:
                    getattr(self, metric_type)(dd_key, int(container[key]), tags=container_tags)
            for cgroup in CGROUP_METRICS:
                stat_file = self._get_cgroup_file(cgroup["cgroup"], container['Id'], cgroup['file'])
                stats = self._parse_cgroup_file(stat_file)
                if stats:
                    for key, (dd_key, metric_type) in cgroup['metrics'].items():
                        if key.startswith('total_') and not instance.get('collect_total'):
                            continue
                        if key in stats:
                            getattr(self, metric_type)(dd_key, int(stats[key]), tags=container_tags)

            collected_containers += 1
            if collected_containers >= max_containers:
                self.warning("Too many containers are matching the current configuration. Some containers will not be collected. Please refine your configuration")
                break

    def _process_events(self, events):
        for ev in events:
            self.log.debug("Creating event for %s" % ev)
            self.event({
                'timestamp': ev['time'],
                'host': self.hostname,
                'event_type': EVENT_TYPE,
                'msg_title': "%s %s on %s" % (ev['from'], ev['status'], self.hostname),
                'source_type_name': EVENT_TYPE,
                'event_object': ev['from'],
            })

    def _count_images(self, instance):
        tags = instance.get("tags", [])
        active_images = len(self._get_images(instance, get_all=False))
        all_images = len(self._get_images(instance, get_all=True))

        self.gauge("docker.images.available", active_images, tags=tags)
        self.gauge("docker.images.intermediate", (all_images - active_images), tags=tags)

    def _get_and_count_containers(self, instance):
        tags = instance.get("tags", [])

        try:
            containers = self._get_containers(instance, with_size=self.should_get_size)
        except (socket.timeout, urllib2.URLError):
            # Probably because of: https://github.com/DataDog/dd-agent/issues/963
            # Then we should stop trying to get size info
            self.log.info("Cannot get container size because of API timeout. Stop collecting it.")
            self.should_get_size = False
            containers = self._get_containers(instance, with_size=self.should_get_size)

        if not containers:
            containers = []
            self.warning("No containers are running.")

        stopped_containers_count = len(self._get_containers(instance, get_all=True)) - len(containers)
        self.gauge("docker.containers.running", len(containers), tags=tags)
        self.gauge("docker.containers.stopped", stopped_containers_count, tags=tags)

        return containers


    def _make_tag(self, key, value):
        return "%s:%s" % (key.lower(), value.strip())

    def _is_container_included(self, instance, tags):
        def _is_tag_included(tag):
            for exclude_rule in instance.get("exclude") or []:
                if re.match(exclude_rule, tag):
                    for include_rule in instance.get("include") or []:
                        if re.match(include_rule, tag):
                            return True
                    return False
            return True
        for tag in tags:
            if _is_tag_included(tag):
                return True
        return False


    def _get_containers(self, instance, with_size=False, get_all=False):
        """Gets the list of running/all containers in Docker."""
        return self._get_json("%(url)s/containers/json" % instance, params={'size': with_size, 'all': get_all})

    def _get_images(self, instance, with_size=True, get_all=False):
        """Gets the list of images in Docker."""
        return self._get_json("%(url)s/images/json" % instance, params={'all': get_all})

    def _get_events(self, instance):
        """Get the list of events """
        now = int(time.time())
        result = self._get_json("%s/events" % instance["url"], params={
                "until": now,
                "since": self._last_event_collection_ts[instance["url"]] or now - 60,
            }, multi=True)
        self._last_event_collection_ts[instance["url"]] = now
        if type(result) == dict:
            result = [result]
        return result

    def _get_json(self, uri, params=None, multi=False):
        """Utility method to get and parse JSON streams."""
        if params:
            uri = "%s?%s" % (uri, urllib.urlencode(params))
        self.log.debug("Connecting to: %s" % uri)
        req = urllib2.Request(uri, None)

        service_check_name = 'docker.service_up'
        service_check_tags = ['host:%s' % self.hostname]

        try:
            request = self.url_opener.open(req)
        except urllib2.URLError, e:            
            self.service_check(service_check_name, AgentCheck.CRITICAL, tags=service_check_tags)
            if "Errno 13" in str(e):
                raise Exception("Unable to connect to socket. dd-agent user must be part of the 'docker' group")
            raise

        self.service_check(service_check_name, AgentCheck.OK, tags=service_check_tags)

        response = request.read()
        if multi and "}{" in response: # docker api sometimes returns juxtaposed json dictionaries
            response = "[{0}]".format(response.replace("}{", "},{"))

        if not response:
            return []

        return json.loads(response)
Example #48
        "comments": [
            {
                "date": new Date(2009,3,5,16,36,49), 
                "body": "Recommended by internet blowhards. ;)", 
                "id": "2de42bf6-1841-4259-96fc-81c61b51a754", 
                "user": {
                    "profileUrl": "http:\/\/friendfeed.com\/jeber", 
                    "nickname": "jeber", 
                    "id": "ab9c7c30-e89e-11dc-8447-003048343a40", 
                    "name": "Jack Carlson"
                }
            }
        ], 
        "link": "http:\/\/www.amazon.com\/Nature-Made-Vitamin-Premium-Tablets\/dp\/B00008I8NJ\/ref=sr_1_1?ie=UTF8&s=hpc&qid=1238948946&sr=1-1", 
        "likes": [], 
        "anonymous": false, 
        "published": new Date(2009,3,5,16,34,53), 
        "hidden": false, 
        "id": "1ca72391-dcec-49fd-898f-deea499b7367", 
        "user": {
            "profileUrl": "http:\/\/friendfeed.com\/davew", 
            "nickname": "davew", 
            "id": "8be56d9f-6650-4aee-8e15-e7791dfe7e66", 
            "name": "Dave Winer"
        }
    }
]
"""

obj = json.loads(stri)
Example #49
class Docker(AgentCheck):
    def __init__(self, *args, **kwargs):
        super(Docker, self).__init__(*args, **kwargs)
        self._mountpoints = {}
        self.cgroup_path_prefix = None  # Depending on the version
        for metric in LXC_METRICS:
            self._mountpoints[metric["cgroup"]] = self._find_cgroup(
                metric["cgroup"])
        self._path_prefix = None
        self._last_event_collection_ts = defaultdict(lambda: None)

    @property
    def path_prefix(self):
        if self._path_prefix is None:
            metric = LXC_METRICS[0]
            mountpoint = self._mountpoints[metric["cgroup"]]
            stat_file_lxc = os.path.join(mountpoint, "lxc")
            stat_file_docker = os.path.join(mountpoint, "docker")

            if os.path.exists(stat_file_lxc):
                self._path_prefix = "lxc"
            elif os.path.exists(stat_file_docker):
                self._path_prefix = "docker"
            else:
                raise Exception(
                    "Cannot find Docker cgroup file. If you are using Docker 0.9 or 0.10, it is a known bug in Docker fixed in Docker 0.11"
                )
        return self._path_prefix

    def check(self, instance):
        urllib2.install_opener(
            urllib2.build_opener(UnixSocketHandler())
        )  # We need to reinstall the opener every time as it gets uninstalled
        tags = instance.get("tags") or []

        try:
            self._process_events(self._get_events(instance))
        except socket.timeout:
            self.warning(
                'Timeout during socket connection. Events will be missing.')

        try:
            containers = self._get_containers(instance)
        except socket.timeout:
            raise Exception(
                'Cannot get containers list: timeout during socket connection. Try to refine the containers to collect by editing the configuration file.'
            )

        if not containers:
            self.gauge("docker.containers.running", 0)
            raise Exception("No containers are running.")

        self.gauge("docker.containers.running", len(containers))

        max_containers = instance.get('max_containers', DEFAULT_MAX_CONTAINERS)

        if not instance.get("exclude") or not instance.get("include"):
            if len(containers) > max_containers:
                self.warning(
                    "Too many containers to collect. Please refine the containers to collect by editing the configuration file. Truncating to %s containers"
                    % max_containers)
                containers = containers[:max_containers]

        collected_containers = 0
        for container in containers:
            container_tags = list(tags)
            for name in container["Names"]:
                container_tags.append(self._make_tag("name", name.lstrip("/")))
            for key in DOCKER_TAGS:
                container_tags.append(self._make_tag(key, container[key]))

            # Check if the container is included/excluded via its tags
            if not self._is_container_included(instance, container_tags):
                continue

            collected_containers += 1
            if collected_containers > max_containers:
                self.warning(
                    "Too many containers are matching the current configuration. Some containers will not be collected. Please refine your configuration"
                )
                break

            for key, (dd_key, metric_type) in DOCKER_METRICS.items():
                if key in container:
                    getattr(self, metric_type)(dd_key,
                                               int(container[key]),
                                               tags=container_tags)
            for metric in LXC_METRICS:
                mountpoint = self._mountpoints[metric["cgroup"]]
                stat_file = os.path.join(
                    mountpoint,
                    metric["file"] % (self.path_prefix, container["Id"]))
                stats = self._parse_cgroup_file(stat_file)
                for key, (dd_key, metric_type) in metric["metrics"].items():
                    if key.startswith(
                            "total_") and not instance.get("collect_total"):
                        continue
                    if key in stats:
                        getattr(self, metric_type)(dd_key,
                                                   int(stats[key]),
                                                   tags=container_tags)

    def _process_events(self, events):
        for ev in events:
            self.log.debug("Creating event for %s" % ev)
            self.event({
                'timestamp':
                ev['time'],
                'host':
                self.hostname,
                'event_type':
                EVENT_TYPE,
                'msg_title':
                "%s %s on %s" % (ev['from'], ev['status'], self.hostname),
                'source_type_name':
                EVENT_TYPE,
                'event_object':
                ev['from'],
            })

    def _make_tag(self, key, value):
        return "%s:%s" % (key.lower(), value.strip())

    def _is_container_included(self, instance, tags):
        def _is_tag_included(tag):
            for exclude_rule in instance.get("exclude") or []:
                if re.match(exclude_rule, tag):
                    for include_rule in instance.get("include") or []:
                        if re.match(include_rule, tag):
                            return True
                    return False
            return True

        for tag in tags:
            if _is_tag_included(tag):
                return True
        return False

    def _get_containers(self, instance):
        """Gets the list of running containers in Docker."""
        return self._get_json("%(url)s/containers/json" % instance,
                              params={"size": 1})

    def _get_container(self, instance, cid):
        """Get container information from Docker, gived a container Id."""
        return self._get_json("%s/containers/%s/json" % (instance["url"], cid))

    def _get_events(self, instance):
        """Get the list of events """
        now = int(time.time())
        result = self._get_json(
            "%s/events" % instance["url"],
            params={
                "until":
                now,
                "since":
                self._last_event_collection_ts[instance["url"]] or now - 60,
            },
            multi=True)
        self._last_event_collection_ts[instance["url"]] = now
        if type(result) == dict:
            result = [result]
        return result

    def _get_json(self, uri, params=None, multi=False):
        """Utility method to get and parse JSON streams."""
        if params:
            uri = "%s?%s" % (uri, urllib.urlencode(params))
        self.log.debug("Connecting to: %s" % uri)
        req = urllib2.Request(uri, None)
        try:
            request = urllib2.urlopen(req)
        except urllib2.URLError, e:
            if "Errno 13" in str(e):
                raise Exception(
                    "Unable to connect to socket. dd-agent user must be part of the 'docker' group"
                )
            raise
        response = request.read()
        if multi and "}{" in response:  # docker api sometimes returns juxtaposed json dictionaries
            response = "[{0}]".format(response.replace("}{", "},{"))

        if not response:
            return []

        return json.loads(response)
Example #50
        except socket.timeout, e:
            self.timeout_event(url, timeout, aggregation_key)
            return

        except socket.error, e:
            self.timeout_event(url, timeout, aggregation_key)
            return

        except HttpLib2Error, e:
            self.timeout_event(url, timeout, aggregation_key)
            return

        if resp.status != 200:
            self.status_code_event(url, r, aggregation_key)

        stats = json.loads(content)

        [self.gauge("riak." + k, stats[k]) for k in self.keys if k in stats]

        coord_redirs_total = stats["coord_redirs_total"]
        if self.prev_coord_redirs_total > -1:
            count = coord_redirs_total - self.prev_coord_redirs_total
            self.gauge('riak.coord_redirs', count)

        self.prev_coord_redirs_total = coord_redirs_total

    def timeout_event(self, url, timeout, aggregation_key):
        self.event({
            'timestamp':
            int(time.time()),
            'event_type':
Example #51
async def _to_json(data):
    try:
        content = json.loads(data)
        return content
    except json.JSONDecodeError:
        raise json.JSONDecodeError('Error decoding data', data, 0)
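The re-raise above matches the stdlib constructor json.JSONDecodeError(msg, doc,
pos), though it discards the original error's position. A usage sketch for the
coroutine, assuming _to_json as defined above:

    import asyncio
    import json

    async def demo():
        try:
            await _to_json('{not valid json')
        except json.JSONDecodeError as e:
            print(e.msg, e.pos)  # Error decoding data 0

    asyncio.run(demo())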
Example #52
def _get_data(agentConfig, url):
    "Hit a given URL and return the parsed json"
    req = urllib2.Request(url, None, headers(agentConfig))
    request = urllib2.urlopen(req)
    response = request.read()
    return json.loads(response)
Example #53
def _get_data(agentConfig, url):
    "Hit a given URL and return the parsed json"
    req = urllib2.Request(url, None, headers(agentConfig))
    request = urllib2.urlopen(req)
    response = request.read()
    return json.loads(response)
Example #54
class Docker(AgentCheck):
    def __init__(self, *args, **kwargs):
        super(Docker, self).__init__(*args, **kwargs)
        urllib2.install_opener(urllib2.build_opener(UnixSocketHandler()))
        self._mountpoints = {}
        for metric in LXC_METRICS:
            self._mountpoints[metric["cgroup"]] = self._find_cgroup(metric["cgroup"])

    def check(self, instance):
        tags = instance.get("tags") or []
        containers = self._get_containers(instance)
        if not containers:
            self.warning("No containers are running.")

        max_containers = instance.get('max_containers', DEFAULT_MAX_CONTAINERS)

        if not instance.get("exclude") or not instance.get("include"):
            if len(containers) > max_containers:
                self.warning("Too many containers to collect. Please refine the containers to collect by editing the configuration file. Truncating to %s containers" % max_containers)
                containers = containers[:max_containers]

        collected_containers = 0
        for container in containers:
            container_tags = list(tags)
            for name in container["Names"]:
                container_tags.append(self._make_tag("name", name.lstrip("/")))
            for key in DOCKER_TAGS:
                container_tags.append(self._make_tag(key, container[key]))

            # Check if the container is included/excluded via its tags
            if not self._is_container_included(instance, container_tags):
                continue

            collected_containers += 1
            if collected_containers > max_containers:
                self.warning("Too many containers are matching the current configuration. Some containers will not be collected. Please refine your configuration")
                break

            for key, (dd_key, metric_type) in DOCKER_METRICS.items():
                if key in container:
                    getattr(self, metric_type)(dd_key, int(container[key]), tags=container_tags)
            for metric in LXC_METRICS:
                mountpoint = self._mountpoints[metric["cgroup"]]
                stat_file = os.path.join(mountpoint, metric["file"] % container["Id"])
                stats = self._parse_cgroup_file(stat_file)
                for key, (dd_key, metric_type) in metric["metrics"].items():
                    if key in stats:
                        getattr(self, metric_type)(dd_key, int(stats[key]), tags=container_tags)

    def _make_tag(self, key, value):
        return "%s:%s" % (key.lower(), value.strip())

    def _is_container_included(self, instance, tags):
        def _is_tag_included(tag):
            for exclude_rule in instance.get("exclude") or []:
                if re.match(exclude_rule, tag):
                    for include_rule in instance.get("include") or []:
                        if re.match(include_rule, tag):
                            return True
                    return False
            return True
        for tag in tags:
            if _is_tag_included(tag):
                return True
        return False

    def _get_containers(self, instance):
        """Gets the list of running containers in Docker."""
        return self._get_json("%(url)s/containers/json" % instance, params={"size": 1})

    def _get_container(self, instance, cid):
        """Get container information from Docker, gived a container Id."""
        return self._get_json("%s/containers/%s/json" % (instance["url"], cid))

    def _get_json(self, uri, params=None):
        """Utility method to get and parse JSON streams."""
        if params:
            uri = "%s?%s" % (uri, urllib.urlencode(params))
        self.log.debug("Connecting to: %s" % uri)
        req = urllib2.Request(uri, None)
        try:
            request = urllib2.urlopen(req)
        except urllib2.URLError, e:
            if "Errno 13" in str(e):
                raise Exception("Unable to connect to socket. dd-agent user must be part of the 'docker' group")
            raise
        response = request.read()
        return json.loads(response)
Example #55
        if values_only:
            cmd = "%s -v true" % cmd

        try:
            self._jmx.sendline(cmd)
            self._wait_prompt()
            content = self._jmx.before.replace(cmd, '').strip()
        except ExceptionPexpect, e:
            self.log.critical(
                "POPEN error while dumping data. \n JMX Connector will be relaunched  \n %s"
                % str(e))
            self.terminate()
            raise

        try:
            jsonvar = json.loads(content)
        except Exception, e:
            self.log.error(
                "Couldn't decode JSON %s. %s \n JMX Connector will be relaunched"
                % (str(e), content))
            self.terminate()
            raise

        return jsonvar


class JMXMetric:
    def __init__(self,
                 instance,
                 init_config,
                 bean_name,
Example #56
        except socket.timeout, e:
            self.timeout_event(url, timeout, aggregation_key)
            return

        except socket.error, e:
            self.timeout_event(url, timeout, aggregation_key)
            return

        except HttpLib2Error, e:
            self.timeout_event(url, timeout, aggregation_key)
            return

        if resp.status != 200:
            self.status_code_event(url, r, aggregation_key)

        stats = json.loads(content)

        [self.gauge("riak." + k, stats[k]) for k in self.keys if k in stats]

        coord_redirs_total = stats["coord_redirs_total"]
        if self.prev_coord_redirs_total > -1:
            count = coord_redirs_total - self.prev_coord_redirs_total
            self.gauge('riak.coord_redirs', count)

        self.prev_coord_redirs_total = coord_redirs_total

        def timeout_event(self, url, timeout, aggregation_key):
            self.event({
                'timestamp': int(time.time()),
                'event_type': 'riak_check',
                'msg_title': 'riak check timeout',
Example #57
        if domain is not None:
            cmd = "%s -d %s" % (cmd, domain)
        if values_only:
            cmd = "%s -v true" % cmd
        
        try:
            self._jmx.sendline(cmd)
            self._wait_prompt()
            content = self._jmx.before.replace(cmd, '').strip()
        except BaseException, e:
            self.log.critical("POPEN error while dumping data. \n JMX Connector will be relaunched  \n %s" % str(e))
            self.terminate()
            raise

        try:
            jsonvar = json.loads(content)
        except Exception, e:
            self.log.error("Couldn't decode JSON %s. %s \n JMX Connector will be relaunched" % (str(e), content))
            self.terminate()
            raise

        return jsonvar

class JMXMetric:


    def __init__(self, instance, init_config, bean_name, attribute_name, attribute_value, 
        tags={}, name_suffix=None):
        if name_suffix is not None:
            attribute_name = "%s.%s" % (attribute_name, name_suffix)
Example #58
 def _get_data(self, url):
     try:
         data = json.loads(urllib2.urlopen(url).read())
     except urllib2.URLError, e:
         raise Exception('Cannot open RabbitMQ API url: %s %s' %
                         (url, str(e)))