def stringReceived(self, rawRequest):
    request = self.unpickler.loads(rawRequest)
    if request['type'] == 'cache-query':
        metric = request['metric']
        datapoints = MetricCache.get(metric, [])
        result = dict(datapoints=datapoints)
        if settings.LOG_CACHE_HITS:
            log.query('[%s] cache query for "%s" returned %d values' %
                      (self.peerAddr, metric, len(datapoints)))
        instrumentation.increment('cacheQueries')
    elif request['type'] == 'cache-query-bulk':
        datapointsByMetric = {}
        metrics = request['metrics']
        for metric in metrics:
            datapointsByMetric[metric] = MetricCache.get(metric, [])
        result = dict(datapointsByMetric=datapointsByMetric)
        if settings.LOG_CACHE_HITS:
            log.query('[%s] cache query bulk for "%d" metrics returned %d values' %
                      (self.peerAddr, len(metrics),
                       sum(len(datapoints) for datapoints in datapointsByMetric.values())))
        instrumentation.increment('cacheBulkQueries')
        instrumentation.append('cacheBulkQuerySize', len(metrics))
    elif request['type'] == 'get-metadata':
        result = management.getMetadata(request['metric'], request['key'])
    elif request['type'] == 'set-metadata':
        result = management.setMetadata(request['metric'], request['key'], request['value'])
    else:
        result = dict(error='Invalid request type "%s"' % request['type'])
    response = pickle.dumps(result, protocol=-1)
    self.sendString(response)
def stringReceived(self, rawRequest):
    request = self.unpickler.loads(rawRequest)
    if request['type'] == 'cache-query':
        metric = request['metric']
        # Materialize the items view so the result pickles under Python 3.
        datapoints = list(MetricCache.get(metric, {}).items())
        result = dict(datapoints=datapoints)
        if settings.LOG_CACHE_HITS:
            log.query('[%s] cache query for "%s" returned %d values' %
                      (self.peerAddr, metric, len(datapoints)))
        instrumentation.increment('cacheQueries')
    elif request['type'] == 'cache-query-bulk':
        datapointsByMetric = {}
        metrics = request['metrics']
        for metric in metrics:
            datapointsByMetric[metric] = list(MetricCache.get(metric, {}).items())
        result = dict(datapointsByMetric=datapointsByMetric)
        if settings.LOG_CACHE_HITS:
            log.query('[%s] cache query bulk for "%d" metrics returned %d values' %
                      (self.peerAddr, len(metrics),
                       sum(len(datapoints) for datapoints in datapointsByMetric.values())))
        instrumentation.increment('cacheBulkQueries')
        instrumentation.append('cacheBulkQuerySize', len(metrics))
    elif request['type'] == 'get-metadata':
        result = management.getMetadata(request['metric'], request['key'])
    elif request['type'] == 'set-metadata':
        result = management.setMetadata(request['metric'], request['key'], request['value'])
    else:
        result = dict(error='Invalid request type "%s"' % request['type'])
    response = pickle.dumps(result, protocol=-1)
    self.sendString(response)
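# Both stringReceived variants above answer requests framed the way Twisted's
# Int32StringReceiver frames them: a 4-byte network-order length prefix followed
# by a pickled payload. A minimal standalone client sketch for exercising the
# 'cache-query' handler; the host/port defaults and the exact shape of the
# response dict are assumptions for illustration, not taken from the code above.
import pickle
import socket
import struct

def _recv_exactly(sock, n):
    """Read exactly n bytes from the socket, or raise on early close."""
    data = b''
    while len(data) < n:
        chunk = sock.recv(n - len(data))
        if not chunk:
            raise IOError('connection closed before %d bytes arrived' % n)
        data += chunk
    return data

def query_cache(metric, host='localhost', port=7002):
    """Send one 'cache-query' request and return the unpickled response dict."""
    payload = pickle.dumps({'type': 'cache-query', 'metric': metric}, protocol=2)
    sock = socket.create_connection((host, port))
    try:
        # Length-prefixed framing: 4-byte big-endian size, then the pickle body.
        sock.sendall(struct.pack('!L', len(payload)) + payload)
        size = struct.unpack('!L', _recv_exactly(sock, 4))[0]
        return pickle.loads(_recv_exactly(sock, size))
    finally:
        sock.close()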
def run(self):
    global buckets
    last_values = {}
    sock = socket.socket()
    sock.connect((self.ip, self.port))
    pp = pprint.PrettyPrinter(indent=4)
    while True:
        time.sleep(5)
        pp.pprint(buckets)
        # Grab the bucket keys and drop the newest bucket, which may still be
        # receiving datapoints. dict.keys() is a view in Python 3, so it must
        # be materialized before popping; sorting makes "newest" explicit
        # instead of relying on insertion order.
        list_buckets = sorted(buckets.keys())
        if not list_buckets:
            self.logger.debug('[PUBLISHER] The buckets table is empty')
            continue
        list_buckets.pop()
        # For each remaining (closed) bucket
        for bucket in list_buckets:
            self.logger.debug('[PUBLISHER] Current bucket: %d' % bucket)
            current_bucket = buckets[bucket]
            # Compute the aggregations defined for each metric and update last_values
            for metric in current_bucket:
                self.logger.debug('[PUBLISHER] Current metric: %s' % metric)
                list_metrics = current_bucket[metric]
                last_values.setdefault(metric, {})
                for aggregation_method in self.aggregation_methods[metric]:
                    self.logger.debug('[PUBLISHER] Current aggregation method: %s' %
                                      aggregation_method)
                    last_values[metric][aggregation_method] = globals()[aggregation_method](list_metrics)
                    self.logger.debug('[PUBLISHER] Current result: %0.2f' %
                                      last_values[metric][aggregation_method])
            # Publish last_values to graphite over the pickle protocol. This
            # assumes every metric is configured with 'min', 'avg' and 'max'.
            graphite_data = []
            for metric in last_values:
                for x in ['min', 'avg', 'max']:
                    path = '%s%s.%s' % (prefix, metric, x)
                    timestamp = bucket
                    value = last_values[metric][x]
                    graphite_data.append((path, (timestamp, value)))
                    self.logger.debug('[PUBLISHER] %s %0.1f %d' % (path, value, timestamp))
            payload = pickle.dumps(graphite_data, protocol=2)
            header = struct.pack("!L", len(payload))
            sock.sendall(header + payload)
            # Delete the bucket so it is not processed again
            del buckets[bucket]
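# run() resolves each aggregation function by name with
# globals()[aggregation_method] and later publishes the 'min', 'avg' and 'max'
# keys, so module-level callables with exactly those names must exist
# (globals() does not see builtins). The originals are not shown here; a
# plausible minimal sketch, with the caveat that defining min/max at module
# level shadows the builtins inside this module:
import builtins

def avg(values):
    """Arithmetic mean of the samples in a bucket."""
    return sum(values) / float(len(values)) if values else 0.0

def min(values):  # shadows the builtin on purpose: looked up via globals()
    return builtins.min(values) if values else 0.0

def max(values):  # shadows the builtin on purpose: looked up via globals()
    return builtins.max(values) if values else 0.0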
def sendQueued(self):
    while (not self.paused) and self.factory.hasQueuedDatapoints():
        datapoints = self.factory.takeSomeFromQueue()
        self.sendString(pickle.dumps(datapoints, protocol=-1))
        self.factory.checkQueue()
        instrumentation.increment(self.sent, len(datapoints))
    if (settings.USE_FLOW_CONTROL and
            state.metricReceiversPaused and
            self.factory.queueSize < SEND_QUEUE_LOW_WATERMARK):
        log.clients('send queue has space available, resuming paused clients')
        events.resumeReceivingMetrics()
def sendQueued(self):
    while (not self.paused) and self.factory.hasQueuedDatapoints():
        datapoints = self.factory.takeSomeFromQueue()
        self.sendString(pickle.dumps(datapoints, protocol=-1))
        self.factory.checkQueue()
        instrumentation.increment(self.sent, len(datapoints))
    if (
        settings.USE_FLOW_CONTROL
        and state.metricReceiversPaused
        and self.factory.queueSize < SEND_QUEUE_LOW_WATERMARK
    ):
        log.clients("send queue has space available, resuming paused clients")
        events.resumeReceivingMetrics()
def sendDatapoint(self, metric, datapoint):
    if self.paused:
        self.factory.enqueue(metric, datapoint)
        instrumentation.increment(self.queuedUntilReady)
    elif self.factory.hasQueuedDatapoints():
        self.factory.enqueue(metric, datapoint)
        self.sendQueued()
    else:
        datapoints = [(metric, datapoint)]
        self.sendString(pickle.dumps(datapoints, protocol=-1))
        instrumentation.increment(self.sent)
        self.factory.checkQueue()
def stringReceived(self, rawRequest): request = self.unpickler.loads(rawRequest) if request["type"] == "cache-query": metric = request["metric"] datapoints = MetricCache.get(metric, []) result = dict(datapoints=datapoints) log.query('[%s] cache query for "%s" returned %d values' % (self.peerAddr, metric, len(datapoints))) instrumentation.increment("cacheQueries") # elif request['type'] == 'get-metadata': # result = management.getMetadata(request['metric'], request['key']) # elif request['type'] == 'set-metadata': # result = management.setMetadata(request['metric'], request['key'], request['value']) else: result = dict(error='Invalid request type "%s"' % request["type"]) response = pickle.dumps(result, protocol=-1) self.sendString(response)
def stringReceived(self, rawRequest):
    request = self.unpickler.loads(rawRequest)
    if request['type'] == 'cache-query':
        metric = request['metric']
        datapoints = MetricCache.get(metric, [])
        result = dict(datapoints=datapoints)
        log.query('[%s] cache query for "%s" returned %d values' %
                  (self.peerAddr, metric, len(datapoints)))
        instrumentation.increment('cacheQueries')
    elif request['type'] == 'get-metadata':
        result = management.getMetadata(request['metric'], request['key'])
    elif request['type'] == 'set-metadata':
        result = management.setMetadata(request['metric'], request['key'], request['value'])
    else:
        result = dict(error='Invalid request type "%s"' % request['type'])
    response = pickle.dumps(result, protocol=-1)
    self.sendString(response)
def stringReceived(self, rawRequest): request = self.unpickler.loads(rawRequest) if request["type"] == "cache-query": metric = request["metric"] datapoints = MetricCache.get(metric, []) result = dict(datapoints=datapoints) if settings.LOG_CACHE_HITS: log.query('[%s] cache query for "%s" returned %d values' % (self.peerAddr, metric, len(datapoints))) instrumentation.increment("cacheQueries") elif request["type"] == "cache-query-bulk": datapointsByMetric = {} metrics = request["metrics"] for metric in metrics: datapointsByMetric[metric] = MetricCache.get(metric, []) result = dict(datapointsByMetric=datapointsByMetric) if settings.LOG_CACHE_HITS: log.query( '[%s] cache query bulk for "%d" metrics returned %d values' % ( self.peerAddr, len(metrics), sum([len(datapoints) for datapoints in datapointsByMetric.values()]), ) ) instrumentation.increment("cacheBulkQueries") instrumentation.append("cacheBulkQuerySize", len(metrics)) elif request["type"] == "get-metadata": result = management.getMetadata(request["metric"], request["key"]) elif request["type"] == "set-metadata": result = management.setMetadata(request["metric"], request["key"], request["value"]) else: result = dict(error='Invalid request type "%s"' % request["type"]) response = pickle.dumps(result, protocol=-1) self.sendString(response)
def stringReceived(self, rawRequest):
    request = self.unpickler.loads(rawRequest)
    if request['type'] == 'cache-query':
        metric = request['metric']
        datapoints = MetricCache.get(metric, [])
        result = dict(datapoints=datapoints)
        if settings.LOG_CACHE_HITS:
            log.query('[%s] cache query for "%s" returned %d values' %
                      (self.peerAddr, metric, len(datapoints)))
        instrumentation.increment('cacheQueries')
    elif request['type'] == 'get-metadata':
        result = management.getMetadata(request['metric'], request['key'])
    elif request['type'] == 'set-metadata':
        result = management.setMetadata(request['metric'], request['key'], request['value'])
    else:
        result = dict(error='Invalid request type "%s"' % request['type'])
    response = pickle.dumps(result, protocol=-1)
    self.sendString(response)
def _sendDatapoints(self, datapoints):
    self.sendString(pickle.dumps(datapoints, protocol=-1))
    instrumentation.increment(self.sent, len(datapoints))
    instrumentation.increment(self.batchesSent)
    self.factory.checkQueue()
def _sendDatapointsNow(self, datapoints):
    self.sendString(pickle.dumps(datapoints, protocol=2))
def _sendDatapointsNow(self, datapoints):
    self.sendString(pickle.dumps(datapoints, protocol=-1))
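# The two _sendDatapointsNow variants above differ only in the pickle protocol.
# protocol=-1 selects the highest protocol the running interpreter supports,
# which an older peer may be unable to unpickle; protocol=2 pins the newest
# format that Python 2 readers still accept, which keeps payloads readable by
# a Python 2 receiver (the publisher in run() above pins it for that reason).
# A quick illustration: the serialized bytes differ, the round-tripped value
# does not.
import pickle

datapoints = [('carbon.test.metric', (1700000000, 1.0))]
for proto in (2, -1):
    blob = pickle.dumps(datapoints, protocol=proto)
    assert pickle.loads(blob) == datapoints
    print(proto, len(blob))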