def optimalWriteOrder():
  "Generates metrics with the most cached values first and applies a soft rate limit on new metrics"
  global lastCreateInterval
  global createCount

  metrics = MetricCache.counts()

  t = time.time()
  metrics.sort(key=lambda item: item[1], reverse=True)  # by queue size, descending
  log.msg("Sorted %d cache queues in %.6f seconds" % (len(metrics), time.time() - t))

  for metric, queueSize in metrics:
    if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK:
      events.cacheSpaceAvailable()

    # Let our persister do its own check, and ignore the metric if needed.
    if not persister.pre_get_datapoints_check(metric):
      continue

    try:  # metrics can momentarily disappear from the MetricCache due to the implementation of MetricCache.store()
      datapoints = MetricCache.pop(metric)
    except KeyError:
      log.msg("MetricCache contention, skipping %s update for now" % metric)
      continue  # we simply move on to the next metric when this race condition occurs

    dbInfo = persister.get_dbinfo(metric)
    dbIdentifier = dbInfo[0]
    dbExists = dbInfo[1]
    yield (metric, datapoints, dbIdentifier, dbExists)
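# Illustrative sketch only: the variant above assumes a pluggable `persister`
# exposing pre_get_datapoints_check(metric) and get_dbinfo(metric), where
# get_dbinfo returns (identifier, exists). The class below is a hypothetical
# filesystem-backed example of that interface, not code from this module.
from os.path import exists


class FilesystemPersister(object):

  def __init__(self, get_path):
    # get_path maps a metric name to a database file path, e.g. getFilesystemPath.
    self.get_path = get_path

  def pre_get_datapoints_check(self, metric):
    # No additional gating in this sketch; always allow the metric through.
    return True

  def get_dbinfo(self, metric):
    path = self.get_path(metric)
    return (path, exists(path))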
def queueSpaceCallback(self, result):
  if self.queueFull.called:
    log.clients('%s send queue has space available' % self.connectedProtocol)
    self.queueFull = Deferred()
    self.queueFull.addCallback(self.queueFullCallback)
    events.cacheSpaceAvailable()
  self.queueHasSpace = Deferred()
  self.queueHasSpace.addCallback(self.queueSpaceCallback)
def optimalWriteOrder():
  "Generates metrics with the most cached values first and applies a soft rate limit on new metrics"
  global lastCreateInterval
  global createCount

  metrics = MetricCache.counts()

  t = time.time()
  #metrics.sort(key=lambda item: item[1], reverse=True)  # by queue size, descending
  log.msg("Fetched %d cache queues in %.6f seconds" % (len(metrics), time.time() - t))

  if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK:
    events.cacheSpaceAvailable()

  for metric, queueSize in metrics:
    # Handle special characters in metric names: replace anything outside the
    # printable ASCII range with an underscore before resolving the file path.
    metric_sanit = "".join('_' if c < '\x20' or c > '\x7e' else c for c in metric)

    dbFilePath = None
    try:
      dbFilePath = getFilesystemPath(metric_sanit)
      dbFileExists = exists(dbFilePath)
    except Exception:
      log.err()
      log.msg("dbFilePath: %s" % (dbFilePath))
      continue

    if not dbFileExists:
      createCount += 1
      now = time.time()
      if now - lastCreateInterval >= 60:
        lastCreateInterval = now
        createCount = 1
      elif createCount >= settings.MAX_CREATES_PER_MINUTE:
        # dropping queued up datapoints for new metrics prevents filling up the entire cache
        # when a bunch of new metrics are received.
        try:
          MetricCache.pop(metric)
        except KeyError:
          pass
        continue

    try:  # metrics can momentarily disappear from the MetricCache due to the implementation of MetricCache.store()
      datapoints = MetricCache.pop(metric)
    except KeyError:
      log.msg("MetricCache contention, skipping %s update for now" % metric)
      continue  # we simply move on to the next metric when this race condition occurs

    yield (metric_sanit, datapoints, dbFilePath, dbFileExists)
def optimalWriteOrder():
  log.msg("Entered optimalWriteOrder")
  metrics = MetricCache.counts()

  t = time.time()
  metrics.sort(key=lambda item: item[1], reverse=True)  # by queue size, descending
  log.msg("Sorted %d cache queues in %.6f seconds" % (len(metrics), time.time() - t))

  for metric, queueSize in metrics:
    if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK:
      events.cacheSpaceAvailable()

    try:  # metrics can momentarily disappear from the MetricCache due to the implementation of MetricCache.store()
      datapoints = MetricCache.pop(metric)
    except KeyError:
      log.msg("MetricCache contention, skipping %s update for now" % metric)
      continue  # we simply move on to the next metric when this race condition occurs

    yield (metric, datapoints)
def optimalWriteOrder():
  """Generates metrics with the most cached values first and applies a soft rate limit on new metrics"""
  global lastCreateInterval
  global createCount

  metrics = MetricCache.counts()

  t = time.time()
  metrics.sort(key=lambda item: item[1], reverse=True)  # by queue size, descending
  log.debug("Sorted %d cache queues in %.6f seconds" % (len(metrics), time.time() - t))

  for metric, queueSize in metrics:
    if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK:
      events.cacheSpaceAvailable()

    dbFilePath = getFilesystemPath(metric)
    dbFileExists = exists(dbFilePath)

    if not dbFileExists:
      createCount += 1
      now = time.time()
      if now - lastCreateInterval >= 60:
        lastCreateInterval = now
        createCount = 1
      elif createCount >= settings.MAX_CREATES_PER_MINUTE:
        # dropping queued up datapoints for new metrics prevents filling up the entire cache
        # when a bunch of new metrics are received.
        try:
          MetricCache.pop(metric)
        except KeyError:
          pass
        continue

    try:  # metrics can momentarily disappear from the MetricCache due to the implementation of MetricCache.store()
      datapoints = MetricCache.pop(metric)
    except KeyError:
      log.msg("MetricCache contention, skipping %s update for now" % metric)
      continue  # we simply move on to the next metric when this race condition occurs

    yield (metric, datapoints, dbFilePath, dbFileExists)
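# Hypothetical usage sketch: a writer loop might consume the generator above and
# persist each batch with the whisper library. The retention settings and the
# whisper.create()/whisper.update_many() calls here are illustrative assumptions,
# not code taken from this module.
import whisper


def writeCachedDataPoints():
  for (metric, datapoints, dbFilePath, dbFileExists) in optimalWriteOrder():
    try:
      if not dbFileExists:
        # Create the database before the first write; archives are assumed here
        # to be 1-minute resolution retained for one day.
        whisper.create(dbFilePath, [(60, 1440)])
      whisper.update_many(dbFilePath, datapoints)  # datapoints: [(timestamp, value), ...]
    except Exception:
      log.err()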
def optimalWriteOrder():
  """Generates metrics with the most cached values first and applies a soft rate limit on new metrics"""
  global lastCreateInterval
  global createCount

  metrics = MetricCache.counts()

  time_ = time.time()
  metrics.sort(key=lambda item: item[1], reverse=True)  # by queue size, descending
  log.debug("Sorted %d cache queues in %.6f seconds" % (len(metrics), time.time() - time_))

  for metric, queueSize in metrics:
    if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK:
      events.cacheSpaceAvailable()

    dbFileExists = APP_DB.exists(metric)

    if not dbFileExists:
      createCount += 1
      now = time.time()
      if now - lastCreateInterval >= 60:
        lastCreateInterval = now
        createCount = 1
      elif createCount >= settings.MAX_CREATES_PER_MINUTE:
        # dropping queued up datapoints for new metrics prevents filling up the entire cache
        # when a bunch of new metrics are received.
        try:
          MetricCache.pop(metric)
        except KeyError:
          pass
        continue

    try:  # metrics can momentarily disappear from the MetricCache due to the implementation of MetricCache.store()
      datapoints = MetricCache.pop(metric)
    except KeyError:
      log.msg("MetricCache contention, skipping %s update for now" % metric)
      continue  # we simply move on to the next metric when this race condition occurs

    yield (metric, datapoints, dbFileExists)
def optimalWriteOrder():
  """Generates metrics with the most cached values first and applies a soft rate limit on new metrics"""
  while MetricCache:
    (metric, datapoints) = MetricCache.pop()
    if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK:
      events.cacheSpaceAvailable()

    dbFilePath = getFilesystemPath(metric)
    dbFileExists = exists(dbFilePath)

    if not dbFileExists and CREATE_BUCKET:
      # If our token bucket has enough tokens available to create a new metric
      # file then yield the metric data to complete that operation. Otherwise
      # we'll just drop the metric on the ground and move on to the next
      # metric.
      # XXX This behavior should probably be configurable to not drop metrics
      # when rate limiting unless our cache is too big or some other legitimate
      # reason.
      if CREATE_BUCKET.drain(1):
        yield (metric, datapoints, dbFilePath, dbFileExists)
      continue

    yield (metric, datapoints, dbFilePath, dbFileExists)
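# Illustrative sketch only: CREATE_BUCKET above is assumed to be a token bucket
# with a non-blocking drain(cost) method that returns True when enough tokens
# are available. A minimal bucket with that shape could look like this
# (hypothetical helper, not necessarily the project's implementation):
import time


class TokenBucket(object):

  def __init__(self, capacity, fill_rate):
    # capacity: maximum tokens held; fill_rate: tokens added per second.
    self.capacity = float(capacity)
    self.fill_rate = float(fill_rate)
    self._tokens = float(capacity)
    self._last = time.time()

  def drain(self, cost):
    # Refill based on elapsed time (capped at capacity), then try to spend `cost`.
    now = time.time()
    self._tokens = min(self.capacity, self._tokens + (now - self._last) * self.fill_rate)
    self._last = now
    if self._tokens >= cost:
      self._tokens -= cost
      return True
    return False


# e.g. allowing roughly MAX_CREATES_PER_MINUTE new metric files per minute:
# CREATE_BUCKET = TokenBucket(settings.MAX_CREATES_PER_MINUTE,
#                             settings.MAX_CREATES_PER_MINUTE / 60.0)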
def _check_available_space(self):
  if state.cacheTooFull and self.size < settings.CACHE_SIZE_LOW_WATERMARK:
    log.msg("MetricCache below watermark: self.size=%d" % self.size)
    events.cacheSpaceAvailable()