def __init__(self, cluster_proxy):
    """Register the driver's stats under the fixed ``/cassandra`` path and
    expose each stat as an attribute on this instance.

    :param cluster_proxy: object giving access to cluster metadata and the
        live sessions the gauges inspect (pool keys and open counts).
    """
    log.debug("Starting metric capture")

    self.stats = scales.collection(
        '/cassandra',
        scales.PmfStat('request_timer'),
        scales.IntStat('connection_errors'),
        scales.IntStat('write_timeouts'),
        scales.IntStat('read_timeouts'),
        scales.IntStat('unavailables'),
        scales.IntStat('other_errors'),
        scales.IntStat('retries'),
        scales.IntStat('ignores'),
        # gauges
        scales.Stat('known_hosts',
                    lambda: len(cluster_proxy.metadata.all_hosts())),
        scales.Stat('connected_to',
                    lambda: len(set(chain.from_iterable(
                        s._pools.keys() for s in cluster_proxy.sessions)))),
        scales.Stat('open_connections',
                    lambda: sum(sum(p.open_count for p in s._pools.values())
                                for s in cluster_proxy.sessions)))

    # Mirror every stat onto the instance so callers can use e.g.
    # ``metrics.request_timer`` instead of going through ``metrics.stats``.
    for stat_name in ('request_timer', 'connection_errors', 'write_timeouts',
                      'read_timeouts', 'unavailables', 'other_errors',
                      'retries', 'ignores', 'known_hosts', 'connected_to',
                      'open_connections'):
        setattr(self, stat_name, getattr(self.stats, stat_name))
def __init__(self, cluster_proxy):
    """Register this cluster's stats under a unique per-instance path and
    expose each stat as an attribute on this instance.

    Each ``Metrics`` instance claims its own ``cassandra-<N>`` registry
    path (via the ``Metrics._stats_counter`` class counter) so several
    clusters can record metrics without clobbering one another.

    :param cluster_proxy: object giving access to cluster metadata and the
        live sessions the gauges inspect (pool keys and open counts).
    """
    log.debug("Starting metric capture")

    self.stats_name = 'cassandra-{0}'.format(str(self._stats_counter))
    Metrics._stats_counter += 1

    self.stats = scales.collection(
        self.stats_name,
        scales.PmfStat('request_timer'),
        scales.IntStat('connection_errors'),
        scales.IntStat('write_timeouts'),
        scales.IntStat('read_timeouts'),
        scales.IntStat('unavailables'),
        scales.IntStat('other_errors'),
        scales.IntStat('retries'),
        scales.IntStat('ignores'),
        # gauges
        scales.Stat('known_hosts',
                    lambda: len(cluster_proxy.metadata.all_hosts())),
        scales.Stat('connected_to',
                    lambda: len(set(chain.from_iterable(
                        s._pools.keys() for s in cluster_proxy.sessions)))),
        scales.Stat('open_connections',
                    lambda: sum(sum(p.open_count for p in s._pools.values())
                                for s in cluster_proxy.sessions)))

    # TODO, to be removed in 4.0
    # /cassandra contains the metrics of the first cluster registered
    if 'cassandra' not in scales._Stats.stats:
        scales._Stats.stats['cassandra'] = scales._Stats.stats[
            self.stats_name]

    # Mirror every stat onto the instance so callers can use e.g.
    # ``metrics.request_timer`` instead of going through ``metrics.stats``.
    for stat_name in ('request_timer', 'connection_errors', 'write_timeouts',
                      'read_timeouts', 'unavailables', 'other_errors',
                      'retries', 'ignores', 'known_hosts', 'connected_to',
                      'open_connections'):
        setattr(self, stat_name, getattr(self.stats, stat_name))
class DynamicRoot(object):
    """Root class with a dynamic stat."""

    # Class-level value that the dynamic gauge reads back at call time.
    value = 100

    dynamicStat = scales.Stat('dynamic')

    def __init__(self):
        scales.init(self)

        # Replace the stat with a callable so reads reflect the current
        # class-level ``value`` rather than a snapshot.
        def current_value():
            return DynamicRoot.value

        self.dynamicStat = current_value
class Child(object):
    """Child level test class."""

    # Stats registered under whatever parent path this child attaches to.
    countStat = scales.IntStat('count')
    stateStat = scales.Stat('state')
    errorsStat = scales.IntDictStat('errors')

    def __init__(self, name='C'):
        # Attach this instance as a named child node in the stats tree.
        scales.initChild(self, name)
class Root1(object):
    """Root level test class."""

    stateStat = scales.Stat('state')
    errorsStat = scales.IntDictStat('errors')
    # autoDelete drops keys from the dict stat when they reach zero.
    activeUrlsStat = scales.IntDictStat('activeUrls', autoDelete=True)

    def __init__(self):
        # Register this instance at a fixed path in the stats tree.
        scales.init(self, 'path/to/A')

    def getChild(self, cls, *args):
        """Creates a child."""
        return cls(*args)
def check_queue_lengths(self):
    """Periodically publish per-queue Celery backlog sizes as scales stats.

    Loops until ``self.should_stop`` becomes true.  Each pass counts, for
    every configured task queue, the messages still waiting in Redis
    (``LLEN``) plus the messages claimed by a worker but not yet acked
    (entries of the ``unacked`` hash), then writes the totals onto
    ``stats_queue`` attributes named after each queue.  Exceptions are
    logged and the loop keeps running.

    Fixes over the previous version:
    - removed two dead statements that rebound ``unacked`` to values that
      were never read;
    - the ``except`` path now sleeps before retrying, so a persistent
      failure no longer spins in a tight log-flooding loop.
    """
    while not self.should_stop:
        try:
            lengths = collections.Counter()
            with self.app.connection() as connection:
                # Pipeline batches all LLENs plus the unacked fetch into
                # a single round trip.
                pipe = connection.channel().client.pipeline(
                    transaction=False)
                for queue in self.app.conf['task_queues']:
                    # Lazily create a gauge the first time a queue is seen.
                    if not hasattr(stats_queue, queue.name):
                        setattr(type(stats_queue), queue.name,
                                scales.Stat(queue.name))
                    # Not claimed by any worker yet
                    pipe.llen(queue.name)
                # Claimed by worker but not acked/processed yet
                pipe.hvals('unacked')
                result = pipe.execute()
                unacked = result.pop()
                for task in unacked:
                    task = json.loads(task.decode('utf-8'))
                    # task[-1] is used as the queue key here — presumably
                    # the delivery's queue name; verify against the
                    # unacked payload format if this ever drifts.
                    lengths[task[-1]] += 1
                for llen, queue in zip(result,
                                       self.app.conf['task_queues']):
                    lengths[queue.name] += llen
                for queue, length in lengths.items():
                    setattr(stats_queue, queue, length)
            time.sleep(self.queuelength_interval)
        except Exception:
            log.error(
                'Uncaught exception, preventing thread from crashing.',
                exc_info=True)
            # Back off before retrying so a persistent failure (e.g.
            # broker down) does not become a busy error loop.
            time.sleep(self.queuelength_interval)