import pprint as pp

from greplin import scales


class RequestAnalyzer(object):
    """
    Class used to track request and error counts for a Session.

    Also computes statistics on encoded request size.
    """

    requests = scales.PmfStat('request size')
    errors = scales.IntStat('errors')
    successful = scales.IntStat('success')

    # When set, throw exceptions from the callbacks when they are invoked.
    throw_on_success = False
    throw_on_fail = False

    def __init__(self, session, throw_on_success=False, throw_on_fail=False):
        scales.init(self, '/request')
        # Each instance is registered with a session and receives a callback
        # for each request generated.
        session.add_request_init_listener(self.on_request)
        self.throw_on_fail = throw_on_fail
        self.throw_on_success = throw_on_success

    def on_request(self, rf):
        # This callback is invoked each time a request is created, on the
        # thread creating the request. We can use this to count events, or
        # add callbacks.
        rf.add_callbacks(self.on_success, self.on_error,
                         callback_args=(rf,), errback_args=(rf,))

    def on_success(self, _, response_future):
        # Future callback on a successful request; just record the size.
        self.requests.addValue(response_future.request_encoded_size)
        self.successful += 1
        if self.throw_on_success:
            raise AttributeError

    def on_error(self, _, response_future):
        # Future callback for a failed request; record size and increment errors.
        self.requests.addValue(response_future.request_encoded_size)
        self.errors += 1
        if self.throw_on_fail:
            raise AttributeError

    def remove_ra(self, session):
        session.remove_request_init_listener(self.on_request)

    def __str__(self):
        # Just extracting the request count from the size stats
        # (which are recorded on all requests).
        request_sizes = dict(self.requests)
        count = request_sizes.pop('count')
        return "%d requests (%d errors)\nRequest size statistics:\n%s" % (
            count, self.errors, pp.pformat(request_sizes))
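# Usage sketch for RequestAnalyzer, assuming the cassandra-driver package and
# a reachable cluster; the `cluster`/`session` names are illustrative only.
from cassandra.cluster import Cluster

cluster = Cluster()
session = cluster.connect()
analyzer = RequestAnalyzer(session)  # registers itself as a request listener

session.execute("SELECT release_version FROM system.local")
print(analyzer)  # e.g. "1 requests (0 errors)" plus request size percentiles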
class RegisterView(FlaskView):
    """Handle signup."""

    client_errors = scales.IntStat('4xx')
    server_errors = scales.IntStat('5xx')
    latency = scales.PmfStat('latency')

    def __init__(self):
        super(RegisterView, self).__init__()
        scales.init(self, '/api/register')

    def index(self):
        return "Register", 200
class ProfileView(FlaskView):
    """Shows the personal profile."""

    client_errors = scales.IntStat('4xx')
    server_errors = scales.IntStat('5xx')
    latency = scales.PmfStat('latency')

    def __init__(self):
        super(ProfileView, self).__init__()
        scales.init(self, '/api/profile')
        self._users_backend = UsersBackend(config)

    def index(self):
        return "MyProfile", 200
class TypedChild(object):
    """Child level test class."""

    countStat = scales.IntStat('count')

    def __init__(self):
        scales.initChildOfType(self, 'C')
def __init__(self, uri, config, masterGraph, mqtt, influx):
    self.uri = uri
    self.config = config
    self.masterGraph = masterGraph
    self.mqtt = mqtt
    self.influx = influx

    self.mqttTopic = self.topicFromConfig(self.config)
    statPath = ('/subscribed_topic/' +
                self.mqttTopic.decode('ascii').replace('/', '|'))
    scales.init(self, statPath)
    self._mqttStats = scales.collection(statPath + '/incoming',
                                        scales.IntStat('count'),
                                        scales.RecentFpsStat('fps'))

    # Build the rx pipeline: raw MQTT bytes -> parsed values -> RDF quads.
    rawBytes = self.subscribeMqtt()
    rawBytes = rx.operators.do_action(self.countIncomingMessage)(rawBytes)
    parsed = self.getParser()(rawBytes)

    g = self.config
    for conv in g.items(g.value(self.uri, ROOM['conversions'])):
        parsed = self.conversionStep(conv)(parsed)

    outputQuadsSets = rx.combine_latest(*[
        self.makeQuads(parsed, plan)
        for plan in g.objects(self.uri, ROOM['graphStatements'])
    ])
    outputQuadsSets.subscribe_(self.updateQuads)
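# A minimal sketch of the counter callback referenced in the pipeline above,
# assuming scales' RecentFps value exposes mark() (as in greplin.scales); the
# body is illustrative, only the stat names come from the snippet.
def countIncomingMessage(self, _msg):
    self._mqttStats.count += 1   # total messages seen on this topic
    self._mqttStats.fps.mark()   # feeds the recent frames-per-second stat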
class Child(object):
    """Child test class."""

    countStat = scales.IntStat('count')

    def __init__(self, name, collapsed):
        scales.initChild(self, name).setCollapsed(collapsed)
def __init__(self, cluster_proxy):
    log.debug("Starting metric capture")

    self.stats = scales.collection(
        '/cassandra',
        scales.PmfStat('request_timer'),
        scales.IntStat('connection_errors'),
        scales.IntStat('write_timeouts'),
        scales.IntStat('read_timeouts'),
        scales.IntStat('unavailables'),
        scales.IntStat('other_errors'),
        scales.IntStat('retries'),
        scales.IntStat('ignores'),
        # gauges
        scales.Stat('known_hosts',
                    lambda: len(cluster_proxy.metadata.all_hosts())),
        scales.Stat('connected_to',
                    lambda: len(set(chain.from_iterable(
                        s._pools.keys() for s in cluster_proxy.sessions)))),
        scales.Stat('open_connections',
                    lambda: sum(sum(p.open_count for p in s._pools.values())
                                for s in cluster_proxy.sessions)))

    self.request_timer = self.stats.request_timer
    self.connection_errors = self.stats.connection_errors
    self.write_timeouts = self.stats.write_timeouts
    self.read_timeouts = self.stats.read_timeouts
    self.unavailables = self.stats.unavailables
    self.other_errors = self.stats.other_errors
    self.retries = self.stats.retries
    self.ignores = self.stats.ignores
    self.known_hosts = self.stats.known_hosts
    self.connected_to = self.stats.connected_to
    self.open_connections = self.stats.open_connections
class Child(object):
    """Child level test class."""

    countStat = scales.IntStat('count')
    stateStat = scales.Stat('state')
    errorsStat = scales.IntDictStat('errors')

    def __init__(self, name='C'):
        scales.initChild(self, name)
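# Sketch of how these test classes compose into a stat hierarchy, assuming
# scales' stack-based parent discovery (the child is created inside a parent
# that was registered with scales.init); `Root` and '/root' are illustrative.
from greplin import scales

class Root(object):
    def __init__(self):
        scales.init(self, '/root')
        self.child = Child('C')  # nests under the parent's stat path

root = Root()
root.child.countStat += 1
print(scales.getStats())  # child stats appear under the parent, e.g. {'root': {'C': {'count': 1}}}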
class UsersView(FlaskView):
    """Users list view."""

    client_errors = scales.IntStat('4xx')
    server_errors = scales.IntStat('5xx')
    latency = scales.PmfStat('latency')

    def __init__(self):
        super(UsersView, self).__init__()
        scales.init(self, '/api/home')

    def index(self):
        return "UsersList", 200

    def get(self, user_id):
        return "UserID: %s" % user_id, 200
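# Sketch of wiring these FlaskView classes onto an application, assuming the
# flask-classy/flask-classful `register` API; `app` is illustrative.
from flask import Flask

app = Flask(__name__)
RegisterView.register(app)  # routes under /register/ by default
ProfileView.register(app)
UsersView.register(app)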
class MediasView(FlaskView):
    """Medias list view."""

    client_errors = scales.IntStat('4xx')
    server_errors = scales.IntStat('5xx')
    latency = scales.PmfStat('latency')

    def __init__(self):
        super(MediasView, self).__init__()
        scales.init(self, '/api/medias')
        self._media_backend = MediasBackend(config)
        self._image_processor = ImageProcessorService(config)
        self._video_processor = VideoProcessorService(config)

    def index(self):
        return "MediaList", 200

    def get(self, media_id):
        """
        Return the media matching media_id

        :param media_id:
        :type media_id: str
        :return:
        """
        return "MediaID: %s" % media_id, 200

    def post(self, data):
        """
        Handle a new media

        :param data: the media to save/handle
        :return:
        """
        raise NotImplementedError()

    def delete(self, media_id):
        """
        Delete the media matching media_id

        :param media_id:
        :type media_id: str
        :return:
        """
        raise NotImplementedError()
def testCollection(self):
    """Tests for a stat collection."""
    collection = scales.collection('/thePath',
                                   scales.IntStat('count'),
                                   scales.IntDictStat('histo'))
    collection.count += 100
    collection.histo['cheese'] += 12300
    collection.histo['cheese'] += 45
    self.assertEqual(
        {'thePath': {
            'count': 100,
            'histo': {
                'cheese': 12345
            }
        }},
        scales.getStats())
def __init__(self, cluster_proxy):
    log.debug("Starting metric capture")

    self.stats_name = 'cassandra-{0}'.format(str(self._stats_counter))
    Metrics._stats_counter += 1
    self.stats = scales.collection(
        self.stats_name,
        scales.PmfStat('request_timer'),
        scales.IntStat('connection_errors'),
        scales.IntStat('write_timeouts'),
        scales.IntStat('read_timeouts'),
        scales.IntStat('unavailables'),
        scales.IntStat('other_errors'),
        scales.IntStat('retries'),
        scales.IntStat('ignores'),
        # gauges
        scales.Stat('known_hosts',
                    lambda: len(cluster_proxy.metadata.all_hosts())),
        scales.Stat('connected_to',
                    lambda: len(set(chain.from_iterable(
                        s._pools.keys() for s in cluster_proxy.sessions)))),
        scales.Stat('open_connections',
                    lambda: sum(sum(p.open_count for p in s._pools.values())
                                for s in cluster_proxy.sessions)))

    # TODO, to be removed in 4.0:
    # /cassandra contains the metrics of the first cluster registered.
    if 'cassandra' not in scales._Stats.stats:
        scales._Stats.stats['cassandra'] = scales._Stats.stats[self.stats_name]

    self.request_timer = self.stats.request_timer
    self.connection_errors = self.stats.connection_errors
    self.write_timeouts = self.stats.write_timeouts
    self.read_timeouts = self.stats.read_timeouts
    self.unavailables = self.stats.unavailables
    self.other_errors = self.stats.other_errors
    self.retries = self.stats.retries
    self.ignores = self.stats.ignores
    self.known_hosts = self.stats.known_hosts
    self.connected_to = self.stats.connected_to
    self.open_connections = self.stats.open_connections
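# Usage sketch: the cassandra-driver builds this Metrics object when a Cluster
# is created with metrics_enabled=True (requires a reachable cluster; names
# like `cluster` are illustrative).
from cassandra.cluster import Cluster

cluster = Cluster(metrics_enabled=True)
session = cluster.connect()
session.execute("SELECT release_version FROM system.local")
print(cluster.metrics.request_timer['count'])  # PmfStat values are dict-like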
class MediasBackend(object):
    """Controls the media model and media data fetching from the database."""

    requests = scales.IntStat('requests')
    latency = scales.PmfStat('latency')

    def __init__(self, config):
        """
        :param config:
        :type config: application.common.tools.Map
        :return:
        """
        scales.init(self, '/backend/medias')
        self._connection = None
        self._db_name = config.mongo.db_name
        replica_set = (config.mongo.replicaset_name
                       if 'replicaset_name' in config.mongo else None)

        self._connection = pymongo.MongoClient(
            config.mongo.uri,
            replicaSet=replica_set,
            maxPoolSize=config.mongo.max_pool_size,
            waitQueueMultiple=config.mongo.wait_queue_multiple,
            waitQueueTimeoutMS=config.mongo.wait_queue_timeout_ms,
            tz_aware=True)

        self._db = self._connection[self._db_name]
        self._medias = self._db[config.mongo.media_collection]

    def fetch_medias(self, user_id, start=0, max_size=20):
        """
        Fetch all medias of the user matching user_id

        :param user_id:
        :type user_id: int
        :param start: the document index from which we should fetch
        :type start: int
        :param max_size: the number of documents to fetch at a time
        :return: list of medias, else None
        :rtype: dict[str, T] | None
        """
        raise NotImplementedError()
import logging
import threading
import time
from functools import wraps

from pyzabbix import ZabbixSender, ZabbixMetric
from configparser import ConfigParser
from io import StringIO

from greplin import scales

try:
    import _thread as thread
except ImportError:
    import thread

log = logging.getLogger(__name__)

stats = scales.collection(
    '/celery',
    scales.IntStat('started'),
    scales.IntStat('succeeded'),
    scales.IntStat('failed'),
    scales.IntStat('retried'),
    scales.PmfStat('queuetime'),
    scales.PmfStat('runtime'),
)
stats_queue = type('Stats:queues', (object,), {})()
scales._Stats.initChild(stats_queue, 'queues', '', stats)


def task_handler(fn):
    @wraps(fn)
    def wrapper(self, event):
        self.state.event(event)
        task = self.state.tasks.get(event['uuid'])
from __future__ import absolute_import, division, print_function

from greplin import scales
import json
import pyramid.config
import pytest
import webtest

STATS = scales.collection('/', scales.IntStat('errors'))
STATS.errors += 1


@pytest.fixture
def app():
    config = pyramid.config.Configurator()
    config.include('pyramid_scales')
    return config.make_wsgi_app()


@pytest.fixture
def browser(app):
    return webtest.TestApp(app)


def test_displays_metrics_as_html(browser):
    r = browser.get('/scales/', status=200)
    assert (b'<span class="key">errors</span> <span class="int">1</span>'
            in r.body)


def test_displays_metrics_as_json(browser):
from dateutil.tz import tzlocal
from rdflib import Literal, Namespace, URIRef
from rdflib.namespace import XSD
from twisted.internet import reactor, task
import cyclone.web
from greplin import scales
from greplin.scales.cyclonehandler import StatsHandler
from patchablegraph import PatchableGraph, CycloneGraphEventsHandler, CycloneGraphHandler
from standardservice.logsetup import log, verboseLogging

DEV = Namespace("http://projects.bigasterisk.com/device/")
ROOM = Namespace("http://projects.bigasterisk.com/room/")
ctx = DEV['dhcp']

STATS = scales.collection(
    '/root',
    scales.PmfStat('readLeases'),
    scales.IntStat('filesDidntChange'),
)


def timeLiteral(dt):
    return Literal(dt.replace(tzinfo=tzlocal()).isoformat(),
                   datatype=XSD.dateTime)


def macUri(macAddress: str) -> URIRef:
    return URIRef("http://bigasterisk.com/mac/%s" % macAddress.lower())


class Poller:

    def __init__(self, graph):
        self.graph = graph
import logging, time, json, random, string, traceback

import cyclone.web
from rdflib import Namespace

from logsetup import log, enableTwistedLog
from greplin import scales
from greplin.scales.cyclonehandler import StatsHandler
from export_to_influxdb import InfluxExporter
from tags import NfcDevice, FakeNfc, NfcError, AuthFailedError

ROOM = Namespace('http://projects.bigasterisk.com/room/')
ctx = ROOM['frontDoorWindowRfidCtx']

STATS = scales.collection(
    '/root',
    scales.PmfStat('cardReadPoll'),
    scales.IntStat('newCardReads'),
)


class OutputPage(cyclone.web.RequestHandler):

    def put(self):
        arg = self.request.arguments
        if arg.get('s') and arg.get('p'):
            self._onQueryStringStatement(arg['s'][-1], arg['p'][-1],
                                         self.request.body)
        else:
            self._onGraphBodyStatements(self.request.body,
                                        self.request.headers)

    post = put
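# Sketch of exposing these stats over HTTP with the imported StatsHandler,
# assuming the cyclone handler takes a serverName initializer argument like
# its tornado counterpart; the route, port, and serverName are illustrative.
from twisted.internet import reactor

application = cyclone.web.Application([
    (r'/', OutputPage),
    (r'/stats/(.*)', StatsHandler, {'serverName': 'rfid-service'}),
])
reactor.listenTCP(9999, application)
reactor.run()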
class UsersBackend(object):
    """Controls the user model and user data fetching from the database."""

    requests = scales.IntStat('requests')
    latency = scales.PmfStat('latency')

    def __init__(self, config):
        """
        :param config:
        :type config: application.common.tools.Map
        :return:
        """
        scales.init(self, '/backend/users')
        self._connection = None
        self._db_name = config.mongo.db_name
        replica_set = (config.mongo.replicaset_name
                       if 'replicaset_name' in config.mongo else None)

        self._connection = pymongo.MongoClient(
            config.mongo.uri,
            replicaSet=replica_set,
            maxPoolSize=config.mongo.max_pool_size,
            waitQueueMultiple=config.mongo.wait_queue_multiple,
            waitQueueTimeoutMS=config.mongo.wait_queue_timeout_ms,
            tz_aware=True)

        self._db = self._connection[self._db_name]
        self._users = self._db[config.mongo.user_collection]

    def register(self, username, email, password):
        """
        Register the new user

        :param username:
        :type username: str
        :param email:
        :type email: str
        :param password:
        :type password: str
        :return:
        :rtype: None
        :raise DuplicateUser: the username or email already exists
        """
        raise NotImplementedError()

    def authenticate(self, username, password):
        """
        Validate the login/password of a user

        :param username:
        :type username: str
        :param password:
        :type password: str
        :return: the user id if authenticated, else None
        :rtype: bool | None
        """
        raise NotImplementedError()

    def fetch_user(self, user_id):
        """
        Fetch information about user_id

        :param user_id:
        :type user_id: int
        :return: information about the user if it exists, else None
        :rtype: dict[str, T] | None
        """
        raise NotImplementedError()
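# Sketch of how the `requests`/`latency` stats above would typically be fed,
# using PmfStat's time() context manager from greplin.scales; the query is
# illustrative, not from the original backend.
def fetch_user_timed(self, user_id):
    self.requests += 1
    with self.latency.time():  # records elapsed time into the PmfStat
        return self._users.find_one({'_id': user_id})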
import logging

from twisted.python.failure import Failure
from twisted.python.filepath import FilePath
import cyclone.web, cyclone.websocket
from rdflib import ConjunctiveGraph, URIRef, Graph
import twisted.internet.error

from greplin import scales

from rdfdb.file_vs_uri import correctToTopdirPrefix, fileForUri, uriFromFile, DirUriMap
from rdfdb.graphfile import GraphFile, PatchCb, GetSubgraph
from rdfdb.patch import Patch, ALLSTMTS
from rdfdb.rdflibpatch import patchQuads
from standardservice.scalessetup import gatherProcessStats

gatherProcessStats()

stats = scales.collection(
    '/webServer',
    scales.IntStat('clients'),
    scales.IntStat('liveClients'),
    scales.PmfStat('setAttr'),
)
graphStats = scales.collection(
    '/graph',
    scales.IntStat('statements'),
    scales.RecentFpsStat('patchFps'),
)
fileStats = scales.collection(
    '/file',
    scales.IntStat('mappedGraphFiles'),
)

log = logging.getLogger('rdfdb')
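# Sketch of feeding the /webServer collection from a websocket handler
# (hypothetical handler; only the stat names come from the collections above).
class LiveClients(cyclone.websocket.WebSocketHandler):

    def connectionMade(self, *args, **kwargs):
        stats.clients += 1      # total connections ever seen
        stats.liveClients += 1  # currently connected

    def connectionLost(self, reason):
        stats.liveClients -= 1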