Example #1
    def __init__(self, graph, uri, pi, pinNumber):
        self.graph, self.uri, self.pi = graph, uri, pi
        self.pinNumber = pinNumber
        scales.init(self, self.__class__.__name__)
        self.stats = scales.collection(self.__class__.__name__,
                                       scales.PmfStat('poll'),
                                       scales.PmfStat('output'),
                                       )
        self.hostStateInit()
Example #2
    def init_app(self, app):
        self.config = app.config

        enabled = self.config.setdefault("GRAPHITE_ENABLED", False)
        self.app = app

        if not enabled:
            return

        from greplin import scales
        from greplin.scales.graphite import GraphitePeriodicPusher
        from greplin.scales.meter import MeterStat

        host = self.config.setdefault("GRAPHITE_HOST", "localhost")
        port = self.config.setdefault("GRAPHITE_PORT", 2003)
        prefix = self.config.setdefault("GRAPHITE_PREFIX", "doorman")
        period = self.config.setdefault("GRAPHITE_REPORTING_INTERVAL", 60)

        app.metrics = {}
        for rule in app.url_map.iter_rules():
            app.metrics[rule.endpoint] = scales.collection(
                rule.endpoint,
                MeterStat('count'),
                scales.PmfStat('latency'),
            )

        app.graphite = GraphitePeriodicPusher(
            host, port, period=period, prefix=prefix,
        )

        for rule in self.config.setdefault("GRAPHITE_ALLOW", ['*']):
            app.graphite.allow(rule)

        app.graphite.start()
        return
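
A minimal sketch of driving the per-endpoint metrics that init_app() registers above. The extension class name and the request-hook wiring are assumptions, not part of the original snippet:

import time

from flask import Flask, g, request

app = Flask(__name__)
app.config["GRAPHITE_ENABLED"] = True

metrics = GraphiteMetrics()  # hypothetical name for the extension class above
metrics.init_app(app)

@app.before_request
def _start_timer():
    g._start = time.time()

@app.after_request
def _record_metrics(response):
    stats = app.metrics.get(request.endpoint)
    if stats is not None:
        stats.count.mark()                              # MeterStat: one more request
        stats.latency.addValue(time.time() - g._start)  # PmfStat: record the duration
    return response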
Example #3
    def __init__(self, cluster_proxy):
        log.debug("Starting metric capture")

        self.stats = scales.collection('/cassandra',
            scales.PmfStat('request_timer'),
            scales.IntStat('connection_errors'),
            scales.IntStat('write_timeouts'),
            scales.IntStat('read_timeouts'),
            scales.IntStat('unavailables'),
            scales.IntStat('other_errors'),
            scales.IntStat('retries'),
            scales.IntStat('ignores'),

            # gauges
            scales.Stat('known_hosts',
                lambda: len(cluster_proxy.metadata.all_hosts())),
            scales.Stat('connected_to',
                lambda: len(set(chain.from_iterable(s._pools.keys() for s in cluster_proxy.sessions)))),
            scales.Stat('open_connections',
                lambda: sum(sum(p.open_count for p in s._pools.values()) for s in cluster_proxy.sessions)))

        self.request_timer = self.stats.request_timer
        self.connection_errors = self.stats.connection_errors
        self.write_timeouts = self.stats.write_timeouts
        self.read_timeouts = self.stats.read_timeouts
        self.unavailables = self.stats.unavailables
        self.other_errors = self.stats.other_errors
        self.retries = self.stats.retries
        self.ignores = self.stats.ignores
        self.known_hosts = self.stats.known_hosts
        self.connected_to = self.stats.connected_to
        self.open_connections = self.stats.open_connections
Example #4
from greplin import scales
import pprint as pp  # pp.pformat is used by __str__ below

class RequestAnalyzer(object):
    """
    Class used to track request and error counts for a Session.

    Also computes statistics on encoded request size.
    """

    requests = scales.PmfStat('request size')
    errors = scales.IntStat('errors')

    def __init__(self, session):
        scales.init(self, '/cassandra')
        # each instance will be registered with a session, and receive a callback for each request generated
        session.add_request_init_listener(self.on_request)

    def on_request(self, rf):
        # This callback is invoked each time a request is created, on the thread creating the request.
        # We can use this to count events, or add callbacks
        rf.add_callbacks(self.on_success, self.on_error, callback_args=(rf,), errback_args=(rf,))

    def on_success(self, _, response_future):
        # future callback on a successful request; just record the size
        self.requests.addValue(response_future.request_encoded_size)

    def on_error(self, _, response_future):
        # future callback for failed; record size and increment errors
        self.requests.addValue(response_future.request_encoded_size)
        self.errors += 1

    def __str__(self):
        # just extracting request count from the size stats (which are recorded on all requests)
        request_sizes = dict(self.requests)
        count = request_sizes.pop('count')
        return "%d requests (%d errors)\nRequest size statistics:\n%s" % (count, self.errors, pp.pformat(request_sizes))
Example #5
class Stats(ApplicationStats):
    status_ok = meter.MeterStat('status_ok')
    status_failed = meter.MeterStat('status_failed')
    status_forbidden = meter.MeterStat('status_forbidden')
    status_not_found = meter.MeterStat('status_not_found')
    status_not_modified = meter.MeterStat('status_not_modified')
    status_redirect = meter.MeterStat('status_redirect')
    requests = meter.MeterStat('requests')
    latency = scales.PmfStat('latency')

    def __init__(self, parent, name, regexp=None):
        scales._Stats.initChild(self, name, '', parent=parent)
        if regexp is not None:
            self.pattern = re.compile(regexp)
Example #6
class RegisterView(FlaskView):
    """
    Handle signup
    """
    client_errors = scales.IntStat('4xx')
    server_errors = scales.IntStat('5xx')
    latency = scales.PmfStat('latency')

    def __init__(self):
        super(RegisterView, self).__init__()
        scales.init(self, '/api/register')

    def index(self):
        return "Register", 200
Example #7
class ProfileView(FlaskView):
    """
    Shows personal profile
    """

    client_errors = scales.IntStat('4xx')
    server_errors = scales.IntStat('5xx')
    latency = scales.PmfStat('latency')

    def __init__(self):
        super(ProfileView, self).__init__()
        scales.init(self, '/api/profile')
        self._users_backend = UsersBackend(config)

    def index(self):
        return "MyProfile", 200
Example #8
    def __init__(self, cluster_proxy):
        log.debug("Starting metric capture")

        self.stats_name = 'cassandra-{0}'.format(str(self._stats_counter))
        Metrics._stats_counter += 1
        self.stats = scales.collection(
            self.stats_name,
            scales.PmfStat('request_timer'),
            scales.IntStat('connection_errors'),
            scales.IntStat('write_timeouts'),
            scales.IntStat('read_timeouts'),
            scales.IntStat('unavailables'),
            scales.IntStat('other_errors'),
            scales.IntStat('retries'),
            scales.IntStat('ignores'),

            # gauges
            scales.Stat('known_hosts',
                        lambda: len(cluster_proxy.metadata.all_hosts())),
            scales.Stat(
                'connected_to', lambda: len(
                    set(
                        chain.from_iterable(s._pools.keys() for s in
                                            cluster_proxy.sessions)))),
            scales.Stat(
                'open_connections', lambda: sum(
                    sum(p.open_count for p in s._pools.values())
                    for s in cluster_proxy.sessions)))

        # TODO, to be removed in 4.0
        # /cassandra contains the metrics of the first cluster registered
        if 'cassandra' not in scales._Stats.stats:
            scales._Stats.stats['cassandra'] = scales._Stats.stats[
                self.stats_name]

        self.request_timer = self.stats.request_timer
        self.connection_errors = self.stats.connection_errors
        self.write_timeouts = self.stats.write_timeouts
        self.read_timeouts = self.stats.read_timeouts
        self.unavailables = self.stats.unavailables
        self.other_errors = self.stats.other_errors
        self.retries = self.stats.retries
        self.ignores = self.stats.ignores
        self.known_hosts = self.stats.known_hosts
        self.connected_to = self.stats.connected_to
        self.open_connections = self.stats.open_connections
Example #9
class UsersView(FlaskView):
    """
    Users list view
    """

    client_errors = scales.IntStat('4xx')
    server_errors = scales.IntStat('5xx')
    latency = scales.PmfStat('latency')

    def __init__(self):
        super(UsersView, self).__init__()
        scales.init(self, '/api/home')

    def index(self):
        return "UsersList", 200

    def get(self, user_id):
        return "UserID: %s" % user_id, 200
Example #10
class MediasBackend(object):
    """
    Controls media model and media data fetching from database
    """

    requests = scales.IntStat('requests')
    latency = scales.PmfStat('latency')

    def __init__(self, config):
        """
        :param config:
        :type config: application.common.tools.Map
        :return:
        """
        scales.init(self, '/backend/users')
        self._connection = None
        self._db_name = config.mongo.db_name

        replica_set = config.mongo.replicaset_name if 'replicaset_name' in config.mongo else None
        self._connection = pymongo.MongoClient(
            config.mongo.uri,
            replicaSet=replica_set,
            maxPoolSize=config.mongo.max_pool_size,
            waitQueueMultiple=config.mongo.wait_queue_multiple,
            waitQueueTimeoutMS=config.mongo.wait_queue_timeout_ms,
            tz_aware=True)
        self._db = self._connection[self._db_name]
        self._medias = self._db[config.mongo.media_collection]

    def fetch_medias(self, user_id, start=0, max_size=20):
        """
        Fetch all medias of user matching user_id
        :param user_id:
        :type user_id: int
        :param start: the document index from which we should fetch
        :type start: int
        :param max_size: the number of documents to fetch at a time
        :return: list of medias, else None
        :rtype: dict[str, T] | None
        """
        raise NotImplementedError()
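
The backend methods are stubs; a hypothetical implementation of fetch_medias() that also drives the stats declared above (the 'user_id' document field is an assumption):

    def fetch_medias(self, user_id, start=0, max_size=20):
        self.requests += 1         # IntStat: count the call
        with self.latency.time():  # PmfStat: time the query
            cursor = (self._medias.find({'user_id': user_id})
                      .skip(start)
                      .limit(max_size))
            docs = list(cursor)
        return docs or None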
Example #11
class MediasView(FlaskView):
    """
    Medias list view
    """

    client_errors = scales.IntStat('4xx')
    server_errors = scales.IntStat('5xx')
    latency = scales.PmfStat('latency')

    def __init__(self):
        super(MediasView, self).__init__()
        scales.init(self, '/api/medias')
        self._media_backend = MediasBackend(config)
        self._image_processor = ImageProcessorService(config)
        self._video_processor = VideoProcessorService(config)

    def index(self):
        return "MediaList", 200

    def get(self, media_id):
        """
        Return the media passed in parameter
        :param media_id:
        :type media_id: str
        :return:
        """
        return "MediaID: %s" % media_id, 200

    def post(self, data):
        """
        Handle new media
        :param data: the media to save/handle
        :return:
        """
        raise NotImplementedError()

    def delete(self, media_id):
        """
Example #12
from cycloneerr import PrettyErrorHandler
from docopt import docopt
from greplin import scales
from greplin.scales.cyclonehandler import StatsHandler
from mqtt_client import MqttClient
from rdflib import Namespace
from standardservice.logsetup import log, verboseLogging
from twisted.internet import reactor

import rdf_over_http

ROOM = Namespace('http://projects.bigasterisk.com/room/')

STATS = scales.collection(
    '/root',
    scales.PmfStat('putRequests'),
    scales.PmfStat('statement'),
    scales.PmfStat('mqttPublish'),
)

devs = {
    ROOM['kitchenLight']: {
        'root': 'h801_skylight',
        'hasWhite': True,
    },
    ROOM['kitchenCounterLight']: {
        'root': 'h801_counter',
        'hasWhite': True,
    },
    ROOM['livingLampShelf']: {
        'root': 'sonoff_0/switch/sonoff_basic_relay/command',
Example #13
from greplin import scales
from tornado import ioloop, web, httpserver, gen

STATS = scales.collection('/web', scales.PmfStat('latency'))


class TimedHandler(web.RequestHandler):
    @gen.coroutine
    def get(self):
        with STATS.latency.time():
            self.write('finished')


if __name__ == "__main__":
    application = web.Application([
        (r"/", TimedHandler),
    ])

    server = httpserver.HTTPServer(application)
    server.bind(8888)
    server.start(0)
    ioloop.IOLoop.instance().start()
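
After some traffic, the aggregated '/web' latency percentiles can be inspected by dumping the whole stats tree, mirroring the dumpStatsTo() call in Example #17 (the output path here is an assumption):

from greplin import scales
scales.dumpStatsTo('/tmp/web-stats.json')  # writes the '/web' latency PmfStat as JSON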
Example #14
from dateutil.relativedelta import relativedelta, FR
from rdflib import Namespace, Literal
from greplin import scales
from greplin.scales.cyclonehandler import StatsHandler
from patchablegraph import PatchableGraph, CycloneGraphEventsHandler, CycloneGraphHandler
from twilight import isWithinTwilight
from standardservice.logsetup import log, verboseLogging

from rdfdoc import Doc

ROOM = Namespace("http://projects.bigasterisk.com/room/")
DEV = Namespace("http://projects.bigasterisk.com/device/")

STATS = scales.collection(
    '/root',
    scales.PmfStat('update'),
)


class CycloneGraphEventsHandlerWithCors(CycloneGraphEventsHandler):
    def flush(self):
        self.set_header("Access-Control-Allow-Origin", "*")
        return CycloneGraphEventsHandler.flush(self)


@STATS.update.time()
def update(masterGraph):
    stmt = lambda s, p, o: masterGraph.patchObject(ROOM.environment, s, p, o)

    now = datetime.datetime.now(tzlocal())
Example #15
from rx.subjects import BehaviorSubject
from twisted.python.filepath import FilePath
from twisted.internet import reactor

from patchablegraph.patchsource import ReconnectingPatchSource
from rdfdb.rdflibpatch import patchQuads
from rdfdb.patch import Patch

log = logging.getLogger('fetch')

ROOM = Namespace("http://projects.bigasterisk.com/room/")
DEV = Namespace("http://projects.bigasterisk.com/device/")

STATS = scales.collection(
    '/web',
    scales.PmfStat('combineGraph'),
)


def parseRdf(text: str, contentType: str):
    g = Graph()
    g.parse(StringInputSource(text), format={
        'text/n3': 'n3',
    }[contentType])
    return g


class RemoteData(object):
    def __init__(self, onChange: Callable[[], None]):
        """we won't fire onChange during init"""
        self.onChange = onChange
Example #16
import cyclone.web, cyclone.websocket
from rdflib import ConjunctiveGraph, URIRef, Graph
import twisted.internet.error

from rdfdb.file_vs_uri import correctToTopdirPrefix, fileForUri, uriFromFile, DirUriMap
from rdfdb.graphfile import GraphFile, PatchCb, GetSubgraph
from rdfdb.patch import Patch, ALLSTMTS
from rdfdb.rdflibpatch import patchQuads
from standardservice.scalessetup import gatherProcessStats

gatherProcessStats()
stats = scales.collection(
    '/webServer',
    scales.IntStat('clients'),
    scales.IntStat('liveClients'),
    scales.PmfStat('setAttr'),
)
graphStats = scales.collection(
    '/graph',
    scales.IntStat('statements'),
    scales.RecentFpsStat('patchFps'),
)
fileStats = scales.collection(
    '/file',
    scales.IntStat('mappedGraphFiles'),
)

log = logging.getLogger('rdfdb')


class WebsocketDisconnect(ValueError):
Example #17
import contextlib
import datetime
from greplin import scales

import logger

STATS = scales.collection(
    '/goaldecider',
    scales.PmfStat('duration'),
)


def write(filename=None):
    try:
        if filename is None:
            filename = logger.filepath.replace(".py", ".json")
        logger.dbg("Writing metrics to {}".format(filename))
        scales.dumpStatsTo(filename)
    except Exception as e:
        logger.dbg("Exception in stats writing")
        logger.log_exception(e)


@contextlib.contextmanager
def simple_timer(name):
    start_time = datetime.datetime.now()
    yield
    delta = datetime.datetime.now() - start_time
    logger.log("{} duration: {} s".format(name, delta.total_seconds()))
    return False
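
A sketch of combining the pieces above, timing one decision pass with both the scales PmfStat and the simple_timer helper (decide_goals is a hypothetical workload):

with STATS.duration.time(), simple_timer('goal decision'):
    decide_goals()  # hypothetical workload
write()             # dump all scales stats next to the log file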

Example #18
from cyclone.httpclient import fetch
import cyclone
import logging, time, json, random, string, traceback
from logsetup import log, enableTwistedLog
from greplin import scales
from greplin.scales.cyclonehandler import StatsHandler
from export_to_influxdb import InfluxExporter
from tags import NfcDevice, FakeNfc, NfcError, AuthFailedError

ROOM = Namespace('http://projects.bigasterisk.com/room/')

ctx = ROOM['frontDoorWindowRfidCtx']

STATS = scales.collection(
    '/root',
    scales.PmfStat('cardReadPoll'),
    scales.IntStat('newCardReads'),
)


class OutputPage(cyclone.web.RequestHandler):
    def put(self):
        arg = self.request.arguments
        if arg.get('s') and arg.get('p'):
            self._onQueryStringStatement(arg['s'][-1], arg['p'][-1],
                                         self.request.body)
        else:
            self._onGraphBodyStatements(self.request.body,
                                        self.request.headers)

    post = put
Example #19
from inference import infer, readRules
from actions import Actions, PutOutputsTable
from inputgraph import InputGraph
from escapeoutputstatements import unquoteOutputStatements

from standardservice.logsetup import log, verboseLogging
from patchablegraph import PatchableGraph, CycloneGraphHandler, CycloneGraphEventsHandler

ROOM = Namespace("http://projects.bigasterisk.com/room/")
DEV = Namespace("http://projects.bigasterisk.com/device/")

NS = {'': ROOM, 'dev': DEV}

STATS = scales.collection(
    '/web',
    scales.PmfStat('graphChanged'),
    scales.PmfStat('updateRules'),
)


def ntStatement(stmt: Tuple[Node, Node, Node]):
    def compact(u):
        if isinstance(u, URIRef) and u.startswith(ROOM):
            return 'room:' + u[len(ROOM):]
        return u.n3()

    return '%s %s %s .' % (compact(stmt[0]), compact(stmt[1]), compact(
        stmt[2]))


class Reasoning(object):
Example #20
from patchablegraph.patchsource import ReconnectingPatchSource

from collector_config import config


#SourceUri = NewType('SourceUri', URIRef) # doesn't work
class SourceUri(URIRef):
    pass


ROOM = Namespace("http://projects.bigasterisk.com/room/")
COLLECTOR = SourceUri(URIRef('http://bigasterisk.com/sse_collector/'))

STATS = scales.collection(
    '/root',
    scales.PmfStat('getState'),
    scales.PmfStat('localStatementsPatch'),
    scales.PmfStat('makeSyncPatch'),
    scales.PmfStat('onPatch'),
    scales.PmfStat('sendUpdatePatch'),
    scales.PmfStat('replaceSourceStatements'),
)


class LocalStatements(object):
    """
    functions that make statements originating from sse_collector itself
    """
    def __init__(self, applyPatch: Callable[[URIRef, Patch], None]):
        self.applyPatch = applyPatch
        self._sourceState: Dict[SourceUri, URIRef] = {}  # source: state URIRef
Example #21
    def poll(self):
        milliC = open('/sys/class/thermal/thermal_zone0/temp').read().strip()
        c = float(milliC) / 1000.
        f = c * 1.8 + 32
        return [
            (self.uri, ROOM['temperatureF'], Literal(round(f, 3), datatype=XSD['decimal'])),
            ]

    def watchPrefixes(self):
        # these uris will become dynamic! see note on watchPrefixes
        # about eliminating it.
        return [(self.uri, ROOM['temperatureF']),
                ]

pixelStats = scales.collection('/rgbPixels',
                               scales.PmfStat('updateOutput'),
                               scales.PmfStat('currentColors'),
                               scales.PmfStat('poll'),
                               )

@register
class RgbPixels(DeviceType):
    """chain of ws2812 rgb pixels on pin GPIO18"""
    deviceType = ROOM['RgbPixels']

    def hostStateInit(self):
        self.anim = RgbPixelsAnimation(self.graph, self.uri, self.updateOutput)
        log.debug('%s maxIndex = %s', self.uri, self.anim.maxIndex())
        self.neo = rpi_ws281x.Adafruit_NeoPixel(self.anim.maxIndex() + 1, pin=18)
        self.neo.begin()
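
Following the decorator form shown in Example #14 (@STATS.update.time()), the '/rgbPixels' timers above could wrap the device methods like this (a sketch; the method body is an assumption):

    @pixelStats.updateOutput.time()
    def updateOutput(self):
        self.neo.show()  # push the current pixel buffer out to the strip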
Example #22
import os, contextlib
try:
    from rdflib.Graph import Graph
except ImportError:
    from rdflib import Graph

from rdflib import Namespace
from rdflib.parser import StringInputSource

from FuXi.Rete.Util import generateTokenSet
from FuXi.Rete import ReteNetwork
from FuXi.Rete.RuleStore import N3RuleStore

from greplin import scales
STATS = scales.collection('/web', scales.PmfStat('readRules'))

from escapeoutputstatements import escapeOutputStatements
ROOM = Namespace("http://projects.bigasterisk.com/room/")


def _loadAndEscape(ruleStore, n3, outputPatterns):
    ruleGraph = Graph(ruleStore)

    # Can't escapeOutputStatements in the ruleStore since it
    # doesn't support removals. Can't copy plainGraph into
    # ruleGraph since something went wrong with traversing the
    # triples inside quoted graphs, and I lose all the bodies
    # of my rules. This serialize/parse version is very slow (400ms),
    # but it only runs when the file changes.
    plainGraph = Graph()
Example #23
class RequestMachine:
    """
    Don't use IDLE as the primary state identifier when
    trying to find out if the first request has been sent.
    Use stats instead. One may put the machine in idle
    after stopping, and it would still use the cached
    limits.
    """

    # state averages from 75, 95, 98, 99, 999,
    # min, max, median, mean, and stddev
    latency = scales.PmfStat('latency')

    # timing for 1/5/15 minute averages
    latency_window = meter.MeterStat('latency_window')

    console = logging.StreamHandler()
    format_str = '%(asctime)s\t%(levelname)s -- %(processName)s %(filename)s:%(lineno)s -- %(message)s'
    console.setFormatter(logging.Formatter(format_str))

    def __init__(self, session, request_spec):
        self.log = logging.getLogger()
        self.log.setLevel(logging.DEBUG)
        self.log.addHandler(self.console)
        self.uuid = uuid4()  # unique identifier
        scales.init(self, '/request-machine/%s' % self.uuid)
        # self.register_child_stat('/request-machine/%s' % self.uuid)
        self.session = session  # request http session
        self.request_spec = request_spec  # request spec with spec of call
        self.timings = RequestTimings(request_spec)
        self.state = RequestMachineStates.Idle  # current state
        self._seed_session()

    def _seed_temporal_data(self):
        # add temporal based headers (time limits, etag, etc...)
        if self.timings.etag is not None:
            self.session.headers.update({
                self.request_spec.headers.ifnonmatch:
                RequestSpec.wrap(self.timings.etag)
            })

    def _seed_session(self):
        # add all headers (token, auth, etc...)
        self.session.headers.update(self.request_spec.send_headers)

        self._seed_temporal_data()

    def limit_has_reset(self):
        """
        Call this if the remaining limit is 0.
        :return:
        """
        time_stamp = self.timings.last_request_timestamp
        ttr = float(self.timings.time_to_reset)
        now = timeutils.milliseconds()

        # last time called was before or at the reset and
        # time now is at or beyond the reset allow
        if time_stamp <= ttr and now >= ttr:
            print "ts less than ttr and now greater than ttr"
            return True
        else:
            return False

    def limit_reached(self):
        return int(self.timings.rate_limit_remaining) <= 0

    def past_interval(self):
        now = timeutils.milliseconds()
        last = self.timings.last_request_timestamp
        estimated_interval_ts = last + float(self.timings.interval)
        result = now > estimated_interval_ts

        if not result:
            self.log.debug("now (%d) not yet past the interval (%d)" %
                           (now, estimated_interval_ts))

        return result

    def past_reset_window(self):
        now = timeutils.milliseconds()
        reset_window = float(self.timings.time_to_reset)
        result = now >= reset_window

        if not result:
            self.log.debug("request not yet past reset window")

        return result

    def request_made_since_reset_window(self):
        timestamp = float(self.timings.last_request_timestamp)
        reset_window = float(self.timings.time_to_reset)
        result = timestamp >= reset_window

        if result:
            self.log.warn("request was made since reset window")

        return result

    def edge_case(self):
        result = self.limit_reached() \
               and self.past_reset_window() \
               and self.request_made_since_reset_window()

        if result:
            self.log.error("RequestMachine edge case detected")

        return result

    def can_request(self):
        if self.edge_case():
            return False, RequestMachineStates.EdgeCaseError

        if self.past_interval():
            # limit not yet reached: ok to request
            if not self.limit_reached():
                return True, None  # caller unpacks (can_request, proposed_state)
            else:  # limit reached
                if self.past_reset_window():  # limit should be reset
                    return True, None
                return False, RequestMachineStates.WaitingForReset

        return False, RequestMachineStates.WaitingForIntervalToPass

    def _update(self, resp):
        self.timings.update(resp)
        self._seed_temporal_data()

    def _idle_state(self):
        self.state = RequestMachineStates.Idle

    def _error_state(self):
        self.state = RequestMachineStates.Error

    def _processing_state(self):
        self.state = RequestMachineStates.Processing

    def reset_state(self):
        self._idle_state()

    def has_error_state(self):
        return self.state == RequestMachineStates.Error \
               or self.state == RequestMachineStates.EdgeCaseError

    def get(self):
        if self.has_error_state():
            print "Cannot proceed, machine is in ERROR state. Please see the logs."
            return None

        can_request, proposed_error_state = self.can_request()

        if can_request:
            self._processing_state()

            with self.latency.time():
                self.latency_window.mark()
                resp = self.session.get(self.request_spec.uri)

                if resp.status_code == 304:
                    self.state = RequestMachineStates.WaitingForModifiedContent
                    # even with 304s, the server will still attempt
                    # to send limits back.
                    self._update(resp)
                    return resp
                elif resp.status_code == 200:
                    self._update(resp)
                    self._idle_state()
                    return resp
                else:
                    print "Request failed!"
                    print resp.status_code
                    print resp.content
                    self._error_state()

                    # even with 304s, the server will still attempt
                    # to send limits back.
                    self._update(resp)

                    self._idle_state()
                    return resp
        else:
            self.state = proposed_error_state
            return None
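
A sketch of driving the machine; the RequestSpec construction is elided and request_spec is a placeholder:

import requests

machine = RequestMachine(requests.Session(), request_spec)
resp = machine.get()  # a Response on 200/304, or None while rate-limited
if resp is None and machine.state is RequestMachineStates.WaitingForReset:
    pass  # back off until the reset window passes, then call get() again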
Example #24
class BaseService(ErrorHandlerMixin):
    """
    A base service. Note this is a class that composites a greenlet, in
    contrast with the Actor class, which subclasses greenlet and may be more
    performant and lightweight. This class still aims for performance but
    carries many management routines and actions.

    This base class has many of the same attributes as the underlying gevent
    greenlet, but note they are not the same. Effort was made to make semantic
    usage easier. Do not assume anything works the same as in gevent.
    """

    # state averages from 75, 95, 98, 99, 999,
    # min, max, median, mean, and stddev
    latency = scales.PmfStat('latency')

    # timing for 1/5/15 minute averages
    latency_window = meter.MeterStat('latency_window')

    # console = logging.StreamHandler()
    # format_str = '%(asctime)s\t%(levelname)s -- %(processName)s %(filename)s:%(lineno)s -- %(message)s'
    # console.setFormatter(logging.Formatter(format_str))

    def __init__(self,
                 name="base-service",
                 directory_proxy=None,
                 parent_logger=None,
                 enable_service_recovery=False):
        """
        uuid - a uuid4 value for the service
        alias - a colloquial alias
        unique_name - a name which includes an easier to remember alias with the uuid

        :param name:
        :param directory_proxy:
        :param parent_logger:
        :return:
        """
        ErrorHandlerMixin.__init__(self)

        # time indexes
        self.time_starting_index = None  # time index when service was 'starting'
        self.time_started_index = None  # time index when service was started

        self.uuid = uuid4()  # unique uuid
        self.alias = name  # name, may collide
        self.unique_name = '%s/%s' % (
            self.alias, self.uuid
        )  # a unique name for this service, will always be unique
        scales.init(self, self.unique_name)

        if parent_logger is None:  # no parent, use fq name
            self.lineage = "%s" % self.unique_name
        else:
            parent_name = parent_logger._context["name"]
            self.lineage = "%s/%s" % (parent_name, self.unique_name)

        self.log = Logger.get_logger(self.lineage)
        self.greenlet = None
        self._service_state = None
        self.set_state(BaseStates.Idle)

        # directory service proxy
        self.directory_proxy = directory_proxy

        # service recovery option
        self.enable_service_recovery = enable_service_recovery

        self.log.debug("Initialized.")

    def event_loop(self):
        """
        Override this
        """
        # while True:
        #     with self.latency.time():
        #         self.latency_window.mark()
        #         # do some work here
        #         # sleep or idle
        while self.should_loop():
            gevent.idle()

    def should_loop(self):
        # return not self.ready() or not self.has_stopped()
        # We signal on whether the service has started, rather than "started or
        # idle", which would be a confusing state in which a loop would still be
        # allowed to execute. To narrow down to one state, `started` was chosen.
        return self.has_started()

    def register_child_stat(self, name):
        scales.initChild(self, name)

    def register(self):
        """
        Once a service has been actively managed, it is populated
        by the service manager with additional information or services.
        This method registers that data.
        Typically you will find a database and queue proxy to be set
        here.
        :return:
        """
        pass

    def did_service_timeout(self):
        """
        A timeout occurs when a service remains in the starting phase.
        :return:
        """
        timeout = self.get_directory_service_proxy().get_service_meta(
            self.alias).start_timeout

        if timeout > 0 and self.is_starting():
            delay = self.get_directory_service_proxy().get_service_meta(
                self.alias).delay

            # calculate by adding the delay that will be introduced
            # with the timeout value, and this time index will be the maximum time
            # which with to wait for the service to start
            expected_timeout = delay + timeout

            # now determine if the time that has passed has met or exceeded the
            # calculated timeout index from above
            return self.start_time_delta() >= expected_timeout

        return False

    def start_time_delta(self):
        """
        Returns the time difference between now and when the service entered the
        `Starting` state, i.e., how long it has been since the service was first
        indexed as `Starting`. Note that this will always calculate,
        and should be used as a utility method.
        :return:
        """
        now = time.time()
        return now - self.time_starting_index

    def pre_handle_error(self, exception):
        """
        Triggered before error handlers have run.
        This may be overridden for additional functionality.
        :param exception: An optional exception.
        :return:
        """
        pass

    def post_handle_error(self, exception=None):
        """
        Triggered after error handlers have run.
        This may be overridden for additional functionality.
        :param exception: An optional exception.
        :return:
        """
        pass

    def start_event_loop(self):
        self.log.debug("service starting event loop...")
        self.set_state(BaseStates.Started)
        self.time_started_index = time.time()

        # self.event_loop()

        try:
            self.event_loop()
        except Exception as ex:
            self.handle_error(ex)
            self.post_handle_error(
                ex
            )  # a built-in method which signals the post event of handling errors

    def start(self, meta=None):
        if self.get_state(
        ) is not BaseStates.Idle:  # or not self.enable_service_recovery:
            self.log.error(
                "could not start service as it is not in an idle state, current state: [%s]"
                % self.get_state(),
                state=self.get_state())
            raise ServiceNotIdleException()

        if meta is not None:
            # delays are only allowed on first start, after this a function must be supplied
            delay = 0
            if meta.starts == 0 and meta.delay > 0:
                delay = meta.delay
                self.log.debug("service starting with delay...", delay=delay)
            else:
                delay = meta.next_delay()
                self.log.debug("service starting with delay function...",
                               delay=delay,
                               delay_func=meta.retry_delay_fx.__name__)

            self.time_starting_index = time.time()
            self.greenlet = gevent.spawn_later(delay, self.start_event_loop)
            self.set_state(
                BaseStates.Starting
            )  # TODO: time how long services take to actually start
        else:  # no meta, assume base service
            self.time_starting_index = time.time()
            self.set_state(
                BaseStates.Starting
            )  # TODO: time how long services take to actually start
            self.log.debug("service starting...")
            self.greenlet = gevent.spawn(self.start_event_loop)

        #
        # --- NO CODE BEYOND THE IF/ELSE BLOCK ABOVE ---
        #
        # this method works asynchronously and thus code here will execute immediately

        return self.greenlet

    def stop(self):
        self.log.info("Stopping...")
        self.set_state(BaseStates.Stopping)

        if self.greenlet is not None:
            gevent.kill(self.greenlet)
        else:
            self.log.warn("service [%s] was found already stopped." %
                          self.lineage)

        self.set_state(BaseStates.Stopped)
        return self.greenlet

    def get_greenlet(self):
        return self.greenlet

    def ready(self):
        """
        Only return True when the state is Idle, which means
        the service is ready to accept specs to start. Start
        should assume the service is already acting on
        a user's request for 'work'. This is a boolean
        alias for checking whether the service is Idle.
        :return:
        """
        if self.get_state() is BaseStates.Idle:
            return True

        return False

    def has_started(self):
        return self.get_state() is BaseStates.Started

    def has_stopped(self):
        return self.get_state() is BaseStates.Stopped

    def is_starting(self):
        return self.get_state() is BaseStates.Starting

    def is_stopping(self):
        return self.get_state() is BaseStates.Stopping

    def has_state(self):
        return self.get_state() is not None

    def is_zombie(self):
        """
        If there is no state such as Idle, Start, or Stop then this service
        is a zombie.
        :return:
        """
        return not self.has_state() or not self.greenlet.started

    def is_truly_dead(self):
        """
        Not a zombie. Stopped is not dead. Zombie is not dead (It's alive stupid!).
        This method checks if the greenlet was not successful and has logged an
        exception.

        Note: below I don't check for service.idle() because that is a sign that
        a service is alive and about to be started. Instead I focus on the greenlet
        which if was created will still have values such as exception and successful
        registered.
        :return:
        """
        return self.greenlet is not None \
            and not self.greenlet.successful() \
            and self.greenlet.exception is not None

    def idle(self):
        """
        Resets a service which you expect to restart.
        Prior to started a service it must be set to idle.
        :return:
        """
        if self.get_state() is BaseStates.Stopped or self.is_zombie():
            self.set_state(BaseStates.Idle)
        else:
            raise IdleActionException()

    def get_state(self):
        return self._service_state

    def set_state(self, state):
        """
        Should be used only by base class and inheritors
        """
        self.log.debug("Service state is being set to: [%s]" % state)
        self._service_state = state

    def set_directory_service_proxy(self, directory_proxy):
        self.directory_proxy = directory_proxy

    def get_directory_service_proxy(self):
        return self.directory_proxy
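
A sketch of a concrete subclass overriding event_loop() along the lines of the commented-out template above:

class HeartbeatService(BaseService):
    def event_loop(self):
        while self.should_loop():
            with self.latency.time():        # PmfStat: per-iteration percentiles
                self.latency_window.mark()   # MeterStat: 1/5/15-minute rates
                gevent.sleep(1.0)            # do some work here, then idle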
Example #25
from configparser import ConfigParser
from io import StringIO

try:
    import _thread as thread
except ImportError:
    import thread

log = logging.getLogger(__name__)
stats = scales.collection(
    '/celery',
    scales.IntStat('started'),
    scales.IntStat('succeeded'),
    scales.IntStat('failed'),
    scales.IntStat('retried'),
    scales.PmfStat('queuetime'),
    scales.PmfStat('runtime'),
)
stats_queue = type('Stats:queues', (object, ), {})()
scales._Stats.initChild(stats_queue, 'queues', '', stats)


def task_handler(fn):
    @wraps(fn)
    def wrapper(self, event):
        self.state.event(event)
        task = self.state.tasks.get(event['uuid'])
        return fn(self, event, task)

    return wrapper
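
A sketch of applying task_handler to celery event callbacks that feed the '/celery' stats above; the Monitor class and handler names are hypothetical:

from celery.events.state import State

class Monitor(object):
    def __init__(self):
        self.state = State()  # task_handler's wrapper expects self.state

    @task_handler
    def on_task_started(self, event, task):
        stats.started += 1    # IntStat increment

    @task_handler
    def on_task_succeeded(self, event, task):
        stats.succeeded += 1
        if task is not None and task.started:
            stats.runtime.addValue(event['timestamp'] - task.started)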
Example #26
import time, logging, math, os, sys, socket
from influxdb import InfluxDBClient
from rdflib import Namespace
from twisted.internet import task
from greplin import scales

log = logging.getLogger()
ROOM = Namespace('http://projects.bigasterisk.com/room/')

stats = scales.collection(
    '/export_to_influxdb',
    scales.PmfStat('exportToInflux'),
)


class RetentionPolicies(object):
    def __init__(self, influx):
        self.influx = influx
        self.createdPolicies = set() # days

    def getCreatedPolicy(self, days):
        name = 'ret_%d_day' % days
        if days not in self.createdPolicies:
            self.influx.create_retention_policy(name,
                                                duration='%dd' % days,
                                                replication='1')
            self.createdPolicies.add(days)
        return name

class InfluxExporter(object):
    def __init__(self, configGraph, influxHost='bang5'):
Example #27
class BaseService:
    """
    A base service. Note this is a class that
    composites a greenlet, in contrast with the
    Actor class, which subclasses greenlet for
    performance and to stay lightweight. This
    class still aims for performance but carries
    many necessary management routines and actions.
    """

    # state averages from 75, 95, 98, 99, 999,
    # min, max, median, mean, and stddev
    latency = scales.PmfStat('latency')

    # timing for 1/5/15 minute averages
    latency_window = meter.MeterStat('latency_window')

    # console = logging.StreamHandler()
    # format_str = '%(asctime)s\t%(levelname)s -- %(processName)s %(filename)s:%(lineno)s -- %(message)s'
    # console.setFormatter(logging.Formatter(format_str))

    def event_loop(self):
        """
        Override
        """
        # while True:
        #     with self.latency.time():
        #         self.latency_window.mark()
        #         # do some work here
        #         # sleep or keep going
        pass

    def __init__(self, name="base-service", directory_proxy=None):
        self.uuid = uuid4()
        self.unique_name = '/%s/%s' % (name, self.uuid)
        scales.init(self, self.unique_name)

        # self.log = logging.getLogger(self.unique_name)
        # self.log.setLevel(logging.DEBUG)
        # self.log.addHandler(self.console)
        self.log = Logger.get_logger(self.unique_name)

        print "%s - Init" % name
        self.name = name
        self.greenlet = None
        self._service_state = BaseStates.Idle

        # directory service proxy
        self._directory_proxy = directory_proxy

    def register_child_stat(self, name):
        scales.initChild(self, name)

    def start(self):
        # print "%s - Starting..." % self.name
        self.greenlet = gevent.spawn(self.event_loop)
        self._service_state = BaseStates.Started
        return self.greenlet

    def stop(self):
        # print "%s - Stopping..." % self.name
        gevent.kill(self.greenlet)
        self._service_state = BaseStates.Stopped
        return self.greenlet

    def get_greenlet(self):
        return self.greenlet

    def ready(self):
        """
        Only return True when the state is Idle, which means
        the service is ready to accept specs to start. Start
        should assume the service is already acting on
        a user's request for 'work'. This is a boolean
        alias for checking whether the service is Idle.
        :return:
        """
        if self.get_state() is BaseStates.Idle:
            return True

        return False

    def idle(self):
        if self.get_state() is BaseStates.Stopped:
            self._service_state = BaseStates.Idle
        else:
            raise IdleActionException()

    def get_state(self):
        return self._service_state

    def set_state(self, state):
        """
        Should be used only by base class and inheritors
        """
        self._service_state = state

    def set_directory_service_proxy(self, directory_proxy):
        self._directory_proxy = directory_proxy

    def get_directory_service_proxy(self):
        return self._directory_proxy
Example #28
from greplin import scales
from greplin.scales.cyclonehandler import StatsHandler
from influxdb import InfluxDBClient
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
import cyclone.web
from rdflib import Namespace, Literal

from standardservice.logsetup import log, verboseLogging
from patchablegraph import PatchableGraph, CycloneGraphHandler, CycloneGraphEventsHandler

from private_config import deviceIp, cloudId, installId, macId, periodSec
ROOM = Namespace("http://projects.bigasterisk.com/room/")

STATS = scales.collection('/root',
                          scales.PmfStat('poll'),
                          )

authPlain = cloudId + ':' + installId
auth = binascii.b2a_base64(authPlain.encode('ascii')).strip(b'=\n')

class Poller(object):
    def __init__(self, influx, graph):
        self.influx = influx
        self.graph = graph
        reactor.callLater(0, self.poll)

    @STATS.poll.time()
    @inlineCallbacks
    def poll(self):
        ret = None
Example #29
class UsersBackend(object):
    """
    Controls user model and user data fetching from database
    """

    requests = scales.IntStat('requests')
    latency = scales.PmfStat('latency')

    def __init__(self, config):
        """
        :param config:
        :type config: application.common.tools.Map
        :return:
        """
        scales.init(self, '/backend/users')
        self._connection = None
        self._db_name = config.mongo.db_name

        replica_set = config.mongo.replicaset_name if 'replicaset_name' in config.mongo else None
        self._connection = pymongo.MongoClient(
            config.mongo.uri,
            replicaSet=replica_set,
            maxPoolSize=config.mongo.max_pool_size,
            waitQueueMultiple=config.mongo.wait_queue_multiple,
            waitQueueTimeoutMS=config.mongo.wait_queue_timeout_ms,
            tz_aware=True)
        self._db = self._connection[self._db_name]
        self._users = self._db[config.mongo.user_collection]

    def register(self, username, email, password):
        """
        Register the new user
        :param username:
        :type username: str
        :param email:
        :type email: str
        :param password:
        :type password: str
        :return:
        :rtype: None
        :raise DuplicateUser: the username or email already exists
        """
        raise NotImplementedError()

    def authenticate(self, username, password):
        """
        Validate login/password of a user
        :param username:
        :type username: str
        :param password:
        :type password: str
        :return: the user id if authenticated, else None
        :rtype: bool | None
        """
        raise NotImplementedError()

    def fetch_user(self, user_id):
        """
        Fetch information about user_id
        :param user_id:
        :type user_id: int
        :return: information about the user if they exist, else None
        :rtype: dict[str, T] | None
        """
        raise NotImplementedError()
Example #30
from rdflib import URIRef, Namespace, Literal, RDF, RDFS, XSD, ConjunctiveGraph
from twisted.internet import reactor, task
import cyclone.web

from greplin import scales
from greplin.scales.cyclonehandler import StatsHandler
from patchablegraph import PatchableGraph, CycloneGraphEventsHandler, CycloneGraphHandler
from standardservice.logsetup import log, verboseLogging

DEV = Namespace("http://projects.bigasterisk.com/device/")
ROOM = Namespace("http://projects.bigasterisk.com/room/")
ctx = DEV['dhcp']

STATS = scales.collection(
    '/root',
    scales.PmfStat('readLeases'),
    scales.IntStat('filesDidntChange'),
)


def timeLiteral(dt):
    return Literal(dt.replace(tzinfo=tzlocal()).isoformat(),
                   datatype=XSD.dateTime)


def macUri(macAddress: str) -> URIRef:
    return URIRef("http://bigasterisk.com/mac/%s" % macAddress.lower())


class Poller:
    def __init__(self, graph):