Example #1
    def init_app(self, app):
        self.config = app.config

        enabled = self.config.setdefault("GRAPHITE_ENABLED", False)
        self.app = app

        if not enabled:
            return

        from greplin import scales
        from greplin.scales.graphite import GraphitePeriodicPusher
        from greplin.scales.meter import MeterStat

        host = self.config.setdefault("GRAPHITE_HOST", "localhost")
        port = self.config.setdefault("GRAPHITE_PORT", 2003)
        prefix = self.config.setdefault("GRAPHITE_PREFIX", "doorman")
        period = self.config.setdefault("GRAPHITE_REPORTING_INTERVAL", 60)

        app.metrics = {}
        for rule in app.url_map.iter_rules():
            app.metrics[rule.endpoint] = scales.collection(
                rule.endpoint,
                MeterStat('count'),
                scales.PmfStat('latency'),
            )

        app.graphite = GraphitePeriodicPusher(
            host, port, period=period, prefix=prefix,
        )

        for rule in self.config.setdefault("GRAPHITE_ALLOW", ['*']):
            app.graphite.allow(rule)

        app.graphite.start()
        return
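
A hedged sketch of how the per-endpoint collections built above might be consumed; the decorator is an illustration, not part of the original extension (MeterStat.mark() and the PmfStat.time() context manager are the scales idioms seen throughout these examples):

import functools
from flask import request

def timed_endpoint(app):
    """Hypothetical decorator using the stats created in init_app()."""
    def deco(view):
        @functools.wraps(view)
        def wrapper(*args, **kwargs):
            metrics = app.metrics[request.endpoint]
            metrics.count.mark()           # MeterStat: one more hit
            with metrics.latency.time():   # PmfStat: record wall-clock time
                return view(*args, **kwargs)
        return wrapper
    return deco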
Example #2
    def __init__(self, uri, config, masterGraph, mqtt, influx):
        self.uri = uri
        self.config = config
        self.masterGraph = masterGraph
        self.mqtt = mqtt
        self.influx = influx

        self.mqttTopic = self.topicFromConfig(self.config)

        statPath = '/subscribed_topic/' + self.mqttTopic.decode(
            'ascii').replace('/', '|')
        scales.init(self, statPath)
        self._mqttStats = scales.collection(statPath + '/incoming',
                                            scales.IntStat('count'),
                                            scales.RecentFpsStat('fps'))

        rawBytes = self.subscribeMqtt()
        rawBytes = rx.operators.do_action(self.countIncomingMessage)(rawBytes)
        parsed = self.getParser()(rawBytes)

        g = self.config
        for conv in g.items(g.value(self.uri, ROOM['conversions'])):
            parsed = self.conversionStep(conv)(parsed)

        outputQuadsSets = rx.combine_latest(*[
            self.makeQuads(parsed, plan)
            for plan in g.objects(self.uri, ROOM['graphStatements'])
        ])

        outputQuadsSets.subscribe(self.updateQuads)
Example #3
    def __init__(self, min_level=NOTSET, include_loggers=NOTSET, exclude_loggers=NOTSET,
                host=NOTSET, port=NOTSET, prefix=NOTSET, *args, **kwargs):

        super(GraphiteProcessor, self).__init__(*args, **kwargs)

        if min_level is NOTSET:
            min_level = getattr(settings, 'GRAPHITE_LEVEL', logging.DEBUG)
        if include_loggers is NOTSET:
            include_loggers = getattr(settings, 'GRAPHITE_INCLUDE_LOGGERS', None)
        if exclude_loggers is NOTSET:
            exclude_loggers = getattr(settings, 'GRAPHITE_EXCLUDE_LOGGERS', None)
        if host is NOTSET:
            host = getattr(settings, 'GRAPHITE_HOST', '127.0.0.1')
        if port is NOTSET:
            port = getattr(settings, 'GRAPHITE_PORT', 2444)
        if prefix is NOTSET:
            prefix = getattr(settings, 'GRAPHITE_PREFIX', 'sentry')

        self.min_level = min_level
        self.include_loggers = include_loggers
        self.exclude_loggers = exclude_loggers
        self.host = host
        self.port = port
        self.prefix = prefix

        # self.client = Client(host=self.host, port=self.port)
        self.stats = scales.collection('/sentry', scales.IntStat('totalEvents'),
                                  scales.IntDictStat('eventsByTop'))
        global pusher
        if pusher is None:
            pusher = graphite.GraphitePeriodicPusher(self.host, self.port, self.prefix)
            pusher.start()
Example #4
    def __init__(self, cluster_proxy):
        log.debug("Starting metric capture")

        self.stats = scales.collection('/cassandra',
            scales.PmfStat('request_timer'),
            scales.IntStat('connection_errors'),
            scales.IntStat('write_timeouts'),
            scales.IntStat('read_timeouts'),
            scales.IntStat('unavailables'),
            scales.IntStat('other_errors'),
            scales.IntStat('retries'),
            scales.IntStat('ignores'),

            # gauges
            scales.Stat('known_hosts',
                lambda: len(cluster_proxy.metadata.all_hosts())),
            scales.Stat('connected_to',
                lambda: len(set(chain.from_iterable(s._pools.keys() for s in cluster_proxy.sessions)))),
            scales.Stat('open_connections',
                lambda: sum(sum(p.open_count for p in s._pools.values()) for s in cluster_proxy.sessions)))

        self.request_timer = self.stats.request_timer
        self.connection_errors = self.stats.connection_errors
        self.write_timeouts = self.stats.write_timeouts
        self.read_timeouts = self.stats.read_timeouts
        self.unavailables = self.stats.unavailables
        self.other_errors = self.stats.other_errors
        self.retries = self.stats.retries
        self.ignores = self.stats.ignores
        self.known_hosts = self.stats.known_hosts
        self.connected_to = self.stats.connected_to
        self.open_connections = self.stats.open_connections
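
This is the Metrics class from the DataStax cassandra-driver; applications normally reach it through the cluster rather than constructing it directly. A minimal sketch, assuming a locally reachable node (the 'mean' key follows scales' PmfStatDict layout):

from cassandra.cluster import Cluster

cluster = Cluster(['127.0.0.1'], metrics_enabled=True)  # driver builds Metrics for us
session = cluster.connect()
session.execute('SELECT release_version FROM system.local')
print(cluster.metrics.request_timer['mean'])  # PmfStat values read like a dict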
Example #5
    def __init__(self, graph, uri, pi, pinNumber):
        self.graph, self.uri, self.pi = graph, uri, pi
        self.pinNumber = pinNumber
        scales.init(self, self.__class__.__name__)
        self.stats = scales.collection(self.__class__.__name__,
                                       scales.PmfStat('poll'),
                                       scales.PmfStat('output'),
        )
        self.hostStateInit()
Example #6
    def __init__(self, redis):
        """
        """
        self.log = logging.getLogger("{}.Gymkhana".format(__name__))
        self.redis = redis
        self.stats = scales.collection(
            '/ghymkhana',
            scales.IntDictStat('competition_add'),
            scales.IntDictStat('competition_recovery'),
            scales.IntDictStat('competition_update'),
            scales.IntDictStat('competition_validation_failed'),
            scales.IntDictStat('competition_removed'),
        )
Example #7
    def testCollection(self):
        """Tests for a stat collection."""
        collection = scales.collection('/thePath', scales.IntStat('count'),
                                       scales.IntDictStat('histo'))
        collection.count += 100
        collection.histo['cheese'] += 12300
        collection.histo['cheese'] += 45

        self.assertEqual(
            {'thePath': {
                'count': 100,
                'histo': {
                    'cheese': 12345
                }
            }}, scales.getStats())
Example #8
  def testCollection(self):
    """Tests for a stat collection."""
    collection = scales.collection('/thePath', scales.IntStat('count'), scales.IntDictStat('histo'))
    collection.count += 100
    collection.histo['cheese'] += 12300
    collection.histo['cheese'] += 45

    self.assertEqual({
      'thePath': {
        'count': 100,
        'histo': {
          'cheese': 12345
        }
      }
    }, scales.getStats())
Example #9
    def __init__(self, cluster_proxy):
        log.debug("Starting metric capture")

        self.stats_name = 'cassandra-{0}'.format(str(self._stats_counter))
        Metrics._stats_counter += 1
        self.stats = scales.collection(
            self.stats_name,
            scales.PmfStat('request_timer'),
            scales.IntStat('connection_errors'),
            scales.IntStat('write_timeouts'),
            scales.IntStat('read_timeouts'),
            scales.IntStat('unavailables'),
            scales.IntStat('other_errors'),
            scales.IntStat('retries'),
            scales.IntStat('ignores'),

            # gauges
            scales.Stat('known_hosts',
                        lambda: len(cluster_proxy.metadata.all_hosts())),
            scales.Stat(
                'connected_to', lambda: len(
                    set(
                        chain.from_iterable(s._pools.keys() for s in
                                            cluster_proxy.sessions)))),
            scales.Stat(
                'open_connections', lambda: sum(
                    sum(p.open_count for p in s._pools.values())
                    for s in cluster_proxy.sessions)))

        # TODO, to be removed in 4.0
        # /cassandra contains the metrics of the first cluster registered
        if 'cassandra' not in scales._Stats.stats:
            scales._Stats.stats['cassandra'] = scales._Stats.stats[
                self.stats_name]

        self.request_timer = self.stats.request_timer
        self.connection_errors = self.stats.connection_errors
        self.write_timeouts = self.stats.write_timeouts
        self.read_timeouts = self.stats.read_timeouts
        self.unavailables = self.stats.unavailables
        self.other_errors = self.stats.other_errors
        self.retries = self.stats.retries
        self.ignores = self.stats.ignores
        self.known_hosts = self.stats.known_hosts
        self.connected_to = self.stats.connected_to
        self.open_connections = self.stats.open_connections
Example #10
    def __init__(self, cluster_proxy):
        log.debug("Starting metric capture")

        self.stats_name = "cassandra-{0}".format(str(self._stats_counter))
        Metrics._stats_counter += 1
        self.stats = scales.collection(
            self.stats_name,
            scales.PmfStat("request_timer"),
            scales.IntStat("connection_errors"),
            scales.IntStat("write_timeouts"),
            scales.IntStat("read_timeouts"),
            scales.IntStat("unavailables"),
            scales.IntStat("other_errors"),
            scales.IntStat("retries"),
            scales.IntStat("ignores"),
            # gauges
            scales.Stat("known_hosts", lambda: len(cluster_proxy.metadata.all_hosts())),
            scales.Stat(
                "connected_to", lambda: len(set(chain.from_iterable(s._pools.keys() for s in cluster_proxy.sessions)))
            ),
            scales.Stat(
                "open_connections",
                lambda: sum(sum(p.open_count for p in s._pools.values()) for s in cluster_proxy.sessions),
            ),
        )

        # TODO, to be removed in 4.0
        # /cassandra contains the metrics of the first cluster registered
        if "cassandra" not in scales._Stats.stats:
            scales._Stats.stats["cassandra"] = scales._Stats.stats[self.stats_name]

        self.request_timer = self.stats.request_timer
        self.connection_errors = self.stats.connection_errors
        self.write_timeouts = self.stats.write_timeouts
        self.read_timeouts = self.stats.read_timeouts
        self.unavailables = self.stats.unavailables
        self.other_errors = self.stats.other_errors
        self.retries = self.stats.retries
        self.ignores = self.stats.ignores
        self.known_hosts = self.stats.known_hosts
        self.connected_to = self.stats.connected_to
        self.open_connections = self.stats.open_connections
Example #11
def gatherProcessStats():
    procStats = scales.collection(
        '/process',
        scales.DoubleStat('time'),
        scales.DoubleStat('cpuPercent'),
        scales.DoubleStat('memMb'),
    )
    proc = psutil.Process()
    lastCpu = [0.]

    def updateTimeStat():
        now = time.time()
        procStats.time = round(now, 3)
        if now - lastCpu[0] > 3:
            procStats.cpuPercent = round(proc.cpu_percent(),
                                         6)  # (since last call)
            lastCpu[0] = now
        procStats.memMb = round(proc.memory_info().rss / 1024 / 1024, 6)

    task.LoopingCall(updateTimeStat).start(.1)
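
gatherProcessStats() only schedules the LoopingCall; nothing updates until a Twisted reactor runs. A minimal usage sketch (the 'process' key mirrors the '/process' path, as in the getStats() tests of Examples #7/#8):

from twisted.internet import reactor
from greplin import scales

gatherProcessStats()
# after a few polls, the '/process' collection shows up under the 'process' key
reactor.callLater(5, lambda: print(scales.getStats()['process']))
reactor.run()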
Example #12
import logging
from functools import wraps

from greplin import scales
from pyzabbix import ZabbixSender, ZabbixMetric
from configparser import ConfigParser
from io import StringIO

try:
    import _thread as thread
except ImportError:
    import thread

log = logging.getLogger(__name__)
stats = scales.collection(
    '/celery',
    scales.IntStat('started'),
    scales.IntStat('succeeded'),
    scales.IntStat('failed'),
    scales.IntStat('retried'),
    scales.PmfStat('queuetime'),
    scales.PmfStat('runtime'),
)
stats_queue = type('Stats:queues', (object, ), {})()
scales._Stats.initChild(stats_queue, 'queues', '', stats)


def task_handler(fn):
    @wraps(fn)
    def wrapper(self, event):
        self.state.event(event)
        task = self.state.tasks.get(event['uuid'])
        return fn(self, event, task)
Example #13
from greplin import scales
from tornado import ioloop, web, httpserver, gen

STATS = scales.collection('/web', scales.PmfStat('latency'))


class TimedHandler(web.RequestHandler):
    @gen.coroutine
    def get(self):
        with STATS.latency.time():
            self.write('finished')


if __name__ == "__main__":
    application = web.Application([
        (r"/", TimedHandler),
    ])

    server = httpserver.HTTPServer(application)
    server.bind(8888)
    server.start(0)
    ioloop.IOLoop.instance().start()
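
To see the latency PmfStat the handler feeds, scales ships per-framework stats handlers. A hedged sketch assuming the package's tornadohandler module (mirroring the cyclonehandler StatsHandler usage in later examples; the serverName argument is that pattern, not confirmed here):

from greplin.scales import tornadohandler

application = web.Application([
    (r"/", TimedHandler),
    (r"/stats/(.*)", tornadohandler.StatsHandler, {'serverName': 'web'}),
])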
Example #14
import os, contextlib
try:
    from rdflib.Graph import Graph
except ImportError:
    from rdflib import Graph
    
from rdflib import Namespace
from rdflib.parser import StringInputSource

from FuXi.Rete.Util import generateTokenSet
from FuXi.Rete import ReteNetwork
from FuXi.Rete.RuleStore import N3RuleStore

from greplin import scales 
STATS = scales.collection('/web',
                          scales.PmfStat('readRules'))

from escapeoutputstatements import escapeOutputStatements
ROOM = Namespace("http://projects.bigasterisk.com/room/")

def _loadAndEscape(ruleStore, n3, outputPatterns):
    ruleGraph = Graph(ruleStore)

    # Can't escapeOutputStatements in the ruleStore since it
    # doesn't support removals. Can't copy plainGraph into
    # ruleGraph since something went wrong with traversing the
    # triples inside quoted graphs, and I lose all the bodies
    # of my rules. This serialize/parse version is very slow (400ms),
    # but it only runs when the file changes.
    plainGraph = Graph()
    plainGraph.parse(StringInputSource(n3), format='n3') # for inference
Example #15
import time, logging, math, os, sys, socket
from influxdb import InfluxDBClient
from rdflib import Namespace
from twisted.internet import task
from greplin import scales

log = logging.getLogger()
ROOM = Namespace('http://projects.bigasterisk.com/room/')

stats = scales.collection(
    '/export_to_influxdb',
    scales.PmfStat('exportToInflux'),
)


class RetentionPolicies(object):
    def __init__(self, influx):
        self.influx = influx
        self.createdPolicies = set() # days

    def getCreatedPolicy(self, days):
        name = 'ret_%d_day' % days
        if days not in self.createdPolicies:
            self.influx.create_retention_policy(name,
                                                duration='%dd' % days,
                                                replication='1')
            self.createdPolicies.add(days)
        return name

class InfluxExporter(object):
    def __init__(self, configGraph, influxHost='bang5'):
Example #16
import binascii

from greplin import scales
from greplin.scales.cyclonehandler import StatsHandler
from influxdb import InfluxDBClient
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
import cyclone.web
from rdflib import Namespace, Literal

from standardservice.logsetup import log, verboseLogging
from patchablegraph import PatchableGraph, CycloneGraphHandler, CycloneGraphEventsHandler

from private_config import deviceIp, cloudId, installId, macId, periodSec
ROOM = Namespace("http://projects.bigasterisk.com/room/")

STATS = scales.collection('/root',
                          scales.PmfStat('poll'),
                          )

authPlain = cloudId + ':' + installId
auth = binascii.b2a_base64(authPlain.encode('ascii')).strip(b'=\n')

class Poller(object):
    def __init__(self, influx, graph):
        self.influx = influx
        self.graph = graph
        reactor.callLater(0, self.poll)

    @STATS.poll.time()
    @inlineCallbacks
    def poll(self):
        ret = None
Example #17
from greplin import scales

STATS = scales.collection(
    "/api",
    scales.IntStat('notfound'),
    scales.IntStat('validation'),
    scales.IntStat('errors'),
    scales.IntStat('success'),

    # account types
    scales.PmfStat('get_account_type'),
    scales.PmfStat('all_account_types'),
    scales.PmfStat('add_account_type'),
    scales.PmfStat('delete_account_type'),
    scales.PmfStat('update_account_type'),

    # accounts
    scales.PmfStat('get_account'),
    scales.PmfStat('all_accounts'),
    scales.PmfStat('add_account'),
    scales.PmfStat('delete_account'),
    scales.PmfStat('update_account'),
    scales.PmfStat('get_account_transactions'),

    # transactions
    scales.PmfStat('get_transaction'),
    scales.PmfStat('all_transactions'),
    scales.PmfStat('add_transaction'),
    scales.PmfStat('delete_transaction'),
    scales.PmfStat('update_transaction'),
    scales.PmfStat('upload_transactions'),

)
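
A hedged sketch of how a collection like this is typically exercised: PmfStats work as timing decorators (as in several examples below) and IntStats are incremented in place. The handler and db call are hypothetical:

@STATS.get_account.time()            # PmfStat as a timing decorator
def get_account(account_id):
    account = db.lookup(account_id)  # hypothetical storage call
    if account is None:
        STATS.notfound += 1          # IntStat
        return None
    STATS.success += 1
    return account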
Example #18
from cyclone.httpclient import fetch
import cyclone.web
from rdflib import Namespace
import logging, time, json, random, string, traceback
from logsetup import log, enableTwistedLog
from greplin import scales
from greplin.scales.cyclonehandler import StatsHandler
from export_to_influxdb import InfluxExporter
from tags import NfcDevice, FakeNfc, NfcError, AuthFailedError

ROOM = Namespace('http://projects.bigasterisk.com/room/')

ctx = ROOM['frontDoorWindowRfidCtx']

STATS = scales.collection(
    '/root',
    scales.PmfStat('cardReadPoll'),
    scales.IntStat('newCardReads'),
)


class OutputPage(cyclone.web.RequestHandler):
    def put(self):
        arg = self.request.arguments
        if arg.get('s') and arg.get('p'):
            self._onQueryStringStatement(arg['s'][-1], arg['p'][-1],
                                         self.request.body)
        else:
            self._onGraphBodyStatements(self.request.body,
                                        self.request.headers)

    post = put
Example #19
import logging, socket
import etcd3
from rdflib import ConjunctiveGraph, Namespace
from greplin import scales

from rdfdb.patch import Patch
from rdfdb.rdflibpatch import inContext


log = logging.getLogger()
logging.getLogger('serial').setLevel(logging.WARN)

ROOM = Namespace('http://projects.bigasterisk.com/room/')
HOST = Namespace('http://bigasterisk.com/ruler/host/')

ACTION_BASE = 10 # higher than any of the fixed command numbers

hostname = socket.gethostname()
CTX = ROOM['arduinosOn%s' % hostname]

STATS = scales.collection('/root',
)


etcd = etcd3.client(host='bang6', port=9022)

class Config(object):
    def __init__(self, masterGraph, slowMode=False):
        self.masterGraph = masterGraph
        self.slowMode = slowMode
        self.configGraph = ConjunctiveGraph()

        self.etcPrefix = 'arduino/'
        
        self.boards = []
        self.reread()
Example #20
from greplin import scales
from greplin.scales.meter import MeterStat

import config
import pusher

STATS = scales.collection(
    "/web",
    scales.IntStat('errors'),
    scales.IntStat('success'),
    scales.PmfStat('latency'),
    scales.IntStat('counter'),
    MeterStat('hits'),
)

def main():
    """Periodically send metrics out"""
    stat_server = pusher.PeriodicPusher("localhost", 5001, "/stats", 5)
    for allowed in config.STAT_RULES_ALLOWED:
      stat_server.allow(allowed)
    stat_server.run()

if __name__ == "__main__":
    main()
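
Here config and pusher are project-local modules. With stock scales, the equivalent wiring uses the Graphite pusher from Examples #1 and #3; a sketch with placeholder host, port, and allow rule:

from greplin.scales.graphite import GraphitePeriodicPusher

def start_graphite_pusher():
    graphite = GraphitePeriodicPusher('localhost', 2003, prefix='web')  # placeholder host/port
    graphite.allow('*')   # illustrative allow rule, as in Example #1
    graphite.start()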
Example #21
"""
pp-latchpony-service

This provides the views which are used in the dispatch routing set up.

PythonPro Limited

"""
import pkg_resources

from greplin import scales

from pyramid.view import view_config


STATS = scales.collection(
    '/status',
    scales.IntStat('ping_called'),
)


@view_config(route_name='home', request_method='GET', renderer='json')
def status(request):
    """This is used to 'ping' the web service to check if it's running.

    :returns: a status dict which the configured view will return as JSON.

    The dict has the form::

        dict(
            status="ok",
            name="<project name>",
            version="<egg version of pp.latchpony.service>"
Example #22
import logging, socket
from rdflib import Namespace
from greplin import scales

import devices
from export_to_influxdb import InfluxExporter

log = logging.getLogger()
logging.getLogger('serial').setLevel(logging.WARN)

ROOM = Namespace('http://projects.bigasterisk.com/room/')
HOST = Namespace('http://bigasterisk.com/ruler/host/')

hostname = socket.gethostname()
CTX = ROOM['pi/%s' % hostname]

STATS = scales.collection('/root',
                          scales.PmfStat('configReread'),
                          scales.IntStat('pollException'),
                          scales.PmfStat('boardPoll'),
                          scales.PmfStat('sendOneshot'),
                          scales.PmfStat('outputStatements'),

)
def patchRandid():
    """
    I'm concerned urandom is slow on raspberry pi, and I'm adding to
    graphs a lot. Unclear what the ordered return values might do to
    the balancing of the graph.
    """
    _id_serial = [1000]
    def randid():
        _id_serial[0] += 1
        return _id_serial[0]
    import rdflib.plugins.memory
    rdflib.plugins.memory.randid = randid
Example #24
from __future__ import absolute_import, division, print_function

from greplin import scales
import json
import pyramid.config
import pytest
import webtest

STATS = scales.collection('/', scales.IntStat('errors'))
STATS.errors += 1


@pytest.fixture
def app():
    config = pyramid.config.Configurator()
    config.include('pyramid_scales')
    return config.make_wsgi_app()


@pytest.fixture
def browser(app):
    return webtest.TestApp(app)


def test_displays_metrics_as_html(browser):
    r = browser.get('/scales/', status=200)
    assert (b'<span class="key">errors</span> <span class="int">1</span>'
            in r.body)


def test_displays_metrics_as_json(browser):
Example #25
import datetime
from dateutil.tz import tzlocal
from dateutil.relativedelta import relativedelta, FR
from rdflib import Namespace, Literal
from greplin import scales
from greplin.scales.cyclonehandler import StatsHandler
from patchablegraph import PatchableGraph, CycloneGraphEventsHandler, CycloneGraphHandler
from twilight import isWithinTwilight
from logsetup import log, enableTwistedLog

from rdfdoc import Doc

ROOM = Namespace("http://projects.bigasterisk.com/room/")
DEV = Namespace("http://projects.bigasterisk.com/device/")

STATS = scales.collection('/root',
                          scales.PmfStat('update'),
)

@STATS.update.time()
def update(masterGraph):
    stmt = lambda s, p, o: masterGraph.patchObject(ROOM.environment, s, p, o)
    
    now = datetime.datetime.now(tzlocal())

    stmt(DEV.environment, ROOM.localHour, Literal(now.hour))
    stmt(DEV.environment, ROOM.localTimeToMinute,
         Literal(now.strftime("%H:%M")))

    stmt(DEV.environment, ROOM.localTimeToSecond,
         Literal(now.strftime("%H:%M:%S")))
Example #26
import contextlib
import datetime
from greplin import scales

import logger

STATS = scales.collection('/goaldecider',
    scales.PmfStat('duration')
    )


def write(filename=None):
    try:
        if filename is None:
            filename=logger.filepath.replace(".py",".json")
        logger.dbg("Writing metrics to {}".format(filename))
        scales.dumpStatsTo(filename)
    except Exception as e:
        logger.dbg("Exception in stats writing")
        logger.log_exception(e)


@contextlib.contextmanager
def simple_timer(name):
    start_time = datetime.datetime.now()
    yield
    delta = datetime.datetime.now() - start_time
    logger.log("{} duration: {} s".format(name, delta.total_seconds()))
    return False
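
A short usage sketch tying the pieces above together; the workload and output path are placeholders:

with STATS.duration.time():            # feed the PmfStat one sample
    run_goal_decider()                 # hypothetical workload
write('/tmp/goaldecider-stats.json')   # writes the same tree scales.getStats() returns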

Example #27
import logging

from greplin import scales
from pyramid.view import view_config


def get_log(extra=None):
    m = "{}.{}".format(__name__, extra) if extra else __name__
    return logging.getLogger(m)


STATS = scales.collection(
    "/gymkhana",
    scales.PmfStat("listing_recovery"),
    scales.PmfStat("recover_competition"),
    scales.PmfStat("load_competition"),
    scales.PmfStat("dump_competition"),
    scales.PmfStat("can_perform"),
    scales.PmfStat("slot_types_for"),
)


@view_config(route_name="listing", request_method="GET", renderer="json")
def competition_listing(request):
    """Return a JSON list of competitions housed in the Gymkhana.

    """
    results = []

    # show how long it takes to recover the list of org_ids we are housing:
    with STATS.listing_recovery.time():
Example #28
import json
import logging
import treq
import math
import time
from twisted.internet.inotify import INotify
from twisted.python.filepath import FilePath

from light9 import networking
from light9.namespaces import L9, RDF
from light9.vidref.musictime import MusicTime
from light9.effect import effecteval
from greplin import scales
from txzmq import ZmqEndpoint, ZmqFactory, ZmqPushConnection

log = logging.getLogger('sequencer')
stats = scales.collection('/sequencer/',
                          scales.PmfStat('update'),
                          scales.DoubleStat('recentFps'),
)

_zmqClient=None
class TwistedZmqClient(object):
    def __init__(self, service):
        zf = ZmqFactory()
        e = ZmqEndpoint('connect', 'tcp://%s:%s' % (service.host, service.port))
        self.conn = ZmqPushConnection(zf, e)
        
    def send(self, msg):
        self.conn.push(msg)


def toCollectorJson(client, session, settings):
    return json.dumps({'settings': settings,
Example #29
#!/usr/bin/python

import os.path
import tornado.ioloop
from tornadoes import ESConnection

from greplin import scales
from greplin.scales.meter import MeterStat

import json
from hashlib import md5

import re
import unicodedata

STATS = scales.collection('/index', MeterStat('docs'))

BASE_PATH = '/Users/jisaacso/Documents/projects/bayes-impact/team-thorn/data/escort_all'
FBDUMP = os.path.join(BASE_PATH, 'escort_all.tsv')

es = ESConnection('localhost', 9200)
es.httprequest_kwargs = {
    'request_timeout': 1500.00,
    'connect_timeout': 1500.00
}
wspaceNuker = re.compile(' +')
def fold_accents(raw):
    if type(raw) == str:
        raw = unicode(raw, 'utf-8')
    return ''.join([c for c in unicodedata.normalize('NFKD', raw).encode('ascii', 'ignore')])
Example #30
from __future__ import absolute_import, division, print_function

from greplin import scales
import json
import pyramid.config
import pytest
import webtest


STATS = scales.collection(
    '/',
    scales.IntStat('errors')
)
STATS.errors += 1


@pytest.fixture
def app():
    config = pyramid.config.Configurator()
    config.include('pyramid_scales')
    return config.make_wsgi_app()


@pytest.fixture
def browser(app):
    return webtest.TestApp(app)


def test_displays_metrics_as_html(browser):
    r = browser.get('/scales/', status=200)
    assert (
Example #31
from cycloneerr import PrettyErrorHandler
from docopt import docopt
from greplin import scales
from greplin.scales.cyclonehandler import StatsHandler
from mqtt_client import MqttClient
from rdflib import Namespace
from standardservice.logsetup import log, verboseLogging
from twisted.internet import reactor

import rdf_over_http

ROOM = Namespace('http://projects.bigasterisk.com/room/')

STATS = scales.collection(
    '/root',
    scales.PmfStat('putRequests'),
    scales.PmfStat('statement'),
    scales.PmfStat('mqttPublish'),
)

devs = {
    ROOM['kitchenLight']: {
        'root': 'h801_skylight',
        'hasWhite': True,
    },
    ROOM['kitchenCounterLight']: {
        'root': 'h801_counter',
        'hasWhite': True,
    },
    ROOM['livingLampShelf']: {
        'root': 'sonoff_0/switch/sonoff_basic_relay/command',
        'values': 'binary',
Example #33
import logging
from rdflib import ConjunctiveGraph, Graph, Namespace, URIRef
from rdflib.parser import StringInputSource

from rdflibtrig import addTrig
from graphop import graphEqual
from greplin import scales

from patchablegraph.patchsource import ReconnectingPatchSource

from rdfdb.rdflibpatch import patchQuads

log = logging.getLogger('fetch')

ROOM = Namespace("http://projects.bigasterisk.com/room/")
DEV = Namespace("http://projects.bigasterisk.com/device/")


STATS = scales.collection('/web',
                          scales.PmfStat('combineGraph'),
)
def parseRdf(text, contentType):
    g = Graph()
    g.parse(StringInputSource(text), format={
        'text/n3': 'n3',
        }[contentType])
    return g


class RemoteData(object):
    def __init__(self, onChange):
        self.onChange = onChange
        self.graph = ConjunctiveGraph()
        self.patchSource = ReconnectingPatchSource(
            URIRef('http://bang:9072/graph/home'),
Example #34
import logging

from greplin import scales
from twisted.python.failure import Failure
from twisted.python.filepath import FilePath
import cyclone.web, cyclone.websocket
from rdflib import ConjunctiveGraph, URIRef, Graph
import twisted.internet.error

from rdfdb.file_vs_uri import correctToTopdirPrefix, fileForUri, uriFromFile, DirUriMap
from rdfdb.graphfile import GraphFile, PatchCb, GetSubgraph
from rdfdb.patch import Patch, ALLSTMTS
from rdfdb.rdflibpatch import patchQuads
from standardservice.scalessetup import gatherProcessStats

gatherProcessStats()
stats = scales.collection(
    '/webServer',
    scales.IntStat('clients'),
    scales.IntStat('liveClients'),
    scales.PmfStat('setAttr'),
)
graphStats = scales.collection(
    '/graph',
    scales.IntStat('statements'),
    scales.RecentFpsStat('patchFps'),
)
fileStats = scales.collection(
    '/file',
    scales.IntStat('mappedGraphFiles'),
)

log = logging.getLogger('rdfdb')

Example #35
import logging
import time

from greplin import scales

logger = logging.getLogger('sociallists.events')

STATS = scales.collection(
    '/sociallists',
    scales.PmfStat('feed_update'),

    scales.IntStat('thumbnail_fetched_from_summary'),
    scales.IntStat('thumbnail_fetched_from_content'),
    scales.IntStat('thumbnail_fetched_from_link'),
    scales.IntStat('thumbnail_fetched_not_found'),

    scales.IntStat('thumbnail_is_direct'),
    scales.IntStat('thumbnail_is_open_graph'),
    scales.IntStat('thumbnail_is_twitter'),
    scales.IntStat('thumbnail_is_link_rel'),
    scales.IntStat('thumbnail_is_known_goodness'),
    scales.IntStat('thumbnail_is_not_supported'),
    scales.IntStat('thumbnail_is_img_tag'),
)

def log_stats():
    thumbnail_total = (
        STATS.thumbnail_fetched_from_link +
        STATS.thumbnail_fetched_not_found +
        STATS.thumbnail_fetched_from_content +
        STATS.thumbnail_fetched_from_summary
Example #36
import sys

from rdflib import Graph, Namespace
from greplin import scales

from inference import infer, readRules
from actions import Actions
from inputgraph import InputGraph
from escapeoutputstatements import unquoteOutputStatements

sys.path.append("../../lib")
from logsetup import log


ROOM = Namespace("http://projects.bigasterisk.com/room/")
DEV = Namespace("http://projects.bigasterisk.com/device/")

NS = {'': ROOM, 'dev': DEV}

STATS = scales.collection('/web',
                          scales.PmfStat('graphChanged'))

class Reasoning(object):
    def __init__(self):
        self.prevGraph = None

        self.actions = Actions(sendToLiveClients)

        self.rulesN3 = "(not read yet)"
        self.inferred = Graph() # gets replaced in each graphChanged call

        self.inputGraph = InputGraph([], self.graphChanged)      
        self.inputGraph.updateFileData()

    def updateRules(self):
        rulesPath = 'rules.n3'
Example #37
from dateutil.tz import tzlocal
from rdflib import URIRef, Namespace, Literal, RDF, RDFS, XSD, ConjunctiveGraph
from twisted.internet import reactor, task
import cyclone.web

from greplin import scales
from greplin.scales.cyclonehandler import StatsHandler
from patchablegraph import PatchableGraph, CycloneGraphEventsHandler, CycloneGraphHandler
from standardservice.logsetup import log, verboseLogging

DEV = Namespace("http://projects.bigasterisk.com/device/")
ROOM = Namespace("http://projects.bigasterisk.com/room/")
ctx = DEV['dhcp']

STATS = scales.collection(
    '/root',
    scales.PmfStat('readLeases'),
    scales.IntStat('filesDidntChange'),
)


def timeLiteral(dt):
    return Literal(dt.replace(tzinfo=tzlocal()).isoformat(),
                   datatype=XSD.dateTime)


def macUri(macAddress: str) -> URIRef:
    return URIRef("http://bigasterisk.com/mac/%s" % macAddress.lower())


class Poller:
    def __init__(self, graph):
Example #38
import os, contextlib
try:
    from rdflib.Graph import Graph
except ImportError:
    from rdflib import Graph

from rdflib import Namespace
from rdflib.parser import StringInputSource

from FuXi.Rete.Util import generateTokenSet
from FuXi.Rete import ReteNetwork
from FuXi.Rete.RuleStore import N3RuleStore

from greplin import scales
STATS = scales.collection('/web', scales.PmfStat('readRules'))

from escapeoutputstatements import escapeOutputStatements
ROOM = Namespace("http://projects.bigasterisk.com/room/")


def _loadAndEscape(ruleStore, n3, outputPatterns):
    ruleGraph = Graph(ruleStore)

    # Can't escapeOutputStatements in the ruleStore since it
    # doesn't support removals. Can't copy plainGraph into
    # ruleGraph since something went wrong with traversing the
    # triples inside quoted graphs, and I lose all the bodies
    # of my rules. This serialize/parse version is very slow (400ms),
    # but it only runs when the file changes.
    plainGraph = Graph()
Example #39
from dateutil.tz import tzlocal
from rdflib import URIRef, Namespace, Literal, RDF, RDFS, XSD, ConjunctiveGraph
from twisted.internet import reactor, task
import cyclone.web

from greplin import scales
from greplin.scales.cyclonehandler import StatsHandler
from patchablegraph import PatchableGraph, CycloneGraphEventsHandler, CycloneGraphHandler
from standardservice.logsetup import log, verboseLogging

DEV = Namespace("http://projects.bigasterisk.com/device/")
ROOM = Namespace("http://projects.bigasterisk.com/room/")
ctx = DEV['dhcp']

STATS = scales.collection('/root',
                          scales.PmfStat('readLeases'),
                          scales.IntStat('filesDidntChange'),
                          )

def timeLiteral(dt):
    return Literal(dt.replace(tzinfo=tzlocal()).isoformat(),
                   datatype=XSD.dateTime)
    
def macUri(macAddress: str) -> URIRef:
    return URIRef("http://bigasterisk.com/mac/%s" % macAddress.lower())

class Poller:
    def __init__(self, graph):
        self.graph = graph
        self.fileTimes = {'/opt/dnsmasq/10.1/leases': 0, '/opt/dnsmasq/10.2/leases': 0}
        task.LoopingCall(self.poll).start(2)
Example #40
        milliC = open('/sys/class/thermal/thermal_zone0/temp').read().strip()
        c = float(milliC) / 1000.
        f = c * 1.8 + 32
        return [
            (self.uri, ROOM['temperatureF'], Literal(round(f, 3), datatype=XSD['decimal'])),
            ]

    def watchPrefixes(self):
        # these uris will become dynamic! see note on watchPrefixes
        # about eliminating it.
        return [(self.uri, ROOM['temperatureF']),
                ]

pixelStats = scales.collection('/rgbPixels',
                               scales.PmfStat('updateOutput'),
                               scales.PmfStat('currentColors'),
                               scales.PmfStat('poll'),
                               )

@register
class RgbPixels(DeviceType):
    """chain of ws2812 rgb pixels on pin GPIO18"""
    deviceType = ROOM['RgbPixels']

    def hostStateInit(self):
        self.anim = RgbPixelsAnimation(self.graph, self.uri, self.updateOutput)
        log.debug('%s maxIndex = %s', self.uri, self.anim.maxIndex())
        self.neo = rpi_ws281x.Adafruit_NeoPixel(self.anim.maxIndex() + 1, pin=18)
        self.neo.begin()

        colorOrder, stripType = self.anim.getColorOrder(self.graph, self.uri)
Example #41
import datetime
from dateutil.tz import tzlocal
from dateutil.relativedelta import relativedelta, FR
from rdflib import Namespace, Literal
from greplin import scales
from greplin.scales.cyclonehandler import StatsHandler
from patchablegraph import PatchableGraph, CycloneGraphEventsHandler, CycloneGraphHandler
from twilight import isWithinTwilight
from standardservice.logsetup import log, verboseLogging

from rdfdoc import Doc

ROOM = Namespace("http://projects.bigasterisk.com/room/")
DEV = Namespace("http://projects.bigasterisk.com/device/")

STATS = scales.collection(
    '/root',
    scales.PmfStat('update'),
)


class CycloneGraphEventsHandlerWithCors(CycloneGraphEventsHandler):
    def flush(self):
        self.set_header("Access-Control-Allow-Origin", "*")
        return CycloneGraphEventsHandler.flush(self)


@STATS.update.time()
def update(masterGraph):
    stmt = lambda s, p, o: masterGraph.patchObject(ROOM.environment, s, p, o)

    now = datetime.datetime.now(tzlocal())
Example #42
from rdflib import Namespace, URIRef
import cyclone.web
from cyclone.httpclient import fetch
import cyclone
import logging, time, json, random, string, traceback
from logsetup import log, enableTwistedLog
from greplin import scales
from greplin.scales.cyclonehandler import StatsHandler
from export_to_influxdb import InfluxExporter
from tags import NfcDevice, FakeNfc, NfcError, AuthFailedError

ROOM = Namespace('http://projects.bigasterisk.com/room/')

ctx = ROOM['frontDoorWindowRfidCtx']

STATS = scales.collection('/root',
                          scales.PmfStat('cardReadPoll'),
                          scales.IntStat('newCardReads'),
)

class OutputPage(cyclone.web.RequestHandler):
    def put(self):
        arg = self.request.arguments
        if arg.get('s') and arg.get('p'):
            self._onQueryStringStatement(arg['s'][-1], arg['p'][-1], self.request.body)
        else:
            self._onGraphBodyStatements(self.request.body, self.request.headers)
    post = put
    def _onQueryStringStatement(self, s, p, body):
        subj = URIRef(s)
        pred = URIRef(p)
        turtleLiteral = self.request.body
        try:
Example #43
import logging
from typing import Callable

from greplin import scales
from rdflib import Graph, Namespace
from rdflib.parser import StringInputSource
from rx.subjects import BehaviorSubject
from twisted.python.filepath import FilePath
from twisted.internet import reactor

from patchablegraph.patchsource import ReconnectingPatchSource
from rdfdb.rdflibpatch import patchQuads
from rdfdb.patch import Patch

log = logging.getLogger('fetch')

ROOM = Namespace("http://projects.bigasterisk.com/room/")
DEV = Namespace("http://projects.bigasterisk.com/device/")

STATS = scales.collection(
    '/web',
    scales.PmfStat('combineGraph'),
)


def parseRdf(text: str, contentType: str):
    g = Graph()
    g.parse(StringInputSource(text), format={
        'text/n3': 'n3',
    }[contentType])
    return g


class RemoteData(object):
    def __init__(self, onChange: Callable[[], None]):
        """we won't fire onChange during init"""
        self.onChange = onChange
Example #44
from typing import Tuple

from greplin import scales
from rdflib import Namespace, URIRef
from rdflib.term import Node

from inference import infer, readRules
from actions import Actions, PutOutputsTable
from inputgraph import InputGraph
from escapeoutputstatements import unquoteOutputStatements

from standardservice.logsetup import log, verboseLogging
from patchablegraph import PatchableGraph, CycloneGraphHandler, CycloneGraphEventsHandler

ROOM = Namespace("http://projects.bigasterisk.com/room/")
DEV = Namespace("http://projects.bigasterisk.com/device/")

NS = {'': ROOM, 'dev': DEV}

STATS = scales.collection(
    '/web',
    scales.PmfStat('graphChanged'),
    scales.PmfStat('updateRules'),
)


def ntStatement(stmt: Tuple[Node, Node, Node]):
    def compact(u):
        if isinstance(u, URIRef) and u.startswith(ROOM):
            return 'room:' + u[len(ROOM):]
        return u.n3()

    return '%s %s %s .' % (compact(stmt[0]), compact(stmt[1]), compact(
        stmt[2]))


class Reasoning(object):
Example #45
from typing import Callable, Dict

from greplin import scales
from rdflib import Namespace, URIRef
from rdfdb.patch import Patch

from collector_config import config


#SourceUri = NewType('SourceUri', URIRef) # doesn't work
class SourceUri(URIRef):
    pass


ROOM = Namespace("http://projects.bigasterisk.com/room/")
COLLECTOR = SourceUri(URIRef('http://bigasterisk.com/sse_collector/'))

STATS = scales.collection(
    '/root',
    scales.PmfStat('getState'),
    scales.PmfStat('localStatementsPatch'),
    scales.PmfStat('makeSyncPatch'),
    scales.PmfStat('onPatch'),
    scales.PmfStat('sendUpdatePatch'),
    scales.PmfStat('replaceSourceStatements'),
)


class LocalStatements(object):
    """
    functions that make statements originating from sse_collector itself
    """
    def __init__(self, applyPatch: Callable[[URIRef, Patch], None]):
        self.applyPatch = applyPatch
        self._sourceState: Dict[SourceUri, URIRef] = {}  # source: state URIRef

    @STATS.localStatementsPatch.time()