Beispiel #1
0
class EPUControllerService(ServiceProcess):
    """EPU Controller service interface.

    Sets up the decision engine, controller store, and (optionally) a
    watched work queue, then delegates incoming operation messages to a
    ControllerCore instance.
    """

    declare = ServiceProcess.service_declare(name=DEFAULT_NAME, version='0.1.0', dependencies=[])

    @defer.inlineCallbacks
    def slc_init(self):
        """Service lifecycle init: configure queue, engine, store, and core."""

        scoped_name = self.get_scoped_name("system", self.svc_name)
        self.scoped_name = scoped_name

        queue_name_work = self.spawn_args.get("queue_name_work")
        if queue_name_work:
            self.queue_name_work = self.get_scoped_name("system", queue_name_work)

            extradict = {"queue_name_work":self.queue_name_work}
            cei_events.event(self.svc_name, "init_begin", extra=extradict)
            yield self._make_queue(queue_name_work)

            queuestat_client = QueueStatClient(self)
            yield queuestat_client.watch_queue(self.queue_name_work, self.scoped_name, 'sensor_info')
            cei_events.event(self.svc_name, "queue_watched")

        else:
            self.worker_queue_receiver = None
            self.queue_name_work = None
            extradict = None
            cei_events.event(self.svc_name, "init_begin", extra=extradict)

        # NOTE: "key in dict" replaces dict.has_key() -- has_key() was
        # removed in Python 3; "in" is equivalent and works on both.
        engineclass = "epu.decisionengine.impls.NpreservingEngine"
        if "engine_class" in self.spawn_args:
            engineclass = self.spawn_args["engine_class"]
            log.info("Using configured decision engine: %s" % engineclass)
        else:
            log.info("Using default decision engine: %s" % engineclass)

        if "engine_conf" in self.spawn_args:
            engine_conf = self.spawn_args["engine_conf"]
            # engine_conf may arrive as a JSON string; decode to a dict.
            if isinstance(engine_conf, str):
                engine_conf = json.loads(engine_conf)
        else:
            engine_conf = None

        if "cassandra" in self.spawn_args:
            cass = self.spawn_args["cassandra"]
            host = cass['hostname']
            username = cass['username']
            password = cass['password']
            port = cass['port']
            keyspace = cass['keyspace']

            store = CassandraControllerStore(self.svc_name, host, port,
                                             username, password, keyspace,
                                             CoreInstance, SensorItem)
            store.initialize()
            store.activate()
        elif 'store' in self.spawn_args:
            # Caller supplied a pre-built store (e.g. for tests).
            store = self.spawn_args['store']
        else:
            store = ControllerStore()

        self.core = ControllerCore(ProvisionerClient(self), engineclass,
                                   scoped_name, conf=engine_conf, store=store)

        # run state recovery and engine initialization

        # this one needs to run before any messages start arriving. It pulls
        # information from persistence and refreshes local caches.
        yield self.core.run_recovery()

        # temporarily doing this later due to a potential bug in ioncore where
        # queues may not be bound before slc_init runs. This means if the
        # provisioner is quick to reply to dump_state some messages may be
        # missed.
        reactor.callLater(1, self._delayed_init)

    @defer.inlineCallbacks
    def _delayed_init(self):
        """Deferred engine initialization (see note in slc_init)."""
        yield self.core.run_initialize()

        self.core.begin_controlling()
        cei_events.event(self.svc_name, "init_end")


    @defer.inlineCallbacks
    def _make_queue(self, name):
        """Create and initialize the worker queue receiver for `name`."""
        self.worker_queue_receiver = ServiceWorkerReceiver(
            label=name,
            name=name,
            scope='system')
        yield self.worker_queue_receiver.initialize()

    def op_heartbeat(self, content, headers, msg):
        """Forward a node heartbeat message to the core."""
        log.debug("Got node heartbeat: %s", content)
        return self.core.new_heartbeat(content)

    def op_instance_state(self, content, headers, msg):
        """Forward an instance state update to the core."""
        return self.core.new_instance_state(content)

    def op_sensor_info(self, content, headers, msg):
        """Forward sensor information (e.g. queue stats) to the core."""
        return self.core.new_sensor_info(content)

    def op_reconfigure(self, content, headers, msg):
        """Fire-and-forget reconfigure of the decision engine."""
        log.info("EPU Controller: reconfigure: '%s'" % content)
        return self.core.run_reconfigure(content)

    @defer.inlineCallbacks
    def op_reconfigure_rpc(self, content, headers, msg):
        """RPC-style reconfigure: replies ok when the change is applied."""
        log.info("EPU Controller: reconfigure_rpc: '%s'" % content)
        yield self.core.run_reconfigure(content)
        yield self.reply_ok(msg, "")

    @defer.inlineCallbacks
    def op_de_state(self, content, headers, msg):
        """Reply with the decision engine's current state."""
        state = self.core.de_state()
        extradict = {"state":state}
        cei_events.event(self.svc_name, "de_state", extra=extradict)
        yield self.reply_ok(msg, state)

    @defer.inlineCallbacks
    def op_whole_state(self, content, headers, msg):
        """Reply with the full controller state snapshot."""
        state = yield self.core.whole_state()
        yield self.reply_ok(msg, state)

    @defer.inlineCallbacks
    def op_node_error(self, content, headers, msg):
        """Record an error for the node id given in `content` and reply."""
        node_id = content
        state = yield self.core.node_error(node_id)
        yield self.reply_ok(msg, state)
Beispiel #2
0
 def _make_queue(self, name):
     """Build the worker queue receiver for `name` and initialize it."""
     receiver = ServiceWorkerReceiver(label=name, name=name, scope='system')
     self.worker_queue_receiver = receiver
     yield receiver.initialize()
Beispiel #3
0
 def _make_queue(self, name):
     """Create a system-scoped ServiceWorkerReceiver for `name`."""
     receiver = ServiceWorkerReceiver(
         label=name,
         name=name,
         scope='system')
     self.receiver = receiver
     yield receiver.initialize()
Beispiel #4
0
class TestQueueStatServiceLive(IonTestCase):
    """Queuestat tests that use a live broker on localhost."""

    @defer.inlineCallbacks
    def setUp(self):
        """Spawn the queuestat service and create a uniquely named queue.

        Currently skipped unconditionally; the remaining setup only runs if
        the skip below is removed and a local RabbitMQ broker is available.
        """

        #unconditional skip now that @itv is gone. we'll make our own decorator??
        raise unittest.SkipTest(
            "Skipping test that requires localhost rabbit broker")

        if not os.path.exists(os.path.expanduser('~/.erlang.cookie')):
            raise unittest.SkipTest('Needs a RabbitMQ server on localhost')

        log.debug('Temporarily changing broker_host to 127.0.0.1')
        self.other_broker_host = ion.test.iontest.CONF.obj['broker_host']
        ion.test.iontest.CONF.obj['broker_host'] = '127.0.0.1'

        yield self._start_container()
        procs = [
            {
                'name': 'queuestat',
                'module': 'epu.ionproc.queuestat',
                'class': 'QueueStatService',
                'spawnargs': {
                    'interval_seconds': 0.1
                }
            },
        ]
        self.sup = yield self._spawn_processes(procs)

        # "uid" avoids shadowing the builtin "id"; keep only the last
        # UUID segment for a shorter queue name.
        uid = str(uuid.uuid4())
        uid = uid[uid.rfind('-') + 1:]  # shorter id
        queuename = '_'.join((__name__, uid))
        yield self._make_queue(queuename)

        self.queuename = pu.get_scoped_name(queuename, "system")

    @defer.inlineCallbacks
    def _make_queue(self, name):
        """Create and initialize a system-scoped receiver for `name`."""
        self.receiver = ServiceWorkerReceiver(label=name,
                                              name=name,
                                              scope='system')
        yield self.receiver.initialize()

    @defer.inlineCallbacks
    def tearDown(self):
        """Drain the queue, shut everything down, and restore config."""

        # activating the receiver causes the queue to drain, apparently
        yield self.receiver.activate()
        yield self._shutdown_processes()
        yield self._stop_container()

        log.debug('Resetting broker_host')
        ion.test.iontest.CONF.obj['broker_host'] = self.other_broker_host

    @defer.inlineCallbacks
    def test_queuestat(self):
        """Watched queue lengths are reported; unwatch stops updates."""
        subscriber = TestSubscriber()
        subId = yield self._spawn_process(subscriber)
        queuestat_client = QueueStatClient(subscriber)

        yield queuestat_client.watch_queue(self.queuename, str(subId), 'stat')
        yield pu.asleep(0.3)

        assert subscriber.queue_length[self.queuename] == 0

        yield self._add_messages(5)
        yield pu.asleep(0.3)
        assert subscriber.queue_length[self.queuename] == 5

        yield self._add_messages(3)
        yield pu.asleep(0.3)
        assert subscriber.queue_length[self.queuename] == 8

        yield pu.asleep(0.3)
        yield queuestat_client.unwatch_queue(self.queuename, str(subId),
                                             'stat')

        # After unwatching, new messages must not change the reported length.
        yield self._add_messages(3)
        yield pu.asleep(0.3)
        assert subscriber.queue_length[self.queuename] == 8

    @defer.inlineCallbacks
    def _add_messages(self, count):
        """Send `count` fake work messages to the watched queue."""
        for i in range(count):
            yield self.sup.send(self.queuename, 'work',
                                {'deal': 'this is a fake message'})
Beispiel #5
0
class EPUControllerService(ServiceProcess):
    """EPU Controller service interface.

    Sets up the decision engine, controller store, and (optionally) a
    watched work queue, then delegates incoming operation messages to a
    ControllerCore instance.
    """

    declare = ServiceProcess.service_declare(name=DEFAULT_NAME,
                                             version='0.1.0',
                                             dependencies=[])

    @defer.inlineCallbacks
    def slc_init(self):
        """Service lifecycle init: configure queue, engine, store, and core."""

        scoped_name = self.get_scoped_name("system", self.svc_name)
        self.scoped_name = scoped_name

        queue_name_work = self.spawn_args.get("queue_name_work")
        if queue_name_work:
            self.queue_name_work = self.get_scoped_name(
                "system", queue_name_work)

            extradict = {"queue_name_work": self.queue_name_work}
            cei_events.event(self.svc_name, "init_begin", extra=extradict)
            yield self._make_queue(queue_name_work)

            queuestat_client = QueueStatClient(self)
            yield queuestat_client.watch_queue(self.queue_name_work,
                                               self.scoped_name, 'sensor_info')
            cei_events.event(self.svc_name, "queue_watched")

        else:
            self.worker_queue_receiver = None
            self.queue_name_work = None
            extradict = None
            cei_events.event(self.svc_name, "init_begin", extra=extradict)

        # NOTE: "key in dict" replaces dict.has_key() -- has_key() was
        # removed in Python 3; "in" is equivalent and works on both.
        engineclass = "epu.decisionengine.impls.NpreservingEngine"
        if "engine_class" in self.spawn_args:
            engineclass = self.spawn_args["engine_class"]
            log.info("Using configured decision engine: %s" % engineclass)
        else:
            log.info("Using default decision engine: %s" % engineclass)

        if "engine_conf" in self.spawn_args:
            engine_conf = self.spawn_args["engine_conf"]
            # engine_conf may arrive as a JSON string; decode to a dict.
            if isinstance(engine_conf, str):
                engine_conf = json.loads(engine_conf)
        else:
            engine_conf = None

        if "cassandra" in self.spawn_args:
            cass = self.spawn_args["cassandra"]
            host = cass['hostname']
            username = cass['username']
            password = cass['password']
            port = cass['port']
            keyspace = cass['keyspace']

            store = CassandraControllerStore(self.svc_name, host, port,
                                             username, password, keyspace,
                                             CoreInstance, SensorItem)
            store.initialize()
            store.activate()
        elif 'store' in self.spawn_args:
            # Caller supplied a pre-built store (e.g. for tests).
            store = self.spawn_args['store']
        else:
            store = ControllerStore()

        self.core = ControllerCore(ProvisionerClient(self),
                                   engineclass,
                                   scoped_name,
                                   conf=engine_conf,
                                   store=store)

        # run state recovery and engine initialization

        # this one needs to run before any messages start arriving. It pulls
        # information from persistence and refreshes local caches.
        yield self.core.run_recovery()

        # temporarily doing this later due to a potential bug in ioncore where
        # queues may not be bound before slc_init runs. This means if the
        # provisioner is quick to reply to dump_state some messages may be
        # missed.
        reactor.callLater(1, self._delayed_init)

    @defer.inlineCallbacks
    def _delayed_init(self):
        """Deferred engine initialization (see note in slc_init)."""
        yield self.core.run_initialize()

        self.core.begin_controlling()
        cei_events.event(self.svc_name, "init_end")

    @defer.inlineCallbacks
    def _make_queue(self, name):
        """Create and initialize the worker queue receiver for `name`."""
        self.worker_queue_receiver = ServiceWorkerReceiver(label=name,
                                                           name=name,
                                                           scope='system')
        yield self.worker_queue_receiver.initialize()

    def op_heartbeat(self, content, headers, msg):
        """Forward a node heartbeat message to the core."""
        log.debug("Got node heartbeat: %s", content)
        return self.core.new_heartbeat(content)

    def op_instance_state(self, content, headers, msg):
        """Forward an instance state update to the core."""
        return self.core.new_instance_state(content)

    def op_sensor_info(self, content, headers, msg):
        """Forward sensor information (e.g. queue stats) to the core."""
        return self.core.new_sensor_info(content)

    def op_reconfigure(self, content, headers, msg):
        """Fire-and-forget reconfigure of the decision engine."""
        log.info("EPU Controller: reconfigure: '%s'" % content)
        return self.core.run_reconfigure(content)

    @defer.inlineCallbacks
    def op_reconfigure_rpc(self, content, headers, msg):
        """RPC-style reconfigure: replies ok when the change is applied."""
        log.info("EPU Controller: reconfigure_rpc: '%s'" % content)
        yield self.core.run_reconfigure(content)
        yield self.reply_ok(msg, "")

    @defer.inlineCallbacks
    def op_de_state(self, content, headers, msg):
        """Reply with the decision engine's current state."""
        state = self.core.de_state()
        extradict = {"state": state}
        cei_events.event(self.svc_name, "de_state", extra=extradict)
        yield self.reply_ok(msg, state)

    @defer.inlineCallbacks
    def op_whole_state(self, content, headers, msg):
        """Reply with the full controller state snapshot."""
        state = yield self.core.whole_state()
        yield self.reply_ok(msg, state)

    @defer.inlineCallbacks
    def op_node_error(self, content, headers, msg):
        """Record an error for the node id given in `content` and reply."""
        node_id = content
        state = yield self.core.node_error(node_id)
        yield self.reply_ok(msg, state)
Beispiel #6
0
class TestQueueStatServiceLive(IonTestCase):
    """Queuestat tests that use a live broker on localhost."""

    @defer.inlineCallbacks
    def setUp(self):
        """Spawn the queuestat service and create a uniquely named queue.

        Currently skipped unconditionally; the remaining setup only runs if
        the skip below is removed and a local RabbitMQ broker is available.
        """

        #unconditional skip now that @itv is gone. we'll make our own decorator??
        raise unittest.SkipTest("Skipping test that requires localhost rabbit broker")

        if not os.path.exists(os.path.expanduser('~/.erlang.cookie')):
            raise unittest.SkipTest('Needs a RabbitMQ server on localhost')

        log.debug('Temporarily changing broker_host to 127.0.0.1')
        self.other_broker_host = ion.test.iontest.CONF.obj['broker_host']
        ion.test.iontest.CONF.obj['broker_host'] = '127.0.0.1'

        yield self._start_container()
        procs = [
            {'name': 'queuestat',
             'module': 'epu.ionproc.queuestat',
             'class': 'QueueStatService',
             'spawnargs': {'interval_seconds': 0.1}},
            ]
        self.sup = yield self._spawn_processes(procs)

        # "uid" avoids shadowing the builtin "id"; keep only the last
        # UUID segment for a shorter queue name.
        uid = str(uuid.uuid4())
        uid = uid[uid.rfind('-')+1:] # shorter id
        queuename = '_'.join((__name__, uid))
        yield self._make_queue(queuename)

        self.queuename = pu.get_scoped_name(queuename, "system")

    @defer.inlineCallbacks
    def _make_queue(self, name):
        """Create and initialize a system-scoped receiver for `name`."""
        self.receiver = ServiceWorkerReceiver(
            label=name,
            name=name,
            scope='system')
        yield self.receiver.initialize()

    @defer.inlineCallbacks
    def tearDown(self):
        """Drain the queue, shut everything down, and restore config."""

        # activating the receiver causes the queue to drain, apparently
        yield self.receiver.activate()
        yield self._shutdown_processes()
        yield self._stop_container()

        log.debug('Resetting broker_host')
        ion.test.iontest.CONF.obj['broker_host'] = self.other_broker_host

    @defer.inlineCallbacks
    def test_queuestat(self):
        """Watched queue lengths are reported; unwatch stops updates."""
        subscriber = TestSubscriber()
        subId = yield self._spawn_process(subscriber)
        queuestat_client = QueueStatClient(subscriber)

        yield queuestat_client.watch_queue(self.queuename, str(subId), 'stat')
        yield pu.asleep(0.3)

        assert subscriber.queue_length[self.queuename] == 0

        yield self._add_messages(5)
        yield pu.asleep(0.3)
        assert subscriber.queue_length[self.queuename] == 5

        yield self._add_messages(3)
        yield pu.asleep(0.3)
        assert subscriber.queue_length[self.queuename] == 8

        yield pu.asleep(0.3)
        yield queuestat_client.unwatch_queue(self.queuename, str(subId), 'stat')

        # After unwatching, new messages must not change the reported length.
        yield self._add_messages(3)
        yield pu.asleep(0.3)
        assert subscriber.queue_length[self.queuename] == 8

    @defer.inlineCallbacks
    def _add_messages(self, count):
        """Send `count` fake work messages to the watched queue."""
        for i in range(count):
            yield self.sup.send(self.queuename, 'work',
                                {'deal': 'this is a fake message'})