def test_queuestat(self):
    """End-to-end check that queue-length stats reach a watching subscriber.

    Watches a queue, adds messages in batches, and verifies the reported
    length tracks each addition; after unwatching, further additions must
    leave the last reported length unchanged.
    """
    subscriber = TestSubscriber()
    sub_id = yield self._spawn_process(subscriber)
    queuestat_client = QueueStatClient(subscriber)

    yield queuestat_client.watch_queue(self.queuename, str(sub_id), 'stat')
    yield pu.asleep(0.3)
    # a freshly watched queue should report empty
    # (self.assertEqual instead of bare assert: not stripped under -O and
    # consistent with the other tests in this file)
    self.assertEqual(subscriber.queue_length[self.queuename], 0)

    yield self._add_messages(5)
    yield pu.asleep(0.3)
    self.assertEqual(subscriber.queue_length[self.queuename], 5)

    yield self._add_messages(3)
    yield pu.asleep(0.3)
    self.assertEqual(subscriber.queue_length[self.queuename], 8)

    yield pu.asleep(0.3)
    yield queuestat_client.unwatch_queue(self.queuename, str(sub_id), 'stat')

    # after unwatching, new messages must not produce further updates
    yield self._add_messages(3)
    yield pu.asleep(0.3)
    self.assertEqual(subscriber.queue_length[self.queuename], 8)
def test_queuestat(self):
    """Unit test of QueueStatService watch/unwatch bookkeeping and polling.

    Drives the service's poll loop directly via _wrapped_do_poll() and
    checks that stat messages are emitted only for watched queues.
    """
    queuestat = QueueStatService(spawnargs={
        'erlang_cookie_path': self.cookie_path,
        'interval_seconds': 5.0})
    self.queuestat = queuestat
    queuestat.slc_init()

    # polling loop must not start until there is at least one watcher
    self.assertEqual(self.loop_interval, None)

    queuestat_client = QueueStatClient()
    yield queuestat_client.watch_queue("q1", "sub_id", 'stat')
    self.assertEqual(self.loop_interval, 5.0)

    yield queuestat._wrapped_do_poll()

    self.rabbitmqctl.set_qlen("q1", 5)
    yield queuestat._wrapped_do_poll()
    self.assertEqual(self.queues.pop("q1"), [5])

    yield queuestat_client.watch_queue("q2", "sub_id", 'stat')
    self.rabbitmqctl.set_qlen("q2", 0)
    yield queuestat._wrapped_do_poll()
    self.assertEqual(self.queues.pop("q1"), [5])
    self.assertEqual(self.queues.pop("q2"), [0])

    self.rabbitmqctl.set_qlen("q1", 8)
    self.rabbitmqctl.set_qlen("q2", 3)
    self.rabbitmqctl.set_qlen("q3", 1)
    yield queuestat._wrapped_do_poll()
    self.assertEqual(self.queues.pop("q1"), [8])
    self.assertEqual(self.queues.pop("q2"), [3])
    self.assertFalse(self.queues)  # should not be a q3 message

    yield queuestat_client.unwatch_queue("q1", "sub_id", 'stat')
    # BUG FIX: q1's new length must be set BEFORE the poll. The original
    # set it after _wrapped_do_poll(), so the "no q1 message after
    # unwatch" assertion below could never have failed.
    self.rabbitmqctl.set_qlen("q1", 10)
    yield queuestat._wrapped_do_poll()
    self.assertEqual(self.queues.pop("q2"), [3])
    self.assertFalse(self.queues)  # should not be a q1 or q3 message
def test_queuestat(self):
    """Exercise QueueStatService polling: messages only for watched queues.

    Calls _wrapped_do_poll() by hand and asserts that each poll delivers
    stat messages for exactly the currently watched queues.
    """
    queuestat = QueueStatService(spawnargs={
        'erlang_cookie_path': self.cookie_path,
        'interval_seconds': 5.0})
    self.queuestat = queuestat
    queuestat.slc_init()

    # no watchers yet -> the poll loop is not running
    self.assertEqual(self.loop_interval, None)

    queuestat_client = QueueStatClient()
    yield queuestat_client.watch_queue("q1", "sub_id", 'stat')
    self.assertEqual(self.loop_interval, 5.0)

    yield queuestat._wrapped_do_poll()

    self.rabbitmqctl.set_qlen("q1", 5)
    yield queuestat._wrapped_do_poll()
    self.assertEqual(self.queues.pop("q1"), [5])

    yield queuestat_client.watch_queue("q2", "sub_id", 'stat')
    self.rabbitmqctl.set_qlen("q2", 0)
    yield queuestat._wrapped_do_poll()
    self.assertEqual(self.queues.pop("q1"), [5])
    self.assertEqual(self.queues.pop("q2"), [0])

    self.rabbitmqctl.set_qlen("q1", 8)
    self.rabbitmqctl.set_qlen("q2", 3)
    self.rabbitmqctl.set_qlen("q3", 1)
    yield queuestat._wrapped_do_poll()
    self.assertEqual(self.queues.pop("q1"), [8])
    self.assertEqual(self.queues.pop("q2"), [3])
    self.assertFalse(self.queues)  # should not be a q3 message

    yield queuestat_client.unwatch_queue("q1", "sub_id", 'stat')
    # BUG FIX: update q1's length before polling (the original updated it
    # after the poll, making the post-unwatch check vacuous).
    self.rabbitmqctl.set_qlen("q1", 10)
    yield queuestat._wrapped_do_poll()
    self.assertEqual(self.queues.pop("q2"), [3])
    self.assertFalse(self.queues)  # should not be a q1 or q3 message
def slc_init(self):
    """Service lifecycle init: set up the work queue watch, the decision
    engine configuration, the controller store, and the controller core.

    Generator-based (yield) Twisted-style init; state recovery runs here,
    while engine initialization is deferred via reactor.callLater.
    """
    scoped_name = self.get_scoped_name("system", self.svc_name)
    self.scoped_name = scoped_name

    queue_name_work = self.spawn_args.get("queue_name_work")
    if queue_name_work:
        self.queue_name_work = self.get_scoped_name("system",
                                                    queue_name_work)
        extradict = {"queue_name_work": self.queue_name_work}
        cei_events.event(self.svc_name, "init_begin", extra=extradict)
        yield self._make_queue(queue_name_work)

        queuestat_client = QueueStatClient(self)
        yield queuestat_client.watch_queue(self.queue_name_work,
                                           self.scoped_name, 'sensor_info')
        cei_events.event(self.svc_name, "queue_watched")
    else:
        self.worker_queue_receiver = None
        self.queue_name_work = None
        extradict = None
        cei_events.event(self.svc_name, "init_begin", extra=extradict)

    # NOTE: dict.has_key() is deprecated (and gone in Python 3); use `in`.
    engineclass = "epu.decisionengine.impls.NpreservingEngine"
    if "engine_class" in self.spawn_args:
        engineclass = self.spawn_args["engine_class"]
        log.info("Using configured decision engine: %s" % engineclass)
    else:
        log.info("Using default decision engine: %s" % engineclass)

    if "engine_conf" in self.spawn_args:
        engine_conf = self.spawn_args["engine_conf"]
        # engine_conf may arrive as a JSON string; decode it to a dict
        if isinstance(engine_conf, str):
            engine_conf = json.loads(engine_conf)
    else:
        engine_conf = None

    # Store selection: Cassandra config wins, then an explicitly supplied
    # store object, then the in-memory default.
    if "cassandra" in self.spawn_args:
        cass = self.spawn_args["cassandra"]
        host = cass['hostname']
        username = cass['username']
        password = cass['password']
        port = cass['port']
        keyspace = cass['keyspace']

        store = CassandraControllerStore(self.svc_name, host, port,
                                         username, password, keyspace,
                                         CoreInstance, SensorItem)
        store.initialize()
        store.activate()
    elif 'store' in self.spawn_args:
        store = self.spawn_args['store']
    else:
        store = ControllerStore()

    self.core = ControllerCore(ProvisionerClient(self), engineclass,
                               scoped_name, conf=engine_conf, store=store)

    # run state recovery and engine initialization

    # this one needs to run before any messages start arriving. It pulls
    # information from persistence and refreshes local caches.
    yield self.core.run_recovery()

    # temporarily doing this later due to a potential bug in ioncore where
    # queues may not be bound before slc_init runs. This means if the
    # provisioner is quick to reply to dump_state some messages may be
    # missed.
    reactor.callLater(1, self._delayed_init)
def slc_init(self):
    """Initialize the controller service.

    Sets up the optional work-queue watch, resolves the decision engine
    class and configuration, picks a controller store (Cassandra,
    injected, or in-memory), builds the ControllerCore, runs recovery,
    and schedules the delayed engine init.
    """
    scoped_name = self.get_scoped_name("system", self.svc_name)
    self.scoped_name = scoped_name

    queue_name_work = self.spawn_args.get("queue_name_work")
    if queue_name_work:
        self.queue_name_work = self.get_scoped_name("system",
                                                    queue_name_work)
        extradict = {"queue_name_work": self.queue_name_work}
        cei_events.event(self.svc_name, "init_begin", extra=extradict)
        yield self._make_queue(queue_name_work)

        queuestat_client = QueueStatClient(self)
        yield queuestat_client.watch_queue(self.queue_name_work,
                                           self.scoped_name, 'sensor_info')
        cei_events.event(self.svc_name, "queue_watched")
    else:
        self.worker_queue_receiver = None
        self.queue_name_work = None
        extradict = None
        cei_events.event(self.svc_name, "init_begin", extra=extradict)

    # `key in dict` replaces the deprecated dict.has_key() throughout
    engineclass = "epu.decisionengine.impls.NpreservingEngine"
    if "engine_class" in self.spawn_args:
        engineclass = self.spawn_args["engine_class"]
        log.info("Using configured decision engine: %s" % engineclass)
    else:
        log.info("Using default decision engine: %s" % engineclass)

    if "engine_conf" in self.spawn_args:
        engine_conf = self.spawn_args["engine_conf"]
        # accept either a dict or a JSON-encoded string
        if isinstance(engine_conf, str):
            engine_conf = json.loads(engine_conf)
    else:
        engine_conf = None

    # choose the backing store: Cassandra > injected 'store' > in-memory
    if "cassandra" in self.spawn_args:
        cass = self.spawn_args["cassandra"]
        host = cass['hostname']
        username = cass['username']
        password = cass['password']
        port = cass['port']
        keyspace = cass['keyspace']

        store = CassandraControllerStore(self.svc_name, host, port,
                                         username, password, keyspace,
                                         CoreInstance, SensorItem)
        store.initialize()
        store.activate()
    elif 'store' in self.spawn_args:
        store = self.spawn_args['store']
    else:
        store = ControllerStore()

    self.core = ControllerCore(ProvisionerClient(self), engineclass,
                               scoped_name, conf=engine_conf, store=store)

    # run state recovery and engine initialization

    # this one needs to run before any messages start arriving. It pulls
    # information from persistence and refreshes local caches.
    yield self.core.run_recovery()

    # temporarily doing this later due to a potential bug in ioncore where
    # queues may not be bound before slc_init runs. This means if the
    # provisioner is quick to reply to dump_state some messages may be
    # missed.
    reactor.callLater(1, self._delayed_init)