def test_pub_sub_register_addr(self):
    """End-to-end check that a subscriber can register a second
    publisher address at runtime and still receive its events."""
    if not self.do_test:
        self.skipTest('pub/sub is not enabled')
        return

    state = Namespace()
    state.events_num = 0
    state.events_action = None

    def _db_change_callback(table, key, action, value, topic):
        # Count only events emitted by this test (key == 'log').
        if 'log' == key:
            state.events_num += 1
            state.events_action = action

    publisher = self.get_server_publisher()
    subscriber = self.get_subscriber(_db_change_callback)
    time.sleep(const.DEFAULT_CMD_TIMEOUT)

    action = "log"
    update = db_common.DbUpdate('info', 'log', action, "value")
    update.action = action
    update.topic = db_common.SEND_ALL_TOPIC
    publisher.send_event(update)
    time.sleep(const.DEFAULT_CMD_TIMEOUT)
    self.assertEqual(state.events_action, action)

    # Bring up a second publisher and tell the subscriber about it.
    publisher2 = self.get_server_publisher(port=12346)
    # NOTE(review): the URI uses cfg.CONF.df.publisher_port rather than
    # the explicit 12346 above — presumably the config is set to match;
    # confirm against the test fixture.
    uri = '%s://%s:%s' % (cfg.CONF.df.publisher_transport,
                          '127.0.0.1',
                          cfg.CONF.df.publisher_port)
    subscriber.register_listen_address(uri)
    time.sleep(const.DEFAULT_CMD_TIMEOUT)

    update.action = action
    update.topic = db_common.SEND_ALL_TOPIC
    state.events_action = None
    publisher2.send_event(update)
    time.sleep(const.DEFAULT_CMD_TIMEOUT)
    self.assertEqual(state.events_action, action)

    subscriber.close()
    self.stop_publisher(publisher)
    self.stop_publisher(publisher2)
def test_subscribe_success(self):
    """register_topic/unregister_topic should both complete and return
    None when the underlying pub_sub channel calls succeed."""
    channel = mock.Mock()
    self.RedisSubscriberAgent.pub_sub = channel
    update = db_common.DbUpdate("router", "key", "action", "value",
                                topic='teststring')
    packed = pub_sub_api.pack_message(update.to_dict())
    # A single well-formed message is available on the channel.
    channel.listen.return_value = [{'type': 'message', 'data': packed}]
    channel.subscribe.return_value = 1
    channel.unsubscribe.return_value = 1

    result = self.RedisSubscriberAgent.register_topic('subscribe')
    self.assertIsNone(result)

    result = self.RedisSubscriberAgent.unregister_topic('subscribe')
    self.RedisSubscriberAgent.db_changes_callback = mock.Mock()
    self.RedisSubscriberAgent.db_changes_callback.return_value = 1
    self.assertIsNone(result)
def run(self):
    """Poll cluster topology forever, publishing the node list and
    feeding it to the failover handler each cycle."""
    while True:
        # Fetch cluster topology info every INTERVAL_TIME seconds.
        time.sleep(INTERVAL_TIME)
        try:
            nodes = self.get_cluster_topology_by_all_nodes()
            if nodes:
                if self.publisher is not None:
                    update = db_common.DbUpdate(
                        'ha', 'nodes', 'set',
                        jsonutils.dumps(nodes), topic='redis')
                    self.publisher.send_event(update)
                # Process the newly discovered nodes.
                self.redis_failover_callback(nodes)
        except Exception:
            # Top-level daemon boundary: log and keep polling.
            LOG.exception("exception happened "
                          "when receive messages from plugin")
def _send_event(self, table, key, action, value):
    """Publish a DB update to one randomly chosen neutron listener.

    :param table: DB table name the update belongs to.
    :param key: Key of the updated entry.
    :param action: Action performed on the entry.
    :param value: New value payload.
    """
    listeners = self.nb_api.get_all(core.Listener)
    listeners_num = len(listeners)
    if listeners_num > 1:
        # Sort by timestamp and choose from the latest ones randomly.
        # 1. This can avoid we always choose the same listener in one
        # single interval
        # 2. Compare to choose from whose timestamp is within a threshold,
        # e.g 2 * neutron_listener_report_interval,
        # this way is the easier and can reduce the possibility a dead
        # one is chosen. For users, do not need to figure out what is
        # the best report interval. A big interval increase the possility a
        # dead one is chosen, while a small one may affect the performance
        listeners.sort(key=lambda l: l.timestamp, reverse=True)
        # BUG FIX: use floor division — `len(listeners) / 2` is a float
        # on Python 3 and a float slice bound raises TypeError.
        selected = random.choice(listeners[:listeners_num // 2])
    elif listeners_num == 1:
        selected = listeners[0]
    else:
        LOG.warning("No neutron listener found")
        return
    topic = selected.topic
    update = db_common.DbUpdate(table, key, action, value, topic=topic)
    LOG.info("Publish to neutron %s", topic)
    self.nb_api.publisher.send_event(update)
def test_pub_sub_event_number_different_port(self):
    """Verify each published event is delivered exactly once, both for
    a single send and for a burst of 100 sends."""
    if not self.do_test:
        self.skipTest('pub/sub is not enabled')
        return

    counters = Namespace()
    counters.events_num = 0
    counters.events_action = None

    def _db_change_callback(table, key, action, value, topic):
        # Count only the events this test emits (key == 'log').
        if 'log' == key:
            counters.events_num += 1
            counters.events_action = action

    publisher = self._get_server_publisher()
    self.addCleanup(self._stop_publisher, publisher)
    subscriber = self._get_subscriber(_db_change_callback)
    self.addCleanup(subscriber.close)
    time.sleep(const.DEFAULT_CMD_TIMEOUT)

    baseline = counters.events_num
    action = "log"
    update = db_common.DbUpdate('info', 'log', action,
                                "test ev no diff ports value")
    publisher.send_event(update)
    time.sleep(const.DEFAULT_CMD_TIMEOUT)
    self.assertEqual(baseline + 1, counters.events_num)
    self.assertEqual(counters.events_action, action)

    baseline = counters.events_num
    # Burst 100 events, yielding after each send.
    for _ in six.moves.range(100):
        publisher.send_event(update)
        time.sleep(0)
    time.sleep(const.DEFAULT_CMD_TIMEOUT)
    self.assertEqual(baseline + 100, counters.events_num)
def _append_event_to_queue(self, table, key, action, value, topic):
    """Wrap the change in a DbUpdate and enqueue it for processing."""
    self._queue.put(
        db_common.DbUpdate(table, key, action, value, topic=topic))
    # Yield control so the queue consumer can run.
    time.sleep(0)
def db_change_callback(self, table, key, action, value, topic=None):
    """Queue a DB change notification for asynchronous handling."""
    event = db_common.DbUpdate(table, key, action, value, topic=topic)
    LOG.debug("Pushing Update to Queue: %s", event)
    self._queue.put(event)
    # Yield control so the queue consumer can run.
    time.sleep(0)
def _db_change_callback(self, table, key, action, value, topic=None):
    """Convert a DB change into a DbUpdate and push it onto the queue."""
    self._queue.put(
        db_common.DbUpdate(table, key, action, value, topic=topic))
    # Cooperative yield to the consumer.
    time.sleep(0)
def db_change_callback(self, table, key, action, value, topic=None):
    """Enqueue a DB change event, logging each queued update."""
    event = db_common.DbUpdate(table, key, action, value, topic=topic)
    LOG.info(_LI("Pushing Update to Queue: %s"), event)
    self._queue.put(event)
    # Yield the greenthread so the consumer can process the event.
    eventlet.sleep(0)
def _send_port_status_event(self, table, key, action, value):
    """Publish a port-status change to all registered status listeners."""
    status_keys = self.nb_api.get_all_port_status_keys()
    event = db_common.DbUpdate(table, key, action, value,
                               topic=status_keys)
    self.pub.send_event(event)