def __init__(self, chassis_name):
    self.next_network_id = 0
    self.db_store = db_store.DbStore()
    self.chassis_name = chassis_name
    self.ip = cfg.CONF.df.local_ip
    self.tunnel_type = cfg.CONF.df.tunnel_type
    self.sync_finished = False
    self.port_status_notifier = None
    nb_driver = df_utils.load_driver(
        cfg.CONF.df.nb_db_class,
        df_utils.DF_NB_DB_DRIVER_NAMESPACE)
    self.nb_api = api_nb.NbApi(
        nb_driver,
        use_pubsub=cfg.CONF.df.enable_df_pub_sub)
    self.vswitch_api = vswitch_impl.OvsApi(self.ip)
    if cfg.CONF.df.enable_port_status_notifier:
        self.port_status_notifier = df_utils.load_driver(
            cfg.CONF.df.port_status_notifier,
            df_utils.DF_PORT_STATUS_DRIVER_NAMESPACE)
    kwargs = dict(nb_api=self.nb_api,
                  vswitch_api=self.vswitch_api,
                  db_store=self.db_store)
    app_mgr = app_manager.AppManager.get_instance()
    self.open_flow_app = app_mgr.instantiate(ryu_base_app.RyuDFAdapter,
                                             **kwargs)
    self.topology = None
    self.enable_selective_topo_dist = \
        cfg.CONF.df.enable_selective_topology_distribution
    self.integration_bridge = cfg.CONF.df.integration_bridge

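# -----------------------------------------------------------------------------
# Note: every snippet in this collection resolves its pluggable pieces through
# df_utils.load_driver(name, namespace, ...). The helper below is a minimal,
# illustrative sketch of such a loader built on stevedore entry points; the
# function name and the exact behaviour are assumptions, not the actual
# Dragonflow implementation.
# -----------------------------------------------------------------------------
import stevedore.driver


def load_driver_sketch(driver_name, namespace, *args):
    # Look up the class registered under 'namespace' with the entry-point
    # name 'driver_name', then instantiate it with the given arguments.
    manager = stevedore.driver.DriverManager(
        namespace=namespace,
        name=driver_name,
        invoke_on_load=False,
    )
    return manager.driver(*args)
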
def __init__(self, chassis_name, nb_api):
    # Local cache for objects
    self.db_store = db_store.get_instance()
    self._queue = queue.PriorityQueue()
    # pending_id -> (model, pender_id)
    # 'pending_id' is the ID of the object for which we are waiting.
    # 'model' and 'pender_id' are the model and the ID of the object
    # which is waiting for the object described by 'pending_id'
    self._pending_objects = collections.defaultdict(set)
    self.chassis_name = chassis_name
    # Northbound API
    self.nb_api = nb_api
    # Register the handler for database change events
    self.nb_api.set_db_change_callback(self.db_change_callback)
    # The local host's IP address
    self.ip = cfg.CONF.df.local_ip
    # Virtual tunnel port support multiple tunnel types together
    self.tunnel_types = cfg.CONF.df.tunnel_types
    self.neutron_notifier = None
    if cfg.CONF.df.enable_neutron_notifier:
        self.neutron_notifier = df_utils.load_driver(
            cfg.CONF.df.neutron_notifier,
            df_utils.DF_NEUTRON_NOTIFIER_DRIVER_NAMESPACE)
    # Load the switch backend driver
    self.switch_backend = df_utils.load_driver(
        cfg.CONF.df.switch_backend,
        df_utils.DF_SWITCH_BACKEND_DRIVER_NAMESPACE,
        nb_api,
        cfg.CONF.df.management_ip)
    # Initialize the switch backend
    self.switch_backend.initialize(self.db_change_callback,
                                   self.neutron_notifier)
    self.topology = None
    self.enable_selective_topo_dist = \
        cfg.CONF.df.enable_selective_topology_distribution
    self._sync = sync.Sync(
        nb_api=self.nb_api,
        # Update/delete callbacks used by the sync mechanism
        update_cb=self.update,
        delete_cb=self.delete,
        selective=self.enable_selective_topo_dist,
    )
    # Periodically emit a controller sync event
    self._sync_pulse = loopingcall.FixedIntervalLoopingCall(
        self._submit_sync_event)
    self.sync_rate_limiter = df_utils.RateLimiter(
        max_rate=1, time_unit=db_common.DB_SYNC_MINIMUM_INTERVAL)

def test_multiproc_pub_sub(self):
    if not self.do_test:
        self.skipTest('pub/sub is not enabled')
        return
    self.event_received = False
    cfg.CONF.set_override('publisher_multiproc_socket',
                          '/tmp/ipc_test_socket', group='df')
    pub_sub_driver = df_utils.load_driver(
        cfg.CONF.df.pub_sub_multiproc_driver,
        df_utils.DF_PUBSUB_DRIVER_NAMESPACE)
    publisher = pub_sub_driver.get_publisher()
    publisher.initialize()
    self.subscriber = pub_sub_driver.get_subscriber()
    self.subscriber.initialize(self._handle_received_event)
    self.subscriber.daemonize()
    publisher.send_event(self.event)
    test_utils.wait_until_true(lambda: self.event_received)
    self.subscriber.close()
    self.subscriber = None
    # Check that we received the same event
    self.assertEqual(self.event.table, self.event_received_info.table)
    self.assertEqual(self.event.key, self.event_received_info.key)
    self.assertEqual(self.event.action, self.event_received_info.action)
    # Value is not tested, since it's currently set to None
    # self.assertEqual(self.event.value, self.event_received_info.value)
    self.assertEqual(self.event.topic, self.event_received_info.topic)

def get_publisher():
    pub_sub_driver = df_utils.load_driver(
        cfg.CONF.df.pub_sub_multiproc_driver,
        df_utils.DF_PUBSUB_DRIVER_NAMESPACE)
    publisher = pub_sub_driver.get_publisher()
    publisher.initialize()
    return publisher

def __init__(self, chassis_name, nb_api):
    self.db_store = db_store.get_instance()
    self.chassis_name = chassis_name
    self.nb_api = nb_api
    self.ip = cfg.CONF.df.local_ip
    # Virtual tunnel port support multiple tunnel types together
    self.tunnel_types = cfg.CONF.df.tunnel_types
    self.sync_finished = False
    self.vswitch_api = vswitch_impl.OvsApi(cfg.CONF.df.management_ip)
    self.neutron_notifier = None
    if cfg.CONF.df.enable_neutron_notifier:
        self.neutron_notifier = df_utils.load_driver(
            cfg.CONF.df.neutron_notifier,
            df_utils.DF_NEUTRON_NOTIFIER_DRIVER_NAMESPACE)
    app_mgr = app_manager.AppManager.get_instance()
    self.open_flow_app = app_mgr.instantiate(
        ryu_base_app.RyuDFAdapter,
        nb_api=self.nb_api,
        vswitch_api=self.vswitch_api,
        neutron_server_notifier=self.neutron_notifier,
    )
    self.topology = None
    self.db_consistency_manager = None
    self.enable_db_consistency = cfg.CONF.df.enable_df_db_consistency
    self.enable_selective_topo_dist = \
        cfg.CONF.df.enable_selective_topology_distribution

def _get_publisher(self, pubsub_driver_name):
    pub_sub_driver = df_utils.load_driver(
        pubsub_driver_name,
        df_utils.DF_PUBSUB_DRIVER_NAMESPACE)
    publisher = pub_sub_driver.get_publisher()
    publisher.initialize()
    return publisher

def test_notify_update_fip_status(self):
    cfg.CONF.set_override('neutron_notifier',
                          'nb_api_neutron_notifier_driver',
                          group='df')
    notifier = df_utils.load_driver(
        cfg.CONF.df.neutron_notifier,
        df_utils.DF_NEUTRON_NOTIFIER_DRIVER_NAMESPACE)
    kwargs = {'arg_list': ('router:external', ),
              'router:external': True}
    with self.network(**kwargs) as n:
        with self.subnet(network=n):
            floatingip = self.l3p.create_floatingip(
                self.context,
                {'floatingip': {
                    'floating_network_id': n['network']['id'],
                    'tenant_id': n['network']['tenant_id']
                }})
            self.assertEqual(n_const.FLOATINGIP_STATUS_DOWN,
                             floatingip['status'])
            notifier.notify_neutron_server(df_l3.FloatingIp.table_name,
                                           floatingip['id'],
                                           "update",
                                           n_const.FLOATINGIP_STATUS_ACTIVE)
            floatingip = self.l3p.get_floatingip(self.context,
                                                 floatingip['id'])
            self.assertEqual(n_const.FLOATINGIP_STATUS_ACTIVE,
                             floatingip['status'])

def get_server_publisher(): cfg.CONF.df.publisher_port = "12345" cfg.CONF.df.publisher_bind_address = "127.0.0.1" pub_sub_driver = df_utils.load_driver(cfg.CONF.df.pub_sub_driver, df_utils.DF_PUBSUB_DRIVER_NAMESPACE) publisher = pub_sub_driver.get_publisher() publisher.initialize() return publisher
def setUp(self):
    super(TestDbApi, self).setUp()
    self.driver = df_utils.load_driver(
        cfg.CONF.df.nb_db_class,
        df_utils.DF_NB_DB_DRIVER_NAMESPACE)
    self.driver.initialize(cfg.CONF.df.remote_db_ip,
                           cfg.CONF.df.remote_db_port,
                           config=cfg.CONF.df)

def _get_publisher(self):
    if self.pub_sub_use_multiproc:
        pubsub_driver_name = cfg.CONF.df.pub_sub_multiproc_driver
    else:
        pubsub_driver_name = cfg.CONF.df.pub_sub_driver
    pub_sub_driver = df_utils.load_driver(
        pubsub_driver_name,
        df_utils.DF_PUBSUB_DRIVER_NAMESPACE)
    return pub_sub_driver.get_publisher()

def get_server_publisher(): cfg.CONF.df.publisher_port = "12345" cfg.CONF.df.publisher_bind_address = "127.0.0.1" pub_sub_driver = df_utils.load_driver( cfg.CONF.df.pub_sub_driver, df_utils.DF_PUBSUB_DRIVER_NAMESPACE) publisher = pub_sub_driver.get_publisher() publisher.initialize() return publisher
def _get_subscriber(self): """ Return the subscriber for inter-process communication. If multi-proc communication is not use (i.e. disabled from config), return None. """ pub_sub_driver = df_utils.load_driver( cfg.CONF.df.pub_sub_driver, df_utils.DF_PUBSUB_DRIVER_NAMESPACE) return pub_sub_driver.get_subscriber()
def __init__(self, conf):
    super(DFMetadataProxyHandler, self).__init__()
    self.conf = conf
    nb_driver = df_utils.load_driver(
        cfg.CONF.df.nb_db_class,
        df_utils.DF_NB_DB_DRIVER_NAMESPACE)
    self.nb_api = api_nb.NbApi(
        nb_driver,
        use_pubsub=cfg.CONF.df.enable_df_pub_sub)
    self.nb_api.initialize(db_ip=cfg.CONF.df.remote_db_ip,
                           db_port=cfg.CONF.df.remote_db_port)

def _get_publisher(self):
    if cfg.CONF.df.pub_sub_use_multiproc:
        pubsub_driver_name = cfg.CONF.df.pub_sub_multiproc_driver
    else:
        pubsub_driver_name = cfg.CONF.df.pub_sub_driver
    pub_sub_driver = df_utils.load_driver(
        pubsub_driver_name,
        df_utils.DF_PUBSUB_DRIVER_NAMESPACE)
    return pub_sub_driver.get_publisher()

def get_publisher():
    if cfg.CONF.df.pub_sub_use_multiproc:
        pubsub_driver_name = cfg.CONF.df.pub_sub_multiproc_driver
    else:
        pubsub_driver_name = cfg.CONF.df.pub_sub_driver
    pub_sub_driver = df_utils.load_driver(
        pubsub_driver_name,
        df_utils.DF_PUBSUB_DRIVER_NAMESPACE)
    publisher = pub_sub_driver.get_publisher()
    publisher.initialize()
    return publisher

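# Usage sketch for the publisher selection above: override the real
# 'pub_sub_use_multiproc' option in the 'df' group (the same option the
# functions in this collection read) and fetch an initialized publisher.
# The surrounding wiring (option registration, driver entry points) is
# assumed to already be in place; this only illustrates the call sequence.
from oslo_config import cfg

cfg.CONF.set_override('pub_sub_use_multiproc', False, group='df')
publisher = get_publisher()    # the helper defined directly above
# publisher.send_event(...)    # publish a DB update event, as the tests above do
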
def get_instance():
    global _nb_api
    if _nb_api is None:
        nb_driver = df_utils.load_driver(
            cfg.CONF.df.nb_db_class,
            df_utils.DF_NB_DB_DRIVER_NAMESPACE)
        nb_api = NbApi(nb_driver)
        ip, port = get_db_ip_port()
        nb_api._initialize(db_ip=ip, db_port=port)
        _nb_api = nb_api
    return _nb_api

def post_fork_initialize(self, resource, event, trigger, **kwargs):
    # NOTE(nick-ma-z): This will initialize all workers (API, RPC,
    # plugin service, etc) and threads with network connections.
    self.nb_api = api_nb.NbApi.get_instance(True)
    df_qos.initialize(self.nb_api)
    if cfg.CONF.df.enable_neutron_notifier:
        neutron_notifier = df_utils.load_driver(
            cfg.CONF.df.neutron_notifier,
            df_utils.DF_NEUTRON_NOTIFIER_DRIVER_NAMESPACE)
        neutron_notifier.initialize(self.nb_api, is_neutron_server=True)
    self.port_status = None

def _get_multiproc_subscriber(self): """ Return the subscriber for inter-process communication. If multi-proc communication is not use (i.e. disabled from config), return None. """ if not cfg.CONF.df.pub_sub_use_multiproc: return None pub_sub_driver = df_utils.load_driver( cfg.CONF.df.pub_sub_multiproc_driver, df_utils.DF_PUBSUB_DRIVER_NAMESPACE) return pub_sub_driver.get_subscriber()
def get_subscriber(callback):
    pub_sub_driver = df_utils.load_driver(
        cfg.CONF.df.pub_sub_driver,
        df_utils.DF_PUBSUB_DRIVER_NAMESPACE)
    subscriber = pub_sub_driver.get_subscriber()
    subscriber.initialize(callback)
    subscriber.register_topic(db_common.SEND_ALL_TOPIC)
    uri = '%s://%s:%s' % (cfg.CONF.df.publisher_transport,
                          '127.0.0.1',
                          cfg.CONF.df.publisher_port)
    subscriber.register_listen_address(uri)
    subscriber.daemonize()
    return subscriber

def __init__(self):
    self._queue = queue.Queue()
    self.publisher = self._get_publisher()
    self.multiproc_subscriber = self._get_multiproc_subscriber()
    self.db = df_utils.load_driver(
        cfg.CONF.df.nb_db_class,
        df_utils.DF_NB_DB_DRIVER_NAMESPACE)
    self.uuid = pub_sub_api.generate_publisher_uuid()
    self._rate_limit = df_utils.RateLimiter(
        cfg.CONF.df.publisher_rate_limit_count,
        cfg.CONF.df.publisher_rate_limit_timeout,
    )

def get_instance(is_neutron_server):
    global _nb_api
    if _nb_api is None:
        nb_driver = df_utils.load_driver(
            cfg.CONF.df.nb_db_class,
            df_utils.DF_NB_DB_DRIVER_NAMESPACE)
        nb_api = NbApi(nb_driver,
                       use_pubsub=cfg.CONF.df.enable_df_pub_sub,
                       is_neutron_server=is_neutron_server)
        nb_api.initialize(db_ip=cfg.CONF.df.remote_db_ip,
                          db_port=cfg.CONF.df.remote_db_port)
        _nb_api = nb_api
    return _nb_api

def get_instance():
    global _nb_api
    if _nb_api is None:
        # Load the northbound database driver
        nb_driver = df_utils.load_driver(
            cfg.CONF.df.nb_db_class,
            df_utils.DF_NB_DB_DRIVER_NAMESPACE)
        nb_api = NbApi(nb_driver)
        # Get the IP and port of the first northbound DB and initialize
        # the northbound API with them
        ip, port = get_db_ip_port()
        nb_api._initialize(db_ip=ip, db_port=port)
        _nb_api = nb_api
    # Return the northbound database API
    return _nb_api

def post_fork_initialize(self, resource, event, trigger, **kwargs):
    # NOTE(nick-ma-z): This will initialize all workers (API, RPC,
    # plugin service, etc) and threads with network connections.
    self.nb_api = api_nb.NbApi.get_instance(True)
    if cfg.CONF.df.enable_port_status_notifier:
        port_status_notifier = df_utils.load_driver(
            cfg.CONF.df.port_status_notifier,
            df_utils.DF_PORT_STATUS_DRIVER_NAMESPACE)
        self.port_status_notifier = port_status_notifier
        self.port_status_notifier.initialize(self, self.nb_api,
                                             pub=None,
                                             sub=self.nb_api.subscriber,
                                             is_neutron_server=True)
    self.port_status = None

def get_subscriber(self, callback):
    pub_sub_driver = df_utils.load_driver(
        cfg.CONF.df.pub_sub_driver,
        df_utils.DF_PUBSUB_DRIVER_NAMESPACE)
    subscriber = pub_sub_driver.get_subscriber()
    subscriber.initialize(callback)
    subscriber.register_topic(db_common.SEND_ALL_TOPIC)
    uri = '%s://%s:%s' % (cfg.CONF.df.publisher_transport,
                          cfg.CONF.host,
                          cfg.CONF.df.publisher_port)
    subscriber.register_listen_address(uri)
    publishers = self.nb_api.get_all(core.Publisher)
    for publisher in publishers:
        subscriber.register_listen_address(publisher.uri)
    subscriber.daemonize()
    return subscriber

def get_subscriber(callback):
    pub_sub_driver = df_utils.load_driver(
        cfg.CONF.df.pub_sub_driver,
        df_utils.DF_PUBSUB_DRIVER_NAMESPACE)
    subscriber = pub_sub_driver.get_subscriber()
    subscriber.initialize(callback)
    uri = '%s://%s:%s' % (
        cfg.CONF.df.publisher_transport,
        '127.0.0.1',
        cfg.CONF.df.publisher_port
    )
    subscriber.register_listen_address(uri)
    subscriber.daemonize()
    return subscriber

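# Usage sketch for the subscriber helpers above. The callback signature is an
# assumption inferred from the event fields the pub/sub tests in this
# collection compare (table, key, action, value, topic); adjust it to the
# real driver contract if it differs.
def on_db_change(table, key, action, value, topic=None):
    print('db change: table=%s key=%s action=%s topic=%s'
          % (table, key, action, topic))


subscriber = get_subscriber(on_db_change)
# On shutdown, the tests above use close() or stop() depending on the version:
# subscriber.close()
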
def setUp(self):
    cfg.CONF.set_override('neutron_notifier',
                          'nb_api_neutron_notifier_driver',
                          group='df')
    mock.patch('dragonflow.db.neutron.lockedobjects_db.wrap_db_lock',
               side_effect=utils.empty_wrapper).start()
    super(TestNbApiNeutronNotifier, self).setUp()
    self.notifier = df_utils.load_driver(
        cfg.CONF.df.neutron_notifier,
        df_utils.DF_NEUTRON_NOTIFIER_DRIVER_NAMESPACE)
    self.notifier.nb_api = mock.Mock()
    getppid_patch = mock.patch('os.getppid', return_value=1)
    self.addCleanup(getppid_patch.stop)
    getppid_patch.start()

def test_multiproc_pub_sub(self):
    if not self.do_test:
        return
    self.event_received = False
    cfg.CONF.set_override('publisher_multiproc_socket',
                          '/tmp/ipc_test_socket', group='df')
    pub_sub_driver = df_utils.load_driver(
        cfg.CONF.df.pub_sub_multiproc_driver,
        df_utils.DF_PUBSUB_DRIVER_NAMESPACE)
    publisher = pub_sub_driver.get_publisher()
    publisher.initialize()
    self.subscriber = pub_sub_driver.get_subscriber()
    self.subscriber.initialize(self._verify_event)
    self.subscriber.daemonize()
    publisher.send_event(self.event)
    test_utils.wait_until_true(lambda: self.event_received)

def __init__(self, chassis_name, nb_api):
    self.db_store = db_store.get_instance()
    self._queue = queue.PriorityQueue()
    # pending_id -> (model, pender_id)
    # 'pending_id' is the ID of the object for which we are waiting.
    # 'model' and 'pender_id' are the model and the ID of the object
    # which is waiting for the object described by 'pending_id'
    self._pending_objects = collections.defaultdict(set)
    self.chassis_name = chassis_name
    self.nb_api = nb_api
    self.nb_api.set_db_change_callback(self.db_change_callback)
    self.ip = cfg.CONF.df.local_ip
    # Virtual tunnel port support multiple tunnel types together
    self.tunnel_types = cfg.CONF.df.tunnel_types
    self.sync_finished = False
    self.vswitch_api = vswitch_impl.OvsApi(cfg.CONF.df.management_ip)
    self.neutron_notifier = None
    if cfg.CONF.df.enable_neutron_notifier:
        self.neutron_notifier = df_utils.load_driver(
            cfg.CONF.df.neutron_notifier,
            df_utils.DF_NEUTRON_NOTIFIER_DRIVER_NAMESPACE)
    app_mgr = app_manager.AppManager.get_instance()
    self.open_flow_app = app_mgr.instantiate(
        ryu_base_app.RyuDFAdapter,
        nb_api=self.nb_api,
        vswitch_api=self.vswitch_api,
        neutron_server_notifier=self.neutron_notifier,
        db_change_callback=self.db_change_callback
    )
    # The OfctlService is needed to support the 'get_flows' method
    self.open_flow_service = app_mgr.instantiate(of_service.OfctlService)
    self.topology = None
    self.enable_selective_topo_dist = \
        cfg.CONF.df.enable_selective_topology_distribution
    self._sync = sync.Sync(
        nb_api=self.nb_api,
        update_cb=self.update,
        delete_cb=self.delete,
        selective=self.enable_selective_topo_dist,
    )
    self._sync_pulse = loopingcall.FixedIntervalLoopingCall(
        self._submit_sync_event)
    self.sync_rate_limiter = df_utils.RateLimiter(
        max_rate=1, time_unit=db_common.DB_SYNC_MINIMUM_INTERVAL)

def test_multiproc_pub_sub(self):
    if not self.do_test:
        return
    self.event_received = False
    cfg.CONF.df.publisher_multiproc_socket = '/tmp/ipc_test_socket'
    pub_sub_driver = df_utils.load_driver(
        cfg.CONF.df.pub_sub_multiproc_driver,
        df_utils.DF_PUBSUB_DRIVER_NAMESPACE)
    publisher = pub_sub_driver.get_publisher()
    publisher.initialize()
    self.subscriber = pub_sub_driver.get_subscriber()
    self.subscriber.initialize(self._verify_event)
    self.subscriber.daemonize()
    publisher.send_event(self.event)
    wait_until_true(lambda: self.event_received)
    self.subscriber.stop()
    self.subscriber = None

def get_instance(is_neutron_server, is_external_app=False):
    global _nb_api
    if _nb_api is None:
        nb_driver = df_utils.load_driver(
            cfg.CONF.df.nb_db_class,
            df_utils.DF_NB_DB_DRIVER_NAMESPACE)
        # Do not use pubsub for external apps - it causes issues with
        # threads, among other problems.
        use_pubsub = cfg.CONF.df.enable_df_pub_sub
        if is_external_app:
            use_pubsub = False
        nb_api = NbApi(nb_driver,
                       use_pubsub=use_pubsub,
                       is_neutron_server=is_neutron_server)
        ip, port = get_db_ip_port()
        nb_api.initialize(db_ip=ip, db_port=port)
        _nb_api = nb_api
    return _nb_api

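# Usage sketch for the NbApi singleton accessors above. get_all(core.Publisher)
# is the same lookup used by get_subscriber() elsewhere in this collection;
# the import paths are assumptions and may differ between Dragonflow versions.
from dragonflow.db import api_nb
from dragonflow.db.models import core

nb_api = api_nb.NbApi.get_instance(False)
for publisher in nb_api.get_all(core.Publisher):
    print(publisher.uri)
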
def get_instance(is_neutron_server):
    LOG.debug("api_nb::get_instance: Enter")
    global _nb_api
    if _nb_api is None:
        LOG.debug("api_nb::get_instance: Initialising new driver: %s",
                  cfg.CONF.df.nb_db_class)
        nb_driver = df_utils.load_driver(
            cfg.CONF.df.nb_db_class,
            df_utils.DF_NB_DB_DRIVER_NAMESPACE)
        LOG.debug(
            "api_nb::get_instance: Initialising new NbAPI instance: %s",
            cfg.CONF.df.enable_df_pub_sub)
        nb_api = NbApi(nb_driver,
                       use_pubsub=cfg.CONF.df.enable_df_pub_sub,
                       is_neutron_server=is_neutron_server)
        LOG.debug("api_nb::get_instance: About to connect to DB")
        nb_api.initialize(db_ip=cfg.CONF.df.remote_db_ip,
                          db_port=cfg.CONF.df.remote_db_port)
        _nb_api = nb_api
    LOG.debug("api_nb::get_instance: Done!")
    return _nb_api

def __init__(self, chassis_name, nb_api):
    self.db_store = db_store.get_instance()
    self.chassis_name = chassis_name
    self.nb_api = nb_api
    self.ip = cfg.CONF.df.local_ip
    # Virtual tunnel port support multiple tunnel types together
    self.tunnel_types = cfg.CONF.df.tunnel_types
    self.sync_finished = False
    self.vswitch_api = vswitch_impl.OvsApi(cfg.CONF.df.management_ip)
    self.neutron_notifier = None
    if cfg.CONF.df.enable_neutron_notifier:
        self.neutron_notifier = df_utils.load_driver(
            cfg.CONF.df.neutron_notifier,
            df_utils.DF_NEUTRON_NOTIFIER_DRIVER_NAMESPACE)
    app_mgr = app_manager.AppManager.get_instance()
    self.open_flow_app = app_mgr.instantiate(
        ryu_base_app.RyuDFAdapter,
        nb_api=self.nb_api,
        vswitch_api=self.vswitch_api,
        neutron_server_notifier=self.neutron_notifier,
    )
    # The OfctlService is needed to support the 'get_flows' method
    self.open_flow_service = app_mgr.instantiate(of_service.OfctlService)
    self.topology = None
    self.enable_selective_topo_dist = \
        cfg.CONF.df.enable_selective_topology_distribution
    self._sync = sync.Sync(
        nb_api=self.nb_api,
        update_cb=self.update,
        delete_cb=self.delete,
        selective=self.enable_selective_topo_dist,
    )
    self._sync_pulse = loopingcall.FixedIntervalLoopingCall(
        self._submit_sync_event)
    self.sync_rate_limiter = df_utils.RateLimiter(
        max_rate=1, time_unit=db_common.DB_SYNC_MINIMUM_INTERVAL)

def initialize(self): LOG.info(_LI("Starting DFMechDriver")) # When set to True, Nova plugs the VIF directly into the ovs bridge # instead of using the hybrid mode. self.vif_details = {portbindings.CAP_PORT_FILTER: True} self.vif_type = portbindings.VIF_TYPE_OVS self._set_base_port_binding() self.port_status = n_const.PORT_STATUS_ACTIVE self.nb_api = api_nb.NbApi.get_instance(True) if cfg.CONF.df.enable_port_status_notifier: port_status_notifier = df_utils.load_driver( cfg.CONF.df.port_status_notifier, df_utils.DF_PORT_STATUS_DRIVER_NAMESPACE) self.port_status_notifier = port_status_notifier self.port_status_notifier.initialize(self, self.nb_api, pub=None, sub=self.nb_api.subscriber, is_neutron_server=True) self.port_status = None registry.subscribe(self.update_security_group, resources.SECURITY_GROUP, events.AFTER_CREATE) registry.subscribe(self.update_security_group, resources.SECURITY_GROUP, events.AFTER_UPDATE) registry.subscribe(self.delete_security_group, resources.SECURITY_GROUP, events.BEFORE_DELETE) registry.subscribe(self.create_security_group_rule, resources.SECURITY_GROUP_RULE, events.AFTER_CREATE) registry.subscribe(self.delete_security_group_rule, resources.SECURITY_GROUP_RULE, events.AFTER_DELETE)
def main():
    parser = argparse.ArgumentParser(usage="missing command name "
                                           "(use --help for help)")
    subparsers = parser.add_subparsers(title='subcommands',
                                       description='valid subcommands')
    add_table_command(subparsers)
    add_ls_command(subparsers)
    add_dump_command(subparsers)
    add_get_command(subparsers)
    add_bind_command(subparsers)
    add_clean_command(subparsers)
    add_rm_command(subparsers)
    add_init_command(subparsers)
    add_dropall_command(subparsers)
    args = parser.parse_args()
    df_utils.config_parse()
    db_driver = df_utils.load_driver(
        cfg.CONF.df.nb_db_class,
        df_utils.DF_NB_DB_DRIVER_NAMESPACE)
    db_driver.initialize(db_ip=cfg.CONF.df.remote_db_ip,
                         db_port=cfg.CONF.df.remote_db_port,
                         config=cfg.CONF.df)
    args.handle(db_driver, args)

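# Hypothetical sketch of one of the add_*_command helpers wired up in main()
# above. The 'ls' semantics and the db_driver.get_all_keys() call are
# assumptions about the NB DB driver API; the point is only to show how
# args.handle(db_driver, args) gets bound via set_defaults().
def add_ls_command(subparsers):
    def handle(db_driver, args):
        # List the keys stored in the given table (assumed driver method).
        for key in db_driver.get_all_keys(args.table):
            print(key)

    parser = subparsers.add_parser('ls', help='list keys in a table')
    parser.add_argument('table')
    parser.set_defaults(handle=handle)
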
def _get_subscriber(self):
    pub_sub_driver = df_utils.load_driver(
        cfg.CONF.df.pub_sub_driver,
        df_utils.DF_PUBSUB_DRIVER_NAMESPACE)
    return pub_sub_driver.get_subscriber()