def __init__(self):
    super(OpflexTrunkMixin, self).__init__()
    self.managed_trunks = {}
    registry.unsubscribe(self.handle_trunks, resources.TRUNK)
    registry.register(self.handle_subports, resources.SUBPORT)
    self._context = n_context.get_admin_context_without_session()
    self.trunk_rpc = agent.TrunkStub()
def _report_state(self):
    LOG.debug("Report state task started")
    try:
        self.agent_state.get('configurations').update(
            self.cache.get_state())
        ctx = context.get_admin_context_without_session()
        agent_status = self.state_rpc.report_state(ctx, self.agent_state,
                                                   True)
        if agent_status == agent_consts.AGENT_REVIVED:
            LOG.info(_LI("Agent has just been revived. "
                         "Scheduling full sync"))
            self.schedule_full_resync(
                reason=_("Agent has just been revived"))
    except AttributeError:
        # This means the server does not support report_state
        LOG.warning(_LW("Neutron server does not support state report. "
                        "State report for this agent will be disabled."))
        self.heartbeat.stop()
        self.run()
        return
    except Exception:
        LOG.exception(_LE("Failed reporting state!"))
        return
    if self.agent_state.pop('start_flag', None):
        self.run()
def __init__(self, host, conf=None):
    try:
        sock_dir, sock_mode = utils.get_socket_settings()
    except Exception:
        sock_dir = constants.VHOSTUSER_SOCKET_DIR
        sock_mode = constants.VHOSTUSER_SOCKET_MODE
        LOG.warning("Cannot get vhostuser socket info from fp-vdev, use "
                    "default path '%s' and mode '%s'" % (sock_dir,
                                                         sock_mode))
    self.fp_info = {
        'timestamp': '',
        'product': 'virtual-accelerator',
        'active': False,
        'vhostuser_socket_dir': sock_dir,
        'vhostuser_socket_prefix': constants.VHOSTUSER_SOCKET_PREFIX,
        'vhostuser_socket_mode': sock_mode,
        'supported_plugs': ['ovs', 'bridge', 'tap'],
    }
    self.agent_state = {
        'binary': 'neutron-fastpath-agent',
        'host': cfg.CONF.host,
        'topic': n_constants.L2_AGENT_TOPIC,
        'configurations': self.fp_info,
        'start_flag': True,
        'agent_type': constants.FP_AGENT_TYPE,
    }
    self.ctx = context.get_admin_context_without_session()
    self._setup_rpc()
def __init__(self, conf):
    super(LbaasAgentManager, self).__init__(conf)
    self.conf = conf
    self.context = ncontext.get_admin_context_without_session()
    self.serializer = agent_driver_base.DataModelSerializer()
    self.plugin_rpc = agent_api.LbaasAgentApi(
        lb_const.LOADBALANCER_PLUGINV2,
        self.context,
        self.conf.host
    )
    self._process_monitor = external_process.ProcessMonitor(
        config=self.conf, resource_type='loadbalancer')
    self._load_drivers()
    self.agent_state = {
        'binary': 'neutron-lbaasv2-agent',
        'host': conf.host,
        'topic': lb_const.LOADBALANCER_AGENTV2,
        'configurations': {'device_drivers': self.device_drivers.keys()},
        'agent_type': lb_const.AGENT_TYPE_LOADBALANCERV2,
        'start_flag': True}
    self.admin_state_up = True
    self._setup_state_rpc()
    self.needs_resync = False
    # pool_id->device_driver_name mapping used to store known instances
    self.instance_mapping = {}
def __init__(self, host, conf=None):
    try:
        sock_dir, sock_mode = utils.get_socket_settings()
    except Exception:
        sock_dir = constants.VHOSTUSER_SOCKET_DIR
        sock_mode = constants.VHOSTUSER_SOCKET_MODE
        LOG.warning("Cannot get vhostuser socket info from fp-vdev, use "
                    "default path '%s' and mode '%s'" % (sock_dir,
                                                         sock_mode))
    self.fp_info = {
        'timestamp': '',
        'product': 'virtual-accelerator',
        'active': False,
        'vhostuser_socket_dir': sock_dir,
        'vhostuser_socket_mode': sock_mode,
        'supported_plugs': ['ovs', 'bridge', 'tap'],
    }
    self.agent_state = {
        'binary': 'neutron-fastpath-agent',
        'host': cfg.CONF.host,
        'topic': n_constants.L2_AGENT_TOPIC,
        'configurations': self.fp_info,
        'start_flag': True,
        'agent_type': constants.FP_AGENT_TYPE,
    }
    self.ctx = context.get_admin_context_without_session()
    self._setup_rpc()
def context(self):
    # TODO(kevinbenton): the context should really be passed in to each of
    # these methods so a call can be tracked all of the way through the
    # system but that will require a larger refactor to pass the context
    # everywhere. We just generate a new one here on each call so requests
    # can be independently tracked server side.
    return context.get_admin_context_without_session()
def test_neutron_context_admin_without_session_to_dict(self):
    ctx = context.get_admin_context_without_session()
    ctx_dict = ctx.to_dict()
    self.assertIsNone(ctx_dict['user_id'])
    self.assertIsNone(ctx_dict['tenant_id'])
    self.assertIsNone(ctx_dict['auth_token'])
    self.assertFalse(hasattr(ctx, 'session'))
def __init__(self, conf):
    super(LbaasAgentManager, self).__init__(conf)
    self.conf = conf
    self.context = ncontext.get_admin_context_without_session()
    self.serializer = agent_driver_base.DataModelSerializer()
    self.plugin_rpc = agent_api.LbaasAgentApi(
        lb_const.LOADBALANCER_PLUGINV2,
        self.context,
        self.conf.host)
    self._process_monitor = external_process.ProcessMonitor(
        config=self.conf, resource_type='loadbalancer')
    self._load_drivers()
    self.agent_state = {
        'binary': 'neutron-lbaasv2-agent',
        'host': conf.host,
        'topic': lb_const.LOADBALANCER_AGENTV2,
        'configurations': {
            'device_drivers': self.device_drivers.keys()
        },
        'agent_type': lb_const.AGENT_TYPE_LOADBALANCERV2,
        'start_flag': True
    }
    self.admin_state_up = True
    self._setup_state_rpc()
    self.needs_resync = False
    # pool_id->device_driver_name mapping used to store known instances
    self.instance_mapping = {}
def start(self):
    # stores all configured ports on agent
    self.network_ports = collections.defaultdict(list)
    # flag to do a sync after revival
    self.fullsync = False
    self.context = context.get_admin_context_without_session()
    self.setup_rpc()
    self.init_extension_manager(self.connection)

    configurations = {'extensions': self.ext_manager.names()}
    configurations.update(self.mgr.get_agent_configurations())

    # TODO(mangelajo): optimize resource_versions (see ovs agent)
    self.agent_state = {
        'binary': self.agent_binary,
        'host': cfg.CONF.host,
        'topic': constants.L2_AGENT_TOPIC,
        'configurations': configurations,
        'agent_type': self.agent_type,
        'resource_versions': resources.LOCAL_RESOURCE_VERSIONS,
        'start_flag': True
    }

    report_interval = cfg.CONF.AGENT.report_interval
    if report_interval:
        heartbeat = loopingcall.FixedIntervalLoopingCall(
            self._report_state)
        heartbeat.start(interval=report_interval)

    capabilities.notify_init_event(self.agent_type, self)
    # The initialization is complete; we can start receiving messages
    self.connection.consume_in_threads()

    self.daemon_loop()
def __init__(self):
    self.context = context.get_admin_context_without_session()
    self.agent_id = uuidutils.generate_uuid(dashed=True)
    self.agent_host = socket.gethostname()
    # Set up oslo_messaging notifier and listener to keep track of other
    # members
    # NOTE(hjensas): Override the control_exchange for the notification
    # transport to allow setting amqp_auto_delete = true.
    # TODO(hjensas): Remove this and override the exchange when setting up
    # the notifier once the fix for bug is available.
    # https://bugs.launchpad.net/oslo.messaging/+bug/1814797
    CONF.set_override('control_exchange', 'ironic-neutron-agent')
    self.transport = oslo_messaging.get_notification_transport(
        CONF, url=_get_notification_transport_url())
    self.notifier = _set_up_notifier(self.transport, self.agent_id)
    # Note(hjensas): We need to have listener consuming the non-pool queue.
    # See bug: https://bugs.launchpad.net/oslo.messaging/+bug/1814544
    self.listener = _set_up_listener(self.transport, None)
    self.pool_listener = _set_up_listener(
        self.transport, '-'.join(
            ['ironic-neutron-agent-member-manager-pool', self.agent_id]))
    self.member_manager = HashRingMemberManagerNotificationEndpoint()
    self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
    self.ironic_client = ironic_client.get_client()
    self.reported_nodes = {}
    LOG.info('Agent networking-baremetal initialized.')
def _report_state(self):
    try:
        self.agent_state.get('configurations').update(
            self.cache.get_state())
        ctx = context.get_admin_context_without_session()
        agent_status = self.state_rpc.report_state(
            ctx, self.agent_state, True)
        if agent_status == n_const.AGENT_REVIVED:
            LOG.info("Agent has just been revived. "
                     "Scheduling full sync")
            self.schedule_resync("Agent has just been revived")
    except AttributeError:
        # This means the server does not support report_state
        LOG.warning("Neutron server does not support state report. "
                    "State report for this agent will be disabled.")
        self.heartbeat.stop()
        self.run()
        return
    except Exception:
        LOG.exception("Failed reporting state!")
        # Ensure that we resync our state the next time we successfully
        # connect to the server because it may have moved networks
        # off of this agent.
        self.needs_resync = True
        return
    if self.agent_state.pop('start_flag', None):
        self.run()
def __init__(self): """Initializes local configuration of the current agent. :param conf: dict or dict-like object containing the configuration details used by this Agent. If None is specified, default values are used instead. """ self._agent_id = None self._topic = topics.AGENT self._cache_lock = threading.Lock() self._refresh_cache = False self._host = CONF.get("host") self._agent_state = {} self._context = neutron_context.get_admin_context_without_session() self._utils = utilsfactory.get_networkutils() self._utils.init_caches() # The following attributes will be initialized by the # `_setup_rpc` method. self._client = None self._connection = None self._endpoints = [] self._plugin_rpc = None self._sg_plugin_rpc = None self._state_rpc = None agent_config = CONF.get("AGENT", {}) self._polling_interval = agent_config.get('polling_interval', 2)
def __init__(self):
    self.agent_type = nsxv3_constants.NSXV3_AGENT_TYPE
    LOG.info("Initializing Mechanism Driver Type=" + str(self.agent_type))

    self.context = context.get_admin_context_without_session()

    sg_enabled = securitygroups_rpc.is_firewall_enabled()
    LOG.info("Security Group Enabled=" + str(sg_enabled))

    self.vif_type = portbindings.VIF_TYPE_OVS
    self.vif_details = {
        portbindings.CAP_PORT_FILTER: sg_enabled,
        portbindings.VIF_DETAILS_CONNECTIVITY:
            portbindings.CONNECTIVITY_L2
    }

    self.rpc = nsxv3_rpc.NSXv3AgentRpcClient(self.context)
    self.trunk = nsxv3_trunk.NSXv3TrunkDriver.create()
    self.qos = nsxv3_qos.NSXv3QosDriver.create(self.rpc)
    self.logging = nsxv3_logging.NSXv3LogDriver.create(self.rpc)

    # Register the log driver with the Neutron logging API drivers manager
    importutils.import_module('neutron.services.logapi.common.sg_validate')
    manager.register(resources.SECURITY_GROUP,
                     self.logging.register_callback_handler)
    LOG.info("Successfully registered NSXV3 log driver.")

    super(VMwareNSXv3MechanismDriver, self).__init__(self.agent_type,
                                                     self.vif_type,
                                                     self.vif_details)
    LOG.info("Initialized Mechanism Driver Type = " + str(self.agent_type))
def __init__(self, data):
    self._n_context = n_context.get_admin_context_without_session()
    self._data = data
    self._topic = data.pop('plugin_topic', None)
    self._interval = data.pop('report_interval', 0)
    self._state_rpc = n_agent_rpc.PluginReportStateAPI(
        self._topic)
def _report_state(self):
    try:
        self.agent_state.get('configurations').update(
            self.cache.get_state())
        ctx = context.get_admin_context_without_session()
        agent_status = self.state_rpc.report_state(
            ctx, self.agent_state, True)
        if agent_status == agent_consts.AGENT_REVIVED:
            LOG.info("Agent has just been revived. "
                     "Scheduling full sync")
            self.schedule_resync("Agent has just been revived")
    except AttributeError:
        # This means the server does not support report_state
        LOG.warning("Neutron server does not support state report. "
                    "State report for this agent will be disabled.")
        self.heartbeat.stop()
        self.run()
        return
    except Exception:
        self.failed_report_state = True
        LOG.exception("Failed reporting state!")
        return
    if self.failed_report_state:
        self.failed_report_state = False
        LOG.info("Successfully reported state after a previous failure.")
    if self.agent_state.pop('start_flag', None):
        self.run()
def start(self):
    self.prevent_arp_spoofing = cfg.CONF.AGENT.prevent_arp_spoofing

    # stores all configured ports on agent
    self.network_ports = collections.defaultdict(list)
    # flag to do a sync after revival
    self.fullsync = False
    self.context = context.get_admin_context_without_session()
    self.setup_rpc()
    self.init_extension_manager(self.connection)

    configurations = {'extensions': self.ext_manager.names()}
    configurations.update(self.mgr.get_agent_configurations())

    # TODO(mangelajo): optimize resource_versions (see ovs agent)
    self.agent_state = {
        'binary': self.agent_binary,
        'host': cfg.CONF.host,
        'topic': constants.L2_AGENT_TOPIC,
        'configurations': configurations,
        'agent_type': self.agent_type,
        'resource_versions': resources.LOCAL_RESOURCE_VERSIONS,
        'start_flag': True}

    report_interval = cfg.CONF.AGENT.report_interval
    if report_interval:
        heartbeat = loopingcall.FixedIntervalLoopingCall(
            self._report_state)
        heartbeat.start(interval=report_interval)

    capabilities.notify_init_event(self.agent_type, self)
    # The initialization is complete; we can start receiving messages
    self.connection.consume_in_threads()

    self.daemon_loop()
def _report_state(self):
    try:
        ctx = context.get_admin_context_without_session()
        self.state_rpc.report_state(ctx, self.agent_state, True)
        self.agent_state['start_flag'] = False
    except Exception:
        LOG.exception("Failed reporting state!")
        self.handle_report_state_failure()
def setUp(self):
    super(LoggingExtensionTestFramework, self).setUp()
    cfg.CONF.set_override('extensions', ['log'], group='agent')
    self.context = neutron_context.get_admin_context_without_session()
    self._set_resource_rpc_mock()
    if self.firewall_name != 'openvswitch':
        self.skipTest("Logging extension doesn't support firewall driver"
                      " %s at this time" % self.firewall_name)
    self.log_driver = self.initialize_ovs_fw_log()
def initialize(self):
    super(AristaHAScaleSimulationDriver, self).initialize()
    self.context = context.get_admin_context_without_session()
    # Subscribe to port updates to force ports to active after binding
    # since a fake virt driver is being used, so OVS will never see
    # the libvirt interfaces come up, triggering the OVS provisioning
    self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
    registry.subscribe(self._port_update_callback,
                       resources.PORT, events.AFTER_UPDATE)
def __init__(self, trunk_manager):
    self.timeout = DEFAULT_WAIT_FOR_PORT_TIMEOUT
    self._context = n_context.get_admin_context_without_session()
    self.trunk_manager = trunk_manager
    self.trunk_rpc = agent.TrunkStub()

    registry.subscribe(self.process_trunk_port_events,
                       ovs_agent_constants.OVSDB_RESOURCE,
                       events.AFTER_READ)
def __init__(self, host, conf=None):
    super(BgpDrAgent, self).__init__()
    self.initialize_driver(conf)
    self.needs_resync_reasons = collections.defaultdict(list)
    self.needs_full_sync_reason = None
    self.cache = BgpSpeakerCache()
    self.context = context.get_admin_context_without_session()
    self.plugin_rpc = BgpDrPluginApi(bgp_consts.BGP_PLUGIN,
                                     self.context, host)
def _setup_rpc(self):
    self.context = context.get_admin_context_without_session()
    # Set GBP rpc API
    self.of_rpc = rpc.GBPServerRpcApi(rpc.TOPIC_OPFLEX)
    self.topic = topics.AGENT
    self.endpoints = [self]
    consumers = [[rpc.TOPIC_OPFLEX, rpc.ENDPOINT, topics.UPDATE]]
    self.connection = agent_rpc.create_consumers(self.endpoints,
                                                 self.topic, consumers,
                                                 start_listening=True)
def initialize(self, connection, driver_type): LOG.debug("Isoflat agent initialize called") self.context = qcontext.get_admin_context_without_session() self._setup_rpc() self.driver = manager.NeutronManager.load_class_for_provider( 'neutron_isoflat.isoflat.agent_drivers', driver_type)(self) self.driver.consume_api(self.agent_api) self.driver.setup_isoflat_bridges() self.driver.save_bridge_mappings() self.driver.initialize()
def __init__(self, vsphere_hostname, vsphere_login, vsphere_password,
             bridge_mappings, polling_interval):
    super(DVSAgent, self).__init__()

    self.agent_state = {
        'binary': 'neutron-dvs-agent',
        'host': cfg.CONF.host,
        'topic': n_const.L2_AGENT_TOPIC,
        'configurations': {
            'bridge_mappings': bridge_mappings,
            'vsphere_hostname': vsphere_hostname
        },
        'agent_type': 'DVS agent',
        'start_flag': True
    }
    report_interval = cfg.CONF.DVS_AGENT.report_interval
    self.polling_interval = polling_interval
    # Security group agent support
    self.context = context.get_admin_context_without_session()
    self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
    self.sg_agent = dvs_rpc.DVSSecurityGroupRpc(
        self.context, self.sg_plugin_rpc, defer_refresh_firewall=True)

    self.setup_rpc()
    self.run_daemon_loop = True
    self.iter_num = 0

    self.network_map = dvs_util.create_network_map_from_config(
        cfg.CONF.ML2_VMWARE, pg_cache=True)
    uplink_map = dvs_util.create_uplink_map_from_config(
        cfg.CONF.ML2_VMWARE, self.network_map)
    for phys, dvs in six.iteritems(self.network_map):
        if phys in uplink_map:
            dvs.load_uplinks(phys, uplink_map[phys])
    self.updated_ports = set()
    self.deleted_ports = set()
    self.known_ports = set()
    self.added_ports = set()
    self.booked_ports = set()
    LOG.info(_LI("Agent out of sync with plugin!"))
    connected_ports = self._get_dvs_ports()
    self.added_ports = connected_ports
    if cfg.CONF.DVS.clean_on_restart:
        self._clean_up_vsphere_extra_resources(connected_ports)
    self.fullsync = False
    # The initialization is complete; we can start receiving messages
    self.connection.consume_in_threads()

    if report_interval:
        heartbeat = loopingcall.FixedIntervalLoopingCall(
            self._report_state)
        heartbeat.start(interval=report_interval)
def _pull_all_local_ip_associations(self):
    context = lib_ctx.get_admin_context_without_session()

    assoc_list = self.resource_rpc.bulk_pull(
        context, resources.LOCAL_IP_ASSOCIATION)
    for assoc in assoc_list:
        port_id = assoc.fixed_port_id
        lip_id = assoc.local_ip_id
        self.local_ip_updates['added'][port_id][lip_id] = assoc
        # Notify agent about port update to handle Local IP flows
        self._notify_port_updated(context, port_id)
def __init__(self):
    self.vif_type = dvs_const.DVS
    sg_enabled = securitygroups_rpc.is_firewall_enabled()
    self.vif_details = {portbindings.CAP_PORT_FILTER: sg_enabled,
                        portbindings.OVS_HYBRID_PLUG: sg_enabled}
    self.context = context.get_admin_context_without_session()
    self.dvs_notifier = dvs_agent_rpc_api.DVSClientAPI(self.context)
    LOG.info(_LI('DVS_notifier'))
    super(VMwareDVSMechanismDriver, self).__init__(
        dvs_const.AGENT_TYPE_DVS,
        self.vif_type,
        self.vif_details)
def initialize(self, connection, driver_type): """Initialize agent extension.""" LOG.info("P4 Agent starting.") self.agent_driver = manager.NeutronManager.load_class_for_provider( 'networking_p4.p4.agent_drivers', 'bmv2')() # TODO: driver_type is hardcoded self.agent_driver.consume_api(self.agent_api) self.rpc_ctx = n_context.get_admin_context_without_session() self.agent_driver.initialize(self.rpc_ctx) LOG.info("P4 BMv2 started") self._setup_rpc()
def __init__(self):
    self.vif_type = dvs_const.DVS
    sg_enabled = securitygroups_rpc.is_firewall_enabled()
    self.vif_details = {
        portbindings.CAP_PORT_FILTER: sg_enabled,
        portbindings.OVS_HYBRID_PLUG: sg_enabled
    }
    self.context = context.get_admin_context_without_session()
    self.dvs_notifier = dvs_agent_rpc_api.DVSClientAPI(self.context)
    LOG.info(_LI('DVS_notifier'))
    super(VMwareDVSMechanismDriver, self).__init__(dvs_const.AGENT_TYPE_DVS,
                                                   self.vif_type,
                                                   self.vif_details)
def __init__(self, vsphere_hostname, vsphere_login, vsphere_password,
             bridge_mappings, polling_interval):
    super(DVSAgent, self).__init__()

    self.agent_state = {
        'binary': 'neutron-dvs-agent',
        'host': cfg.CONF.host,
        'topic': n_const.L2_AGENT_TOPIC,
        'configurations': {'bridge_mappings': bridge_mappings,
                           'vsphere_hostname': vsphere_hostname},
        'agent_type': 'DVS agent',
        'start_flag': True}
    report_interval = cfg.CONF.DVS_AGENT.report_interval
    self.polling_interval = polling_interval
    # Security group agent support
    self.context = context.get_admin_context_without_session()
    self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
    self.sg_agent = dvs_rpc.DVSSecurityGroupRpc(
        self.context, self.sg_plugin_rpc, defer_refresh_firewall=True)
    self.setup_rpc()
    self.run_daemon_loop = True
    self.iter_num = 0

    self.network_map = dvs_util.create_network_map_from_config(
        cfg.CONF.ML2_VMWARE, pg_cache=True)
    uplink_map = dvs_util.create_uplink_map_from_config(
        cfg.CONF.ML2_VMWARE, self.network_map)
    for phys, dvs in six.iteritems(self.network_map):
        if phys in uplink_map:
            dvs.load_uplinks(phys, uplink_map[phys])
    self.updated_ports = set()
    self.deleted_ports = set()
    self.known_ports = set()
    self.added_ports = set()
    self.booked_ports = set()
    LOG.info(_LI("Agent out of sync with plugin!"))
    connected_ports = self._get_dvs_ports()
    self.added_ports = connected_ports
    if cfg.CONF.DVS.clean_on_restart:
        self._clean_up_vsphere_extra_resources(connected_ports)
    self.fullsync = False
    # The initialization is complete; we can start receiving messages
    self.connection.consume_in_threads()

    if report_interval:
        heartbeat = loopingcall.FixedIntervalLoopingCall(
            self._report_state)
        heartbeat.start(interval=report_interval)
def __init__(self, physical_devices_mappings, exclude_devices,
             polling_interval, rp_bandwidths, rp_inventory_defaults,
             rp_hypervisors):

    self.polling_interval = polling_interval
    self.network_ports = collections.defaultdict(list)
    self.conf = cfg.CONF
    self.device_mappings = physical_devices_mappings
    self.exclude_devices = exclude_devices
    self.setup_eswitch_mgr(physical_devices_mappings, exclude_devices)

    # Stores port update notifications for processing in the main loop
    self.updated_devices = set()
    # Stores <mac, pci_slot> pairs for ports whose binding has been
    # activated.
    self.activated_bindings = set()

    self.context = context.get_admin_context_without_session()
    self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
    self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
    self.sg_agent = agent_sg_rpc.SecurityGroupAgentRpc(
        self.context, self.sg_plugin_rpc)
    self._setup_rpc()
    self.ext_manager = self._create_agent_extension_manager(
        self.connection)

    configurations = {
        'device_mappings': physical_devices_mappings,
        n_constants.RP_BANDWIDTHS: rp_bandwidths,
        n_constants.RP_INVENTORY_DEFAULTS: rp_inventory_defaults,
        'resource_provider_hypervisors': rp_hypervisors,
        'extensions': self.ext_manager.names()
    }

    # TODO(mangelajo): optimize resource_versions (see ovs agent)
    self.agent_state = {
        'binary': n_constants.AGENT_PROCESS_NIC_SWITCH,
        'host': self.conf.host,
        'topic': n_constants.L2_AGENT_TOPIC,
        'configurations': configurations,
        'agent_type': n_constants.AGENT_TYPE_NIC_SWITCH,
        'resource_versions': resources.LOCAL_RESOURCE_VERSIONS,
        'start_flag': True
    }

    # The initialization is complete; we can start receiving messages
    self.connection.consume_in_threads()

    # Initialize iteration counter
    self.iter_num = 0
def __init__(self, host, conf=None):
    self.conf = conf or cfg.CONF
    self._load_drivers()
    self.context = context.get_admin_context_without_session()
    self.metering_loop = loopingcall.FixedIntervalLoopingCall(
        self._metering_loop)
    measure_interval = self.conf.measure_interval
    self.last_report = 0
    self.metering_loop.start(interval=measure_interval)
    self.host = host

    self.label_tenant_id = {}
    self.routers = {}
    self.metering_infos = {}
    super(MeteringAgent, self).__init__(host=host)
def __init__(self, conf): """Initialize BIG-IQ Agent Manager.""" super(F5BIGIQAgentManager, self).__init__(conf) LOG.debug("Initializing BIG-IQ Agent Manager") self.conf = conf self.context = ncontext.get_admin_context_without_session() self.serializer = None filter_names = [name for name in self.conf.bigip_filters.split(",")] self.scheduler = scheduler.BIGIPScheduler(filter_names) # TODO: replace this map with a db self._lb_bigip_map = {} self.agent_host = self.conf.host + ":" + self.conf.agent_id global PERIODIC_TASK_INTERVAL PERIODIC_TASK_INTERVAL = self.conf.periodic_interval # Initialize agent configurations agent_configurations = ({'bigiq_host': self.conf.bigiq_host}) # Initialize agent-state to a default values self.admin_state_up = self.conf.start_agent_admin_state_up self.agent_state = { 'binary': constants.AGENT_BINARY_NAME, 'host': self.agent_host, 'topic': constants.TOPIC_LBAASV2_BIGIQ_AGENT, 'agent_type': constants.LBAASV2_BIGIQ_AGENT_TYPE, 'configurations': agent_configurations, 'start_flag': True } # Setup RPC for communications to and from controller self._setup_rpc() # Mark this agent admin_state_up per startup policy if (self.admin_state_up): self.plugin_rpc.set_agent_admin_state(self.admin_state_up) # Start state reporting of agent to Neutron report_interval = self.conf.AGENT.report_interval if report_interval: heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) heartbeat.start(interval=report_interval)
def setUp(self):
    super(LocalIPAgentExtensionTestCase, self).setUp()
    self.context = context.get_admin_context_without_session()
    self.local_ip_ext = local_ip_ext.LocalIPAgentExtension()

    self.plugin_rpc = mock.Mock()
    self.agent_api = ovs_ext_api.OVSAgentExtensionAPI(
        int_br=mock.Mock(),
        tun_br=mock.Mock(),
        phys_brs=None,
        plugin_rpc=self.plugin_rpc)
    self.local_ip_ext.consume_api(self.agent_api)
    with mock.patch.object(
            self.local_ip_ext, '_pull_all_local_ip_associations'):
        self.local_ip_ext.initialize(mock.Mock(), 'ovs')
    self.int_br = self.local_ip_ext.int_br
def __init__(self, host, conf=None):
    self.conf = conf or cfg.CONF
    self._load_drivers()
    self.context = context.get_admin_context_without_session()
    self.metering_loop = loopingcall.FixedIntervalLoopingCall(
        self._metering_loop
    )
    measure_interval = self.conf.measure_interval
    self.last_report = 0
    self.metering_loop.start(interval=measure_interval)
    self.host = host

    self.label_tenant_id = {}
    self.routers = {}
    self.metering_infos = {}
    super(MeteringAgent, self).__init__(host=host)
def _setup_rpc(self):
    self.topic = topics.AGENT
    self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
    self.sg_plugin_rpc = api_sg_rpc.SecurityGroupServerRpcApi(
        topics.PLUGIN)
    self.context = q_context.get_admin_context_without_session()
    self.endpoints = [self]
    consumers = [[topics.PORT, topics.UPDATE],
                 [topics.SECURITY_GROUP, topics.UPDATE]]
    self.connection = agent_rpc.create_consumers(self.endpoints,
                                                 self.topic,
                                                 consumers)
    self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
    report_interval = cfg.CONF.AGENT.report_interval
    if report_interval:
        heartbeat = loopingcall.FixedIntervalLoopingCall(
            self._report_state)
        heartbeat.start(interval=report_interval)
def __init__(self, physical_devices_mappings, exclude_devices,
             polling_interval, rp_bandwidths, rp_inventory_defaults):

    self.polling_interval = polling_interval
    self.network_ports = collections.defaultdict(list)
    self.conf = cfg.CONF
    self.device_mappings = physical_devices_mappings
    self.exclude_devices = exclude_devices
    self.setup_eswitch_mgr(physical_devices_mappings, exclude_devices)

    # Stores port update notifications for processing in the main loop
    self.updated_devices = set()
    # Stores <mac, pci_slot> pairs for ports whose binding has been
    # activated.
    self.activated_bindings = set()

    self.context = context.get_admin_context_without_session()
    self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
    self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
    self.sg_agent = agent_sg_rpc.SecurityGroupAgentRpc(
        self.context, self.sg_plugin_rpc)
    self._setup_rpc()
    self.ext_manager = self._create_agent_extension_manager(
        self.connection)

    configurations = {'device_mappings': physical_devices_mappings,
                      n_constants.RP_BANDWIDTHS: rp_bandwidths,
                      n_constants.RP_INVENTORY_DEFAULTS:
                          rp_inventory_defaults,
                      'extensions': self.ext_manager.names()}

    # TODO(mangelajo): optimize resource_versions (see ovs agent)
    self.agent_state = {
        'binary': 'neutron-sriov-nic-agent',
        'host': self.conf.host,
        'topic': n_constants.L2_AGENT_TOPIC,
        'configurations': configurations,
        'agent_type': n_constants.AGENT_TYPE_NIC_SWITCH,
        'resource_versions': resources.LOCAL_RESOURCE_VERSIONS,
        'start_flag': True}

    # The initialization is complete; we can start receiving messages
    self.connection.consume_in_threads()

    # Initialize iteration counter
    self.iter_num = 0
def plug_interface(self, network_id, network_segment, device_name,
                   device_owner):
    LOG.info("KaloomKVSManager plug_interface called")
    self.kvs_rpc = kvs_utils.kvsPluginApi(a_const.TOPIC_KNID)
    context = _context.get_admin_context_without_session()
    result = self.kvs_rpc.get_knid(context, network_id)
    LOG.info("KaloomKVSManager rpc get_knid called %s", result)
    if a_const.KVS_KNID not in result:
        LOG.error("KaloomKVSManager rpc get_knid failed result: %s",
                  result)
        return False
    knid = result[a_const.KVS_KNID]

    if device_name.startswith(constants.TAP_DEVICE_PREFIX):
        kvs_device_name = device_name
    elif device_owner == "network:router_gateway":
        kvs_device_name = namespaces.EXTERNAL_DEV_PREFIX + device_name
    elif device_owner == "network:router_interface":
        kvs_device_name = namespaces.INTERNAL_DEV_PREFIX + device_name
    else:
        kvs_device_name = self.get_vhost_path(device_name)

    success, port_index = kvs_net.attach_interface(
        network_id, network_segment.network_type,
        network_segment.physical_network, knid, kvs_device_name,
        device_owner, network_segment.mtu, self.vhostuser_socket_dir,
        a_const.KVS_VHOSTUSER_PREFIX)

    # Add a MAC entry only for vhostuser interfaces; it is not required
    # for vdev interfaces.
    if not kvs_device_name.startswith(
            (constants.TAP_DEVICE_PREFIX, namespaces.INTERNAL_DEV_PREFIX,
             namespaces.EXTERNAL_DEV_PREFIX)) and success is True:
        result = self.kvs_rpc.get_mac(context, device_name)
        if a_const.KVS_MAC in result:
            mac = result[a_const.KVS_MAC]
            LOG.info("KaloomKVSManager rpc get_mac called %s", result)
            return kvs_net.add_mac_entry(knid, mac, port_index)
        else:
            LOG.error("KaloomKVSManager rpc get_mac failed result: %s",
                      result)
            return False
    else:
        return success
def _setup_server_rpc(self):
    self.agent_id = 'zvm_agent_%s' % self._host
    self.topic = topics.AGENT
    self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
    self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)

    self.context = context.get_admin_context_without_session()

    self.endpoints = [self]
    consumers = [[topics.PORT, topics.UPDATE],
                 [topics.NETWORK, topics.DELETE]]
    self.connection = agent_rpc.create_consumers(self.endpoints,
                                                 self.topic,
                                                 consumers)

    report_interval = CONF.AGENT.report_interval
    if report_interval:
        heartbeat = loopingcall.FixedIntervalLoopingCall(
            self._report_state)
        heartbeat.start(interval=report_interval)
def __init__(self, physical_devices_mappings, exclude_devices,
             polling_interval):

    self.polling_interval = polling_interval
    self.network_ports = collections.defaultdict(list)
    self.conf = cfg.CONF
    self.device_mappings = physical_devices_mappings
    self.exclude_devices = exclude_devices
    self.setup_eswitch_mgr(physical_devices_mappings, exclude_devices)

    # Stores port update notifications for processing in the main loop
    self.updated_devices = set()

    self.context = context.get_admin_context_without_session()
    self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
    self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
    self.sg_agent = agent_sg_rpc.SecurityGroupAgentRpc(
        self.context, self.sg_plugin_rpc)
    self._setup_rpc()
    self.ext_manager = self._create_agent_extension_manager(
        self.connection)

    configurations = {
        'device_mappings': physical_devices_mappings,
        'extensions': self.ext_manager.names()
    }

    # TODO(mangelajo): optimize resource_versions (see ovs agent)
    self.agent_state = {
        'binary': 'neutron-sriov-nic-agent',
        'host': self.conf.host,
        'topic': n_constants.L2_AGENT_TOPIC,
        'configurations': configurations,
        'agent_type': n_constants.AGENT_TYPE_NIC_SWITCH,
        'resource_versions': resources.LOCAL_RESOURCE_VERSIONS,
        'start_flag': True
    }

    # The initialization is complete; we can start receiving messages
    self.connection.consume_in_threads()

    # Initialize iteration counter
    self.iter_num = 0
def _init_state_reporting(self):
    self.context = context.get_admin_context_without_session()
    self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
    self.agent_state = {
        'binary': 'neutron-metadata-agent',
        'host': cfg.CONF.host,
        'topic': 'N/A',
        'configurations': {
            'metadata_proxy_socket': cfg.CONF.metadata_proxy_socket,
            'nova_metadata_host': cfg.CONF.nova_metadata_host,
            'nova_metadata_port': cfg.CONF.nova_metadata_port,
            'log_agent_heartbeats': cfg.CONF.AGENT.log_agent_heartbeats,
        },
        'start_flag': True,
        'agent_type': constants.AGENT_TYPE_METADATA}
    report_interval = cfg.CONF.AGENT.report_interval
    if report_interval:
        self.heartbeat = loopingcall.FixedIntervalLoopingCall(
            self._report_state)
        self.heartbeat.start(interval=report_interval)
def __init__(self, vpn_service, host):
    # TODO(pc_m): Once all driver implementations no longer need
    # vpn_service argument, replace with just config argument.
    self.host = host
    self.conn = n_rpc.create_connection()
    context = ctx.get_admin_context_without_session()
    node_topic = '%s.%s' % (topics.CISCO_IPSEC_AGENT_TOPIC, self.host)

    self.service_state = {}

    self.endpoints = [self]
    self.conn.create_consumer(node_topic, self.endpoints, fanout=False)
    self.conn.consume_in_threads()
    self.agent_rpc = (CiscoCsrIPsecVpnDriverApi(
        topics.CISCO_IPSEC_DRIVER_TOPIC))
    self.periodic_report = loopingcall.FixedIntervalLoopingCall(
        self.report_status, context)
    self.periodic_report.start(
        interval=vpn_service.conf.cisco_csr_ipsec.status_check_interval)
    LOG.debug("Device driver initialized for %s", node_topic)
def __init__(self, vpn_service, host):
    # TODO(pc_m) Replace vpn_service with config arg, once all driver
    # implementations no longer need vpn_service.
    self.conf = vpn_service.conf
    self.host = host
    self.conn = n_rpc.Connection()
    self.context = context.get_admin_context_without_session()
    self.topic = topics.IPSEC_AGENT_TOPIC
    node_topic = '%s.%s' % (self.topic, self.host)

    self.processes = {}
    self.routers = {}
    self.process_status_cache = {}

    self.endpoints = [self]
    self.conn.create_consumer(node_topic, self.endpoints, fanout=False)
    self.conn.consume_in_threads()
    self.agent_rpc = IPsecVpnDriverApi(topics.IPSEC_DRIVER_TOPIC)
    self.process_status_cache_check = loopingcall.FixedIntervalLoopingCall(
        self.report_status, self.context)
    self.process_status_cache_check.start(
        interval=self.conf.ipsec.ipsec_status_check_interval)
def __init__(self, host, conf=None):
    if conf:
        self.conf = conf
    else:
        self.conf = cfg.CONF
    self.router_info = {}

    self._check_config_params()

    self.process_monitor = external_process.ProcessMonitor(
        config=self.conf,
        resource_type='router')

    self.driver = common_utils.load_interface_driver(self.conf)

    self._context = n_context.get_admin_context_without_session()
    self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host)
    self.fullsync = True
    self.sync_routers_chunk_size = SYNC_ROUTERS_MAX_CHUNK_SIZE

    # Get the list of service plugins from Neutron Server
    # This is the first place where we contact neutron-server on startup
    # so retry in case it's not ready to respond.
    while True:
        try:
            self.neutron_service_plugins = (
                self.plugin_rpc.get_service_plugin_list(self.context))
        except oslo_messaging.MessagingTimeout as e:
            LOG.warning('l3-agent cannot contact neutron server '
                        'to retrieve service plugins enabled. '
                        'Check connectivity to neutron server. '
                        'Retrying... '
                        'Detailed message: %(msg)s.', {'msg': e})
            continue
        break

    self.init_extension_manager(self.plugin_rpc)

    self.metadata_driver = None
    if self.conf.enable_metadata_proxy:
        self.metadata_driver = metadata_driver.MetadataDriver(self)

    self.namespaces_manager = namespace_manager.NamespaceManager(
        self.conf,
        self.driver,
        self.metadata_driver)

    self._queue = queue.ResourceProcessingQueue()
    super(L3NATAgent, self).__init__(host=self.conf.host)

    self.target_ex_net_id = None
    self.use_ipv6 = ipv6_utils.is_enabled_and_bind_by_default()

    self.pd = pd.PrefixDelegation(self.context, self.process_monitor,
                                  self.driver,
                                  self.plugin_rpc.process_prefix_update,
                                  self.create_pd_router_update,
                                  self.conf)

    # Consume network updates to trigger router resync
    consumers = [[topics.NETWORK, topics.UPDATE]]
    agent_rpc.create_consumers([self], topics.AGENT, consumers)

    # We set HA network port status to DOWN to let l2 agent update it
    # to ACTIVE after wiring. This allows us to spawn keepalived only
    # when l2 agent finished wiring the port.
    try:
        self.plugin_rpc.update_all_ha_network_port_statuses(self.context)
    except Exception:
        LOG.exception('update_all_ha_network_port_statuses failed')
def initialize(self): LOG.debug("Experimental L2 population driver") self.rpc_ctx = n_context.get_admin_context_without_session()
def __init__(self, host, conf=None):
    if conf:
        self.conf = conf
    else:
        self.conf = cfg.CONF
    self.router_info = {}

    self._check_config_params()

    self.process_monitor = external_process.ProcessMonitor(
        config=self.conf,
        resource_type='router')

    self.driver = common_utils.load_interface_driver(self.conf)

    self._context = n_context.get_admin_context_without_session()
    self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host)
    self.fullsync = True
    self.sync_routers_chunk_size = SYNC_ROUTERS_MAX_CHUNK_SIZE

    # Get the list of service plugins from Neutron Server
    # This is the first place where we contact neutron-server on startup
    # so retry in case it's not ready to respond.
    while True:
        try:
            self.neutron_service_plugins = (
                self.plugin_rpc.get_service_plugin_list(self.context))
        except oslo_messaging.RemoteError as e:
            LOG.warning(_LW('l3-agent cannot check service plugins '
                            'enabled at the neutron server at startup '
                            'due to RPC error. It happens '
                            'when the server does not support this '
                            'RPC API. If the error is '
                            'UnsupportedVersion you can ignore this '
                            'warning. Detail message: %s'), e)
            self.neutron_service_plugins = None
        except oslo_messaging.MessagingTimeout as e:
            LOG.warning(_LW('l3-agent cannot contact neutron server '
                            'to retrieve service plugins enabled. '
                            'Check connectivity to neutron server. '
                            'Retrying... '
                            'Detailed message: %(msg)s.'), {'msg': e})
            continue
        break

    self.init_extension_manager(self.plugin_rpc)

    self.metadata_driver = None
    if self.conf.enable_metadata_proxy:
        self.metadata_driver = metadata_driver.MetadataDriver(self)

    self.namespaces_manager = namespace_manager.NamespaceManager(
        self.conf,
        self.driver,
        self.metadata_driver)

    self._queue = queue.RouterProcessingQueue()
    super(L3NATAgent, self).__init__(host=self.conf.host)

    self.target_ex_net_id = None
    self.use_ipv6 = ipv6_utils.is_enabled_and_bind_by_default()

    self.pd = pd.PrefixDelegation(self.context, self.process_monitor,
                                  self.driver,
                                  self.plugin_rpc.process_prefix_update,
                                  self.create_pd_router_update,
                                  self.conf)
def __init__(self, conf):
    self.conf = conf
    self._cache = cache.get_cache(self.conf)

    self.plugin_rpc = MetadataPluginAPI(topics.PLUGIN)
    self.context = context.get_admin_context_without_session()
def __init__(self, host, conf=None):
    if conf:
        self.conf = conf
    else:
        self.conf = cfg.CONF
    self.router_info = {}

    self._check_config_params()

    self.process_monitor = external_process.ProcessMonitor(
        config=self.conf,
        resource_type='router')

    self.driver = common_utils.load_interface_driver(self.conf)

    self._context = n_context.get_admin_context_without_session()
    self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host)
    self.fullsync = True
    self.sync_routers_chunk_size = SYNC_ROUTERS_MAX_CHUNK_SIZE

    # Get the HA router count from Neutron Server
    # This is the first place where we contact neutron-server on startup
    # so retry in case it's not ready to respond.
    while True:
        try:
            self.ha_router_count = int(
                self.plugin_rpc.get_host_ha_router_count(self.context))
        except oslo_messaging.MessagingTimeout as e:
            LOG.warning('l3-agent cannot contact neutron server '
                        'to retrieve HA router count. '
                        'Check connectivity to neutron server. '
                        'Retrying... '
                        'Detailed message: %(msg)s.', {'msg': e})
            continue
        break
    LOG.info("Agent HA routers count %s", self.ha_router_count)

    self.init_extension_manager(self.plugin_rpc)

    self.metadata_driver = None
    if self.conf.enable_metadata_proxy:
        self.metadata_driver = metadata_driver.MetadataDriver(self)

    self.namespaces_manager = namespace_manager.NamespaceManager(
        self.conf,
        self.driver,
        self.metadata_driver)

    # L3 agent router processing green pool
    self._pool = eventlet.GreenPool(size=ROUTER_PROCESS_GREENLET_MIN)
    self._queue = queue.ResourceProcessingQueue()
    super(L3NATAgent, self).__init__(host=self.conf.host)

    self.target_ex_net_id = None
    self.use_ipv6 = ipv6_utils.is_enabled_and_bind_by_default()

    self.pd = pd.PrefixDelegation(self.context, self.process_monitor,
                                  self.driver,
                                  self.plugin_rpc.process_prefix_update,
                                  self.create_pd_router_update,
                                  self.conf)

    # Consume network updates to trigger router resync
    consumers = [[topics.NETWORK, topics.UPDATE]]
    agent_rpc.create_consumers([self], topics.AGENT, consumers)

    self._check_ha_router_process_status()
def __init__(self, trunk_manager):
    self.timeout = DEFAULT_WAIT_FOR_PORT_TIMEOUT
    self._context = n_context.get_admin_context_without_session()
    self.trunk_manager = trunk_manager
    self.trunk_rpc = agent.TrunkStub()