def __init__(self):
    """Initialize the NEC core plugin.

    Wires up the OFC manager, port bindings, RPC, the L3 notifier,
    the network/router schedulers, and a dispatch table mapping port
    device owners to create/delete handlers.

    NOTE(review): no super().__init__() call is visible here, unlike the
    sibling NEC plugin variant — confirm the base class needs none.
    """
    ndb.initialize()
    self.ofc = ofc_manager.OFCManager()
    self.base_binding_dict = self._get_base_binding_dict()
    portbindings_base.register_port_dict_function()
    # Make this package's API extensions discoverable by neutron.
    neutron_extensions.append_api_extensions_path(extensions.__path__)
    self.setup_rpc()
    self.l3_rpc_notifier = nec_router.L3AgentNotifyAPI()
    # Schedulers are loaded dynamically from configured driver paths.
    self.network_scheduler = importutils.import_object(
        config.CONF.network_scheduler_driver
    )
    self.router_scheduler = importutils.import_object(
        config.CONF.router_scheduler_driver
    )
    nec_router.load_driver(self, self.ofc)
    # Dispatch table: device_owner -> handler, with a catch-all
    # 'default' entry for ports that are not router interfaces/gateways.
    self.port_handlers = {
        'create': {
            const.DEVICE_OWNER_ROUTER_GW: self.create_router_port,
            const.DEVICE_OWNER_ROUTER_INTF: self.create_router_port,
            'default': self.activate_port_if_ready,
        },
        'delete': {
            const.DEVICE_OWNER_ROUTER_GW: self.delete_router_port,
            const.DEVICE_OWNER_ROUTER_INTF: self.delete_router_port,
            'default': self.deactivate_port,
        }
    }
def __init__(self):
    """Initialize the Nuage core plugin and its default net-partition."""
    super(NuagePlugin, self).__init__()
    # Expose this package's API extensions to neutron's extension loader.
    neutron_extensions.append_api_extensions_path(extensions.__path__)
    # Register Nuage config options, then bring up the VSD client.
    config.nuage_register_cfg_opts()
    self.nuageclient_init()
    # Make sure the configured default net-partition exists up front.
    default_netpart = cfg.CONF.RESTPROXY.default_net_partition_name
    self._create_default_net_partition(default_netpart)
def __init__(self, server_timeout=None):
    """Initialize the Big Switch REST proxy plugin (RPC variant).

    :param server_timeout: timeout handed to the controller ServerPool;
        None presumably selects the pool's default — confirm in
        servermanager.ServerPool.
    """
    super(NeutronRestProxyV2, self).__init__()
    LOG.info(_('NeutronRestProxy: Starting plugin. Version=%s'),
             version_string_with_vcs())
    pl_config.register_config()
    # Include the BigSwitch Extensions path in the api_extensions
    neutron_extensions.append_api_extensions_path(extensions.__path__)
    self.add_meta_server_route = cfg.CONF.RESTPROXY.add_meta_server_route
    # init network ctrl connections
    self.servers = servermanager.ServerPool(server_timeout)
    # init dhcp support
    self.topic = topics.PLUGIN
    self.network_scheduler = importutils.import_object(
        cfg.CONF.network_scheduler_driver
    )
    self._dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
    self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
        self._dhcp_agent_notifier
    )
    # Wire up the RPC consumer for the plugin topic.
    self.conn = rpc.create_connection(new=True)
    self.callbacks = RpcProxy()
    self.dispatcher = self.callbacks.create_rpc_dispatcher()
    self.conn.create_consumer(self.topic, self.dispatcher,
                              fanout=False)
    # Consume from all consumers in a thread
    self.conn.consume_in_thread()
    if cfg.CONF.RESTPROXY.sync_data:
        # Push the full topology to the backend controller at startup.
        self._send_all_data()
    LOG.debug(_("NeutronRestProxyV2: initialization done"))
def __init__(self, server_timeout=None):
    """Initialize the Big Switch REST proxy plugin (thread-pool variant).

    :param server_timeout: timeout handed to the controller ServerPool;
        None presumably selects the pool's default — confirm in
        servermanager.ServerPool.
    """
    super(NeutronRestProxyV2, self).__init__()
    LOG.info(_('NeutronRestProxy: Starting plugin. Version=%s'),
             version_string_with_vcs())
    pl_config.register_config()
    # Green-thread pool used for async work; size is configurable.
    self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size)
    # Include the BigSwitch Extensions path in the api_extensions
    neutron_extensions.append_api_extensions_path(extensions.__path__)
    self.add_meta_server_route = cfg.CONF.RESTPROXY.add_meta_server_route
    # init network ctrl connections
    self.servers = servermanager.ServerPool(server_timeout)
    # Hand the pool a callback to fetch the full topology on demand.
    self.servers.get_topo_function = self._get_all_data
    self.servers.get_topo_function_args = {'get_ports': True,
                                           'get_floating_ips': True,
                                           'get_routers': True}
    self.network_scheduler = importutils.import_object(
        cfg.CONF.network_scheduler_driver
    )
    # setup rpc for security and DHCP agents
    self._setup_rpc()
    if cfg.CONF.RESTPROXY.sync_data:
        # Push the full topology to the backend controller at startup.
        self._send_all_data()
    LOG.debug(_("NeutronRestProxyV2: initialization done"))
def __init__(self):
    """Initialize the MidoNet L2 gateway service plugin."""
    # Dynamically change the validators so that they are applicable to
    # the MidoNet implementation of L2GW. This patching is done before
    # super().__init__() so the base class sees the MidoNet validator.
    l2gw_validators.validate_gwdevice_list = (l2gw_midonet_validators.
                                              validate_gwdevice_list)
    # Make the upstream l2gateway extensions discoverable by neutron.
    neutron_extensions.append_api_extensions_path(l2gateway_ext.__path__)
    super(MidonetL2GatewayPlugin, self).__init__()
def __init__(self):
    """Initialize the MidoNet L3 service plugin."""
    super(MidonetL3ServicePlugin, self).__init__()
    # Make this package's API extensions visible to neutron's loader.
    neutron_extensions.append_api_extensions_path(extensions.__path__)
    # REST client for the MidoNet API, built from the [MIDONET] section.
    self.client = c_base.load_client(cfg.CONF.MIDONET)
def __init__(self):
    """Initialize the MidoNet L3 service plugin (DVR disabled)."""
    super(MidonetL3ServicePlugin, self).__init__()
    # Make this package's API extensions visible to neutron's loader.
    neutron_extensions.append_api_extensions_path(extensions.__path__)
    # Avoid any side effect from DVR getting set to true
    cfg.CONF.set_override("router_distributed", False)
    # REST client for the MidoNet API, built from the [MIDONET] section.
    self.client = c_base.load_client(cfg.CONF.MIDONET)
def __init__(self):
    """Initialize the APIC L3 service plugin."""
    # Appended before super().__init__() — presumably so the base
    # class's extension scan can find these paths; confirm before
    # reordering.
    extensions.append_api_extensions_path(extensions_pkg.__path__)
    super(ApicL3ServicePlugin, self).__init__()
    # Created lazily elsewhere; None until then.
    self.synchronizer = None
    # NB(tbachman): the mechanism driver depends on the existence
    # of the _apic_driver member. If this member is changed or
    # deleted, the code in the mechanism driver should be changed
    # as well.
    self._apic_driver = apic_driver.ApicL3Driver(self)
def __init__(self):
    """Initialize the Nuage FWaaS service plugin."""
    super(NuageFWaaSPlugin, self).__init__()
    # Local imports keep module import-time side effects out of callers.
    from neutron.api import extensions as neutron_extensions
    from neutron_fwaas import extensions
    neutron_extensions.append_api_extensions_path(extensions.__path__)
    # No firewall agent is involved here, so install a no-op RPC stub.
    self.agent_rpc = NuageNoOpAgent()
    # Cache the id of the configured default net-partition (enterprise).
    netpart_name = cfg.CONF.RESTPROXY.default_net_partition_name
    netpart = nuagedb.get_net_partition_by_name(db_api.get_session(),
                                                netpart_name)
    self.enterprise_id = netpart.id
def setUp(self):
    """Set up an NSX-v BGP plugin test: extensions, plugins, and stubs."""
    extensions.append_api_extensions_path(dr_extensions.__path__)
    service_plugins = {ext_bgp.BGP_EXT_ALIAS: BGP_PLUGIN}
    super(TestNSXvBgpPlugin, self).setUp(service_plugins=service_plugins)
    self.bgp_plugin = bgp_plugin.NSXvBgpPlugin()
    self.nsxv_driver = self.bgp_plugin.drivers['nsx-v']
    # Stub out backend validations — they are not under test here.
    self.nsxv_driver._validate_gateway_network = mock.Mock()
    self.nsxv_driver._validate_bgp_configuration_on_peer_esg = (
        mock.Mock())
    self.plugin = directory.get_plugin()
    # The core plugin also acts as the L3 plugin in this setup.
    self.l3plugin = self.plugin
    self.plugin.init_is_complete = True
    self.context = context.get_admin_context()
    self.project_id = 'dummy_project'
def __init__(self):
    """Load the model class."""
    self._model_name = config.CISCO.model_class
    self._model = importutils.import_object(self._model_name)
    # Mirror the model's name-mangled __native_bulk_support flag on this
    # plugin; defaults to False when the model defines none.
    native_bulk_attr_name = ("_%s__native_bulk_support" %
                             self._model.__class__.__name__)
    self.__native_bulk_support = getattr(self._model,
                                         native_bulk_attr_name, False)
    neutron_extensions.append_api_extensions_path(extensions.__path__)
    # Extend the fault map
    self._extend_fault_map()
    LOG.debug("Plugin initialization complete")
def __init__(self):
    """Initialize the NSX DVS core plugin."""
    super(NsxDvsV2, self).__init__()
    config.validate_config_options()
    # FIX: pass the value as a lazy log argument instead of eager
    # %-interpolation, so formatting only happens when DEBUG is enabled.
    LOG.debug('Driver support: DVS: %s', dvs_utils.dvs_is_enabled())
    neutron_extensions.append_api_extensions_path([vmware.NSX_EXT_PATH])
    self._dvs = dvs.DvsManager()
    # Common driver code
    self.base_binding_dict = {
        pbin.VIF_TYPE: pbin.VIF_TYPE_DVS,
        pbin.VIF_DETAILS: {
            # TODO(rkukura): Replace with new VIF security details
            pbin.CAP_PORT_FILTER:
                'security-group' in self.supported_extension_aliases}}
    self.setup_dhcpmeta_access()
def setUp(self):
    """Set up an ML2+ plugin test with the logger/test mech drivers."""
    # Enable the test mechanism driver to ensure that
    # we can successfully call through to all mechanism
    # driver apis.
    cfg.CONF.set_override('mechanism_drivers',
                          ['logger_plus', 'test'],
                          group='ml2')
    cfg.CONF.set_override('network_vlan_ranges', ['physnet1:1000:1099'],
                          group='ml2_type_vlan')
    # Make the GBP extensions discoverable before plugin setup.
    extensions.append_api_extensions_path(
        gbpservice.neutron.extensions.__path__)
    super(Ml2PlusPluginV2TestCase, self).setUp(PLUGIN_NAME)
    ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
    self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
    self.port_create_status = 'DOWN'
    self.plugin = directory.get_plugin()
def setUp(self, core_plugin=None, sc_plugin=None, service_plugins=None,
          ext_mgr=None):
    """Set up a service-chain DB test case.

    :param core_plugin: optional core plugin to load.
    :param sc_plugin: service-chain plugin class path; defaults to
        DB_GP_PLUGIN_KLASS.
    :param service_plugins: service plugin map; defaults to one holding
        the service-chain plugin.
    :param ext_mgr: optional extension manager; when omitted, the
        global instance is used and the extensions middleware is built.
    """
    extensions.append_api_extensions_path(gbp.neutron.extensions.__path__)
    if not sc_plugin:
        sc_plugin = DB_GP_PLUGIN_KLASS
    self.plugin = importutils.import_object(sc_plugin)
    if not service_plugins:
        service_plugins = {'sc_plugin_name': sc_plugin}
    super(ServiceChainDbTestCase, self).setUp(
        plugin=core_plugin,
        ext_mgr=ext_mgr,
        service_plugins=service_plugins
    )
    if not ext_mgr:
        # NOTE(review): nesting of the next line under this guard is
        # inferred from the collapsed source — confirm against upstream.
        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
def __init__(self): """Override initialization to avoid any RPC setup as MidoNet does not rely on any agent to implement FWaaS. Instead, set the rpc handling to the _MidonetFirewallDriver class so that it handles the FWaaS update events. """ # Register the FWaaS extensions path neutron_extensions.append_api_extensions_path(extensions.__path__) # Although callbacks are unnecessary in midonet, use FirewallCallbacks # because it contains useful methods for DB updates. self.callbacks = fw_plugin.FirewallCallbacks(self) self.client = c_base.load_client(cfg.CONF.MIDONET) self.agent_rpc = _MidonetFirewallDriver(self.client, self.callbacks) self.endpoints = [self.callbacks] # So that tests don't complain firewall_db.subscribe()
def __init__(self): super(MidonetMixinBase, self).__init__() # Instantiate MidoNet API client self.client = c_base.load_client(cfg.CONF.MIDONET) neutron_extensions.append_api_extensions_path(extensions.__path__) self.setup_rpc() self.base_binding_dict = { portbindings.VIF_TYPE: const.VIF_TYPE_MIDONET, portbindings.VNIC_TYPE: portbindings.VNIC_NORMAL, portbindings.VIF_DETAILS: { # TODO(rkukura): Replace with new VIF security details portbindings.CAP_PORT_FILTER: 'security-group' in self.supported_extension_aliases}} self.network_scheduler = importutils.import_object( cfg.CONF.network_scheduler_driver )
def __init__(self):
    """Initialize the DVS core plugin (optionally as a TVD sub-plugin)."""
    self._is_sub_plugin = tvd_utils.is_tvd_core_plugin()
    dvs_utils.dvs_register_exceptions()
    super(NsxDvsV2, self).__init__()
    # Pick the extension-driver list for the mode we are running in.
    if self._is_sub_plugin:
        extension_drivers = cfg.CONF.nsx_tvd.dvs_extension_drivers
    else:
        extension_drivers = cfg.CONF.nsx_extension_drivers
    self._extension_manager = nsx_managers.ExtensionManager(
        extension_drivers=extension_drivers)
    # FIX: pass the value as a lazy log argument instead of eager
    # %-interpolation, so formatting only happens when DEBUG is enabled.
    LOG.debug('Driver support: DVS: %s', dvs_utils.dvs_is_enabled())
    self._extension_manager.initialize()
    self.supported_extension_aliases.extend(
        self._extension_manager.extension_aliases())
    neutron_extensions.append_api_extensions_path(
        [vmware_nsx.NSX_EXT_PATH])
    self.cfg_group = 'dvs'  # group name for dvs section in nsx.ini
    self._dvs = dvs.SingleDvsManager()
    self.setup_dhcpmeta_access()
def __init__(self):
    """Initialize the NEC plugin implementation (OFC, L2/L3, RPC)."""
    super(NECPluginV2Impl, self).__init__()
    # Managers are handed safe_reference to avoid holding a strong
    # reference cycle back to this plugin instance.
    self.ofc = ofc_manager.OFCManager(self.safe_reference)
    self.l2mgr = l2manager.L2Manager(self.safe_reference)
    self.base_binding_dict = self._get_base_binding_dict()
    portbindings_base.register_port_dict_function()
    neutron_extensions.append_api_extensions_path(extensions.__path__)
    self.setup_rpc()
    self.l3_rpc_notifier = router_plugin.L3AgentNotifyAPI()
    # Schedulers are loaded dynamically from configured driver paths.
    self.network_scheduler = importutils.import_object(
        cfg.CONF.network_scheduler_driver
    )
    self.router_scheduler = importutils.import_object(
        cfg.CONF.router_scheduler_driver
    )
    router_plugin.load_driver(self.safe_reference, self.ofc)
    self.start_periodic_dhcp_agent_status_check()
def __init__(self):
    """Load the model class."""
    self._model = importutils.import_object(config.CISCO.model_class)
    # Some models manage plugin state themselves; record that we are
    # not (yet) the master in that case.
    if hasattr(self._model, "MANAGE_STATE") and self._model.MANAGE_STATE:
        self._master = False
        LOG.debug(_("Model %s manages state"), config.CISCO.model_class)
    # Mirror the model's name-mangled __native_bulk_support flag on this
    # plugin; defaults to False when the model defines none.
    native_bulk_attr_name = ("_%s__native_bulk_support" %
                             self._model.__class__.__name__)
    self.__native_bulk_support = getattr(self._model,
                                         native_bulk_attr_name, False)
    # Advertise whatever extension aliases the model supports.
    if hasattr(self._model, "supported_extension_aliases"):
        self.supported_extension_aliases.extend(
            self._model.supported_extension_aliases)
    neutron_extensions.append_api_extensions_path(extensions.__path__)
    # Extend the fault map
    self._extend_fault_map()
    LOG.debug(_("Plugin initialization complete"))
def __init__(self): super(MidonetMixin, self).__init__() # Instantiate MidoNet API client conf = cfg.CONF.MIDONET neutron_extensions.append_api_extensions_path(extensions.__path__) self.api_cli = client.MidonetClient(conf.midonet_uri, conf.username, conf.password, project_id=conf.project_id) self.setup_rpc() self.base_binding_dict = { portbindings.VIF_TYPE: portbindings.VIF_TYPE_MIDONET, portbindings.VNIC_TYPE: portbindings.VNIC_NORMAL, portbindings.VIF_DETAILS: { # TODO(rkukura): Replace with new VIF security details portbindings.CAP_PORT_FILTER: 'security-group' in self.supported_extension_aliases}} self.network_scheduler = importutils.import_object( cfg.CONF.network_scheduler_driver )
from neutron_lib.api import extensions
from neutron_lib.api import validators as lib_validators
from neutron_lib.db import constants as db_const
from neutron_lib import exceptions as neutron_exc
from neutron_lib.services import base as service_base
from oslo_config import cfg

from neutron.api import extensions as neutron_ext
from neutron.api.v2 import resource_helper

from networking_sfc._i18n import _
from networking_sfc import extensions as sfc_extensions
from networking_sfc.extensions import flowclassifier as ext_fc

# Make sure the option is registered before it is read at import time.
cfg.CONF.import_opt('api_extensions_path', 'neutron.common.config')
# Register this package's API extensions with neutron's extension loader.
neutron_ext.append_api_extensions_path(sfc_extensions.__path__)

# Extension alias and URL prefix for the SFC API.
SFC_EXT = "sfc"
SFC_PREFIX = "/sfc"

# Default Chain Parameters
DEFAULT_CHAIN_CORRELATION = 'mpls'
DEFAULT_CHAIN_SYMMETRY = False
DEFAULT_CHAIN_PARAMETERS = {
    'correlation': DEFAULT_CHAIN_CORRELATION,
    'symmetric': DEFAULT_CHAIN_SYMMETRY
}

# Default SF Parameters
DEFAULT_SF_PARAMETERS = {'correlation': None, 'weight': 1}
def __init__(self):
    """Initialize the network-profile service plugin."""
    super(NetworkProfilePlugin, self).__init__()
    # REST client used for all communication with the VSM.
    self.n1kvclient = n1kv_client.Client()
    # Register this package's API extensions with neutron.
    api_extensions.append_api_extensions_path(extensions.__path__)
def __init__(self):
    """Initialize the APIC AIM L3 service plugin."""
    LOG.info(_LI("APIC AIM L3 Plugin __init__"))
    # Appended before super().__init__() — presumably so the base
    # class's extension scan can find these paths; confirm before
    # reordering.
    extensions.append_api_extensions_path(extensions_pkg.__path__)
    # Resolved lazily elsewhere; None until then.
    self._mechanism_driver = None
    super(ApicL3Plugin, self).__init__()
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api import extensions as neutron_extensions from neutron.api.v2 import base from neutron.api.v2 import resource_helper from neutron_lib.api import extensions from neutron_lib.plugins import directory import abc from networking_bigswitch.plugins.bigswitch import constants from networking_bigswitch.plugins.bigswitch import extensions as bsn_extensions # Ensure the extension is loaded at startup neutron_extensions.append_api_extensions_path(bsn_extensions.__path__) RESOURCE_ATTRIBUTE_MAP = { 'networktemplates': { 'id': { 'allow_post': False, 'allow_put': False, 'validate': { 'type:string': None }, 'is_visible': True }, 'tenant_id': { 'allow_post': True, 'allow_put': False, 'validate': {
def __init__(self):
    """Initialize the policy-profile service plugin."""
    super(PolicyProfilePlugin, self).__init__()
    # REST client used for all communication with the VSM.
    self.n1kvclient = n1kv_client.Client()
    # Register this package's API extensions with neutron.
    api_extensions.append_api_extensions_path(extensions.__path__)
    # Poll the VSM for policy profiles in a background green thread.
    eventlet.spawn(self._poll_policy_profiles)
# License for the specific language governing permissions and limitations # under the License. import abc from neutron_lib.api import extensions as api_extensions from neutron_lib.api import validators from neutron.api import extensions from neutron.api.v2 import resource_helper from networking_l2gw import extensions as l2gw_extensions from networking_l2gw.services.l2gateway.common import constants from networking_l2gw.services.l2gateway.common import l2gw_validators extensions.append_api_extensions_path(l2gw_extensions.__path__) RESOURCE_ATTRIBUTE_MAP = { constants.L2_GATEWAYS: { 'id': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'devices': {'allow_post': True, 'allow_put': True, 'validate': {'type:l2gwdevice_list': None}, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'required_by_policy': True, 'is_visible': True}
def initialize(self):
    """Register this extension driver's API extensions with neutron."""
    neutron_extensions.append_api_extensions_path(extensions.__path__)
    LOG.info(_("HPEIronicCredentialExtDriver initialization complete"))
def setUp(self):
    """
    Setup method for n1kv plugin tests.

    First step is to define an acceptable response from the VSM to
    our requests. This needs to be done BEFORE the setUp() function
    of the super-class is called.

    This default here works for many cases. If you need something
    extra, please define your own setUp() function in your test class,
    and set your DEFAULT_RESPONSE value also BEFORE calling the
    setUp() of the super-function (this one here). If you have set
    a value already, it will not be overwritten by this code.
    """
    if not self.DEFAULT_RESP_BODY:
        self.DEFAULT_RESP_BODY = (
            """<?xml version="1.0" encoding="utf-8"?>
            <set name="events_set">
            <instance name="1" url="/api/hyper-v/events/1">
            <properties>
            <cmd>configure terminal ; port-profile type vethernet grizzlyPP
            (SUCCESS)
            </cmd>
            <id>42227269-e348-72ed-bdb7-7ce91cd1423c</id>
            <time>1369223611</time>
            <name>grizzlyPP</name>
            </properties>
            </instance>
            <instance name="2" url="/api/hyper-v/events/2">
            <properties>
            <cmd>configure terminal ; port-profile type vethernet havanaPP
            (SUCCESS)
            </cmd>
            <id>3fc83608-ae36-70e7-9d22-dec745623d06</id>
            <time>1369223661</time>
            <name>havanaPP</name>
            </properties>
            </instance>
            </set>
            """)
    # Creating a mock HTTP connection object for httplib. The N1KV client
    # interacts with the VSM via HTTP. Since we don't have a VSM running
    # in the unit tests, we need to 'fake' it by patching the HTTP library
    # itself. We install a patch for a fake HTTP connection class.
    # Using __name__ to avoid having to enter the full module path.
    http_patcher = patch(n1kv_client.httplib2.__name__ + ".Http")
    FakeHttpConnection = http_patcher.start()
    # FIX: the patcher was started but never stopped, leaking the fake
    # HTTP class into every subsequent test. Stop it on cleanup, as the
    # newer variant of this setUp already does.
    self.addCleanup(http_patcher.stop)
    # Now define the return values for a few functions that may be called
    # on any instance of the fake HTTP connection class.
    instance = FakeHttpConnection.return_value
    instance.getresponse.return_value = (FakeResponse(
        self.DEFAULT_RESP_CODE,
        self.DEFAULT_RESP_BODY,
        'application/xml'))
    instance.request.return_value = (instance.getresponse.return_value,
                                     self.DEFAULT_RESP_BODY)
    # Patch some internal functions in a few other parts of the system.
    # These help us move along, without having to mock up even more
    # systems in the background.
    # Return a dummy VSM IP address
    get_vsm_hosts_patcher = patch(n1kv_client.__name__ +
                                  ".Client._get_vsm_hosts")
    fake_get_vsm_hosts = get_vsm_hosts_patcher.start()
    # FIX: stop this patcher on cleanup too.
    self.addCleanup(get_vsm_hosts_patcher.stop)
    fake_get_vsm_hosts.return_value = ["127.0.0.1"]
    # Return dummy user profiles
    get_cred_name_patcher = patch(cdb.__name__ + ".get_credential_name")
    fake_get_cred_name = get_cred_name_patcher.start()
    # FIX: stop this patcher on cleanup too.
    self.addCleanup(get_cred_name_patcher.stop)
    fake_get_cred_name.return_value = {"user_name": "admin",
                                       "password": "******"}
    n1kv_neutron_plugin.N1kvNeutronPluginV2._setup_vsm = _fake_setup_vsm
    neutron_extensions.append_api_extensions_path(extensions.__path__)
    ext_mgr = NetworkProfileTestExtensionManager()
    # Save the original RESOURCE_ATTRIBUTE_MAP
    self.saved_attr_map = {}
    for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.items():
        self.saved_attr_map[resource] = attrs.copy()
    # Update the RESOURCE_ATTRIBUTE_MAP with n1kv specific extended attrs.
    attributes.RESOURCE_ATTRIBUTE_MAP["networks"].update(
        n1kv.EXTENDED_ATTRIBUTES_2_0["networks"])
    attributes.RESOURCE_ATTRIBUTE_MAP["ports"].update(
        n1kv.EXTENDED_ATTRIBUTES_2_0["ports"])
    self.addCleanup(self.restore_resource_attribute_map)
    self.addCleanup(db.clear_db)
    super(N1kvPluginTestCase, self).setUp(self._plugin_name,
                                          ext_mgr=ext_mgr)
    # Create some of the database entries that we require.
    self._make_test_profile()
    self._make_test_policy_profile()
def setUp(self):
    """
    Setup method for n1kv plugin tests.

    First step is to define an acceptable response from the VSM to
    our requests. This needs to be done BEFORE the setUp() function
    of the super-class is called.

    This default here works for many cases. If you need something
    extra, please define your own setUp() function in your test class,
    and set your DEFAULT_RESPONSE value also BEFORE calling the
    setUp() of the super-function (this one here). If you have set
    a value already, it will not be overwritten by this code.
    """
    if not self.DEFAULT_RESP_BODY:
        self.DEFAULT_RESP_BODY = (
            """<?xml version="1.0" encoding="utf-8"?>
            <set name="events_set">
            <instance name="1" url="/api/hyper-v/events/1">
            <properties>
            <cmd>configure terminal ; port-profile type vethernet grizzlyPP
            (SUCCESS)
            </cmd>
            <id>42227269-e348-72ed-bdb7-7ce91cd1423c</id>
            <time>1369223611</time>
            <name>grizzlyPP</name>
            </properties>
            </instance>
            <instance name="2" url="/api/hyper-v/events/2">
            <properties>
            <cmd>configure terminal ; port-profile type vethernet havanaPP
            (SUCCESS)
            </cmd>
            <id>3fc83608-ae36-70e7-9d22-dec745623d06</id>
            <time>1369223661</time>
            <name>havanaPP</name>
            </properties>
            </instance>
            </set>
            """)
    # Creating a mock HTTP connection object for httplib. The N1KV client
    # interacts with the VSM via HTTP. Since we don't have a VSM running
    # in the unit tests, we need to 'fake' it by patching the HTTP library
    # itself. We install a patch for a fake HTTP connection class.
    # Using __name__ to avoid having to enter the full module path.
    http_patcher = patch(n1kv_client.httplib2.__name__ + ".Http")
    FakeHttpConnection = http_patcher.start()
    # Undo the patch when this test finishes so it cannot leak.
    self.addCleanup(http_patcher.stop)
    # Now define the return values for a few functions that may be called
    # on any instance of the fake HTTP connection class.
    instance = FakeHttpConnection.return_value
    instance.getresponse.return_value = (FakeResponse(
        self.DEFAULT_RESP_CODE,
        self.DEFAULT_RESP_BODY,
        'application/xml'))
    instance.request.return_value = (instance.getresponse.return_value,
                                     self.DEFAULT_RESP_BODY)
    # Patch some internal functions in a few other parts of the system.
    # These help us move along, without having to mock up even more
    # systems in the background.
    # Return a dummy VSM IP address
    get_vsm_hosts_patcher = patch(n1kv_client.__name__ +
                                  ".Client._get_vsm_hosts")
    fake_get_vsm_hosts = get_vsm_hosts_patcher.start()
    self.addCleanup(get_vsm_hosts_patcher.stop)
    fake_get_vsm_hosts.return_value = ["127.0.0.1"]
    # Return dummy user profiles
    get_cred_name_patcher = patch(cdb.__name__ + ".get_credential_name")
    fake_get_cred_name = get_cred_name_patcher.start()
    self.addCleanup(get_cred_name_patcher.stop)
    fake_get_cred_name.return_value = {"user_name": "admin",
                                       "password": "******"}
    n1kv_neutron_plugin.N1kvNeutronPluginV2._setup_vsm = _fake_setup_vsm
    neutron_extensions.append_api_extensions_path(extensions.__path__)
    # Reset any config overrides made during the test.
    self.addCleanup(cfg.CONF.reset)
    ext_mgr = NetworkProfileTestExtensionManager()
    # Save the original RESOURCE_ATTRIBUTE_MAP
    self.saved_attr_map = {}
    for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.items():
        self.saved_attr_map[resource] = attrs.copy()
    # Update the RESOURCE_ATTRIBUTE_MAP with n1kv specific extended attrs.
    attributes.RESOURCE_ATTRIBUTE_MAP["networks"].update(
        n1kv_profile.EXTENDED_ATTRIBUTES_2_0["networks"])
    attributes.RESOURCE_ATTRIBUTE_MAP["ports"].update(
        n1kv_profile.EXTENDED_ATTRIBUTES_2_0["ports"])
    self.addCleanup(self.restore_resource_attribute_map)
    self.addCleanup(db.clear_db)
    super(N1kvPluginTestCase, self).setUp(self._plugin_name,
                                          ext_mgr=ext_mgr)
    # Create some of the database entries that we require.
    self._make_test_profile()
    self._make_test_policy_profile()
from neutron.common import exceptions as nexc
from neutron.plugins.common import constants
from neutron.quota import resource_registry
from neutron.services import service_base
from oslo_config import cfg
from oslo_log import log as logging
import six

import gbpservice.neutron.extensions
import gbpservice.neutron.extensions.group_policy  # noqa
from gbpservice.neutron.services.servicechain.common import constants as scc

# The code below is a monkey patch of key Neutron's modules. This is needed
# for the GBP service to be loaded correctly. GBP extensions' path is added
# to Neutron's so that it's found at extension scanning time.
extensions.append_api_extensions_path(gbpservice.neutron.extensions.__path__)

LOG = logging.getLogger(__name__)


# Service Chain Exceptions
class ServiceProfileNotFound(nexc.NotFound):
    """Raised when the requested ServiceProfile does not exist."""
    message = _("ServiceProfile %(profile_id)s could not be found")


class ServiceProfileInUse(nexc.NotFound):
    # NOTE(review): inherits NotFound although it signals "in use" —
    # presumably intentional for HTTP fault mapping; confirm before
    # changing the base class.
    message = _("Unable to complete operation, ServiceProfile "
                "%(profile_id)s is in use")


class ServiceChainNodeNotFound(nexc.NotFound):
    """Raised when the requested ServiceChainNode does not exist."""
    message = _("ServiceChainNode %(sc_node_id)s could not be found")
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from neutron.api import extensions from neutron.api.v2 import attributes from neutron.api.v2 import resource_helper import networking_l2gw.extensions from networking_l2gw.services.l2gateway.common import constants from networking_l2gw.services.l2gateway.common import l2gw_validators extensions.append_api_extensions_path(networking_l2gw.extensions.__path__) RESOURCE_ATTRIBUTE_MAP = { constants.L2_GATEWAYS: { 'id': { 'allow_post': False, 'allow_put': False, 'is_visible': True }, 'name': { 'allow_post': True, 'allow_put': True, 'validate': { 'type:string': None }, 'is_visible': True,
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api import extensions from neutron.api.v2 import base from neutron.api.v2 import resource_helper from neutron import manager import abc from bsnstacklib.plugins.bigswitch import extensions as bsn_extensions # Ensure the extension is loaded at startup extensions.append_api_extensions_path(bsn_extensions.__path__) RESOURCE_ATTRIBUTE_MAP = { 'networktemplates': { 'id': { 'allow_post': False, 'allow_put': False, 'validate': { 'type:string': None }, 'is_visible': True }, 'tenant_id': { 'allow_post': True, 'allow_put': False, 'validate': {
def setUp(self):
    """
    Setup method for n1kv plugin tests.

    First step is to define an acceptable response from the VSM to
    our requests. This needs to be done BEFORE the setUp() function
    of the super-class is called.

    This default here works for many cases. If you need something
    extra, please define your own setUp() function in your test class,
    and set your DEFAULT_RESPONSE value also BEFORE calling the
    setUp() of the super-function (this one here). If you have set
    a value already, it will not be overwritten by this code.
    """
    if not self.DEFAULT_RESP_BODY:
        self.DEFAULT_RESP_BODY = {
            "icehouse-pp": {
                "properties": {
                    "name": "icehouse-pp",
                    "id": "some-uuid-1"
                }
            },
            "havana_pp": {
                "properties": {
                    "name": "havana_pp",
                    "id": "some-uuid-2"
                }
            },
            "dhcp_pp": {
                "properties": {
                    "name": "dhcp_pp",
                    "id": "some-uuid-3"
                }
            },
        }
    # Creating a mock HTTP connection object for requests lib. The N1KV
    # client interacts with the VSM via HTTP. Since we don't have a VSM
    # running in the unit tests, we need to 'fake' it by patching the HTTP
    # library itself. We install a patch for a fake HTTP connection class.
    # Using __name__ to avoid having to enter the full module path.
    http_patcher = mock.patch(n1kv_client.requests.__name__ + ".request")
    FakeHttpConnection = http_patcher.start()
    # Now define the return values for a few functions that may be called
    # on any instance of the fake HTTP connection class.
    self.resp_headers = {"content-type": "application/json"}
    FakeHttpConnection.return_value = (FakeResponse(
        self.DEFAULT_RESP_CODE,
        self.DEFAULT_RESP_BODY,
        self.resp_headers))
    # Patch some internal functions in a few other parts of the system.
    # These help us move along, without having to mock up even more
    # systems in the background.
    # Return a dummy VSM IP address
    mock.patch(n1kv_client.__name__ + ".Client._get_vsm_hosts",
               new=lambda self: "127.0.0.1").start()
    # Return dummy user profiles
    mock.patch(cdb.__name__ + ".get_credential_name",
               new=lambda self: {
                   "user_name": "admin",
                   "password": "******"
               }).start()
    # FIX: the patchers above were started but never stopped, leaking
    # the mocks into every subsequent test; stop them all on cleanup.
    self.addCleanup(mock.patch.stopall)
    # NOTE(review): this class attribute is replaced permanently and
    # never restored — confirm whether that is intended.
    n1kv_neutron_plugin.N1kvNeutronPluginV2._setup_vsm = _fake_setup_vsm
    neutron_extensions.append_api_extensions_path(extensions.__path__)
    ext_mgr = NetworkProfileTestExtensionManager()
    # Save the original RESOURCE_ATTRIBUTE_MAP
    self.saved_attr_map = {}
    for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.items():
        self.saved_attr_map[resource] = attrs.copy()
    # Update the RESOURCE_ATTRIBUTE_MAP with n1kv specific extended attrs.
    attributes.RESOURCE_ATTRIBUTE_MAP["networks"].update(
        n1kv.EXTENDED_ATTRIBUTES_2_0["networks"])
    attributes.RESOURCE_ATTRIBUTE_MAP["ports"].update(
        n1kv.EXTENDED_ATTRIBUTES_2_0["ports"])
    self.addCleanup(self.restore_resource_attribute_map)
    super(N1kvPluginTestCase, self).setUp(self._plugin_name,
                                          ext_mgr=ext_mgr)
    # Create some of the database entries that we require.
    self._make_test_profile()
    self._make_test_policy_profile()
def initialize(self):
    """Register this extension driver's API extensions with neutron."""
    api_extensions.append_api_extensions_path(extensions.__path__)
def __init__(self, **args):
    """Initialize the test plugin and expose the NSX extensions path."""
    super(TestNetworkGatewayPlugin, self).__init__(**args)
    extensions.append_api_extensions_path([NSXEXT_PATH])
def __init__(self, **args):
    """Initialize the test plugin and expose the NVP extensions path."""
    super(TestNetworkGatewayPlugin, self).__init__(**args)
    extensions.append_api_extensions_path([NVP_EXT_PATH])
from neutron.services.service_base import ServicePluginBase
from oslo_log import log

from networking_bgpvpn.neutron import extensions as bgpvpn_ext
from networking_bgpvpn.neutron.services.common import constants

LOG = log.getLogger(__name__)

# Regular expression to validate Route Target list format
# ["<asn1>:<nn1>","<asn2>:<nn2>", ...] with asn and nn in range 0-65535
RT_REGEX = (r'^((?:0|[1-9]\d{0,3}|[1-5]\d{4}|6[0-4]\d{3}|65[0-4]\d{2}|655[0-2]'
            r'\d|6553[0-5]):(?:0|[1-9]\d{0,3}|[1-5]\d{4}|6[0-4]\d{3}|65[0-4]\d'
            r'{2}|655[0-2]\d|6553[0-5]))$')

# Register the BGPVPN API extensions and map the alias to its service.
extensions.append_api_extensions_path(bgpvpn_ext.__path__)
n_const.EXT_TO_SERVICE_MAPPING['bgpvpn'] = constants.BGPVPN


class BGPVPNNotFound(n_exc.NotFound):
    """Raised when the requested BGPVPN does not exist."""
    message = _("BGPVPN %(id)s could not be found")


class BGPVPNNetAssocNotFound(n_exc.NotFound):
    """Raised when a BGPVPN network association does not exist."""
    message = _("BGPVPN network association %(id)s could not be found "
                "for BGPVPN %(bgpvpn_id)s")


class BGPVPNRouterAssocNotFound(n_exc.NotFound):
    """Raised when a BGPVPN router association does not exist."""
    message = _("BGPVPN router association %(id)s could not be found "
                "for BGPVPN %(bgpvpn_id)s")
def initialize(self):
    """Register the test extension package's API extensions."""
    extensions.append_api_extensions_path(test_ext.__path__)
def initialize(self):
    """Register this driver's API extensions with neutron."""
    extensions_api.append_api_extensions_path(rk_extensions.__path__)
def initialize(self):
    """Register the test extensions package with neutron."""
    extensions.append_api_extensions_path(test_extensions.__path__)
def initialize(self):
    """Initialize the APIC AIM extension driver: expose its extensions."""
    LOG.info(_LI("APIC AIM ED initializing"))
    extensions.append_api_extensions_path(extensions_pkg.__path__)
def __init__(self):
    """Initialize the APIC AIM L3 service plugin."""
    LOG.info("APIC AIM L3 Plugin __init__")
    # Appended before super().__init__() — presumably so the base
    # class's extension scan can find these paths; confirm before
    # reordering.
    extensions.append_api_extensions_path(extensions_pkg.__path__)
    # Resolved lazily elsewhere; None until then.
    self._mechanism_driver = None
    super(ApicL3Plugin, self).__init__()
def __init__(self):
    """Initialize the Big Switch L3 service plugin."""
    # Include the Big Switch Extensions path in the api_extensions.
    # Done before super().__init__() — presumably so the base class's
    # extension scan can see the path; confirm before reordering.
    neutron_extensions.append_api_extensions_path(extensions.__path__)
    super(L3RestProxy, self).__init__()
    # Reuse the controller connection pool created by the core plugin.
    self.servers = servermanager.ServerPool.get_instance()
def setUp(self):
    """
    Setup method for n1kv plugin tests.

    First step is to define an acceptable response from the VSM to
    our requests. This needs to be done BEFORE the setUp() function
    of the super-class is called.

    This default here works for many cases. If you need something
    extra, please define your own setUp() function in your test class,
    and set your DEFAULT_RESPONSE value also BEFORE calling the
    setUp() of the super-function (this one here). If you have set
    a value already, it will not be overwritten by this code.

    """
    if not self.DEFAULT_RESP_BODY:
        # Canned VSM policy-profile payloads returned by the fake HTTP layer.
        self.DEFAULT_RESP_BODY = {
            "icehouse-pp": {"properties": {"name": "icehouse-pp",
                                           "id": "some-uuid-1"}},
            "havana_pp": {"properties": {"name": "havana_pp",
                                         "id": "some-uuid-2"}},
            "dhcp_pp": {"properties": {"name": "dhcp_pp",
                                       "id": "some-uuid-3"}},
        }
    # Creating a mock HTTP connection object for requests lib. The N1KV
    # client interacts with the VSM via HTTP. Since we don't have a VSM
    # running in the unit tests, we need to 'fake' it by patching the HTTP
    # library itself. We install a patch for a fake HTTP connection class.
    # Using __name__ to avoid having to enter the full module path.
    http_patcher = mock.patch(n1kv_client.requests.__name__ + ".request")
    FakeHttpConnection = http_patcher.start()
    # Now define the return values for a few functions that may be called
    # on any instance of the fake HTTP connection class.
    self.resp_headers = {"content-type": "application/json"}
    FakeHttpConnection.return_value = (FakeResponse(
        self.DEFAULT_RESP_CODE, self.DEFAULT_RESP_BODY, self.resp_headers))

    # Patch some internal functions in a few other parts of the system.
    # These help us move along, without having to mock up even more systems
    # in the background.

    # Return a dummy VSM IP address
    mock.patch(n1kv_client.__name__ + ".Client._get_vsm_hosts",
               new=lambda self: "127.0.0.1").start()
    # Return dummy user profiles
    mock.patch(cdb.__name__ + ".get_credential_name",
               new=lambda self: {"user_name": "admin",
                                 "password": "******"}).start()
    # Replace the plugin's VSM bootstrap with a no-op test double.
    n1kv_neutron_plugin.N1kvNeutronPluginV2._setup_vsm = _fake_setup_vsm

    neutron_extensions.append_api_extensions_path(extensions.__path__)
    ext_mgr = NetworkProfileTestExtensionManager()

    # Save the original RESOURCE_ATTRIBUTE_MAP
    self.saved_attr_map = {}
    for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.items():
        self.saved_attr_map[resource] = attrs.copy()
    # Update the RESOURCE_ATTRIBUTE_MAP with n1kv specific extended attrs.
    attributes.RESOURCE_ATTRIBUTE_MAP["networks"].update(
        n1kv.EXTENDED_ATTRIBUTES_2_0["networks"])
    attributes.RESOURCE_ATTRIBUTE_MAP["ports"].update(
        n1kv.EXTENDED_ATTRIBUTES_2_0["ports"])
    # Cleanups run LIFO: restore the attribute map and wipe the test DB.
    self.addCleanup(self.restore_resource_attribute_map)
    self.addCleanup(db.clear_db)
    super(N1kvPluginTestCase, self).setUp(self._plugin_name,
                                          ext_mgr=ext_mgr)

    # Create some of the database entries that we require.
    self._make_test_profile()
    self._make_test_policy_profile()
import six
from oslo_config import cfg
from neutron_lib.api import extensions as api_ext
from neutron_lib.services import base as service_base
from neutron.api import extensions as ext
from neutron.api.v2 import resource_helper

from neutron_classifier.common import resources as classifier_resources
from neutron_classifier import extensions

cfg.CONF.import_opt('api_extensions_path', 'neutron.common.config')
# Make the neutron-classifier extension package discoverable by Neutron.
ext.append_api_extensions_path(extensions.__path__)

EXT_NAME = "neutron_classifier"


def validate_string(String):
    # Coerce None to an empty string; any other value passes through
    # unchanged (including empty strings).
    if String is None:
        String = ''
    return String


RESOURCE_ATTRIBUTE_MAP = {
    'classification_type':
        classifier_resources.CLASSIFICATION_TYPE_RESOURCE_MAP,
    'classification_groups':
        classifier_resources.CLASSIFICATION_GROUP_RESOURCE_MAP,
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron.common import exceptions as nexc
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services import service_base

import gbp.neutron.extensions
from gbp.neutron.services.grouppolicy.common import constants as gp_constants

# The code below is a monkey patch of key Neutron's modules. This is needed for
# the GBP service to be loaded correctly. GBP extensions' path is added
# to Neutron's so that it's found at extension scanning time.
extensions.append_api_extensions_path(gbp.neutron.extensions.__path__)
constants.GROUP_POLICY = "GROUP_POLICY"
constants.COMMON_PREFIXES["GROUP_POLICY"] = "/grouppolicy"
constants.EXT_TO_SERVICE_MAPPING['gp'] = constants.GROUP_POLICY
constants.ALLOWED_SERVICES.append(constants.GROUP_POLICY)

LOG = logging.getLogger(__name__)


# Group Policy Exceptions
class PolicyTargetNotFound(nexc.NotFound):
    # Raised when a policy target lookup fails.
    message = _("Policy Target %(policy_target_id)s could not be found")


class PolicyTargetGroupNotFound(nexc.NotFound):
class FortinetFirewallPlugin(
    firewall_db.Firewall_db_mixin,
    firewall_router_insertion_db.FirewallRouterInsertionDbMixin):
    """Implementation of the Neutron Firewall Service Plugin.

    This class manages the workflow of FWaaS request/response.
    Most DB related works are implemented in class
    firewall_db.Firewall_db_mixin.
    """
    # Class-level side effect: register the plugin's extension package.
    neutron_extensions.append_api_extensions_path(extensions.__path__)
    supported_extension_aliases = ["fwaas", "fwaasrouterinsertion"]
    path_prefix = fw_ext.API_PREFIX

    def __init__(self):
        """Do the initialization for the firewall service plugin here."""
        self._fortigate = config.fgt_info
        self._driver = config.get_apiclient()
        self.task_manager = tasks.TaskManager()
        self.task_manager.start()
        firewall_db.subscribe()

    def _rpc_update_firewall(self, context, firewall_id):
        # Re-push one firewall's full rule set to the backend after a rule
        # or policy change; the router associations are unchanged.
        status_update = {"firewall": {"status": n_consts.PENDING_UPDATE}}
        super(FortinetFirewallPlugin, self).update_firewall(
            context, firewall_id, status_update)
        fw_with_rules = self._make_firewall_dict_with_rules(context,
                                                            firewall_id)
        # this is triggered on an update to fw rule or policy, no
        # change in associated routers.
        fw_with_rules['add-router-ids'] = self.get_firewall_routers(
            context, firewall_id)
        fw_with_rules['del-router-ids'] = []
        self._apply_firewall(context, **fw_with_rules)

    def _rpc_update_firewall_policy(self, context, firewall_policy_id):
        # Propagate a policy change to every firewall using the policy.
        firewall_policy = self.get_firewall_policy(context,
                                                   firewall_policy_id)
        if firewall_policy:
            for firewall_id in firewall_policy['firewall_list']:
                self._rpc_update_firewall(context, firewall_id)

    def _ensure_update_firewall(self, context, firewall_id):
        # Reject updates while the firewall is mid-transition.
        fwall = self.get_firewall(context, firewall_id)
        if fwall['status'] in [n_consts.PENDING_CREATE,
                               n_consts.PENDING_UPDATE,
                               n_consts.PENDING_DELETE]:
            raise fw_exc.FirewallInPendingState(
                firewall_id=firewall_id, pending_state=fwall['status'])

    def _ensure_update_firewall_policy(self, context, firewall_policy_id):
        # A policy is updatable only if none of its firewalls is pending.
        firewall_policy = self.get_firewall_policy(context,
                                                   firewall_policy_id)
        if firewall_policy and 'firewall_list' in firewall_policy:
            for firewall_id in firewall_policy['firewall_list']:
                self._ensure_update_firewall(context, firewall_id)

    def _ensure_update_firewall_rule(self, context, firewall_rule_id):
        # A rule is updatable only if its owning policy is updatable.
        fw_rule = self.get_firewall_rule(context, firewall_rule_id)
        if 'firewall_policy_id' in fw_rule and fw_rule['firewall_policy_id']:
            self._ensure_update_firewall_policy(
                context, fw_rule['firewall_policy_id'])

    def _get_routers_for_create_firewall(self, tenant_id, context, firewall):
        """Resolve the router ids a new firewall should attach to."""
        # pop router_id as this goes in the router association db
        # and not firewall db
        LOG.debug("# _get_routers_for_create_firewall called Fortinet_plugin")
        router_ids = firewall['firewall'].pop('router_ids', None)
        if router_ids == n_consts.ATTR_NOT_SPECIFIED:
            # old semantics router-ids keyword not specified pick up
            # all routers on tenant.
            l3_plugin = directory.get_plugin(n_consts.L3)
            ctx = neutron_context.get_admin_context()
            routers = l3_plugin.get_routers(ctx)
            router_ids = [
                router['id']
                for router in routers
                if router['tenant_id'] == tenant_id]
            # validation can still fail this if there is another fw
            # which is associated with one of these routers.
            self.validate_firewall_routers_not_in_use(context, router_ids)
            return router_ids
        else:
            if not router_ids:
                # This indicates that user specifies no routers.
                return []
            else:
                # some router(s) provided.
                self.validate_firewall_routers_not_in_use(context,
                                                          router_ids)
                return router_ids

    def create_firewall(self, context, firewall):
        """Create a firewall and push it to the Fortigate backend."""
        LOG.debug("create_firewall() called Fortinet_plugin")
        tenant_id = firewall['firewall']['tenant_id']
        fw_new_rtrs = self._get_routers_for_create_firewall(
            tenant_id, context, firewall)
        if not fw_new_rtrs:
            # no messaging to agent needed, and fw needs to go
            # to INACTIVE(no associated rtrs) state.
            status = n_consts.INACTIVE
            fw = super(FortinetFirewallPlugin, self).create_firewall(
                context, firewall, status)
            fw['router_ids'] = []
            return fw
        else:
            fw = super(FortinetFirewallPlugin, self).create_firewall(
                context, firewall)
            fw['router_ids'] = fw_new_rtrs
        fw_with_rules = (
            self._make_firewall_dict_with_rules(context, fw['id']))
        fw_with_rtrs = {'fw_id': fw['id'],
                        'router_ids': fw_new_rtrs}
        self.set_routers_for_firewall(context, fw_with_rtrs)
        fw_with_rules['add-router-ids'] = fw_new_rtrs
        fw_with_rules['del-router-ids'] = []
        self._apply_firewall(context, **fw_with_rules)
        return fw

    def update_firewall(self, context, id, firewall):
        """Update a firewall, recomputing its router associations."""
        LOG.debug("Fortinet_plugin update_firewall() called, "
                  "id is %(id)s, firewall is %(fw)s",
                  {'id': id, 'fw': firewall})
        self._ensure_update_firewall(context, id)
        # pop router_id as this goes in the router association db
        # and not firewall db
        router_ids = firewall['firewall'].pop('router_ids', None)
        fw_current_rtrs = self.get_firewall_routers(context, id)
        if router_ids is not None:
            if router_ids == []:
                # This indicates that user is indicating no routers.
                fw_new_rtrs = []
            else:
                self.validate_firewall_routers_not_in_use(
                    context, router_ids, id)
                fw_new_rtrs = router_ids
            self.update_firewall_routers(context,
                                         {'fw_id': id,
                                          'router_ids': fw_new_rtrs})
        else:
            # router-ids keyword not specified for update pick up
            # existing routers.
            fw_new_rtrs = self.get_firewall_routers(context, id)
        if not fw_new_rtrs and not fw_current_rtrs:
            # no messaging to agent needed, and we need to continue
            # in INACTIVE state
            firewall['firewall']['status'] = n_consts.INACTIVE
            fw = super(FortinetFirewallPlugin, self).update_firewall(
                context, id, firewall)
            fw['router_ids'] = []
            return fw
        else:
            firewall['firewall']['status'] = n_consts.PENDING_UPDATE
            fw = super(FortinetFirewallPlugin, self).update_firewall(
                context, id, firewall)
            fw['router_ids'] = fw_new_rtrs
        fw_with_rules = (
            self._make_firewall_dict_with_rules(context, fw['id']))
        # determine rtrs to add fw to and del from
        fw_with_rules['add-router-ids'] = fw_new_rtrs
        fw_with_rules['del-router-ids'] = list(
            set(fw_current_rtrs).difference(set(fw_new_rtrs)))
        # last-router drives agent to ack with status to set state to INACTIVE
        fw_with_rules['last-router'] = not fw_new_rtrs
        LOG.debug("## update_firewall %s: Add Routers: %s, Del Routers: %s",
                  fw['id'],
                  fw_with_rules['add-router-ids'],
                  fw_with_rules['del-router-ids'])
        self._apply_firewall(context, **fw_with_rules)
        #self.agent_rpc.update_firewall(context, fw_with_rules)
        return fw

    def update_firewall_for_delete_router(self, context, router_id):
        # Called on router deletion: detach any firewall bound to it.
        LOG.debug("fwaas delete_router() called, router_id: %(rtid)s",
                  {'rtid': router_id})
        cls = firewall_router_insertion_db.FirewallRouterAssociation
        db_fw_rt = fortinet_db.query_record(context, cls,
                                            router_id=router_id)
        if not db_fw_rt:
            return None
        firewall = {u'firewall': {'router_ids': []}}
        return self.update_firewall(context, db_fw_rt.fw_id, firewall)

    def delete_db_firewall_object(self, context, id):
        # DB-only removal; backend cleanup is done by delete_firewall().
        super(FortinetFirewallPlugin, self).delete_firewall(context, id)

    def delete_firewall(self, context, id):
        """Delete a firewall from the backend, then from the DB."""
        LOG.debug("Fortinet_plugin delete_firewall() called, fw_id %(id)s",
                  {'id': id})
        fw_with_rules = (
            self._make_firewall_dict_with_rules(context, id))
        status = {"firewall": {"status": n_consts.PENDING_DELETE}}
        super(FortinetFirewallPlugin, self).update_firewall(
            context, id, status)
        # Reflect state change in fw_with_rules
        fw_with_rules['del-router-ids'] = self.get_firewall_routers(
            context, id)
        self._apply_firewall(context, **fw_with_rules)
        self.delete_db_firewall_object(context, id)

    def update_firewall_policy(self, context, id, firewall_policy):
        """Update a policy and re-sync all firewalls that use it."""
        LOG.debug("update_firewall_policy called, "
                  "id =%(id)s, firewall_policy=%(fp)s",
                  {'id': id, 'fp': firewall_policy})
        self._ensure_update_firewall_policy(context, id)
        firewall_policy_old = self.get_firewall_policy(context, id)
        firewall_rule_ids = firewall_policy_old.get('firewall_rules', [])
        tenant_id = firewall_policy_old.get('tenant_id', None)
        fwp = super(FortinetFirewallPlugin,
                    self).update_firewall_policy(context, id,
                                                 firewall_policy)
        # Drop the backend objects for the old rule set; the follow-up
        # RPC update re-adds the current rules.
        for fwr_id in firewall_rule_ids:
            fw_rule = self.get_firewall_rule(context, fwr_id)
            self._delete_firewall_rule(context, tenant_id, **fw_rule)
        self._rpc_update_firewall_policy(context, id)
        return fwp

    def create_firewall_rule(self, context, firewall_rule):
        """
        :param context:
        :param firewall_rule:
        firewall_rule={'firewall_rule': {... }}
        :return:
        """
        LOG.debug("create_firewall_rule() firewall_rule=%(fwr)s",
                  {'fwr': firewall_rule})
        return super(FortinetFirewallPlugin,
                     self).create_firewall_rule(context, firewall_rule)

    def delete_firewall_rule(self, context, id):
        # Pure DB delete; backend objects are removed via policy updates.
        super(FortinetFirewallPlugin, self).delete_firewall_rule(context, id)

    def update_firewall_rule(self, context, id, firewall_rule):
        """Update a rule on the backend first, then in the DB."""
        LOG.debug("update_firewall_rule() id: %(id)s, "
                  "firewall_rule: %(firewall_rule)s",
                  {'id': id, 'firewall_rule': firewall_rule})
        try:
            fwr = self._update_firewall_rule_dict(context, id, firewall_rule)
            self._update_firewall_rule(context, id, fwr)
            self._ensure_update_firewall_rule(context, id)
            fwr = super(FortinetFirewallPlugin,
                        self).update_firewall_rule(context, id,
                                                   firewall_rule)
            utils.update_status(self, context, t_consts.TaskStatus.COMPLETED)
            return fwr
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("update_firewall_rule %(fwr)s failed"),
                          {'fwr': firewall_rule})
                utils._rollback_on_err(self, context, e)

    def insert_rule(self, context, id, rule_info):
        """Insert a rule into a policy; roll back on failure."""
        self._ensure_update_firewall_policy(context, id)
        try:
            fwp = super(FortinetFirewallPlugin,
                        self).insert_rule(context, id, rule_info)
            self._rpc_update_firewall_policy(context, id)
            utils.update_status(self, context, t_consts.TaskStatus.COMPLETED)
            return fwp
        except Exception as e:
            with excutils.save_and_reraise_exception():
                # Undo the DB insertion before re-raising.
                self.remove_rule(context, id, rule_info)
                utils._rollback_on_err(self, context, e)

    def remove_rule(self, context, id, rule_info):
        """Remove a rule from a policy and from the backend."""
        LOG.debug("Fortinet_plugin remove_rule() called")
        self._ensure_update_firewall_policy(context, id)
        if rule_info.get('firewall_rule_id', None):
            firewall_rule = self._get_firewall_rule(
                context, rule_info['firewall_rule_id'])
            fwr = self._make_firewall_rule_dict(firewall_rule)
            self._delete_firewall_rule(context, fwr['tenant_id'], **fwr)
        fwp = super(FortinetFirewallPlugin, self).remove_rule(
            context, id, rule_info)
        self._rpc_update_firewall_policy(context, id)
        return fwp

    def get_firewalls(self, context, filters=None, fields=None):
        """List firewalls, annotating each with its router ids."""
        LOG.debug("fwaas get_firewalls() called, filters=%(filters)s, "
                  "fields=%(fields)s",
                  {'filters': filters, 'fields': fields})
        fw_list = super(FortinetFirewallPlugin, self).get_firewalls(
            context, filters, fields)
        for fw in fw_list:
            fw_current_rtrs = self.get_firewall_routers(context, fw['id'])
            fw['router_ids'] = fw_current_rtrs
        return fw_list

    def get_firewall(self, context, id, fields=None):
        """Fetch one firewall, annotated with its router ids."""
        LOG.debug("fwaas get_firewall() called")
        res = super(FortinetFirewallPlugin, self).get_firewall(
            context, id, fields)
        fw_current_rtrs = self.get_firewall_routers(context, id)
        res['router_ids'] = fw_current_rtrs
        return res

    def _apply_firewall(self, context, **fw_with_rules):
        """Push (or retract) a firewall's rule set on the Fortigate.

        'del-router-ids' triggers removal of the existing backend rules;
        'add-router-ids' triggers (re-)installation. Status is updated
        accordingly and any failure is rolled back.
        """
        tenant_id = fw_with_rules['tenant_id']
        default_fwr = self._make_default_firewall_rule_dict(tenant_id)
        try:
            if fw_with_rules.get('del-router-ids', None):
                for fwr in list(
                        fw_with_rules.get('firewall_rule_list', None)):
                    self._delete_firewall_rule(context, tenant_id, **fwr)
                if default_fwr:
                    self._delete_firewall_rule(
                        context, tenant_id, **default_fwr)
                self.update_firewall_status(
                    context, fw_with_rules['id'], n_consts.INACTIVE)
            if fw_with_rules.get('add-router-ids', None):
                vdom = getattr(
                    fortinet_db.Fortinet_ML2_Namespace.query_one(
                        context, tenant_id=tenant_id), 'vdom', None)
                if not vdom:
                    raise fw_exc.FirewallInternalDriverError(
                        driver='Fortinet_fwaas_plugin')
                # Install the catch-all deny first, then the rules in
                # reverse so each add-to-head lands in original order.
                if default_fwr:
                    self._add_firewall_rule(context, tenant_id,
                                            **default_fwr)
                for fwr in reversed(
                        list(fw_with_rules.get('firewall_rule_list',
                                               None))):
                    self._add_firewall_rule(context, tenant_id, **fwr)
                self.update_firewall_status(
                    context, fw_with_rules['id'], n_consts.ACTIVE)
            else:
                self.update_firewall_status(
                    context, fw_with_rules['id'], n_consts.INACTIVE)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("apply_firewall %(fws)s failed"),
                          {'fws': fw_with_rules})
                utils._rollback_on_err(self, context, e)
        utils.update_status(self, context, t_consts.TaskStatus.COMPLETED)

    def _add_firewall_rule(self, context, fwp_tenant_id, **fwr):
        """
        :param obj:
        :param context:
        :param kwargs: dictionary, firewall rule
        firewall_rule: {'source_ip_address': u'192.176.10.0/24',... }
        :return:
        """
        LOG.debug("# _add_firewall_rule() called")
        namespace = fortinet_db.Fortinet_ML2_Namespace.query_one(
            context, tenant_id=fwp_tenant_id)
        vdom = getattr(namespace, 'vdom', None)
        if not vdom or not fwr:
            return None
        inf_int, inf_ext = utils.get_vlink_intf(
            self, context, vdom=namespace.vdom)
        srcaddr = self._add_fwr_ip_address(
            context, vdom, place='source_ip_address', **fwr)
        dstaddr = self._add_fwr_ip_address(
            context, vdom, place='destination_ip_address', **fwr)
        service = self._add_fwr_service(context, vdom, **fwr)
        action = self._get_fwr_action(**fwr)
        profiles = self._get_fwp_profiles(action)
        match_vip = 'enable'
        name = fwr.get('name', '')
        # add a basic firewall rule('accept': incoming, 'deny': bidirectional)
        fortinet_fwp = utils.add_fwpolicy_to_head(self, context,
                                                  vdom=vdom,
                                                  srcaddr=srcaddr['name'],
                                                  srcintf='any',
                                                  dstaddr=dstaddr['name'],
                                                  dstintf='any',
                                                  service=service['name'],
                                                  match_vip=match_vip,
                                                  action=action,
                                                  comments=name,
                                                  **profiles)
        utils.add_record(self, context,
                         fortinet_db.Fortinet_FW_Rule_Association,
                         fwr_id=fwr['id'],
                         fortinet_pid=fortinet_fwp.id,
                         type=constants.TYPE_INT)
        if action in ['accept']:
            # if allow, for the outgoing traffic it need to enable nat
            fortinet_fwp = utils.add_fwpolicy_to_head(self, context,
                                                      vdom=vdom,
                                                      srcaddr=srcaddr['name'],
                                                      srcintf='any',
                                                      dstaddr=dstaddr['name'],
                                                      dstintf=inf_int,
                                                      nat='enable',
                                                      service=service['name'],
                                                      action=action,
                                                      comments=name,
                                                      **profiles)
            utils.add_record(self, context,
                             fortinet_db.Fortinet_FW_Rule_Association,
                             fwr_id=fwr['id'],
                             fortinet_pid=fortinet_fwp.id,
                             type=constants.TYPE_EXT)

    def _update_firewall_rule(self, context, id, firewall_rule):
        # Sync one rule's address/service/policy objects on the backend.
        LOG.debug("# _add_firewall_rule() called")
        fwps_int = fortinet_db.Fortinet_FW_Rule_Association.query_all(
            context, fwr_id=id, type=constants.TYPE_INT)
        fwps_ext = fortinet_db.Fortinet_FW_Rule_Association.query_all(
            context, fwr_id=id, type=constants.TYPE_EXT)
        if fwps_ext and fwps_int:
            fwps = fwps_int + fwps_ext
        else:
            fwps = fwps_int or fwps_ext
        if not fwps:
            return
        firewall_rule.setdefault('id', id)
        srcaddr = self._make_fortinet_fwaddress_dict(
            place='source_ip_address', **firewall_rule)
        dstaddr = self._make_fortinet_fwaddress_dict(
            place='destination_ip_address', **firewall_rule)
        service = self._make_fortinet_fwservice_dict(**firewall_rule)
        action = self._get_fwr_action(**firewall_rule)
        profiles = self._get_fwp_profiles(action)
        for fwp in fwps_int:
            vdom = fwp.fortinet_policy.vdom
            if service['name'] != 'ALL':
                utils.set_fwservice(self, context, vdom=vdom, **service)
            if srcaddr['name'] != 'all':
                utils.set_fwaddress(self, context, vdom=vdom, **srcaddr)
            if dstaddr['name'] != 'all':
                utils.set_fwaddress(self, context, vdom=vdom, **dstaddr)
        # check whether related firewall policies need to update
        fwp = fwps_int[0].fortinet_policy
        name = firewall_rule.setdefault('name', fwp.comments)
        if fwp.srcaddr == srcaddr['name'] and fwp.action == action and \
            fwp.dstaddr == dstaddr['name'] and \
                fwp.service == service['name']:
            return
        if action in ['accept']:
            for fwp in fwps:
                fortinet_fwp = utils.set_fwpolicy(self, context,
                                                  id=fwp.fortinet_pid,
                                                  srcaddr=srcaddr['name'],
                                                  dstaddr=dstaddr['name'],
                                                  service=service['name'],
                                                  action=action,
                                                  comments=name,
                                                  **profiles)
            if not fwps_ext:
                # Rule switched to 'accept': add the NAT-enabled outgoing
                # sub-policy that deny rules do not have.
                inf_int, inf_ext = utils.get_vlink_intf(
                    self, context, vdom=fortinet_fwp.vdom)
                utils.add_fwaas_subpolicy(self, context,
                                          before=fortinet_fwp.edit_id,
                                          vdom=fortinet_fwp.vdom,
                                          srcaddr=srcaddr['name'],
                                          dstaddr=dstaddr['name'],
                                          dstintf=inf_int,
                                          nat='enable',
                                          service=service['name'],
                                          action=action,
                                          comments=name,
                                          fwr_id=id,
                                          type=constants.TYPE_EXT,
                                          **profiles)
        elif action in ['deny']:
            # Rule switched to 'deny': the outgoing sub-policies go away.
            for fwp_ext in fwps_ext:
                utils.delete_fwaas_subpolicy(
                    self, context,
                    fwr_id=fwp_ext.fwr_id,
                    fortinet_pid=fwp_ext.fortinet_pid)
            for fwp in fwps_int:
                utils.set_fwpolicy(self, context,
                                   id=fwp.fortinet_pid,
                                   srcaddr=srcaddr['name'],
                                   dstaddr=dstaddr['name'],
                                   service=service['name'],
                                   action=action,
                                   comments=name,
                                   **profiles)
        for fwp in fwps_int:
            vdom = fwp.fortinet_policy.vdom
            if service['name'] == 'ALL':
                #delete all old services if exist
                utils.delete_fwservice(self, context, vdom=vdom, name=id)
            if srcaddr['name'] == 'all':
                name = constants.PREFIX['source_ip_address'] + id
                utils.delete_fwaddress(self, context, vdom=vdom, name=name)
            if dstaddr['name'] == 'all':
                name = constants.PREFIX['destination_ip_address'] + id
                utils.delete_fwaddress(self, context, vdom=vdom, name=name)

    def _delete_firewall_rule(self, context, fwp_tenant_id, **fwr):
        """
        :param obj:
        :param context:
        :param kwargs: dictionary, firewall rule
        firewall_rule: {'source_ip_address': u'192.176.10.0/24',... }
        :return:
        """
        # need to consider shared firewall rules
        LOG.debug("# _delete_firewall_rule() called")
        namespace = fortinet_db.Fortinet_ML2_Namespace.query_one(
            context, tenant_id=fwp_tenant_id)
        if not namespace:
            return None
        fwp_assed = fortinet_db.Fortinet_FW_Rule_Association.query_all(
            context, fwr_id=fwr['id'])
        for fwp in fwp_assed:
            fortinet_db.delete_record(
                context, fortinet_db.Fortinet_FW_Rule_Association,
                fwr_id=fwp.fwr_id, fortinet_pid=fwp.fortinet_pid)
            utils.delete_fwpolicy(
                self, context, id=fwp.fortinet_pid, vdom=namespace.vdom)
        if fwr.get('source_ip_address', None):
            srcaddr = constants.PREFIX['source_ip_address'] + fwr['id']
            utils.delete_fwaddress(
                self, context, vdom=namespace.vdom, name=srcaddr)
        if fwr.get('destination_ip_address', None):
            dstaddr = constants.PREFIX['destination_ip_address'] + fwr['id']
            utils.delete_fwaddress(
                self, context, vdom=namespace.vdom, name=dstaddr)
        self._delete_fwr_service(context, namespace.vdom, **fwr)

    def _update_firewall_rule_dict(self, context, id, firewall_rule):
        # Merge the update payload with the stored rule so the backend
        # always receives a complete rule dict.
        fwr = firewall_rule.get('firewall_rule', firewall_rule)
        fwr_db = self._get_firewall_rule(context, id)
        if fwr_db.firewall_policy_id:
            fwp_db = self._get_firewall_policy(context,
                                               fwr_db.firewall_policy_id)
            if 'shared' in fwr and not fwr['shared']:
                if fwr_db['tenant_id'] != fwp_db['tenant_id']:
                    raise fw_exc.FirewallRuleInUse(firewall_rule_id=id)
        fwr.setdefault(
            'source_port',
            self._get_port_range_from_min_max_ports(
                fwr_db['source_port_range_min'],
                fwr_db['source_port_range_max']))
        fwr.setdefault(
            'destination_port',
            self._get_port_range_from_min_max_ports(
                fwr_db['destination_port_range_min'],
                fwr_db['destination_port_range_max']))
        keys = ['name', 'protocol', 'action', 'shared', 'ip_version',
                'source_ip_address', 'destination_ip_address', 'enabled']
        for key in keys:
            fwr.setdefault(key, fwr_db[key])
        return fwr

    def _make_default_firewall_rule_dict(self, tenant_id):
        # Optional tenant-wide deny-all rule, controlled by the
        # 'enable_default_fwrule' Fortigate config flag.
        if tenant_id and self._fortigate["enable_default_fwrule"]:
            return {'id': tenant_id,
                    'tenant_id': tenant_id,
                    'name': '_default_rule_deny_all',
                    'description': '_default_rule_deny_all',
                    'protocol': None,
                    'source_ip_address': None,
                    'source_port': None,
                    'destination_ip_address': None,
                    'destination_port': None,
                    'action': 'deny'}
        else:
            return {}

    def _add_fwr_ip_address(self, context, vdom,
                            place='source_ip_address', **fwr):
        # Create the backend address object and return its dict form.
        fwaddress = self._make_fortinet_fwaddress_dict(
            place=place, vdom=vdom, **fwr)
        utils.add_fwaddress(self, context, **fwaddress)
        return fwaddress

    def _make_fortinet_fwaddress_dict(self, place='source_ip_address',
                                      vdom=None, **fwr):
        # Build the kwargs for a Fortigate address object; 'all' is the
        # wildcard when the rule does not constrain that side.
        fwaddress = {}
        if place not in ['source_ip_address', 'destination_ip_address']:
            raise ValueError("_add_fwr_ip_address() value error of where")
        if vdom:
            fwaddress.setdefault('vdom', vdom)
        if fwr.get(place, None):
            fwaddress.setdefault('name',
                                 constants.PREFIX[place] + fwr['id'])
            fwaddress.setdefault('subnet', utils.get_subnet(fwr[place]))
        else:
            fwaddress.setdefault('name', 'all')
        return fwaddress

    def _add_fwr_service(self, context, vdom, **fwr):
        # Create the backend service object and return its dict form.
        kw_service = self._make_fortinet_fwservice_dict(vdom=vdom, **fwr)
        utils.add_fwservice(self, context, **kw_service)
        return kw_service

    def _make_fortinet_fwservice_dict(self, vdom=None, **fwr):
        # Build the kwargs for a Fortigate service object from a rule's
        # protocol/port fields; 'ALL' is the built-in wildcard service.
        LOG.debug("_make_fortinet_fwservice_dict() fwr=%(fwr)s",
                  {'fwr': fwr})
        kw_service = {}
        if vdom:
            kw_service.setdefault('vdom', vdom)
        if fwr['protocol'] in ['any', None] and \
            not fwr['destination_port'] and not fwr['source_port']:
            # SamSu: The firewall service 'all' was already added by default
            kw_service.setdefault('name', 'ALL')
        else:
            portrange = ':'.join([
                utils.port_range(fwr['destination_port']),
                utils.port_range(fwr['source_port'])])
            if fwr['protocol'] in ['tcp']:
                kw_service.setdefault('tcp_portrange', portrange)
            elif fwr['protocol'] in ['udp']:
                kw_service.setdefault('udp_portrange', portrange)
            elif fwr['protocol'] in ['icmp']:
                kw_service.setdefault('protocol', 'ICMP')
            kw_service.setdefault('name', fwr['id'])
        kw_service.setdefault('comment', fwr.get('name', ''))
        return kw_service

    def _delete_fwr_service(self, context, vdom, **fwr):
        # The wildcard 'ALL' service is shared and never deleted.
        LOG.debug("# _delete_fwr_service() fwr=%(fwr)s", {'fwr': fwr})
        if fwr['protocol'] in ['any', None] and \
            not fwr['destination_port'] and not fwr['source_port']:
            return None
        else:
            return utils.delete_fwservice(
                self, context, vdom=vdom, name=fwr['id'])

    def _get_fwr_action(self, **fwr):
        # Map FWaaS 'allow' to Fortigate 'accept'; everything else denies.
        if fwr.get('action', None) in ['allow']:
            action = 'accept'
        else:
            action = 'deny'
        return action

    def _get_fwp_profiles(self, action):
        # UTM profiles apply only to traffic that is allowed through.
        profiles = {
            'av_profile': None,
            'webfilter_profile': None,
            'ips_sensor': None,
            'application_list': None,
            'ssl_ssh_profile': None
        }
        if action in ['allow', 'accept']:
            for key in profiles:
                profiles[key] = self._fortigate[key]
        return profiles

    def _get_fip_before_id(self, context, fwr_id):
        # Find the backend policy edit_id a new policy should precede,
        # preferring the rule's external (NAT) policy over the internal.
        fwp_assed = fortinet_db.Fortinet_FW_Rule_Association.query_one(
            context, type=constants.TYPE_EXT, fwr_id=fwr_id)
        if not fwp_assed:
            fwp_assed = fortinet_db.Fortinet_FW_Rule_Association.query_one(
                context, type=constants.TYPE_INT, fwr_id=fwr_id)
        fwp = fortinet_db.query_record(
            context, fortinet_db.Fortinet_Firewall_Policy,
            id=fwp_assed.fortinet_pid)
        return getattr(fwp, 'edit_id', None)

    def _get_fips_in_fw(self, context, tenant_id, fw_net):
        # Return (id, floating_ip) pairs of the tenant's active floating
        # IPs whose fixed address falls inside the fw_net CIDR.
        fw_fips = []
        if not fw_net:
            return fw_fips
        namespace = fortinet_db.Fortinet_ML2_Namespace.query_one(
            context, tenant_id=tenant_id)
        if not namespace:
            return fw_fips
        db_fips = fortinet_db.query_records(
            context, l3_db.FloatingIP, tenant_id=tenant_id,
            status=n_consts.FLOATINGIP_STATUS_ACTIVE)
        for fip in db_fips:
            if getattr(fip, 'fixed_ip_address', None) and \
                IPAddress(fip.fixed_ip_address) in IPNetwork(fw_net):
                fw_fips.append((fip.id, fip.floating_ip_address))
        return fw_fips