def test_memoize_not_exists(self, mock_open, exists):
    """A negative result is memoized when the sysctl knob is absent."""
    # Clear the cached flag so the lookup actually happens again.
    netutils._IS_IPV6_ENABLED = None
    for _ in range(2):
        # Both the initial probe and the memoized second call report False.
        self.assertFalse(netutils.is_ipv6_enabled())
def test_memoize_disabled(self, exists):
    """A disabled result is memoized; open() is consulted only once."""
    # Clear the cached flag so the lookup actually happens again.
    netutils._IS_IPV6_ENABLED = None
    first_probe = mock.patch('six.moves.builtins.open',
                             return_value=mock_file_content('1'))
    with first_probe:
        self.assertFalse(netutils.is_ipv6_enabled())
    # The second call should not use open again
    guard = mock.patch('six.moves.builtins.open',
                       side_effect=AssertionError('should not be called'))
    with guard:
        self.assertFalse(netutils.is_ipv6_enabled())
def test_memoize_enabled(self, exists):
    """An enabled result is memoized; open() is consulted only once."""
    # Clear the cached flag so the lookup actually happens again.
    netutils._IS_IPV6_ENABLED = None
    first_probe = mock.patch('six.moves.builtins.open',
                             return_value=mock_file_content('0'))
    with first_probe as mock_open:
        result = netutils.is_ipv6_enabled()
        self.assertTrue(mock_open.called)
        self.assertTrue(netutils._IS_IPV6_ENABLED)
        self.assertTrue(result)
    # The second call should not use open again
    guard = mock.patch('six.moves.builtins.open',
                       side_effect=AssertionError('should not be called'))
    with guard:
        self.assertTrue(netutils.is_ipv6_enabled())
def apply_metadata_nat_rules(router, proxy):
    """Install the iptables filter/NAT rules that steer metadata traffic.

    Adds the proxy's filter rules and NAT rules to the router's IPv4
    tables, mirrors them into the IPv6 tables when the host has IPv6
    enabled, then applies the iptables manager once at the end.
    """
    # Probe IPv6 support once and reuse the answer for both the filter
    # and NAT sections (consistent with after_router_added()).
    ipv6_enabled = netutils.is_ipv6_enabled()
    for c, r in proxy.metadata_filter_rules(proxy.metadata_port,
                                            proxy.metadata_access_mark):
        router.iptables_manager.ipv4['filter'].add_rule(c, r)
    if ipv6_enabled:
        for c, r in proxy.metadata_filter_rules(proxy.metadata_port,
                                                proxy.metadata_access_mark):
            router.iptables_manager.ipv6['filter'].add_rule(c, r)
    for c, r in proxy.metadata_nat_rules(proxy.metadata_port):
        router.iptables_manager.ipv4['nat'].add_rule(c, r)
    if ipv6_enabled:
        # IPv6 metadata NAT targets the well-known metadata address.
        for c, r in proxy.metadata_nat_rules(
                proxy.metadata_port,
                metadata_address=(constants.METADATA_V6_IP + '/128')):
            router.iptables_manager.ipv6['nat'].add_rule(c, r)
    router.iptables_manager.apply()
def after_router_added(resource, event, l3_agent, **kwargs):
    # Event callback fired after a router is added: installs the iptables
    # filter/NAT rules that redirect metadata requests and, for non-HA
    # routers, spawns the metadata proxy immediately.
    router = kwargs['router']
    proxy = l3_agent.metadata_driver
    # Probe IPv6 support once and reuse the answer below.
    ipv6_enabled = netutils.is_ipv6_enabled()
    for c, r in proxy.metadata_filter_rules(proxy.metadata_port,
                                            proxy.metadata_access_mark):
        router.iptables_manager.ipv4['filter'].add_rule(c, r)
    if ipv6_enabled:
        for c, r in proxy.metadata_filter_rules(proxy.metadata_port,
                                                proxy.metadata_access_mark):
            router.iptables_manager.ipv6['filter'].add_rule(c, r)
    for c, r in proxy.metadata_nat_rules(proxy.metadata_port):
        router.iptables_manager.ipv4['nat'].add_rule(c, r)
    if ipv6_enabled:
        for c, r in proxy.metadata_nat_rules(
                proxy.metadata_port,
                metadata_address=(constants.METADATA_V6_IP + '/128')):
            router.iptables_manager.ipv6['nat'].add_rule(c, r)
    router.iptables_manager.apply()

    if not isinstance(router, ha_router.HaRouter):
        # HA routers start their proxy on state transition instead of here.
        spawn_kwargs = {}
        if ipv6_enabled:
            # Bind the IPv6 wildcard so both address families are served.
            spawn_kwargs['bind_address'] = '::'
        proxy.spawn_monitored_metadata_proxy(
            l3_agent.process_monitor,
            router.ns_name,
            proxy.metadata_port,
            l3_agent.conf,
            router_id=router.router_id,
            **spawn_kwargs)
def enable_isolated_metadata_proxy(self, network):
    # Spawn (or re-key) the metadata proxy for an isolated network.
    # The proxy might work for either a single network
    # or all the networks connected via a router
    # to the one passed as a parameter
    kwargs = {'network_id': network.id}
    # When the metadata network is enabled, the proxy might
    # be started for the router attached to the network
    if self.conf.enable_metadata_network:
        router_ports = [
            port for port in network.ports
            if (port.device_owner in
                constants.ROUTER_INTERFACE_OWNERS)
        ]
        if router_ports:
            # Multiple router ports should not be allowed
            if len(router_ports) > 1:
                LOG.warning("%(port_num)d router ports found on the "
                            "metadata access network. Only the port "
                            "%(port_id)s, for router %(router_id)s "
                            "will be considered",
                            {'port_num': len(router_ports),
                             'port_id': router_ports[0].id,
                             'router_id': router_ports[0].device_id})
            all_subnets = self.dhcp_driver_cls._get_all_subnets(network)
            if self.dhcp_driver_cls.has_metadata_subnet(all_subnets):
                # Re-key the proxy on the router instead of the network so a
                # single proxy can serve every network behind that router.
                kwargs = {'router_id': router_ports[0].device_id}
                self._metadata_routers[network.id] = (
                    router_ports[0].device_id)

    if netutils.is_ipv6_enabled():
        try:
            # IPv6 metadata is bound on the DHCP port's interface, which
            # only works when the driver can report that interface.
            dhcp_ifaces = [
                self.call_driver('get_metadata_bind_interface', network,
                                 port=p)
                for p in network.ports
                if (p.device_owner == constants.DEVICE_OWNER_DHCP and
                    p.admin_state_up)
            ]
            if len(dhcp_ifaces) == 1:
                kwargs['bind_interface'] = dhcp_ifaces[0]
                kwargs['bind_address_v6'] = constants.METADATA_V6_IP
            else:
                LOG.error('Unexpected number of DHCP interfaces for '
                          'metadata proxy, expected 1, got %s',
                          len(dhcp_ifaces))
        except AttributeError:
            # Older DHCP drivers do not expose the bind interface; fall
            # back to IPv4-only metadata service.
            LOG.warning('Cannot serve metadata on IPv6 because DHCP driver '
                        'does not implement method '
                        'get_metadata_bind_interface(): %s',
                        self.dhcp_driver_cls)
    metadata_driver.MetadataDriver.spawn_monitored_metadata_proxy(
        self._process_monitor, network.namespace, constants.METADATA_PORT,
        self.conf, bind_address=constants.METADATA_V4_IP, **kwargs)
def __init__(self, namespace=None): self.iptables = iptables_manager.IptablesManager( state_less=True, use_ipv6=netutils.is_ipv6_enabled(), namespace=namespace) # TODO(majopela, shihanzhang): refactor out ipset to a separate # driver composed over this one self.ipset = ipset_manager.IpsetManager(namespace=namespace) # list of port which has security group self.filtered_ports = {} self.unfiltered_ports = {} self.trusted_ports = [] self.ipconntrack = ip_conntrack.get_conntrack( self.iptables.get_rules_for_table, self.filtered_ports, self.unfiltered_ports, namespace=namespace, zone_per_port=self.CONNTRACK_ZONE_PER_PORT) self._add_fallback_chain_v4v6() self._defer_apply = False self._pre_defer_filtered_ports = None self._pre_defer_unfiltered_ports = None # List of security group rules for ports residing on this host self.sg_rules = {} self.pre_sg_rules = None # List of security group member ips for ports residing on this host self.sg_members = collections.defaultdict( lambda: collections.defaultdict(list)) self.pre_sg_members = None self.enable_ipset = cfg.CONF.SECURITYGROUP.enable_ipset self.updated_rule_sg_ids = set() self.updated_sg_members = set() self.devices_with_updated_sg_members = collections.defaultdict(list) self._iptables_protocol_name_map = {} self._check_netfilter_for_bridges()
def initialize(self):
    # Prepare the iptables mangle table used by the Linux bridge QoS
    # extension, reusing the agent's manager when one is available.
    LOG.info("Initializing Linux bridge QoS extension")
    if self.agent_api:
        self.iptables_manager = self.agent_api.get_iptables_manager()
    if not self.iptables_manager:
        # If agent_api can't provide iptables_manager, it can be
        # created here for extension needs
        self.iptables_manager = iptables_manager.IptablesManager(
            state_less=True,
            use_ipv6=netutils.is_ipv6_enabled())
    self.iptables_manager.initialize_mangle_table()
def create_iptables_managers(self):
    """Creates iptables managers if they are not already created

    Returns True if any manager is created
    """
    created = False

    if self.router['distributed'] and self.snat_iptables_manager is None:
        # If distributed routers then we need to apply the
        # metering agent label rules in the snat namespace as well.
        snat_ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.id)
        # Check for namespace existence before we assign the
        # snat_iptables_manager
        if ip_lib.network_namespace_exists(snat_ns_name):
            self.snat_iptables_manager = iptables_manager.IptablesManager(
                namespace=snat_ns_name,
                binary_name=WRAP_NAME,
                state_less=True,
                use_ipv6=netutils.is_ipv6_enabled())
            created = True

    if self.iptables_manager is None:
        # Check of namespace existence before we assign the
        # iptables_manager
        # NOTE(Swami): If distributed routers, all external traffic on a
        # compute node will flow through the rfp interface in the router
        # namespace.
        if ip_lib.network_namespace_exists(self.ns_name):
            self.iptables_manager = iptables_manager.IptablesManager(
                namespace=self.ns_name,
                binary_name=WRAP_NAME,
                state_less=True,
                use_ipv6=netutils.is_ipv6_enabled())
            created = True

    return created
def after_router_updated(resource, event, l3_agent, **kwargs):
    """Ensure a non-HA router has a running monitored metadata proxy."""
    router = kwargs['router']
    proxy = l3_agent.metadata_driver
    # Nothing to do when a proxy is already monitored or the router is HA
    # (HA routers manage their proxy on state transitions).
    if proxy.monitors.get(router.router_id):
        return
    if isinstance(router, ha_router.HaRouter):
        return
    spawn_kwargs = {}
    if netutils.is_ipv6_enabled():
        # Serve both address families via the IPv6 wildcard.
        spawn_kwargs['bind_address'] = '::'
    proxy.spawn_monitored_metadata_proxy(l3_agent.process_monitor,
                                         router.ns_name,
                                         proxy.metadata_port,
                                         l3_agent.conf,
                                         router_id=router.router_id,
                                         **spawn_kwargs)
def _ensure_datapath_checksum(self, namespace):
    """Ensure the correct checksum in the metadata packets in DPDK bridges

    (LP#1904871) In DPDK deployments (integration bridge
    datapath_type == "netdev"), the checksum between the metadata
    namespace and OVS is not correctly populated.
    """
    datapath_type = self.ovs_idl.db_get(
        'Bridge', self.ovn_bridge, 'datapath_type').execute()
    if datapath_type != ovn_const.CHASSIS_DATAPATH_NETDEV:
        # Only DPDK (netdev) datapaths need the checksum workaround.
        return
    manager = iptables_manager.IptablesManager(
        use_ipv6=netutils.is_ipv6_enabled(), nat=False,
        namespace=namespace, external_lock=False)
    rule = '-p tcp -m tcp -j CHECKSUM --checksum-fill'
    manager.ipv4['mangle'].add_rule('POSTROUTING', rule, wrap=False)
    manager.apply()
def _update_metadata_proxy(self, ri, router_id, state):
    # NOTE(slaweq): Since the metadata proxy is spawned in the qrouter
    # namespace and not in the snat namespace, even standby DVR-HA
    # routers needs to serve metadata requests to local ports.
    serve_metadata = (state == 'primary' or
                      ri.router.get('distributed', False))
    if not serve_metadata:
        LOG.debug('Closing metadata proxy for router %s', router_id)
        self.metadata_driver.destroy_monitored_metadata_proxy(
            self.process_monitor, ri.router_id, self.conf, ri.ns_name)
        return
    LOG.debug('Spawning metadata proxy for router %s', router_id)
    spawn_kwargs = {}
    if netutils.is_ipv6_enabled():
        # Bind the IPv6 wildcard so both address families are served.
        spawn_kwargs['bind_address'] = '::'
    self.metadata_driver.spawn_monitored_metadata_proxy(
        self.process_monitor, ri.ns_name, self.conf.metadata_port,
        self.conf, router_id=ri.router_id, **spawn_kwargs)
def serve_ipa_api(self):
    """Serve the API until an extension terminates it."""
    if netutils.is_ipv6_enabled():
        # Listens to both IP versions, assuming IPV6_V6ONLY isn't enabled,
        # (the default behaviour in linux)
        simple_server.WSGIServer.address_family = socket.AF_INET6
    server = simple_server.WSGIServer((self.listen_address.hostname,
                                       self.listen_address.port),
                                      simple_server.WSGIRequestHandler)
    server.set_app(self.api)
    if not self.standalone and self.api_url:
        # Don't start heartbeating until the server is listening
        self.heartbeater.start()
    while self.serve_api:
        try:
            server.handle_request()
        except BaseException as e:
            # Deliberately broad: any failure is logged and converted into
            # an IronicAPIError so the caller sees a consistent type.
            msg = "Failed due to an unknown exception. Error %s" % e
            LOG.exception(msg)
            raise errors.IronicAPIError(msg)
    LOG.info('shutting down')
def __init__(self, host, conf=None):
    # L3 agent constructor: wires config, RPC, interface driver, HA
    # bookkeeping, metadata proxy and the router-processing green pool.
    if conf:
        self.conf = conf
    else:
        self.conf = cfg.CONF
    self.check_config()
    self.router_info = {}
    self.router_factory = RouterFactory()
    self._register_router_cls(self.router_factory)
    self._check_config_params()
    self.process_monitor = external_process.ProcessMonitor(
        config=self.conf,
        resource_type='router')
    self._context = n_context.get_admin_context_without_session()
    self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host)
    self.driver = common_utils.load_interface_driver(
        self.conf,
        get_networks_callback=functools.partial(
            self.plugin_rpc.get_networks, self.context))
    self.fullsync = True
    self.sync_routers_chunk_size = SYNC_ROUTERS_MAX_CHUNK_SIZE
    self._exiting = False

    # Get the HA router count from Neutron Server
    # This is the first place where we contact neutron-server on startup
    # so retry in case its not ready to respond.
    while True:
        try:
            self.ha_router_count = int(
                self.plugin_rpc.get_host_ha_router_count(self.context))
        except oslo_messaging.MessagingTimeout as e:
            LOG.warning('l3-agent cannot contact neutron server '
                        'to retrieve HA router count. '
                        'Check connectivity to neutron server. '
                        'Retrying... '
                        'Detailed message: %(msg)s.', {'msg': e})
            continue
        break
    LOG.info("Agent HA routers count %s", self.ha_router_count)

    self.init_extension_manager(self.plugin_rpc)

    self.metadata_driver = None
    if self.conf.enable_metadata_proxy:
        self.metadata_driver = metadata_driver.MetadataDriver(self)

    self.namespaces_manager = namespace_manager.NamespaceManager(
        self.conf,
        self.driver,
        self.metadata_driver)

    # L3 agent router processing green pool
    self._pool_size = ROUTER_PROCESS_GREENLET_MIN
    self._pool = eventlet.GreenPool(size=self._pool_size)
    self._queue = queue.ResourceProcessingQueue()
    super(L3NATAgent, self).__init__(host=self.conf.host)

    self.target_ex_net_id = None
    self.use_ipv6 = netutils.is_ipv6_enabled()

    self.pd = pd.PrefixDelegation(self.context, self.process_monitor,
                                  self.driver,
                                  self.plugin_rpc.process_prefix_update,
                                  self.create_pd_router_update,
                                  self.conf)

    # Consume network updates to trigger router resync
    consumers = [[topics.NETWORK, topics.UPDATE]]
    agent_rpc.create_consumers([self], topics.AGENT, consumers)

    self._check_ha_router_process_status()
def get_wildcard_address():
    """Return the wildcard bind address for the preferred IP family."""
    # Prefer the IPv6 wildcard when the host supports IPv6; otherwise
    # fall back to the IPv4 wildcard.
    return "::" if netutils.is_ipv6_enabled() else "0.0.0.0"
class TestWSGIServerWithSSL(WsgiTestCase):
    """WSGI server with SSL tests."""

    def setUp(self):
        super(TestWSGIServerWithSSL, self).setUp()
        self.conf_fixture.register_opts(_options.ssl_opts,
                                        sslutils.config_section)
        cert_file_name = os.path.join(SSL_CERT_DIR, 'certificate.crt')
        key_file_name = os.path.join(SSL_CERT_DIR, 'privatekey.key')
        eventlet.monkey_patch(os=False, thread=False)
        self.config(cert_file=cert_file_name,
                    key_file=key_file_name,
                    group=sslutils.config_section)

    def test_ssl_server(self):
        # Round-trip a request over a TLS-wrapped eventlet socket.
        def test_app(env, start_response):
            start_response('200 OK', {})
            return ['PONG']

        fake_ssl_server = wsgi.Server(self.conf, "fake_ssl", test_app,
                                      host="127.0.0.1", port=0,
                                      use_ssl=True)
        fake_ssl_server.start()
        self.assertNotEqual(0, fake_ssl_server.port)

        cli = eventlet.connect(("localhost", fake_ssl_server.port))
        ca_certs_name = os.path.join(SSL_CERT_DIR, 'ca.crt')
        cli = eventlet.wrap_ssl(cli, ca_certs=ca_certs_name)

        cli.write('POST / HTTP/1.1\r\nHost: localhost\r\n'
                  'Connection: close\r\nContent-length:4\r\n\r\nPING')
        response = cli.read(8192)
        self.assertEqual(response[-4:], "PONG")

        fake_ssl_server.stop()
        fake_ssl_server.wait()

    def test_two_servers(self):
        # An SSL server and a plain server must be able to coexist.
        def test_app(env, start_response):
            start_response('200 OK', {})
            return ['PONG']

        fake_ssl_server = wsgi.Server(self.conf, "fake_ssl", test_app,
                                      host="127.0.0.1", port=0,
                                      use_ssl=True)
        fake_ssl_server.start()
        self.assertNotEqual(0, fake_ssl_server.port)

        fake_server = wsgi.Server(self.conf, "fake", test_app,
                                  host="127.0.0.1", port=0)
        fake_server.start()
        self.assertNotEqual(0, fake_server.port)

        cli = eventlet.connect(("localhost", fake_ssl_server.port))
        cli = eventlet.wrap_ssl(cli,
                                ca_certs=os.path.join(SSL_CERT_DIR,
                                                      'ca.crt'))
        cli.write('POST / HTTP/1.1\r\nHost: localhost\r\n'
                  'Connection: close\r\nContent-length:4\r\n\r\nPING')
        response = cli.read(8192)
        self.assertEqual(response[-4:], "PONG")

        cli = eventlet.connect(("localhost", fake_server.port))
        cli.sendall('POST / HTTP/1.1\r\nHost: localhost\r\n'
                    'Connection: close\r\nContent-length:4\r\n\r\nPING')
        response = cli.recv(8192)
        self.assertEqual(response[-4:], "PONG")

        fake_ssl_server.stop()
        fake_ssl_server.wait()

    @testtools.skipIf(platform.mac_ver()[0] != '',
                      'SO_REUSEADDR behaves differently '
                      'on OSX, see bug 1436895')
    def test_socket_options_for_ssl_server(self):
        # test normal socket options has set properly
        self.config(tcp_keepidle=500)
        server = wsgi.Server(self.conf, "test_socket_options", None,
                             host="127.0.0.1", port=0, use_ssl=True)
        server.start()
        sock = server.socket
        self.assertEqual(
            1, sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR))
        self.assertEqual(
            1, sock.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE))
        if hasattr(socket, 'TCP_KEEPIDLE'):
            self.assertEqual(
                CONF.tcp_keepidle,
                sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE))
        server.stop()
        server.wait()

    @testtools.skipIf(not netutils.is_ipv6_enabled(), "no ipv6 support")
    def test_app_using_ipv6_and_ssl(self):
        # Serve HTTPS on an IPv6 loopback address.
        greetings = 'Hello, World!!!'

        @webob.dec.wsgify
        def hello_world(req):
            return greetings

        server = wsgi.Server(self.conf, "fake_ssl", hello_world,
                             host="::1", port=0, use_ssl=True)
        server.start()

        response = requests.get('https://[::1]:%d/' % server.port,
                                verify=os.path.join(SSL_CERT_DIR,
                                                    'ca.crt'))
        self.assertEqual(greetings, response.text)

        server.stop()
        server.wait()
class TestWSGIServer(WsgiTestCase):
    """WSGI server tests."""

    def setUp(self):
        super(TestWSGIServer, self).setUp()

    def test_no_app(self):
        server = wsgi.Server(self.conf, "test_app", None)
        self.assertEqual("test_app", server.name)

    def test_custom_max_header_line(self):
        self.config(max_header_line=4096)  # Default value is 16384
        wsgi.Server(self.conf, "test_custom_max_header_line", None)
        self.assertEqual(eventlet.wsgi.MAX_HEADER_LINE,
                         self.conf.max_header_line)

    def test_start_random_port(self):
        server = wsgi.Server(self.conf, "test_random_port", None,
                             host="127.0.0.1", port=0)
        server.start()
        self.assertNotEqual(0, server.port)
        server.stop()
        server.wait()

    @testtools.skipIf(not netutils.is_ipv6_enabled(), "no ipv6 support")
    def test_start_random_port_with_ipv6(self):
        server = wsgi.Server(self.conf, "test_random_port", None,
                             host="::1", port=0)
        server.start()
        self.assertEqual("::1", server.host)
        self.assertNotEqual(0, server.port)
        server.stop()
        server.wait()

    @testtools.skipIf(platform.mac_ver()[0] != '',
                      'SO_REUSEADDR behaves differently '
                      'on OSX, see bug 1436895')
    def test_socket_options_for_simple_server(self):
        # test normal socket options has set properly
        self.config(tcp_keepidle=500)
        server = wsgi.Server(self.conf, "test_socket_options", None,
                             host="127.0.0.1", port=0)
        server.start()
        sock = server.socket
        self.assertEqual(1, sock.getsockopt(socket.SOL_SOCKET,
                                            socket.SO_REUSEADDR))
        self.assertEqual(1, sock.getsockopt(socket.SOL_SOCKET,
                                            socket.SO_KEEPALIVE))
        if hasattr(socket, 'TCP_KEEPIDLE'):
            self.assertEqual(self.conf.tcp_keepidle,
                             sock.getsockopt(socket.IPPROTO_TCP,
                                             socket.TCP_KEEPIDLE))
        self.assertFalse(server._server.dead)
        server.stop()
        server.wait()
        self.assertTrue(server._server.dead)

    @testtools.skipIf(not hasattr(socket, "AF_UNIX"),
                      'UNIX sockets not supported')
    def test_server_with_unix_socket(self):
        socket_file = self.get_temp_file_path('sock')
        socket_mode = 0o644
        server = wsgi.Server(self.conf, "test_socket_options", None,
                             socket_family=socket.AF_UNIX,
                             socket_mode=socket_mode,
                             socket_file=socket_file)
        self.assertEqual(socket_file, server.socket.getsockname())
        self.assertEqual(socket_mode,
                         os.stat(socket_file).st_mode & 0o777)
        server.start()
        self.assertFalse(server._server.dead)
        server.stop()
        server.wait()
        self.assertTrue(server._server.dead)

    def test_server_pool_waitall(self):
        # test pools waitall method gets called while stopping server
        server = wsgi.Server(self.conf, "test_server", None,
                             host="127.0.0.1")
        server.start()
        with mock.patch.object(server._pool, 'waitall') as mock_waitall:
            server.stop()
            server.wait()
            mock_waitall.assert_called_once_with()

    def test_uri_length_limit(self):
        # URIs under max_url_len pass; longer ones get 414.
        eventlet.monkey_patch(os=False, thread=False)
        server = wsgi.Server(self.conf, "test_uri_length_limit", None,
                             host="127.0.0.1", max_url_len=16384,
                             port=33337)
        server.start()
        self.assertFalse(server._server.dead)

        uri = "http://127.0.0.1:%d/%s" % (server.port, 10000 * 'x')
        resp = requests.get(uri, proxies={"http": ""})
        eventlet.sleep(0)
        self.assertNotEqual(requests.codes.REQUEST_URI_TOO_LARGE,
                            resp.status_code)

        uri = "http://127.0.0.1:%d/%s" % (server.port, 20000 * 'x')
        resp = requests.get(uri, proxies={"http": ""})
        eventlet.sleep(0)
        self.assertEqual(requests.codes.REQUEST_URI_TOO_LARGE,
                         resp.status_code)
        server.stop()
        server.wait()

    def test_reset_pool_size_to_default(self):
        server = wsgi.Server(self.conf, "test_resize", None,
                             host="127.0.0.1", max_url_len=16384)
        server.start()

        # Stopping the server, which in turn sets pool size to 0
        server.stop()
        self.assertEqual(0, server._pool.size)

        # Resetting pool size to default
        server.reset()
        server.start()
        self.assertEqual(CONF.wsgi_default_pool_size, server._pool.size)

    def test_client_socket_timeout(self):
        self.config(client_socket_timeout=5)

        # mocking eventlet spawn method to check it is called with
        # configured 'client_socket_timeout' value.
        with mock.patch.object(eventlet, 'spawn') as mock_spawn:
            server = wsgi.Server(self.conf, "test_app", None,
                                 host="127.0.0.1", port=0)
            server.start()
            _, kwargs = mock_spawn.call_args
            self.assertEqual(self.conf.client_socket_timeout,
                             kwargs['socket_timeout'])
            server.stop()

    def test_wsgi_keep_alive(self):
        self.config(wsgi_keep_alive=False)

        # mocking eventlet spawn method to check it is called with
        # configured 'wsgi_keep_alive' value.
        with mock.patch.object(eventlet, 'spawn') as mock_spawn:
            server = wsgi.Server(self.conf, "test_app", None,
                                 host="127.0.0.1", port=0)
            server.start()
            _, kwargs = mock_spawn.call_args
            self.assertEqual(self.conf.wsgi_keep_alive,
                             kwargs['keepalive'])
            server.stop()
class TestWSGIServerWithSSL(WsgiTestCase):
    """WSGI server with SSL tests."""

    def setUp(self):
        super(TestWSGIServerWithSSL, self).setUp()
        self.conf_fixture.register_opts(_options.ssl_opts,
                                        sslutils.config_section)
        cert_file_name = os.path.join(SSL_CERT_DIR, 'certificate.crt')
        key_file_name = os.path.join(SSL_CERT_DIR, 'privatekey.key')
        eventlet.monkey_patch(os=False, thread=False)
        self.config(cert_file=cert_file_name,
                    key_file=key_file_name,
                    group=sslutils.config_section)

    @testtools.skipIf(six.PY3, "bug/1482633: test hangs on Python 3")
    def test_ssl_server(self):
        # Round-trip a POST over HTTPS using the test CA bundle.
        def test_app(env, start_response):
            start_response('200 OK', {})
            return ['PONG']

        fake_ssl_server = wsgi.Server(self.conf, "fake_ssl", test_app,
                                      host="127.0.0.1", port=0,
                                      use_ssl=True)
        fake_ssl_server.start()
        self.assertNotEqual(0, fake_ssl_server.port)

        response = requests.post(
            'https://127.0.0.1:%s/' % fake_ssl_server.port,
            verify=os.path.join(SSL_CERT_DIR, 'ca.crt'), data='PING')
        self.assertEqual('PONG', response.text)

        fake_ssl_server.stop()
        fake_ssl_server.wait()

    @testtools.skipIf(six.PY3, "bug/1482633: test hangs on Python 3")
    def test_two_servers(self):
        # An SSL server and a plain server must be able to coexist.
        def test_app(env, start_response):
            start_response('200 OK', {})
            return ['PONG']

        fake_ssl_server = wsgi.Server(self.conf, "fake_ssl", test_app,
                                      host="127.0.0.1", port=0,
                                      use_ssl=True)
        fake_ssl_server.start()
        self.assertNotEqual(0, fake_ssl_server.port)

        fake_server = wsgi.Server(self.conf, "fake", test_app,
                                  host="127.0.0.1", port=0)
        fake_server.start()
        self.assertNotEqual(0, fake_server.port)

        response = requests.post(
            'https://127.0.0.1:%s/' % fake_ssl_server.port,
            verify=os.path.join(SSL_CERT_DIR, 'ca.crt'), data='PING')
        self.assertEqual('PONG', response.text)

        response = requests.post(
            'http://127.0.0.1:%s/' % fake_server.port, data='PING')
        self.assertEqual('PONG', response.text)

        fake_ssl_server.stop()
        fake_ssl_server.wait()
        fake_server.stop()
        fake_server.wait()

    @testtools.skipIf(platform.mac_ver()[0] != '',
                      'SO_REUSEADDR behaves differently '
                      'on OSX, see bug 1436895')
    @testtools.skipIf(six.PY3, "bug/1482633: test hangs on Python 3")
    def test_socket_options_for_ssl_server(self):
        # test normal socket options has set properly
        self.config(tcp_keepidle=500)
        server = wsgi.Server(self.conf, "test_socket_options", None,
                             host="127.0.0.1", port=0, use_ssl=True)
        server.start()
        sock = server.socket
        self.assertEqual(1, sock.getsockopt(socket.SOL_SOCKET,
                                            socket.SO_REUSEADDR))
        self.assertEqual(1, sock.getsockopt(socket.SOL_SOCKET,
                                            socket.SO_KEEPALIVE))
        if hasattr(socket, 'TCP_KEEPIDLE'):
            self.assertEqual(CONF.tcp_keepidle,
                             sock.getsockopt(socket.IPPROTO_TCP,
                                             socket.TCP_KEEPIDLE))
        server.stop()
        server.wait()

    @testtools.skipIf(not netutils.is_ipv6_enabled(), "no ipv6 support")
    @testtools.skipIf(six.PY3, "bug/1482633: test hangs on Python 3")
    def test_app_using_ipv6_and_ssl(self):
        # Serve HTTPS on an IPv6 loopback address.
        greetings = 'Hello, World!!!'

        @webob.dec.wsgify
        def hello_world(req):
            return greetings

        server = wsgi.Server(self.conf, "fake_ssl", hello_world,
                             host="::1", port=0, use_ssl=True)
        server.start()

        response = requests.get('https://[::1]:%d/' % server.port,
                                verify=os.path.join(SSL_CERT_DIR,
                                                    'ca.crt'))
        self.assertEqual(greetings, response.text)

        server.stop()
        server.wait()
class TestWSGIServer(test.TestCase):
    """WSGI server tests."""

    def test_no_app(self):
        server = manila.wsgi.Server("test_app", None,
                                    host="127.0.0.1", port=0)
        self.assertEqual("test_app", server.name)

    def test_start_random_port(self):
        server = manila.wsgi.Server("test_random_port", None,
                                    host="127.0.0.1")
        server.start()
        self.assertNotEqual(0, server.port)
        server.stop()
        server.wait()

    @testtools.skipIf(not netutils.is_ipv6_enabled(),
                      "Test requires an IPV6 configured interface")
    @testtools.skipIf(utils.is_eventlet_bug105(),
                      'Eventlet bug #105 affect test results.')
    def test_start_random_port_with_ipv6(self):
        server = manila.wsgi.Server("test_random_port", None, host="::1")
        server.start()
        self.assertEqual("::1", server.host)
        self.assertNotEqual(0, server.port)
        server.stop()
        server.wait()

    def test_start_with_default_tcp_options(self):
        server = manila.wsgi.Server("test_tcp_options", None,
                                    host="127.0.0.1")
        self.mock_object(netutils, 'set_tcp_keepalive')
        server.start()
        netutils.set_tcp_keepalive.assert_called_once_with(
            mock.ANY, tcp_keepalive=True, tcp_keepalive_count=None,
            tcp_keepalive_interval=None, tcp_keepidle=600)

    def test_start_with_custom_tcp_options(self):
        CONF.set_default("tcp_keepalive", False)
        CONF.set_default("tcp_keepalive_count", 33)
        CONF.set_default("tcp_keepalive_interval", 22)
        CONF.set_default("tcp_keepidle", 11)
        server = manila.wsgi.Server("test_tcp_options", None,
                                    host="127.0.0.1")
        self.mock_object(netutils, 'set_tcp_keepalive')
        server.start()
        netutils.set_tcp_keepalive.assert_called_once_with(
            mock.ANY, tcp_keepalive=False, tcp_keepalive_count=33,
            tcp_keepalive_interval=22, tcp_keepidle=11)

    def test_app(self):
        # Wrap eventlet.spawn so the call arguments can be inspected
        # while still actually spawning the server greenthread.
        self.mock_object(
            eventlet, 'spawn', mock.Mock(side_effect=eventlet.spawn))
        greetings = 'Hello, World!!!'

        def hello_world(env, start_response):
            if env['PATH_INFO'] != '/':
                start_response('404 Not Found',
                               [('Content-Type', 'text/plain')])
                return ['Not Found\r\n']
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return [greetings]

        server = manila.wsgi.Server("test_app", hello_world,
                                    host="127.0.0.1", port=0)
        server.start()

        response = urllib.request.urlopen('http://127.0.0.1:%d/' %
                                          server.port)
        self.assertEqual(six.b(greetings), response.read())

        # Verify provided parameters to eventlet.spawn func
        eventlet.spawn.assert_called_once_with(
            func=eventlet.wsgi.server,
            sock=mock.ANY,
            site=server.app,
            protocol=server._protocol,
            custom_pool=server._pool,
            log=server._logger,
            socket_timeout=server.client_socket_timeout,
            keepalive=manila.wsgi.CONF.wsgi_keep_alive,
        )

        server.stop()

    @ddt.data(0, 0.1, 1, None)
    def test_init_server_with_socket_timeout(self, client_socket_timeout):
        CONF.set_default("client_socket_timeout", client_socket_timeout)
        server = manila.wsgi.Server("test_app",
                                    lambda *args, **kwargs: None,
                                    host="127.0.0.1", port=0)
        self.assertEqual(client_socket_timeout,
                         server.client_socket_timeout)

    @testtools.skipIf(six.PY3, "bug/1482633")
    def test_app_using_ssl(self):
        CONF.set_default("ssl_cert_file",
                         os.path.join(TEST_VAR_DIR, 'certificate.crt'))
        CONF.set_default("ssl_key_file",
                         os.path.join(TEST_VAR_DIR, 'privatekey.key'))
        greetings = 'Hello, World!!!'

        @webob.dec.wsgify
        def hello_world(req):
            return greetings

        server = manila.wsgi.Server("test_app", hello_world,
                                    host="127.0.0.1", port=0)
        server.start()

        if hasattr(ssl, '_create_unverified_context'):
            # Self-signed test cert: skip verification where possible.
            response = urllib.request.urlopen(
                'https://127.0.0.1:%d/' % server.port,
                context=ssl._create_unverified_context())
        else:
            response = urllib.request.urlopen('https://127.0.0.1:%d/' %
                                              server.port)

        self.assertEqual(greetings, response.read())

        server.stop()

    @testtools.skipIf(not netutils.is_ipv6_enabled(),
                      "Test requires an IPV6 configured interface")
    @testtools.skipIf(utils.is_eventlet_bug105(),
                      'Eventlet bug #105 affect test results.')
    @testtools.skipIf(six.PY3, "bug/1482633")
    def test_app_using_ipv6_and_ssl(self):
        CONF.set_default("ssl_cert_file",
                         os.path.join(TEST_VAR_DIR, 'certificate.crt'))
        CONF.set_default("ssl_key_file",
                         os.path.join(TEST_VAR_DIR, 'privatekey.key'))
        greetings = 'Hello, World!!!'

        @webob.dec.wsgify
        def hello_world(req):
            return greetings

        server = manila.wsgi.Server("test_app", hello_world,
                                    host="::1", port=0)
        server.start()

        if hasattr(ssl, '_create_unverified_context'):
            response = urllib.request.urlopen(
                'https://[::1]:%d/' % server.port,
                context=ssl._create_unverified_context())
        else:
            response = urllib.request.urlopen('https://[::1]:%d/' %
                                              server.port)

        self.assertEqual(greetings, response.read())

        server.stop()

    def test_reset_pool_size_to_default(self):
        server = manila.wsgi.Server("test_resize", None, host="127.0.0.1")
        server.start()

        # Stopping the server, which in turn sets pool size to 0
        server.stop()
        self.assertEqual(0, server._pool.size)

        # Resetting pool size to default
        server.reset()
        server.start()
        self.assertEqual(1000, server._pool.size)
def test_disabled_non_exists(self):
    """A missing sysctl knob means IPv6 is reported disabled."""
    self.mock_exists.return_value = False
    self.assertFalse(netutils.is_ipv6_enabled())
    # The knob was never read since the existence check failed first.
    self.assertFalse(self.mock_read.called)
def test_enabled(self, mock_open, exists):
    """IPv6 is reported enabled when the sysctl knob allows it."""
    self.assertTrue(netutils.is_ipv6_enabled())
class TestWSGIServer(base.BaseTestCase):
    """WSGI server tests."""

    def test_start_random_port(self):
        server = wsgi.Server("test_random_port")
        server.start(None, 0, host="127.0.0.1")
        self.assertNotEqual(0, server.port)
        server.stop()
        server.wait()

    @mock.patch('oslo_service.service.ProcessLauncher')
    def test_start_multiple_workers(self, ProcessLauncher):
        launcher = ProcessLauncher.return_value

        server = wsgi.Server("test_multiple_processes")
        server.start(None, 0, host="127.0.0.1", workers=2)
        launcher.launch_service.assert_called_once_with(mock.ANY,
                                                        workers=2)

        server.stop()
        launcher.stop.assert_called_once_with()

        server.wait()
        launcher.wait.assert_called_once_with()

    @testtools.skipIf(not netutils.is_ipv6_enabled(),
                      'IPv6 support disabled on host')
    def test_start_random_port_with_ipv6(self):
        server = wsgi.Server("test_random_port")
        server.start(None, 0, host="::1")
        self.assertEqual("::1", server.host)
        self.assertNotEqual(0, server.port)
        server.stop()
        server.wait()

    def test_ipv6_listen_called_with_scope(self):
        # A scoped link-local address must be passed through to listen()
        # with the scope id intact (the 4-tuple from getaddrinfo).
        server = wsgi.Server("test_app")

        with mock.patch.object(wsgi.eventlet, 'listen') as mock_listen:
            with mock.patch.object(socket, 'getaddrinfo') as mock_get_addr:
                mock_get_addr.return_value = [
                    (socket.AF_INET6,
                     socket.SOCK_STREAM,
                     socket.IPPROTO_TCP,
                     '',
                     ('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2))]
                with mock.patch.object(server, 'pool') as mock_pool:
                    server.start(None, 1234,
                                 host="fe80::204:acff:fe96:da87%eth0")
                    mock_get_addr.assert_called_once_with(
                        "fe80::204:acff:fe96:da87%eth0",
                        1234,
                        socket.AF_UNSPEC,
                        socket.SOCK_STREAM)
                    mock_listen.assert_called_once_with(
                        ('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2),
                        family=socket.AF_INET6,
                        backlog=cfg.CONF.backlog)
                    mock_pool.spawn.assert_has_calls([
                        mock.call(
                            server._run,
                            None,
                            mock_listen.return_value.dup.return_value)])

    def test_app(self):
        greetings = b'Hello, World!!!'

        def hello_world(env, start_response):
            if env['PATH_INFO'] != '/':
                start_response('404 Not Found',
                               [('Content-Type', 'text/plain')])
                return ['Not Found\r\n']
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return [greetings]

        server = wsgi.Server("test_app")
        server.start(hello_world, 0, host="127.0.0.1")

        response = open_no_proxy('http://127.0.0.1:%d/' % server.port)
        self.assertEqual(greetings, response.read())

        server.stop()

    def test_disable_ssl(self):
        # With disable_ssl=True the server must serve plain HTTP even
        # though use_ssl is configured on.
        CONF.set_default('use_ssl', True)
        greetings = 'Hello, World!!!'

        def hello_world(env, start_response):
            if env['PATH_INFO'] != '/':
                start_response('404 Not Found',
                               [('Content-Type', 'text/plain')])
                return ['Not Found\r\n']
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return [greetings]

        server = wsgi.Server("test_app", disable_ssl=True)
        server.start(hello_world, 0, host="127.0.0.1")

        response = open_no_proxy('http://127.0.0.1:%d/' % server.port)
        self.assertEqual(greetings.encode('utf-8'), response.read())

        server.stop()

    @mock.patch.object(wsgi, 'eventlet')
    def test__run(self, eventlet_mock):
        server = wsgi.Server('test')
        server._run("app", "socket")
        eventlet_mock.wsgi.server.assert_called_once_with(
            'socket',
            'app',
            max_size=server.num_threads,
            log=mock.ANY,
            keepalive=CONF.wsgi_keep_alive,
            log_format=CONF.wsgi_log_format,
            socket_timeout=server.client_socket_timeout)
def test_disabled_non_exists(self, mock_open, exists):
    """IPv6 must be reported as disabled when the proc entry is absent."""
    self.assertFalse(netutils.is_ipv6_enabled())
def test_memoize(self):
    """A repeat call must reuse the cached result and not re-read proc."""
    self.mock_read.return_value = "0"
    netutils.is_ipv6_enabled()  # first call populates the cache
    result = netutils.is_ipv6_enabled()
    self.assertTrue(result)
    # Exactly one read proves the second call hit the memoized value.
    self.mock_read.assert_called_once_with()
class L3HATestCase(framework.L3AgentTestFramework):
    """Functional tests for HA (keepalived-backed) L3 routers."""

    def test_ha_router_update_floatingip_statuses(self):
        self._test_update_floatingip_statuses(
            self.generate_router_info(enable_ha=True))

    def test_keepalived_state_change_notification(self):
        """Primary->backup transitions must be enqueued in order."""
        enqueue_mock = mock.patch.object(
            self.agent, 'enqueue_state_change',
            side_effect=self.change_router_state).start()
        router_info = self.generate_router_info(enable_ha=True)
        router = self.manage_router(self.agent, router_info)
        self.wait_until_ha_router_has_state(router, 'primary')

        self.fail_ha_router(router)
        self.wait_until_ha_router_has_state(router, 'backup')

        def enqueue_call_count_match():
            LOG.debug("enqueue_mock called %s times.",
                      enqueue_mock.call_count)
            # 2 or 3 calls are both acceptable (an extra initial state
            # notification may or may not be observed).
            return enqueue_mock.call_count in [2, 3]
        common_utils.wait_until_true(enqueue_call_count_match)

        calls = [args[0] for args in enqueue_mock.call_args_list]
        self.assertEqual((router.router_id, 'primary'), calls[-2])
        self.assertEqual((router.router_id, 'backup'), calls[-1])

    def _expected_rpc_report(self, expected):
        """Return True when the last state reported per router via RPC
        matches *expected* (dict router_id -> state).
        """
        calls = (args[0][1] for args in
                 self.agent.plugin_rpc.update_ha_routers_states.call_args_list)

        # Get the last state reported for each router
        actual_router_states = {}
        for call in calls:
            for router_id, state in call.items():
                actual_router_states[router_id] = state

        return actual_router_states == expected

    def test_keepalived_state_change_bulk_rpc(self):
        router_info = self.generate_router_info(enable_ha=True)
        router1 = self.manage_router(self.agent, router_info)
        self.fail_ha_router(router1)
        router_info = self.generate_router_info(enable_ha=True)
        router2 = self.manage_router(self.agent, router_info)
        self.wait_until_ha_router_has_state(router1, 'backup')
        self.wait_until_ha_router_has_state(router2, 'primary')
        # On the wire the states are reported as standby/active.
        common_utils.wait_until_true(
            lambda: self._expected_rpc_report(
                {router1.router_id: 'standby', router2.router_id: 'active'}))

    def test_ha_router_lifecycle(self):
        router_info = self._router_lifecycle(enable_ha=True)
        # ensure everything was cleaned up
        self._router_lifecycle(enable_ha=True, router_info=router_info)

    def test_conntrack_disassociate_fip_ha_router(self):
        self._test_conntrack_disassociate_fip(ha=True)

    def test_ipv6_ha_router_lifecycle(self):
        self._router_lifecycle(enable_ha=True,
                               ip_version=constants.IP_VERSION_6)

    def test_ipv6_ha_router_lifecycle_with_no_gw_subnet(self):
        self.agent.conf.set_override('ipv6_gateway',
                                     'fe80::f816:3eff:fe2e:1')
        self._router_lifecycle(enable_ha=True,
                               ip_version=constants.IP_VERSION_6,
                               v6_ext_gw_with_sub=False)

    def test_ipv6_ha_router_lifecycle_with_no_gw_subnet_for_router_advts(self):
        # Verify that router gw interface is configured to receive Router
        # Advts from upstream router when no external gateway is configured.
        self._router_lifecycle(enable_ha=True, dual_stack=True,
                               v6_ext_gw_with_sub=False)

    def _test_ipv6_router_advts_and_fwd_helper(self, state, enable_v6_gw,
                                               expected_ra,
                                               expected_forwarding):
        # Schedule router to l3 agent, and then add router gateway. Verify
        # that router gw interface is configured to receive Router Advts and
        # IPv6 forwarding is enabled.
        router_info = l3_test_common.prepare_router_data(
            enable_snat=True, enable_ha=True, dual_stack=True,
            enable_gw=False)
        router = self.manage_router(self.agent, router_info)
        self.wait_until_ha_router_has_state(router, 'primary')
        if state == 'backup':
            self.fail_ha_router(router)
            self.wait_until_ha_router_has_state(router, 'backup')
        _ext_dev_name, ex_port = l3_test_common.prepare_ext_gw_test(
            mock.Mock(), router, dual_stack=enable_v6_gw)
        router_info['gw_port'] = ex_port
        router.process()
        self._assert_ipv6_accept_ra(router, expected_ra)
        # As router is going first to primary and than to backup mode,
        # ipv6_forwarding should be enabled on "all" interface always after
        # that transition
        self._assert_ipv6_forwarding(router, expected_forwarding, True)

    @testtools.skipUnless(netutils.is_ipv6_enabled(), "IPv6 is not enabled")
    def test_ipv6_router_advts_and_fwd_after_router_state_change_primary(
            self):
        # Check that RA and forwarding are enabled when there's no IPv6
        # gateway.
        self._test_ipv6_router_advts_and_fwd_helper('primary',
                                                    enable_v6_gw=False,
                                                    expected_ra=True,
                                                    expected_forwarding=True)
        # Check that RA is disabled and forwarding is enabled when an IPv6
        # gateway is configured.
        self._test_ipv6_router_advts_and_fwd_helper('primary',
                                                    enable_v6_gw=True,
                                                    expected_ra=False,
                                                    expected_forwarding=True)

    @testtools.skipUnless(netutils.is_ipv6_enabled(), "IPv6 is not enabled")
    def test_ipv6_router_advts_and_fwd_after_router_state_change_backup(self):
        # Check that both RA and forwarding are disabled on backup instances
        self._test_ipv6_router_advts_and_fwd_helper('backup',
                                                    enable_v6_gw=False,
                                                    expected_ra=False,
                                                    expected_forwarding=False)
        self._test_ipv6_router_advts_and_fwd_helper('backup',
                                                    enable_v6_gw=True,
                                                    expected_ra=False,
                                                    expected_forwarding=False)

    def test_keepalived_configuration(self):
        """keepalived config must track FIP additions and GW IP changes."""
        router_info = self.generate_router_info(enable_ha=True)
        router = self.manage_router(self.agent, router_info)
        expected = self.get_expected_keepalive_configuration(router)

        self.assertEqual(expected,
                         router.keepalived_manager.get_conf_on_disk())

        # Add a new FIP and change the GW IP address
        router.router = copy.deepcopy(router.router)
        existing_fip = '19.4.4.2'
        new_fip = '19.4.4.3'
        self._add_fip(router, new_fip)
        subnet_id = framework._uuid()
        fixed_ips = [{'ip_address': '19.4.4.10',
                      'prefixlen': 24,
                      'subnet_id': subnet_id}]
        subnets = [{'id': subnet_id,
                    'cidr': '19.4.4.0/24',
                    'gateway_ip': '19.4.4.5'}]
        router.router['gw_port']['subnets'] = subnets
        router.router['gw_port']['fixed_ips'] = fixed_ips

        router.process()

        # Get the updated configuration and assert that both FIPs are in,
        # and that the GW IP address was updated.
        new_config = router.keepalived_manager.config.get_config_str()
        old_gw = '0.0.0.0/0 via 19.4.4.1'
        new_gw = '0.0.0.0/0 via 19.4.4.5'
        old_external_device_ip = '19.4.4.4'
        new_external_device_ip = '19.4.4.10'
        self.assertIn(existing_fip, new_config)
        self.assertIn(new_fip, new_config)
        self.assertNotIn(old_gw, new_config)
        self.assertIn(new_gw, new_config)
        external_port = router.get_ex_gw_port()
        external_device_name = router.get_external_device_name(
            external_port['id'])
        self.assertNotIn('%s/24 dev %s' %
                         (old_external_device_ip, external_device_name),
                         new_config)
        self.assertIn('%s/24 dev %s' %
                      (new_external_device_ip, external_device_name),
                      new_config)

    def test_ha_router_conf_on_restarted_agent(self):
        """A restarted agent must re-converge an existing HA router."""
        router_info = self.generate_router_info(enable_ha=True)
        router1 = self.manage_router(self.agent, router_info)
        self._add_fip(router1, '192.168.111.12')
        restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport(
            self.agent.host, self.agent.conf)
        self.manage_router(restarted_agent, router1.router)
        common_utils.wait_until_true(
            lambda: self.floating_ips_configured(router1))
        self.assertIn(
            router1._get_primary_vip(),
            self._get_addresses_on_device(
                router1.ns_name,
                router1.get_ha_device_name()))

    def test_ha_router_ipv6_radvd_status(self):
        """radvd must run only while this instance is primary."""
        router_info = self.generate_router_info(
            ip_version=constants.IP_VERSION_6, enable_ha=True)
        router1 = self.manage_router(self.agent, router_info)
        self.wait_until_ha_router_has_state(router1, 'primary')
        common_utils.wait_until_true(lambda: router1.radvd.enabled)

        def _check_lla_status(router, expected):
            internal_devices = router.router[constants.INTERFACE_KEY]
            for device in internal_devices:
                lladdr = ip_lib.get_ipv6_lladdr(device['mac_address'])
                exists = ip_lib.device_exists_with_ips_and_mac(
                    router.get_internal_device_name(device['id']),
                    [lladdr],
                    device['mac_address'], router.ns_name)
                self.assertEqual(expected, exists)

        _check_lla_status(router1, True)

        # Force a failover by downing the HA interface.
        device_name = router1.get_ha_device_name()
        ha_device = ip_lib.IPDevice(device_name,
                                    namespace=router1.ns_name)
        ha_device.link.set_down()
        self.wait_until_ha_router_has_state(router1, 'backup')
        common_utils.wait_until_true(
            lambda: not router1.radvd.enabled, timeout=10)
        _check_lla_status(router1, False)

    def test_ha_router_process_ipv6_subnets_to_existing_port(self):
        """Adding/removing an IPv6 subnet on an existing internal port must
        be reflected on the device and in the keepalived config.
        """
        router_info = self.generate_router_info(
            enable_ha=True, ip_version=constants.IP_VERSION_6)
        router = self.manage_router(self.agent, router_info)

        def verify_ip_in_keepalived_config(router, iface):
            config = router.keepalived_manager.config.get_config_str()
            ip_cidrs = common_utils.fixed_ip_cidrs(iface['fixed_ips'])
            for ip_addr in ip_cidrs:
                self.assertIn(ip_addr, config)

        interface_id = router.router[constants.INTERFACE_KEY][0]['id']
        slaac = constants.IPV6_SLAAC
        slaac_mode = {'ra_mode': slaac, 'address_mode': slaac}

        # Add a second IPv6 subnet to the router internal interface.
        self._add_internal_interface_by_subnet(
            router.router, count=1,
            ip_version=constants.IP_VERSION_6,
            ipv6_subnet_modes=[slaac_mode],
            interface_id=interface_id)
        router.process()
        self.wait_until_ha_router_has_state(router, 'primary')

        # Verify that router internal interface is present and is configured
        # with IP address from both the subnets.
        internal_iface = router.router[constants.INTERFACE_KEY][0]
        self.assertEqual(2, len(internal_iface['fixed_ips']))
        self._assert_internal_devices(router)

        # Verify that keepalived config is properly updated.
        verify_ip_in_keepalived_config(router, internal_iface)

        # Remove one subnet from the router internal iface
        interfaces = copy.deepcopy(router.router.get(
            constants.INTERFACE_KEY, []))
        fixed_ips, subnets = [], []
        fixed_ips.append(interfaces[0]['fixed_ips'][0])
        subnets.append(interfaces[0]['subnets'][0])
        interfaces[0].update({'fixed_ips': fixed_ips, 'subnets': subnets})
        router.router[constants.INTERFACE_KEY] = interfaces
        router.process()

        # Verify that router internal interface has a single ipaddress
        internal_iface = router.router[constants.INTERFACE_KEY][0]
        self.assertEqual(1, len(internal_iface['fixed_ips']))
        self._assert_internal_devices(router)

        # Verify that keepalived config is properly updated.
        verify_ip_in_keepalived_config(router, internal_iface)

    def test_delete_external_gateway_on_standby_router(self):
        router_info = self.generate_router_info(enable_ha=True)
        router = self.manage_router(self.agent, router_info)

        self.wait_until_ha_router_has_state(router, 'primary')
        self.fail_ha_router(router)
        self.wait_until_ha_router_has_state(router, 'backup')

        # The purpose of the test is to simply make sure no exception is
        # raised
        port = router.get_ex_gw_port()
        interface_name = router.get_external_device_name(port['id'])
        router.external_gateway_removed(port, interface_name)

    def test_removing_floatingip_immediately(self):
        router_info = self.generate_router_info(enable_ha=True)
        router = self.manage_router(self.agent, router_info)
        ex_gw_port = router.get_ex_gw_port()
        interface_name = router.get_external_device_interface_name(
            ex_gw_port)
        self.wait_until_ha_router_has_state(router, 'primary')
        self._add_fip(router, '172.168.1.20', fixed_address='10.0.0.3')
        router.process()
        router.router[constants.FLOATINGIP_KEY] = []
        # The purpose of the test is to simply make sure no exception is
        # raised
        # Because router.process will consume the FloatingIpSetupException,
        # call the configure_fip_addresses directly here
        router.configure_fip_addresses(interface_name)

    def test_ha_port_status_update(self):
        """An HA port going DOWN->ACTIVE must drive backup->primary."""
        router_info = self.generate_router_info(enable_ha=True)
        router_info[constants.HA_INTERFACE_KEY]['status'] = (
            constants.PORT_STATUS_DOWN)
        router1 = self.manage_router(self.agent, router_info)
        self.wait_until_ha_router_has_state(router1, 'backup')

        router1.router[constants.HA_INTERFACE_KEY]['status'] = (
            constants.PORT_STATUS_ACTIVE)
        self.agent._process_updated_router(router1.router)
        self.wait_until_ha_router_has_state(router1, 'primary')

    def test_ha_router_namespace_has_ip_nonlocal_bind_disabled(self):
        router_info = self.generate_router_info(enable_ha=True)
        router = self.manage_router(self.agent, router_info)
        try:
            ip_nonlocal_bind_value = ip_lib.get_ip_nonlocal_bind(
                router.router_namespace.name)
        except RuntimeError as rte:
            stat_message = 'cannot stat /proc/sys/net/ipv4/ip_nonlocal_bind'
            if stat_message in str(rte):
                raise self.skipException(
                    "This kernel doesn't support %s in network "
                    "namespaces." % (
                        ip_lib.IP_NONLOCAL_BIND))
            # Any other runtime failure is a real error.
            raise
        self.assertEqual(0, ip_nonlocal_bind_value)

    @testtools.skipUnless(netutils.is_ipv6_enabled(), "IPv6 is not enabled")
    def test_ha_router_addr_gen_mode(self):
        """The gateway LLA must appear only once this instance is primary."""
        router_info = self.generate_router_info(enable_ha=True)
        router_info[constants.HA_INTERFACE_KEY]['status'] = (
            constants.PORT_STATUS_DOWN)
        router = self.manage_router(self.agent, router_info)
        external_port = router.get_ex_gw_port()
        external_device_name = router.get_external_device_name(
            external_port['id'])

        def check_gw_lla_status(expected):
            lladdr = ip_lib.get_ipv6_lladdr(
                external_port['mac_address'])
            exists = ip_lib.device_exists_with_ips_and_mac(
                external_device_name, [lladdr],
                external_port['mac_address'], router.ns_name)
            self.assertEqual(expected, exists)

        self.wait_until_ha_router_has_state(router, 'backup')
        self._wait_until_addr_gen_mode_has_state(
            router.ns_name, 1)
        check_gw_lla_status(False)

        router.router[constants.HA_INTERFACE_KEY]['status'] = (
            constants.PORT_STATUS_ACTIVE)
        self.agent._process_updated_router(router.router)
        self.wait_until_ha_router_has_state(router, 'primary')
        self._wait_until_addr_gen_mode_has_state(
            router.ns_name, 1)
        check_gw_lla_status(True)

    @testtools.skipUnless(netutils.is_ipv6_enabled(), "IPv6 is not enabled")
    def test_ha_router_namespace_has_ipv6_forwarding_disabled(self):
        """IPv6 forwarding on the gw device must follow the HA state."""
        router_info = self.generate_router_info(enable_ha=True)
        router_info[constants.HA_INTERFACE_KEY]['status'] = (
            constants.PORT_STATUS_DOWN)
        router = self.manage_router(self.agent, router_info)
        external_port = router.get_ex_gw_port()
        external_device_name = router.get_external_device_name(
            external_port['id'])

        self.wait_until_ha_router_has_state(router, 'backup')
        self._wait_until_ipv6_forwarding_has_state(
            router.ns_name, external_device_name, 0)

        router.router[constants.HA_INTERFACE_KEY]['status'] = (
            constants.PORT_STATUS_ACTIVE)
        self.agent._process_updated_router(router.router)
        self.wait_until_ha_router_has_state(router, 'primary')
        self._wait_until_ipv6_forwarding_has_state(
            router.ns_name, external_device_name, 1)

    @testtools.skipUnless(netutils.is_ipv6_enabled(), "IPv6 is not enabled")
    def test_ha_router_without_gw_ipv6_forwarding_state(self):
        """Without a gateway, forwarding state is tracked on 'all'."""
        router_info = self.generate_router_info(
            enable_ha=True, enable_gw=False)
        router_info[constants.HA_INTERFACE_KEY]['status'] = (
            constants.PORT_STATUS_DOWN)
        router = self.manage_router(self.agent, router_info)

        self.wait_until_ha_router_has_state(router, 'backup')
        self._wait_until_ipv6_forwarding_has_state(router.ns_name, 'all', 0)

        router.router[constants.HA_INTERFACE_KEY]['status'] = (
            constants.PORT_STATUS_ACTIVE)
        self.agent._process_updated_router(router.router)
        self.wait_until_ha_router_has_state(router, 'primary')
        self._wait_until_ipv6_forwarding_has_state(router.ns_name, 'all', 1)

    def test_router_interface_mtu_update(self):
        """MTU changes in router info must be applied to both devices."""
        original_mtu = 1450
        router_info = self.generate_router_info(False)
        router_info['_interfaces'][0]['mtu'] = original_mtu
        router_info['gw_port']['mtu'] = original_mtu

        router = self.manage_router(self.agent, router_info)

        interface_name = router.get_internal_device_name(
            router_info['_interfaces'][0]['id'])
        gw_interface_name = router.get_external_device_name(
            router_info['gw_port']['id'])
        self.assertEqual(
            original_mtu,
            ip_lib.IPDevice(interface_name, router.ns_name).link.mtu)
        self.assertEqual(
            original_mtu,
            ip_lib.IPDevice(gw_interface_name, router.ns_name).link.mtu)

        updated_mtu = original_mtu + 1
        router_info_copy = copy.deepcopy(router_info)
        router_info_copy['_interfaces'][0]['mtu'] = updated_mtu
        router_info_copy['gw_port']['mtu'] = updated_mtu

        self.agent._process_updated_router(router_info_copy)

        self.assertEqual(
            updated_mtu,
            ip_lib.IPDevice(interface_name, router.ns_name).link.mtu)
        self.assertEqual(
            updated_mtu,
            ip_lib.IPDevice(gw_interface_name, router.ns_name).link.mtu)

    def test_ha_router_update_ecmp_routes(self):
        """ECMP routes (same dest, two nexthops) must be added/removed in
        the keepalived configuration as the router is updated.
        """
        dest_cidr = '8.8.8.0/24'
        nexthop1 = '19.4.4.4'
        nexthop2 = '19.4.4.5'
        router_info = self.generate_router_info(enable_ha=True)
        router = self.manage_router(self.agent, router_info)
        router.router['routes'] = [
            {'destination': dest_cidr, 'nexthop': nexthop1},
            {'destination': dest_cidr, 'nexthop': nexthop2}]
        self.agent._process_updated_router(router.router)
        config = router.keepalived_manager.config.get_config_str()
        self.assertIn(dest_cidr, config)
        self.assertIn(nexthop1, config)
        self.assertIn(nexthop2, config)
        # Delete one route
        router.router['routes'] = [
            {'destination': dest_cidr, 'nexthop': nexthop1}]
        self.agent._process_updated_router(router.router)
        config = router.keepalived_manager.config.get_config_str()
        self.assertIn(dest_cidr, config)
        self.assertIn(nexthop1, config)
        self.assertNotIn(nexthop2, config)
def test_enabled(self):
    """Reading "0" from disable_ipv6 means IPv6 is enabled."""
    self.mock_read.return_value = "0"
    self.assertTrue(netutils.is_ipv6_enabled())
# Small demo of oslo_utils.netutils helpers, printed one per line.
from oslo_utils import netutils

print(netutils.escape_ipv6('fe80::f493:20ff:fe5b:6cf'))
print(netutils.get_ipv6_addr_by_EUI64('fe80::d480:b0ff:fe33:1543/64',
                                      'f2:2c:d8:c3:73:fb'))
print(netutils.get_my_ipv4())
print(netutils.is_ipv6_enabled())
print(netutils.is_valid_cidr('10.10.10.10/24'))

# Validity of ICMP codes over a small signed range.
code_list = [netutils.is_valid_icmp_code(n) for n in range(-5, 5)]
print(code_list)

print(netutils.urlsplit(
    'https://foxfox.mybluemix.net.com:8443/index.html?auto=off'))
# SplitResult(scheme='https', netloc='foxfox.mybluemix.net.com:8443',
# path='/index.html', query='auto=off', fragment='')
def test_disabled(self):
    """Reading "1" from disable_ipv6 means IPv6 is disabled."""
    self.mock_read.return_value = "1"
    self.assertFalse(netutils.is_ipv6_enabled())
def run(self):
    """Run the Ironic Python Agent.

    Performs node lookup/heartbeat setup (unless standalone), then serves
    the agent API over WSGI until the server exits, stopping the
    heartbeater on the way out.
    """
    # Get the UUID so we can heartbeat to Ironic. Raises LookupNodeError
    # if there is an issue (uncaught, restart agent)
    self.started_at = _time()

    # Cached hw managers at runtime, not load time. See bug 1490008.
    hardware.load_managers()
    # Operator-settable delay before hardware actually comes up.
    # Helps with slow RAID drivers - see bug 1582797.
    if self.hardware_initialization_delay > 0:
        LOG.info('Waiting %d seconds before proceeding',
                 self.hardware_initialization_delay)
        time.sleep(self.hardware_initialization_delay)

    if not self.standalone:
        # Inspection should be started before call to lookup, otherwise
        # lookup will fail due to unknown MAC.
        uuid = None
        if cfg.CONF.inspection_callback_url:
            uuid = inspector.inspect()

        if self.api_url:
            self._wait_for_interface()
            content = self.api_client.lookup_node(
                hardware_info=hardware.dispatch_to_managers(
                    'list_hardware_info'),
                timeout=self.lookup_timeout,
                starting_interval=self.lookup_interval,
                node_uuid=uuid)

            LOG.debug('Received lookup results: %s', content)
            self.node = content['node']
            LOG.info('Lookup succeeded, node UUID is %s',
                     self.node['uuid'])
            hardware.cache_node(self.node)
            self.heartbeat_timeout = content['config']['heartbeat_timeout']

            # Update config with values from Ironic
            config = content.get('config', {})
            # NOTE(review): these loops iterate over the whole config dict,
            # not config['metrics'] / config['metrics_statsd'] — so every
            # top-level key is set as an attribute on the metrics groups.
            # Looks suspicious; confirm against the lookup API schema
            # before changing.
            if config.get('metrics'):
                for opt, val in config.items():
                    setattr(cfg.CONF.metrics, opt, val)
            if config.get('metrics_statsd'):
                for opt, val in config.items():
                    setattr(cfg.CONF.metrics_statsd, opt, val)
        elif cfg.CONF.inspection_callback_url:
            LOG.info('No ipa-api-url configured, Heartbeat and lookup '
                     'skipped for inspector.')
        else:
            # BUG FIX: the two adjacent literals previously concatenated to
            # "...inspection_callback_urlfound..." — add the missing space.
            LOG.error('Neither ipa-api-url nor inspection_callback_url '
                      'found, please check your pxe append parameters.')

    if netutils.is_ipv6_enabled():
        # Listens to both IP versions, assuming IPV6_V6ONLY isn't enabled,
        # (the default behaviour in linux)
        simple_server.WSGIServer.address_family = socket.AF_INET6
    wsgi = simple_server.make_server(
        self.listen_address.hostname,
        self.listen_address.port,
        self.api,
        server_class=simple_server.WSGIServer)

    if not self.standalone and self.api_url:
        # Don't start heartbeating until the server is listening
        self.heartbeater.start()

    try:
        wsgi.serve_forever()
    except BaseException:
        LOG.exception('shutting down')

    if not self.standalone and self.api_url:
        self.heartbeater.stop()