def dynamic_ipv6_iface():
    if running_on_ovirt_ci():
        pytest.skip('Using dnsmasq for ipv6 RA is unstable on CI')
    with veth_pair() as (server, client):
        ipwrapper.addrAdd(server, IPV6_ADDR1, IPV6_PREFIX_LENGTH, family=6)
        ipwrapper.linkSet(server, ['up'])
        with dnsmasq_run(server, ipv6_slaac_prefix=IPV6_NET_ADDR):
            with wait_for_ipv6(client):
                ipwrapper.linkSet(client, ['up'])
            yield client

def dynamic_ipv6_iface():
    if running_on_ovirt_ci():
        pytest.skip('Using dnsmasq for ipv6 RA is unstable on CI')
    with veth_pair() as (server, client):
        with wait_for_ipv6(server, IPV6_ADDR1, IPV6_PREFIX_LENGTH):
            Interface.from_existing_dev_name(server).add_ip(
                IPV6_ADDR1, IPV6_PREFIX_LENGTH, IpFamily.IPv6
            )
        client_interface = Interface.from_existing_dev_name(client)
        client_interface.down()
        with dnsmasq_run(server, ipv6_slaac_prefix=IPV6_NET_ADDR):
            with wait_for_ipv6(client):
                client_interface.up()
            yield client

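# A minimal usage sketch (an assumption, not part of the original module):
# the generator above is presumably registered as a pytest fixture, and a
# test then receives the client veth name once SLAAC configuration has
# settled. The test name below is hypothetical.
def test_client_acquired_dynamic_ipv6(dynamic_ipv6_iface):
    # The fixture yields the client device name; by the time the test runs,
    # dnsmasq has sent router advertisements and wait_for_ipv6 has completed.
    assert dynamic_ipv6_iface
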
class TestNetinfo(object):
    def test_speed_on_an_iface_that_does_not_support_speed(self):
        assert nic.speed('lo') == 0

    def test_speed_in_range(self):
        for d in nics.nics():
            s = nic.speed(d)
            assert not s < 0
            assert s in ETHTOOL_SPEEDS or s == 0

    @mock.patch.object(ipwrapper.Link, '_fakeNics', ['veth_*', 'dummy_*'])
    def test_fake_nics(self):
        with veth_pair() as (v1a, v1b):
            with dummy_device() as d1:
                fakes = set([d1, v1a, v1b])
                _nics = nics.nics()
                errmsg = 'Fake devices {} are not listed in nics {}'
                assert fakes.issubset(_nics), errmsg.format(fakes, _nics)

        with veth_pair(prefix='mehv_') as (v2a, v2b):
            with dummy_device(prefix='mehd_') as d2:
                hiddens = set([d2, v2a, v2b])
                _nics = nics.nics()
                errmsg = 'Some of the hidden devices {} are shown in nics {}'
                assert not hiddens.intersection(_nics), errmsg.format(
                    hiddens, _nics
                )

    @pytest.mark.xfail(
        condition=running_on_ovirt_ci(),
        raises=AssertionError,
        reason='Bond options scanning is fragile on CI',
        strict=False,
    )
    def test_get_bonding_options(self, bond_module):
        INTERVAL = '12345'
        bond_name = random_iface_name()

        if not bond_module:
            pytest.skip('Bonding is not available')

        with open(BONDING_MASTERS, 'w') as bonds:
            bonds.write('+' + bond_name)
            bonds.flush()

            try:
                # no error is anticipated but let's make sure we can clean up
                assert self._bond_opts_without_mode(bond_name) == {}, (
                    'This test fails when a new bonding option is added to '
                    'the kernel. Please run `vdsm-tool dump-bonding-options` '
                    'and retest.'
                )

                with open(
                    bonding.BONDING_OPT % (bond_name, 'miimon'), 'w'
                ) as opt:
                    opt.write(INTERVAL)

                assert self._bond_opts_without_mode(bond_name) == {
                    'miimon': INTERVAL
                }
            finally:
                bonds.write('-' + bond_name)

    @staticmethod
    def _bond_opts_without_mode(bond_name):
        opts = Bond(bond_name).options
        opts.pop('mode')
        return opts

    @ipv6_broken_on_travis_ci
    def test_ip_info(self, nic0):
        nic0_interface = Interface.from_existing_dev_name(nic0)
        with waitfor.waitfor_ipv4_addr(nic0, address=IPV4_ADDR1_CIDR):
            nic0_interface.add_ip(
                IPV4_ADDR1, IPV4_PREFIX_LENGTH, IpFamily.IPv4
            )
        with waitfor.waitfor_ipv4_addr(nic0, address=IPV4_ADDR2_CIDR):
            nic0_interface.add_ip(
                IPV4_ADDR2, IPV4_PREFIX_LENGTH, IpFamily.IPv4
            )
        with waitfor.waitfor_ipv6_addr(nic0, address=IPV6_ADDR_CIDR):
            nic0_interface.add_ip(
                IPV6_ADDR, IPV6_PREFIX_LENGTH, IpFamily.IPv6
            )

        # 32 bit addresses are reported slashless by netlink
        with waitfor.waitfor_ipv4_addr(nic0, address=IPV4_ADDR3):
            nic0_interface.add_ip(IPV4_ADDR3, 32, IpFamily.IPv4)

        assert addresses.getIpInfo(nic0) == (
            IPV4_ADDR1,
            IPV4_NETMASK,
            [IPV4_ADDR1_CIDR, IPV4_ADDR2_CIDR, IPV4_ADDR3_CIDR],
            [IPV6_ADDR_CIDR],
        )
        assert addresses.getIpInfo(nic0, ipv4_gateway=IPV4_GATEWAY1) == (
            IPV4_ADDR1,
            IPV4_NETMASK,
            [IPV4_ADDR1_CIDR, IPV4_ADDR2_CIDR, IPV4_ADDR3_CIDR],
            [IPV6_ADDR_CIDR],
        )
        assert addresses.getIpInfo(nic0, ipv4_gateway=IPV4_GATEWAY2) == (
            IPV4_ADDR2,
            IPV4_NETMASK,
            [IPV4_ADDR1_CIDR, IPV4_ADDR2_CIDR, IPV4_ADDR3_CIDR],
            [IPV6_ADDR_CIDR],
        )

    @pytest.mark.parametrize(
        "ip_addr, ip_netmask",
        [
            pytest.param(IPV4_ADDR1, IPV4_NETMASK, id="IPV4"),
            pytest.param(
                IPV6_ADDR,
                IPV6_PREFIX_LENGTH,
                id="IPV6",
                marks=ipv6_broken_on_travis_ci,
            ),
        ],
    )
    def test_routes_device_to(self, ip_addr, ip_netmask, nic0):
        addr_in_net = ipaddress.ip_address(ip_addr) + 1
        ip_version = addr_in_net.version

        Interface.from_existing_dev_name(nic0).add_ip(
            ip_addr, ip_netmask, family=ip_version
        )
        assert routes.getRouteDeviceTo(str(addr_in_net)) == nic0

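# test_get_bonding_options above drives the kernel bonding driver directly
# through sysfs. The constants come from the imported modules, not from this
# excerpt; as a sketch (an assumption based on the standard kernel sysfs
# layout), they are expected to resolve to paths like these:
_SKETCH_BONDING_MASTERS = '/sys/class/net/bonding_masters'
_SKETCH_BONDING_OPT = '/sys/class/net/%s/bonding/%s'  # % (bond_name, option)
# Writing '+<bond>' or '-<bond>' to bonding_masters creates or removes a
# bond, and every bonding option is exposed as its own file under the bond's
# bonding/ directory.
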
class TestNetlinkEventMonitor(object):

    TIMEOUT = 5

    def test_iterate_after_events(self):
        with monitor.object_monitor(timeout=self.TIMEOUT) as mon:
            dummy = Dummy()
            dummy_name = dummy.create()
            dummy.remove()
            for event in mon:
                if event.get('name') == dummy_name:
                    break

    def test_iterate_while_events(self):
        """Tests that the monitor catches events while iterating.

        Before the iteration starts, _set_and_remove_device is launched in a
        separate thread with a 0.2 second delay. The iteration then waits for
        the events of the new dummy device.
        """
        dummy = Dummy()
        dummy_name = dummy.create()

        def _set_and_remove_device():
            time.sleep(0.2)
            dummy.up()
            dummy.remove()

        with monitor.object_monitor(timeout=self.TIMEOUT) as mon:
            add_device_thread = _start_thread(_set_and_remove_device)
            for event in mon:
                if event.get('name') == dummy_name:
                    break
            add_device_thread.join()

    def test_stopped(self):
        with monitor.object_monitor(timeout=self.TIMEOUT) as mon:
            dummy = Dummy()
            dummy_name = dummy.create()
            dummy.remove()

        found = any(event.get('name') == dummy_name for event in mon)
        assert found, 'Expected event was not caught.'

    def test_event_groups(self):
        with monitor.object_monitor(
            timeout=self.TIMEOUT, groups=('ipv4-ifaddr',)
        ) as mon_a:
            with monitor.object_monitor(
                timeout=self.TIMEOUT, groups=('link', 'ipv4-route')
            ) as mon_l_r:
                dummy = Dummy()
                dummy.create()
                dummy.set_ip(IP_ADDRESS, IP_CIDR)
                dummy.up()
                dummy.remove()

        for event in mon_a:
            assert '_addr' in event['event'], (
                "Caught event '%s' is not related to address."
                % event['event']
            )

        for event in mon_l_r:
            link_or_route = (
                '_link' in event['event'] or '_route' in event['event']
            )
            assert link_or_route, (
                "Caught event '%s' is not related to link or route."
                % event['event']
            )

    def test_iteration(self):
        with monitor.object_monitor(timeout=self.TIMEOUT) as mon:
            iterator = iter(mon)

            # Generate events to avoid blocking
            dummy = Dummy()
            dummy.create()
            next(iterator)

            dummy.remove()
            next(iterator)

        with pytest.raises(StopIteration):
            while True:
                next(iterator)

    @pytest.mark.xfail(
        condition=running_on_ovirt_ci(),
        raises=AssertionError,
        reason='Sometimes we miss some events on CI',
        strict=False,
    )
    def test_events_keys(self):
        def _simplify_event(event):
            """Strips event keys except event, address, name, destination
            and family.
            """
            allow = set(
                ['event', 'address', 'name', 'destination', 'family']
            )
            return {k: v for (k, v) in event.items() if k in allow}

        def _expected_events(nic, address, cidr):
            events_add = [
                {'event': 'new_link', 'name': nic},
                {'event': 'new_addr', 'address': address + '/' + cidr},
                {'event': 'new_link', 'name': nic},
            ]
            events_del = [
                {'address': address + '/' + cidr, 'event': 'del_addr'},
                {'destination': address, 'event': 'del_route'},
                {'event': 'del_link', 'name': nic},
            ]
            events_ipv6 = [
                {'event': 'new_addr', 'family': 'inet6'},
                {'event': 'del_addr', 'family': 'inet6'},
            ]
            if is_disabled_ipv6():
                return deque(events_add + events_del)
            else:
                return deque(events_add + events_ipv6 + events_del)

        with monitor.object_monitor(
            timeout=self.TIMEOUT, silent_timeout=True
        ) as mon:
            dummy = Dummy()
            dummy_name = dummy.create()
            dummy.set_ip(IP_ADDRESS, IP_CIDR)
            dummy.up()
            dummy.remove()

            expected_events = _expected_events(
                dummy_name, IP_ADDRESS, IP_CIDR
            )
            _expected = list(expected_events)
            _caught = []

            expected = expected_events.popleft()
            for event in mon:
                _caught.append(event)
                if _is_subdict(expected, event):
                    expected = expected_events.popleft()
                    if len(expected_events) == 0:
                        break

            assert 0 == len(expected_events), (
                'Expected events have not been caught '
                '(in the right order).\n'
                'Expected:\n%s.\nCaught:\n%s.'
                % (
                    '\n'.join([str(d) for d in _expected]),
                    '\n'.join(
                        [str(_simplify_event(d)) for d in _caught]
                    ),
                )
            )
    def test_timeout(self):
        with pytest.raises(monitor.MonitorError):
            try:
                with monitor.object_monitor(timeout=0.01) as mon:
                    for event in mon:
                        pass
            except monitor.MonitorError as e:
                assert e.args[0] == monitor.E_TIMEOUT
                raise

        assert mon.is_stopped()

    def test_timeout_silent(self):
        with monitor.object_monitor(
            timeout=0.01, silent_timeout=True
        ) as mon:
            for event in mon:
                pass

        assert mon.is_stopped()

    def test_timeout_not_triggered(self):
        time_start = monotonic_time()
        with monitor.object_monitor(timeout=self.TIMEOUT) as mon:
            dummy = Dummy()
            dummy.create()
            dummy.remove()

            for event in mon:
                break

        assert (monotonic_time() - time_start) <= self.TIMEOUT
        assert mon.is_stopped()

    def test_passing_invalid_groups(self):
        with pytest.raises(AttributeError):
            monitor.object_monitor(groups=('blablabla',))
        monitor.object_monitor(groups=('link',))

    def test_ifla_event(self, bond):
        slaves = iter(bond.slaves)
        bond.set_options({'mode': '1'})
        bond.up()
        bond.set_options({'active_slave': next(slaves)})

        with monitor.ifla_monitor(
            timeout=self.TIMEOUT, groups=('link',)
        ) as mon:
            bond.set_options({'active_slave': next(slaves)})
            for event in mon:
                if event.get('IFLA_EVENT') == 'IFLA_EVENT_BONDING_FAILOVER':
                    break

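# The monitor tests above use two helpers that are not part of this excerpt.
# A minimal sketch of what they are assumed to do (the names are taken from
# the calls above; the bodies are illustrative only):
import threading


def _is_subdict(subset, superset):
    # True if every key/value pair of `subset` also appears in `superset`.
    return all(
        key in superset and superset[key] == value
        for key, value in subset.items()
    )


def _start_thread(func):
    # Run `func` in a background daemon thread and return the thread so the
    # caller can join() it.
    thread = threading.Thread(target=func)
    thread.daemon = True
    thread.start()
    return thread
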
class TestReuseBond(object):
    def test_detach_used_bond_from_bridge(self, adapter, switch, nic0):
        NETCREATE = {
            NETWORK1_NAME: {'bonding': BOND_NAME, 'switch': switch},
            NETWORK2_NAME: {
                'bonding': BOND_NAME,
                'vlan': VLAN2,
                'switch': switch,
            },
        }
        BONDCREATE = {BOND_NAME: {'nics': [nic0], 'switch': switch}}

        with adapter.setupNetworks(NETCREATE, BONDCREATE, NOCHK):
            NETEDIT = {
                NETWORK1_NAME: {
                    'bonding': BOND_NAME,
                    'vlan': VLAN1,
                    'switch': switch,
                }
            }
            adapter.setupNetworks(NETEDIT, {}, NOCHK)
            adapter.assertBond(BOND_NAME, BONDCREATE[BOND_NAME])

    @nftestlib.parametrize_bridged
    def test_add_vlaned_network_on_existing_bond(
        self, adapter, switch, bridged, nic0
    ):
        NETBASE = {
            NETWORK1_NAME: {
                'bonding': BOND_NAME,
                'bridged': False,
                'switch': switch,
            }
        }
        BONDBASE = {BOND_NAME: {'nics': [nic0], 'switch': switch}}

        with adapter.setupNetworks(NETBASE, BONDBASE, NOCHK):
            with nftestlib.monitor_stable_link_state(BOND_NAME):
                NETVLAN = {
                    NETWORK2_NAME: {
                        'bonding': BOND_NAME,
                        'bridged': bridged,
                        'vlan': VLAN1,
                        'switch': switch,
                    }
                }
                with adapter.setupNetworks(NETVLAN, {}, NOCHK):
                    adapter.assertNetwork(
                        NETWORK1_NAME, NETBASE[NETWORK1_NAME]
                    )
                    adapter.assertNetwork(
                        NETWORK2_NAME, NETVLAN[NETWORK2_NAME]
                    )

    @pytest.mark.xfail(
        reason='Unstable on oVirt CI',
        strict=False,
        condition=running_on_ovirt_ci(),
    )
    def test_add_net_on_existing_external_bond_preserving_mac(
        self, adapter, switch, nic0, nic1
    ):
        bond = bond_without_remove(slaves=[nic0, nic1])
        Interface.from_existing_dev_name(bond).set_mac_address(HWADDRESS)

        NETBASE = {
            NETWORK1_NAME: {
                'bonding': bond,
                'bridged': False,
                'switch': switch,
            }
        }
        with adapter.setupNetworks(NETBASE, {}, NOCHK):
            adapter.assertNetwork(NETWORK1_NAME, NETBASE[NETWORK1_NAME])
            adapter.assertBond(
                bond,
                {
                    'nics': [nic0, nic1],
                    'hwaddr': HWADDRESS,
                    'switch': switch,
                },
            )
        adapter.setupNetworks({}, {bond: {'remove': True}}, NOCHK)

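# NOCHK is passed as the options argument of every setupNetworks call above.
# As an assumption about the shared test library (not shown in this excerpt),
# it merely disables the connectivity check during setup, e.g.:
#
#     NOCHK = {'connectivityCheck': False}
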
IPv6_CIDR = '64'
IPv4_NETMASK = '255.255.255.0'
IPv6_ADDRESS_AND_PREFIX_LEN = IPv6_ADDRESS2 + '/' + IPv6_CIDR

DHCPv4_RANGE_FROM = '192.0.3.2'
DHCPv4_RANGE_TO = '192.0.3.253'
DHCPv6_RANGE_FROM = 'fdb3:84e5:4ff4:55e3::a'
DHCPv6_RANGE_TO = 'fdb3:84e5:4ff4:55e3::64'

IPv4_DNS = ['1.1.1.1', '2.2.2.2']

unstable_dhcpv6_on_ovirt_ci = pytest.mark.xfail(
    reason='Unstable DHCPv6 response on oVirt CI',
    raises=nftestlib.MissingDynamicIPv6Address,
    strict=False,
    condition=running_on_ovirt_ci(),
)


class NetworkIPConfig(object):
    def __init__(
        self,
        name,
        ipv4_address=None,
        ipv4_prefix_length=None,
        ipv6_address=None,
        ipv6_prefix_length=None,
    ):
        self.name = name
        self.ipv4_address = ipv4_address
        self.ipv4_prefix_length = ipv4_prefix_length
        self.ipv6_address = ipv6_address
        self.ipv6_prefix_length = ipv6_prefix_length

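# A hypothetical construction of the config object above, for illustration
# only (the real tests build their configurations in fixtures that are not
# part of this excerpt; the name and addresses below are made up, staying in
# the module's documentation ranges):
_EXAMPLE_NET_CONFIG = NetworkIPConfig(
    'example-net',
    ipv4_address='192.0.3.1',
    ipv4_prefix_length=24,
    ipv6_address='fdb3:84e5:4ff4:55e3::1',
    ipv6_prefix_length=int(IPv6_CIDR),
)
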
class TestConfigureOutbound(object):
    # TODO:
    # test remove_outbound

    def test_single_non_vlan(self, dummy):
        qos.configure_outbound(HOST_QOS_OUTBOUND, dummy, None)

        tc_entities = self._analyse_qos_and_general_assertions(dummy)
        tc_classes, tc_filters, tc_qdiscs = tc_entities
        assert tc_classes.classes == []

        assert len(tc_qdiscs.leaf_qdiscs) == 1
        assert self._non_vlan_qdisc(tc_qdiscs.leaf_qdiscs) is not None
        self._assert_parent(tc_qdiscs.leaf_qdiscs, tc_classes.default_class)

        assert len(tc_filters.tagged_filters) == 0

    @pytest.mark.xfail(
        condition=running_on_ovirt_ci() or running_on_travis_ci(),
        reason='does not work on CI with nmstate',
        strict=False,
    )
    @pytest.mark.parametrize('repeating_calls', [1, 2])
    @mock.patch('vdsm.network.netinfo.bonding.permanent_address', lambda: {})
    def test_single_vlan(self, dummy, vlan16, repeating_calls):
        for _ in range(repeating_calls):
            qos.configure_outbound(HOST_QOS_OUTBOUND, dummy, VLAN16_TAG)

        tc_entities = self._analyse_qos_and_general_assertions(dummy)
        tc_classes, tc_filters, tc_qdiscs = tc_entities
        assert len(tc_classes.classes) == 1

        assert len(tc_qdiscs.leaf_qdiscs) == 2
        vlan_qdisc = self._vlan_qdisc(tc_qdiscs.leaf_qdiscs, VLAN16_TAG)
        vlan_class = self._vlan_class(tc_classes.classes, VLAN16_TAG)
        self._assert_parent([vlan_qdisc], vlan_class)

        tag_filters = tc_filters.tagged_filters
        assert len(tag_filters) == 1
        assert int(tag_filters[0]['basic']['value']) == VLAN16_TAG

    @pytest.mark.xfail(
        condition=running_on_ovirt_ci() or running_on_travis_ci(),
        reason='does not work on CI with nmstate',
        strict=False,
    )
    @mock.patch('vdsm.network.netinfo.bonding.permanent_address', lambda: {})
    def test_multiple_vlans(self, dummy, vlan16, vlan17):
        for vlan_tag in (VLAN16_TAG, VLAN17_TAG):
            qos.configure_outbound(HOST_QOS_OUTBOUND, dummy, vlan_tag)

        tc_entities = self._analyse_qos_and_general_assertions(dummy)
        tc_classes, tc_filters, tc_qdiscs = tc_entities
        assert len(tc_classes.classes) == 2

        assert len(tc_qdiscs.leaf_qdiscs) == 3
        v1_qdisc = self._vlan_qdisc(tc_qdiscs.leaf_qdiscs, VLAN16_TAG)
        v2_qdisc = self._vlan_qdisc(tc_qdiscs.leaf_qdiscs, VLAN17_TAG)
        v1_class = self._vlan_class(tc_classes.classes, VLAN16_TAG)
        v2_class = self._vlan_class(tc_classes.classes, VLAN17_TAG)
        self._assert_parent([v1_qdisc], v1_class)
        self._assert_parent([v2_qdisc], v2_class)

        assert len(tc_filters.tagged_filters) == 2
        current_tagged_filters_flow_id = set(
            f['basic']['flowid'] for f in tc_filters.tagged_filters
        )
        expected_flow_ids = set(
            '%s%x' % (qos._ROOT_QDISC_HANDLE, vlan_tag)
            for vlan_tag in (VLAN16_TAG, VLAN17_TAG)
        )
        assert current_tagged_filters_flow_id == expected_flow_ids

    @requires_iperf3
    @pytest.mark.xfail(reason='Not maintained stress test', run=False)
    def test_iperf_upper_limit(self, requires_tc):
        # Upper limit is not an accurate measure. This is because it
        # converges over time and depends on the current machine hardware
        # (CPU). Hence, it is hard to make strict assertions on it. The test
        # should run for at least 60 seconds (the longer the better) and the
        # user should inspect the computed average rate and, optionally, the
        # additional traffic data collected in client.out in order to be
        # convinced QoS is working properly.
        limit_kbps = 1000  # 1 Mbps (in kbps)
        server_ip = '192.0.2.1'
        client_ip = '192.0.2.10'
        qos_out = {'ul': {'m2': limit_kbps}, 'ls': {'m2': limit_kbps}}
        # Using a network namespace is essential, since otherwise the kernel
        # short-circuits the traffic and bypasses the veth devices and the
        # classful qdisc.
        with network_namespace(
            'server_ns'
        ) as ns, bridge_device() as bridge, veth_pair() as (
            server_peer,
            server_dev,
        ), veth_pair() as (
            client_dev,
            client_peer,
        ):
            # The iperf server and its veth peer lie in a separate network
            # namespace.
            link_set_netns(server_dev, ns)
            bridge.add_port(server_peer)
            bridge.add_port(client_peer)
            netns_exec(ns, ['ip', 'link', 'set', 'dev', server_dev, 'up'])
            Interface.from_existing_dev_name(client_dev).add_ip(
                client_ip, 24, IpFamily.IPv4
            )
            netns_exec(
                ns,
                [
                    'ip',
                    '-4',
                    'addr',
                    'add',
                    'dev',
                    server_dev,
                    '%s/24' % server_ip,
                ],
            )
            qos.configure_outbound(qos_out, client_peer, None)
            with running(IperfServer(server_ip, network_ns=ns)):
                client = IperfClient(server_ip, client_ip, test_time=60)
                client.start()
                max_rate = max(
                    [
                        float(interval['streams'][0]['bits_per_second'])
                        // (2 ** 10)
                        for interval in client.out['intervals']
                    ]
                )
                assert 0 < max_rate < limit_kbps * 1.5

    def _analyse_qos_and_general_assertions(self, device_name):
        tc_classes = self._analyse_classes(device_name)
        tc_qdiscs = self._analyse_qdiscs(device_name)
        tc_filters = self._analyse_filters(device_name)
        self._assertions_on_classes(
            tc_classes.classes,
            tc_classes.default_class,
            tc_classes.root_class,
        )
        self._assertions_on_qdiscs(
            tc_qdiscs.ingress_qdisc, tc_qdiscs.root_qdisc
        )
        self._assertions_on_filters(
            tc_filters.untagged_filters, tc_filters.tagged_filters
        )
        return tc_classes, tc_filters, tc_qdiscs

    def _analyse_classes(self, device_name):
        all_classes = list(tc.classes(device_name))
        root_class = self._root_class(all_classes)
        default_class = self._default_class(all_classes)
        all_classes.remove(root_class)
        all_classes.remove(default_class)
        return TcClasses(all_classes, default_class, root_class)

    def _analyse_qdiscs(self, device_name):
        all_qdiscs = list(tc.qdiscs(device_name))
        ingress_qdisc = self._ingress_qdisc(all_qdiscs)
        root_qdisc = self._root_qdisc(all_qdiscs)
        leaf_qdiscs = self._leaf_qdiscs(all_qdiscs)
        assert len(leaf_qdiscs) + 2 == len(all_qdiscs)
        return TcQdiscs(leaf_qdiscs, ingress_qdisc, root_qdisc)

    def _analyse_filters(self, device_name):
        filters = list(tc._filters(device_name))
        untagged_filters = self._untagged_filters(filters)
        tagged_filters = self._tagged_filters(filters)
        return TcFilters(untagged_filters, tagged_filters)

    def _assertions_on_classes(self, all_classes, default_class, root_class):
        assert all(
            cls.get('kind') == qos._SHAPING_QDISC_KIND for cls in all_classes
        ), str(all_classes)

        self._assertions_on_root_class(root_class)
        self._assertions_on_default_class(default_class)

        if not all_classes:  # only a default class
            self._assert_upper_limit(default_class)
        else:
            for cls in all_classes:
                self._assert_upper_limit(cls)

    def _assertions_on_qdiscs(self, ingress_qdisc, root_qdisc):
        assert root_qdisc['kind'] == qos._SHAPING_QDISC_KIND
        self._assert_root_handle(root_qdisc)
        assert ingress_qdisc['handle'] == tc.QDISC_INGRESS

    def _assertions_on_filters(self, untagged_filters, tagged_filters):
        assert all(f['protocol'] == 'all' for f in tagged_filters)
        self._assert_parent_handle(
            tagged_filters + untagged_filters, qos._ROOT_QDISC_HANDLE
        )
        assert len(untagged_filters) == 1, untagged_filters
        assert untagged_filters[0]['protocol'] == 'all'

    def _assert_upper_limit(self, default_class):
        dclass = default_class[qos._SHAPING_QDISC_KIND]['ul']['m2']
        assert dclass == HOST_QOS_OUTBOUND['ul']['m2']

    def _assertions_on_default_class(self, default_class):
        self._assert_parent_handle([default_class], qos._ROOT_QDISC_HANDLE)
        assert default_class['leaf'] == DEFAULT_CLASSID + ':'
        dclass = default_class[qos._SHAPING_QDISC_KIND]['ls']
        assert dclass == HOST_QOS_OUTBOUND['ls']
    def _assertions_on_root_class(self, root_class):
        assert root_class is not None
        self._assert_root_handle(root_class)

    def _assert_root_handle(self, entity):
        assert entity['handle'] == qos._ROOT_QDISC_HANDLE

    def _assert_parent(self, entities, parent):
        assert all(e['parent'] == parent['handle'] for e in entities)

    def _assert_parent_handle(self, entities, parent_handle):
        assert all(e['parent'] == parent_handle for e in entities)

    def _root_class(self, classes):
        return _find_entity(lambda c: c.get('root'), classes)

    def _default_class(self, classes):
        default_cls_handle = qos._ROOT_QDISC_HANDLE + DEFAULT_CLASSID
        return _find_entity(
            lambda c: c['handle'] == default_cls_handle, classes
        )

    def _ingress_qdisc(self, qdiscs):
        return _find_entity(lambda q: q['kind'] == 'ingress', qdiscs)

    def _root_qdisc(self, qdiscs):
        return _find_entity(lambda q: q.get('root'), qdiscs)

    def _leaf_qdiscs(self, qdiscs):
        return [
            qdisc
            for qdisc in qdiscs
            if qdisc['kind'] == qos._FAIR_QDISC_KIND
        ]

    def _untagged_filters(self, filters):
        def untagged(f):
            return f.get('u32', {}).get('match', {}) == {
                'mask': 0,
                'value': 0,
                'offset': 0,
            }

        return list(f for f in filters if untagged(f))

    def _tagged_filters(self, filters):
        def tagged(f):
            return f.get('basic', {}).get('object') == 'vlan'

        return list(f for f in filters if tagged(f))

    def _vlan_qdisc(self, qdiscs, vlan_tag):
        handle = '%x:' % vlan_tag
        return _find_entity(lambda q: q['handle'] == handle, qdiscs)

    def _vlan_class(self, classes, vlan_tag):
        handle = qos._ROOT_QDISC_HANDLE + '%x' % vlan_tag
        return _find_entity(lambda c: c['handle'] == handle, classes)

    def _non_vlan_qdisc(self, qdiscs):
        handle = DEFAULT_CLASSID + ':'
        return _find_entity(lambda q: q['handle'] == handle, qdiscs)

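# The helper methods above rely on a module-level _find_entity function and
# on the TcClasses/TcQdiscs/TcFilters containers, none of which appear in
# this excerpt. A minimal sketch of what they are assumed to look like,
# matching the way they are constructed and accessed above:
from collections import namedtuple

TcClasses = namedtuple('TcClasses', 'classes default_class root_class')
TcQdiscs = namedtuple('TcQdiscs', 'leaf_qdiscs ingress_qdisc root_qdisc')
TcFilters = namedtuple('TcFilters', 'untagged_filters tagged_filters')


def _find_entity(predicate, entities):
    # Return the first entity satisfying the predicate, or None if there is
    # no match.
    for entity in entities:
        if predicate(entity):
            return entity
    return None
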
class TestBridge(object):
    @nftestlib.parametrize_switch
    def test_add_bridge_with_stp(self, adapter, switch, nic0):
        if switch == 'ovs':
            pytest.xfail('stp is currently not implemented for ovs')

        NETCREATE = {
            NETWORK_NAME: {'nic': nic0, 'switch': switch, 'stp': True}
        }
        with adapter.setupNetworks(NETCREATE, {}, nftestlib.NOCHK):
            adapter.assertNetworkExists(NETWORK_NAME)
            adapter.assertNetworkBridged(NETWORK_NAME)
            adapter.assertBridgeOpts(NETWORK_NAME, NETCREATE[NETWORK_NAME])

    @nftestlib.parametrize_legacy_switch
    def test_add_bridge_with_custom_opts(self, adapter, switch, nic0):
        NET_ATTRS = {
            'nic': nic0,
            'switch': switch,
            'custom': {
                'bridge_opts': 'multicast_snooping=0 multicast_router=0'
            },
        }
        NETCREATE = {NETWORK_NAME: NET_ATTRS}
        with adapter.setupNetworks(NETCREATE, {}, nftestlib.NOCHK):
            adapter.assertBridgeOpts(NETWORK_NAME, NET_ATTRS)

    @pytest.mark.xfail(
        reason='Unstable on oVirt CI',
        strict=False,
        condition=running_on_ovirt_ci(),
    )
    @nftestlib.parametrize_legacy_switch
    def test_create_network_over_an_existing_unowned_bridge(
        self, adapter, switch, nic0
    ):
        with _create_linux_bridge(NETWORK_NAME) as brname:
            NETCREATE = {
                brname: {'bridged': True, 'nic': nic0, 'switch': switch}
            }
            with adapter.setupNetworks(NETCREATE, {}, nftestlib.NOCHK):
                adapter.assertNetwork(brname, NETCREATE[brname])

    @pytest.mark.xfail(
        reason='Unstable link on oVirt CI',
        raises=nftestlib.UnexpectedLinkStateChangeError,
        strict=False,
        condition=running_on_ovirt_ci(),
    )
    @nftestlib.parametrize_legacy_switch
    def test_create_network_and_reuse_existing_owned_bridge(
        self, adapter, switch, nic0, nic1, hidden_nic
    ):
        NETSETUP1 = {NETWORK_NAME: {'nic': nic0, 'switch': switch}}
        NETSETUP2 = {NETWORK_NAME: {'nic': nic1, 'switch': switch}}
        with adapter.setupNetworks(NETSETUP1, {}, nftestlib.NOCHK):
            nftestlib.attach_dev_to_bridge(hidden_nic, NETWORK_NAME)
            with nftestlib.monitor_stable_link_state(NETWORK_NAME):
                adapter.setupNetworks(NETSETUP2, {}, nftestlib.NOCHK)
                adapter.assertNetwork(NETWORK_NAME, NETSETUP2[NETWORK_NAME])

    @nftestlib.parametrize_legacy_switch
    def test_reconfigure_bridge_with_vanished_port(
        self, adapter, switch, nic0
    ):
        NETCREATE = {
            NETWORK_NAME: {'nic': nic0, 'bridged': True, 'switch': switch}
        }
        with adapter.setupNetworks(NETCREATE, {}, nftestlib.NOCHK):
            with dummy_device() as nic1:
                NETCREATE[NETWORK_NAME]['nic'] = nic1
                adapter.setupNetworks(NETCREATE, {}, nftestlib.NOCHK)

            adapter.refresh_netinfo()
            assert adapter.netinfo.networks[NETWORK_NAME]['ports'] == []

            NETCREATE[NETWORK_NAME]['nic'] = nic0
            adapter.setupNetworks(NETCREATE, {}, nftestlib.NOCHK)
            net_ports = adapter.netinfo.networks[NETWORK_NAME]['ports']
            assert net_ports == [nic0]

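# test_create_network_over_an_existing_unowned_bridge uses a helper that sets
# up a bridge outside of VDSM's control. A sketch of such a context manager,
# assuming plain iproute2 commands (the real helper is not shown here and may
# be implemented differently):
import subprocess
from contextlib import contextmanager


@contextmanager
def _create_linux_bridge_sketch(name):
    # Create an unowned linux bridge, hand its name to the caller and make
    # sure it is removed afterwards.
    subprocess.check_call(['ip', 'link', 'add', name, 'type', 'bridge'])
    try:
        subprocess.check_call(['ip', 'link', 'set', name, 'up'])
        yield name
    finally:
        subprocess.check_call(['ip', 'link', 'del', name])
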
class TestNetinfo(object):
    def test_netmask_conversions(self):
        path = os.path.join(os.path.dirname(__file__), "netmaskconversions")
        with open(path) as netmaskFile:
            for line in netmaskFile:
                if line.startswith('#'):
                    continue
                bitmask, address = [value.strip() for value in line.split()]
                assert prefix2netmask(int(bitmask)) == address

        pytest.raises(ValueError, prefix2netmask, -1)
        pytest.raises(ValueError, prefix2netmask, 33)

    @mock.patch.object(nic, 'speed')
    @mock.patch.object(bond_speed, 'properties')
    def test_bond_speed(self, mock_properties, speed_mock):
        values = (
            ('bond1', [1000], 1000),
            ('bond2', [1000, 2000], 3000),
            ('bond3', [1000, 2000], 1000),
            ('bond4', [1000, 1000], 0),
            ('bond5', [1000, 2000], 0),
        )
        bonds_opts = {
            'bond1': {
                'mode': ['active-backup', '1'],
                'slaves': ('dummy1', 'dummy2'),
                'active_slave': 'dummy1',
            },
            'bond2': {
                'mode': ['balance-xor', '2'],
                'slaves': ('dummy1', 'dummy2'),
            },
            'bond3': {
                'mode': ['broadcast', '3'],
                'slaves': ('dummy1', 'dummy2'),
            },
            'bond4': {'mode': ['802.3ad', '4']},
            'bond5': {
                'mode': ['active-backup', '1'],
                'slaves': ('dummy1', 'dummy2'),
            },
        }

        for bond_name, nics_speeds, expected_speed in values:
            mock_properties.return_value = bonds_opts[bond_name]
            speed_mock.side_effect = nics_speeds

            assert bond_speed.speed(bond_name) == expected_speed

    @mock.patch.object(nic, 'iface')
    @mock.patch.object(nics.io, 'open')
    def test_valid_nic_speed(self, mock_io_open, mock_iface):
        IS_UP = True
        values = (
            (b'0', IS_UP, 0),
            (b'-10', IS_UP, 0),
            (six.b(str(2 ** 16 - 1)), IS_UP, 0),
            (six.b(str(2 ** 32 - 1)), IS_UP, 0),
            (b'123', IS_UP, 123),
            (b'', IS_UP, 0),
            (b'', not IS_UP, 0),
            (b'123', not IS_UP, 0),
        )

        for passed, is_nic_up, expected in values:
            mock_io_open.return_value = io.BytesIO(passed)
            mock_iface.return_value.is_oper_up.return_value = is_nic_up

            assert nic.speed('fake_nic') == expected

    def test_dpdk_device_speed(self):
        assert nic.speed('dpdk0') == 0

    def test_dpdk_operstate_always_up(self):
        assert nics.operstate('dpdk0') == nics.OPERSTATE_UP

    @pytest.mark.xfail(
        condition=running_on_ovirt_ci(),
        raises=KeyError,
        reason='does not work on CI with nmstate',
        strict=False,
    )
    @mock.patch.object(bonding, 'permanent_address', lambda: {})
    @mock.patch('vdsm.network.netinfo.cache.RunningConfig')
    def test_get_non_existing_bridge_info(
        self, mock_runningconfig, current_state_mock
    ):
        # Getting the info of a non-existing bridge should not raise an
        # exception, just log a traceback. If it does raise, the test fails,
        # as it should.
        mock_runningconfig.return_value.networks = {'fake': {'bridged': True}}

        get()

    @mock.patch.object(bonding, 'permanent_address', lambda: {})
    @mock.patch('vdsm.network.netinfo.cache.getLinks')
    @mock.patch('vdsm.network.netinfo.cache.RunningConfig')
    def test_get_empty(
        self, mock_networks, mock_getLinks, current_state_mock
    ):
        result = {}
        result.update(get())
        assert result['networks'] == {}
        assert result['bridges'] == {}
        assert result['nics'] == {}
        assert result['bondings'] == {}
        assert result['vlans'] == {}

    def test_ipv4_to_mapped(self):
        assert '::ffff:127.0.0.1' == addresses.IPv4toMapped('127.0.0.1')

    def test_get_device_by_ip(self):
        NL_ADDRESS4 = {
            'label': 'iface0',
            'address': '127.0.0.1/32',
            'family': 'inet',
        }
        NL_ADDRESS6 = {
            'label': 'iface1',
            'address': '2001::1:1:1/48',
            'family': 'inet6',
        }
        NL_ADDRESSES = [NL_ADDRESS4, NL_ADDRESS6]

        with mock.patch.object(
            addresses.nl_addr, 'iter_addrs', lambda: NL_ADDRESSES
        ):
            for nl_addr in NL_ADDRESSES:
                lbl = addresses.getDeviceByIP(
                    nl_addr['address'].split('/')[0]
                )
                assert nl_addr['label'] == lbl

    @mock.patch.object(ipwrapper.Link, '_hiddenNics', ['hid*'])
    @mock.patch.object(ipwrapper.Link, '_hiddenBonds', ['jb*'])
    @mock.patch.object(ipwrapper.Link, '_fakeNics', ['fake*'])
    @mock.patch.object(ipwrapper.Link, '_detectType', lambda x: None)
    @mock.patch.object(ipwrapper, '_bondExists', lambda x: x == 'jbond')
    @mock.patch.object(misc, 'getLinks')
    def test_nics(self, mock_getLinks):
        """
        Managed by vdsm: em, me, fake and fake0.
        Not managed due to hidden bond (jbond) enslavement: me0 and me1.
        Not managed due to being hidden nics: hid0 and hideous.
        """
        mock_getLinks.return_value = self._LINKS_REPORT

        assert set(nics.nics()) == set(['em', 'me', 'fake', 'fake0'])

    # Creates a test fixture so that nics() reports:
    # physical nics: em, me, me0, me1, hid0 and hideous
    # dummies: fake and fake0
    # bonds: jbond (over me0 and me1)
    _LINKS_REPORT = [
        ipwrapper.Link(
            address='f0:de:f1:da:aa:e7',
            index=2,
            linkType=ipwrapper.LinkType.NIC,
            mtu=1500,
            name='em',
            qdisc='pfifo_fast',
            state='up',
        ),
        ipwrapper.Link(
            address='ff:de:f1:da:aa:e7',
            index=3,
            linkType=ipwrapper.LinkType.NIC,
            mtu=1500,
            name='me',
            qdisc='pfifo_fast',
            state='up',
        ),
        ipwrapper.Link(
            address='ff:de:fa:da:aa:e7',
            index=4,
            linkType=ipwrapper.LinkType.NIC,
            mtu=1500,
            name='hid0',
            qdisc='pfifo_fast',
            state='up',
        ),
        ipwrapper.Link(
            address='ff:de:11:da:aa:e7',
            index=5,
            linkType=ipwrapper.LinkType.NIC,
            mtu=1500,
            name='hideous',
            qdisc='pfifo_fast',
            state='up',
        ),
        ipwrapper.Link(
            address='66:de:f1:da:aa:e7',
            index=6,
            linkType=ipwrapper.LinkType.NIC,
            mtu=1500,
            name='me0',
            qdisc='pfifo_fast',
            state='up',
            master='jbond',
        ),
        ipwrapper.Link(
            address='66:de:f1:da:aa:e7',
            index=7,
            linkType=ipwrapper.LinkType.NIC,
            mtu=1500,
            name='me1',
            qdisc='pfifo_fast',
            state='up',
            master='jbond',
        ),
        ipwrapper.Link(
            address='ff:aa:f1:da:aa:e7',
            index=34,
            linkType=ipwrapper.LinkType.DUMMY,
            mtu=1500,
            name='fake0',
            qdisc='pfifo_fast',
            state='up',
        ),
        ipwrapper.Link(
            address='ff:aa:f1:da:bb:e7',
            index=35,
            linkType=ipwrapper.LinkType.DUMMY,
            mtu=1500,
            name='fake',
            qdisc='pfifo_fast',
            state='up',
        ),
        ipwrapper.Link(
            address='66:de:f1:da:aa:e7',
            index=419,
            linkType=ipwrapper.LinkType.BOND,
            mtu=1500,
            name='jbond',
            qdisc='pfifo_fast',
            state='up',
        ),
    ]

    @mock.patch.object(misc, 'open', create=True)
    def test_get_ifcfg(self, mock_open):
        gateway = '1.1.1.1'
        netmask = '255.255.0.0'

        ifcfg = "GATEWAY0={}\nNETMASK={}\n".format(gateway, netmask)
        ifcfg_stream = six.StringIO(ifcfg)
        mock_open.return_value.__enter__.return_value = ifcfg_stream
        resulted_ifcfg = misc.getIfaceCfg('eth0')

        assert resulted_ifcfg['GATEWAY'] == gateway
        assert resulted_ifcfg['NETMASK'] == netmask

    @mock.patch.object(misc, 'open', create=True)
    def test_missing_ifcfg_file(self, mock_open):
        mock_open.return_value.__enter__.side_effect = IOError()

        ifcfg = misc.getIfaceCfg('eth0')

        assert ifcfg == {}

    @staticmethod
    def _bond_opts_without_mode(bond_name):
        opts = Bond(bond_name).options
        opts.pop('mode')
        return opts

    def test_get_gateway(self):
        TEST_IFACE = 'test_iface'
        # Different tables, but the gateway is the same, so it should be
        # reported.
        DUPLICATED_GATEWAY = {
            TEST_IFACE: [
                {
                    'destination': 'none',
                    'family': 'inet',
                    'gateway': '12.34.56.1',
                    'oif': TEST_IFACE,
                    'oif_index': 8,
                    'scope': 'global',
                    'source': None,
                    'table': 203569230,  # we got the address 12.34.56.78
                },
                {
                    'destination': 'none',
                    'family': 'inet',
                    'gateway': '12.34.56.1',
                    'oif': TEST_IFACE,
                    'oif_index': 8,
                    'scope': 'global',
                    'source': None,
                    'table': 254,
                },
            ]
        }
        SINGLE_GATEWAY = {TEST_IFACE: [DUPLICATED_GATEWAY[TEST_IFACE][0]]}

        gateway = routes.get_gateway(SINGLE_GATEWAY, TEST_IFACE)
        assert gateway == '12.34.56.1'
        gateway = routes.get_gateway(DUPLICATED_GATEWAY, TEST_IFACE)
        assert gateway == '12.34.56.1'

    def test_netinfo_ignoring_link_scope_ip(self):
        v4_link = {
            'family': 'inet',
            'address': '169.254.0.0/16',
            'scope': 'link',
            'prefixlen': 16,
            'flags': ['permanent'],
        }
        v4_global = {
            'family': 'inet',
            'address': '192.0.2.2/24',
            'scope': 'global',
            'prefixlen': 24,
            'flags': ['permanent'],
        }
        v6_link = {
            'family': 'inet6',
            'address': 'fe80::5054:ff:fea3:f9f3/64',
            'scope': 'link',
            'prefixlen': 64,
            'flags': ['permanent'],
        }
        v6_global = {
            'family': 'inet6',
            'address': 'ee80::5054:ff:fea3:f9f3/64',
            'scope': 'global',
            'prefixlen': 64,
            'flags': ['permanent'],
        }
        ipaddrs = {'eth0': (v4_link, v4_global, v6_link, v6_global)}
        ipv4addr, ipv4netmask, ipv4addrs, ipv6addrs = addresses.getIpInfo(
            'eth0', ipaddrs=ipaddrs
        )
        assert ipv4addrs == ['192.0.2.2/24']
        assert ipv6addrs == ['ee80::5054:ff:fea3:f9f3/64']

    def test_parse_bond_options(self):
        expected = {'mode': '4', 'miimon': '100'}
        assert expected == bonding.parse_bond_options('mode=4 miimon=100')

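# A sketch of the behavior that test_parse_bond_options exercises. This is an
# illustrative reimplementation of just what the test asserts, not the vdsm
# function itself (which handles more cases): the option string is a space
# separated list of key=value pairs.
def _parse_bond_options_sketch(options):
    return dict(pair.split('=', 1) for pair in options.split())


# _parse_bond_options_sketch('mode=4 miimon=100')
# -> {'mode': '4', 'miimon': '100'}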