class TestCaseTest(unit.TobikoUnitTest):
    """Exercises tobiko's test-case stack push/pop management."""

    def setUp(self):
        super(TestCaseTest, self).setUp()
        # Any test case pushed by an individual test must be popped
        # again before the next test runs.
        self.addCleanup(self._pop_inner_test_cases)

    def _pop_inner_test_cases(self):
        # Keep popping until this very instance is back on top of the
        # tobiko test-case stack.
        while tobiko.get_test_case() is not self:
            tobiko.pop_test_case()
def test_ovs_bridges_mac_table_size():
    """Verify every OVS bridge has the expected mac-table-size set.

    Bridge names are discovered from the node's bridge mappings (OVN
    external_ids on OVN deployments, the ml2 agent config otherwise);
    nodes without bridge mappings are skipped.
    """
    test_case = tobiko.get_test_case()
    expected_mac_table_size = '50000'
    get_mac_table_size_cmd = ('ovs-vsctl get bridge {br_name} '
                              'other-config:mac-table-size')
    if neutron.has_ovn():
        get_br_mappings_cmd = ('ovs-vsctl get Open_vSwitch . '
                               'external_ids:ovn-bridge-mappings')
    else:
        get_br_mappings_cmd = (
            'crudini --get /var/lib/config-data/puppet-generated/neutron/'
            'etc/neutron/plugins/ml2/openvswitch_agent.ini '
            'ovs bridge_mappings')
    for node in topology.list_openstack_nodes(group='overcloud'):
        try:
            br_mappings_str = sh.execute(
                get_br_mappings_cmd,
                ssh_client=node.ssh_client,
                sudo=True).stdout.splitlines()[0]
        except sh.ShellCommandFailed:
            # Fix: exc_info expects a boolean, not the integer 1
            LOG.debug(f"bridge mappings not configured on node "
                      f"'{node.name}'",
                      exc_info=True)
            continue
        # mappings look like "physnet1:br-ex,physnet2:br-data"
        br_list = [
            br_mapping.split(':')[1]
            for br_mapping in br_mappings_str.replace('"', '').split(',')
        ]
        for br_name in br_list:
            mac_table_size = sh.execute(
                get_mac_table_size_cmd.format(br_name=br_name),
                ssh_client=node.ssh_client,
                sudo=True).stdout.splitlines()[0]
            # Fix: assertEqual(expected, actual) ordering so failure
            # messages label the values correctly (consistent with the
            # rest of this file)
            test_case.assertEqual(expected_mac_table_size,
                                  mac_table_size.replace('"', ''))
def test_push_test_case(self):
    """Pushing a test case makes it the current one on the stack."""
    class _NestedCase(unittest.TestCase):
        def runTest(self):
            pass

    pushed = _NestedCase()
    tobiko.push_test_case(pushed)
    self.assertIs(pushed, tobiko.get_test_case())
def assert_pcap_content(pcap: dpkt.pcap.Reader, expect_empty: bool):
    """Assert whether the given pcap capture is empty.

    Consumes at most one packet from the reader to decide.
    """
    has_packets = any(True for _ in pcap)
    testcase = tobiko.get_test_case()
    LOG.debug(f'Is the obtained pcap file empty? {not has_packets}')
    testcase.assertEqual(expect_empty, not has_packets)
def test_ovn_dbs_validations():
    """Run OVN database sync and VIP binding validations (OVN only)."""
    if neutron.has_ovn():
        test_case = tobiko.get_test_case()
        # run validations
        ovn_dbs_are_synchronized(test_case)
        ovn_dbs_vip_bindings(test_case)
    else:
        LOG.debug('OVN not configured. OVN DB sync validations skipped')
def make_temp_dir(ssh_client: ssh.SSHClientType = None,
                  sudo: bool = None) -> str:
    """Create a temporary directory on the (possibly remote) host.

    The directory is scheduled for removal via the current test case's
    cleanup, and its absolute path is returned.
    """
    case = tobiko.get_test_case()
    result = _execute.execute('mktemp -d',
                              ssh_client=ssh_client,
                              sudo=sudo)
    dir_name: str = result.stdout.strip()
    case.addCleanup(_execute.execute,
                    f'rm -fR "{dir_name}"',
                    ssh_client=ssh_client,
                    sudo=sudo)
    return dir_name
def test_controllers_shutdown():
    """Power off a quorum-sized subset of controllers and recover.

    Requires at least three controller nodes. Checks a Cirros VM's
    reachability before, during and after the controllers outage; the
    powered-off nodes are powered back on via cleanup even on failure.
    """
    test_case = tobiko.get_test_case()
    all_nodes = topology.list_openstack_nodes(group='controller')
    if len(all_nodes) < 3:
        tobiko.skip_test('It requires at least three controller nodes')
    all_node_names = [node.name for node in all_nodes]
    LOG.info("Ensure all controller nodes are running: "
             f"{all_node_names}")
    for node in all_nodes:
        node.power_on_overcloud_node()
    topology.assert_reachable_nodes(all_nodes)

    LOG.debug('Check VM is running while all controllers nodes are on')
    nova_server = tobiko.setup_fixture(stacks.CirrosServerStackFixture)
    nova_server_ip = nova_server.ip_address
    ping.assert_reachable_hosts([nova_server_ip])

    # Power off a minimal majority, leaving at most half the nodes on.
    quorum_level = math.ceil(0.5 * len(all_nodes))
    assert quorum_level >= len(all_nodes) - quorum_level
    nodes = random.sample(all_nodes, quorum_level)
    node_names = [node.name for node in nodes]
    LOG.info(f"Power off {quorum_level} random controller nodes: "
             f"{node_names}")
    for node in nodes:
        node.power_off_overcloud_node()
        test_case.addCleanup(node.power_on_overcloud_node)
    topology.assert_unreachable_nodes(nodes, retry_count=1)
    topology.assert_reachable_nodes(node
                                    for node in all_nodes
                                    if node not in nodes)

    # Fix: broken adjacent-string concatenation produced stray quotes
    # ('some ""controllers ...') and 'whenever' was a typo for 'whether'
    LOG.debug('Check whether VM is still running while some '
              'controllers nodes are off')
    reachable, unreachable = ping.ping_hosts([nova_server_ip], count=1)
    if reachable:
        LOG.debug(f"VM ips are reachable: {reachable}")
    if unreachable:
        # Fix: "VM is are unreachable" typo
        LOG.debug(f"VM ips are unreachable: {unreachable}")
    # TODO what do we expect here: VM reachable or unreachable?

    random.shuffle(nodes)
    LOG.info(f"Power on controller nodes: {node_names}")
    for node in nodes:
        node.power_on_overcloud_node()
    LOG.debug("Check all controller nodes are running again: "
              f"{all_node_names}")
    topology.assert_reachable_nodes(all_nodes, retry_timeout=600.)

    LOG.debug('Check VM is running while all controllers nodes are on')
    ping.assert_reachable_hosts([nova_server_ip])
def test_ovs_interfaces_are_absent(
        group: typing.Pattern[str] = OPENSTACK_NODE_GROUP,
        interface: typing.Pattern[str] = OVS_INTERFACE):
    """Assert no OVS-named network interface exists on matching nodes."""
    found: typing.Dict[str, typing.List[str]] = {}
    for node in topology.list_openstack_nodes(group=group):
        matching = [
            name
            for name in ip.list_network_interfaces(
                ssh_client=node.ssh_client, sudo=True)
            if interface.match(name)]
        if matching:
            found[node.name] = matching

    test_case = tobiko.get_test_case()
    test_case.assertEqual(
        {}, found,
        f"OVS interface(s) found on OpenStack nodes: {found}")
def test_servers_creation(stack=TestServerCreationStack,
                          number_of_servers=2) -> \
        tobiko.Selection[_nova.ServerStackFixture]:
    """Create several server stacks and verify they come up correctly.

    Checks every created server gets a fresh, unique ID, is reachable
    over SSH and answers pings on its floating IP. Returns the list of
    server stack fixtures.
    """
    existing_ids = {server.id for server in nova.list_servers()}
    pid = os.getpid()
    fixture_class = tobiko.get_fixture_class(stack)
    count = number_of_servers or 1

    # Build the list of server stack fixture instances
    fixtures: tobiko.Selection[_nova.ServerStackFixture] = tobiko.select(
        tobiko.get_fixture(fixture_class, fixture_id=f'{pid}-{i}')
        for i in range(count))

    test_case = tobiko.get_test_case()
    # Sanity-check fixture types
    for fixture in fixtures:
        test_case.assertIsInstance(fixture, _nova.ServerStackFixture)

    # Tear down any leftover stacks before (re-)creating them
    for fixture in fixtures:
        tobiko.cleanup_fixture(fixture)
    for fixture in fixtures:
        tobiko.use_fixture(fixture)

    # Every server ID must be unique and not pre-existing
    server_ids = {fixture.server_id for fixture in fixtures}
    test_case.assertEqual(count, len(server_ids))
    test_case.assertFalse(server_ids & existing_ids)

    # sleep for 20 sec, ensure no race condition with ssh
    time.sleep(20)

    # SSH connectivity through the floating IP address
    for fixture in fixtures:
        test_case.assertTrue(sh.get_hostname(ssh_client=fixture.ssh_client))

    # ICMP connectivity through the floating IP address
    ping.assert_reachable_hosts(fixture.floating_ip_address
                                for fixture in fixtures)
    return fixtures
def assert_ovn_unsupported_dhcp_option_messages(
        reader: OvnUnsupportedDhcpOptionReader = None,
        new_lines=True,
        unsupported_options: typing.Optional[typing.List] = None,
        **attributes):
    """Assert exactly one log line matched per unsupported DHCP option.

    With new_lines=True only freshly-read log lines are considered;
    otherwise all accumulated responses are. Optional attributes
    further filter the matched lines.
    """
    if reader is None:
        reader = tobiko.setup_fixture(OvnUnsupportedDhcpOptionReader)
    # read_responses() must run regardless so new log lines get
    # consumed and accumulated
    responses = reader.read_responses()
    if not new_lines:
        responses = reader.responses
    if attributes and responses:
        responses = responses.with_attributes(**attributes)

    test_case = tobiko.get_test_case()
    for option in unsupported_options or []:
        matches = responses.with_attributes(unsupported_dhcp_option=option)
        test_case.assertEqual(1, len(matches))
        LOG.debug('Found one match for unsupported dhcp option '
                  f'{option}')
def assert_has_bandwith_limits(address: typing.Union[str, netaddr.IPAddress],
                               min_bandwith: float,
                               max_bandwith: float,
                               bitrate: int = None,
                               download: bool = None,
                               port: int = None,
                               protocol: str = None,
                               ssh_client: ssh.SSHClientType = None,
                               timeout: tobiko.Seconds = None) -> None:
    """Assert the measured bandwidth stays strictly within given limits."""
    measured = _execute.get_bandwidth(address=address,
                                      bitrate=bitrate,
                                      download=download,
                                      port=port,
                                      protocol=protocol,
                                      ssh_client=ssh_client,
                                      timeout=timeout)
    testcase = tobiko.get_test_case()
    LOG.debug(f'measured bandwith: {measured}')
    LOG.debug(f'bandwith limits: {min_bandwith} ... {max_bandwith}')
    # an 8% of lower deviation is allowed
    testcase.assertGreater(measured, min_bandwith)
    # a 5% of upper deviation is allowed
    testcase.assertLess(measured, max_bandwith)
def check_members_balanced(
        ip_address: str,
        protocol: str,
        port: int,
        pool_id: str = None,
        members_count: int = None,
        lb_algorithm: str = None,
        requests_count: int = 10,
        connect_timeout: tobiko.Seconds = 10.,
        interval: tobiko.Seconds = 1,
        ssh_client: ssh.SSHClientFixture = None) -> (
        typing.Dict[str, int]):
    """Check if traffic is properly balanced between members.

    Sends members_count * requests_count requests to the load balancer
    and counts the replies per backend. Raises TrafficTimeoutError on
    curl timeouts, RoundRobinException when ROUND_ROBIN forwards twice
    in a row to the same member, and asserts every member replied.
    Returns the per-member reply counts.
    """
    test_case = tobiko.get_test_case()

    # Getting the members count
    if members_count is None:
        if pool_id is None:
            raise ValueError('Either members_count or pool_id has to be '
                             'passed to the function.')
        else:  # members_count is None and pool_id is not None
            members_count = len(octavia.list_members(pool_id=pool_id))

    last_content = None
    # defaultdict(int) is the idiomatic zero-initialized counter
    replies: typing.Dict[str, int] = collections.defaultdict(int)
    for attempt in tobiko.retry(count=members_count * requests_count,
                                interval=interval):
        try:
            content = curl.execute_curl(hostname=ip_address,
                                        scheme=protocol,
                                        port=port,
                                        path='id',
                                        connect_timeout=connect_timeout,
                                        ssh_client=ssh_client).strip()
        except sh.ShellCommandFailed as ex:
            if ex.exit_status == 28:  # curl exit code 28: timeout
                raise octavia.TrafficTimeoutError(
                    reason=str(ex.stderr)) from ex
            else:
                # re-raise preserving the original traceback
                raise

        replies[content] += 1

        if last_content is not None and lb_algorithm == 'ROUND_ROBIN':
            if members_count > 1 and last_content == content:
                raise octavia.RoundRobinException(
                    'Request was forwarded two times to the same host:\n'
                    f'members_count: {members_count}\n'
                    f'expected: {last_content}\n'
                    f'actual: {content}\n')
        last_content = content

        if attempt.is_last:
            break
    else:
        raise RuntimeError('Broken retry loop')

    LOG.debug(f"Replies counts from load balancer: {replies}")

    # assert that 'members_count' servers replied
    missing_members_count = members_count - len(replies)
    # Fix: broken adjacent-string concatenation produced stray quotes
    # ('... ""members.') in the failure message
    test_case.assertEqual(
        0, missing_members_count,
        f'Missing replies from {missing_members_count} members.')

    return replies
def test_get_test_case_out_of_context(self):
    """A fresh manager with no active case yields the dummy test case."""
    manager = tobiko.TestCasesManager()
    case = tobiko.get_test_case(manager=manager)
    self.assertIsInstance(case, unittest.TestCase)
    self.assertEqual('tobiko.common._testcase.DummyTestCase.runTest',
                     case.id())
def test_get_test_case(self):
    """The currently running test case is the one returned."""
    self.assertIs(self, tobiko.get_test_case())