def test_get_ipv6_subnet(self):
    """Verify the stack's IPv6 subnet exists in Neutron and matches details."""
    if not self.stack.has_ipv6:
        # Bug fix: the message was missing the f-prefix, so the literal
        # placeholder "{self.stack.stack_name}" was emitted verbatim.
        tobiko.skip_test(
            f"Stack {self.stack.stack_name} has no IPv6 subnet")
    subnet = neutron.get_subnet(self.stack.ipv6_subnet_id)
    self.assertEqual(self.stack.ipv6_subnet_id, subnet['id'])
    self.assertEqual(self.stack.ipv6_subnet_details, subnet)
def test_find_floating_network(self):
    """Check the configured floating network resolves to the gateway net."""
    configured_name = CONF.tobiko.neutron.floating_network
    if not configured_name:
        tobiko.skip_test('floating_network not configured')
    network = neutron.find_network(name=configured_name)
    self.assertIn(configured_name, (network['name'], network['id']))
    self.assertEqual(self.stack.gateway_network_id, network['id'])
def test_ipv4_subnet_cidr(self):
    """Find the IPv4 subnet by CIDR and compare with the subnet found by ID."""
    if not self.stack.has_ipv4:
        tobiko.skip_test(f"Stack {self.stack.stack_name} has no ipv4 "
                         "subnet")
    cidr_text = str(self.stack.ipv4_subnet_cidr)
    found_subnet = neutron.find_subnet(cidr=cidr_text)
    expected_subnet = neutron.get_subnet(self.stack.ipv4_subnet_id)
    self.assertEqual(expected_subnet, found_subnet)
def open_file(filename, mode):
    """Open an XZ-compressed image file for reading or writing.

    Skips the test when no lzma implementation can be imported.

    :param filename: path of the XZ file
    :param mode: file mode forwarded to LZMAFile
    :return: an LZMAFile object wrapping the given file
    """
    try:
        lzma_module = import_lzma()
    except ImportError:
        tobiko.skip_test(
            "Package lzma or backports.lzma is required to decompress "
            f"{filename!r} (mode={mode!r}) XZ image file "
            f"({sys.version!r}).")
    return lzma_module.LZMAFile(filename=filename, mode=mode)
def ssh_client(self) -> ssh.SSHClientType:
    """Return an SSH client fixture to some reachable SSH server.

    Prefers the configured SSH proxy client; otherwise falls back to the
    first OpenStack node exposing an SSH client fixture. Skips the test
    when no suitable SSH server can be found.
    """
    ssh_client = ssh.ssh_proxy_client()
    if isinstance(ssh_client, ssh.SSHClientFixture):
        return ssh_client
    nodes = topology.list_openstack_nodes()
    for node in nodes:
        if isinstance(node.ssh_client, ssh.SSHClientFixture):
            # Bug fix: the original returned the proxy client (already
            # rejected by the isinstance check above) instead of the
            # node's own client fixture that was just validated.
            return node.ssh_client
    tobiko.skip_test('No such SSH server host to connect to')
def setup_fixture(self):
    """Select the first OpenStack node running a Podman server.

    Stores the node in ``self.node``; skips the test when Podman is not
    running anywhere.
    """
    nodes = topology.list_openstack_nodes()
    for candidate in nodes:
        assert candidate.ssh_client is not None
        if podman.is_podman_running(ssh_client=candidate.ssh_client):
            self.node = candidate
            break
    # NOTE(review): assumes self.node is pre-initialized to None
    # (e.g. as a class attribute) — confirm on the enclosing class.
    if self.node is None:
        nodes_text = ' '.join(node.name for node in nodes)
        tobiko.skip_test("Podman server is not running in any of nodes "
                         f"{nodes_text}")
def test_controllers_shutdown():
    """Power off a quorum-sized set of controllers and verify recovery.

    Requires at least three controller nodes. Powers off a random sample
    large enough to hold the Pacemaker quorum, checks node and VM
    reachability, then powers the nodes back on and verifies the cloud
    recovers.
    """
    test_case = tobiko.get_test_case()
    all_nodes = topology.list_openstack_nodes(group='controller')
    if len(all_nodes) < 3:
        tobiko.skip_test('It requires at least three controller nodes')
    all_node_names = [node.name for node in all_nodes]
    LOG.info("Ensure all controller nodes are running: "
             f"{all_node_names}")
    for node in all_nodes:
        node.power_on_overcloud_node()
    topology.assert_reachable_nodes(all_nodes)

    LOG.debug('Check VM is running while all controllers nodes are on')
    nova_server = tobiko.setup_fixture(stacks.CirrosServerStackFixture)
    nova_server_ip = nova_server.ip_address
    ping.assert_reachable_hosts([nova_server_ip])

    # Sample size is the ceiling half so the powered-off set is at least
    # as large as the remaining set (quorum boundary).
    quorum_level = math.ceil(0.5 * len(all_nodes))
    assert quorum_level >= len(all_nodes) - quorum_level
    nodes = random.sample(all_nodes, quorum_level)
    node_names = [node.name for node in nodes]
    LOG.info(f"Power off {quorum_level} random controller nodes: "
             f"{node_names}")
    for node in nodes:
        node.power_off_overcloud_node()
        test_case.addCleanup(node.power_on_overcloud_node)
    topology.assert_unreachable_nodes(nodes, retry_count=1)
    topology.assert_reachable_nodes(node
                                    for node in all_nodes
                                    if node not in nodes)

    # Bug fix: the original message was garbled by misplaced quotes
    # ('... some ""controllers nodes are off') and used "whenever"
    # instead of "whether".
    LOG.debug('Check whether VM is still running while some '
              'controller nodes are off')
    reachable, unreachable = ping.ping_hosts([nova_server_ip], count=1)
    if reachable:
        LOG.debug(f"VM ips are reachable: {reachable}")
    if unreachable:
        # Bug fix: original message read "VM is are unreachable".
        LOG.debug(f"VM ips are unreachable: {unreachable}")
    # TODO what do we expect here: VM reachable or unreachable?

    random.shuffle(nodes)
    LOG.info(f"Power on controller nodes: {node_names}")
    for node in nodes:
        node.power_on_overcloud_node()
    LOG.debug("Check all controller nodes are running again: "
              f"{all_node_names}")
    topology.assert_reachable_nodes(all_nodes, retry_timeout=600.)

    LOG.debug('Check VM is running while all controllers nodes are on')
    ping.assert_reachable_hosts([nova_server_ip])
def rotate_logs(node):
    """Force a log rotation inside the node's logrotate container.

    :param node: Node to rotate logs on
    :type node: class: tobiko.openstack.topology.OpenStackTopologyNode
    """
    matching = get_filtered_node_containers(node, ['logrotate.*', ])
    if not matching:
        # skip_test raises, so no 'else' branch is needed below
        tobiko.skip_test('No logrotate container has been found')
    logrotate_container = matching[0]
    sh.execute(f'docker exec -u root {logrotate_container} logrotate '
               '-f /etc/logrotate-crond.conf',
               ssh_client=node.ssh_client,
               sudo=True)
def test_migrate_server_with_host(self):
    """Verify a cold migration actually lands on the chosen hypervisor."""
    server = self.setup_server()
    initial_hypervisor = nova.get_server_hypervisor(server)
    other_hosts = (hypervisor.hypervisor_hostname
                   for hypervisor in nova.list_hypervisors(status='enabled',
                                                           state='up')
                   if hypervisor.hypervisor_hostname != initial_hypervisor)
    target_hypervisor = next(other_hosts, None)
    if target_hypervisor is None:
        tobiko.skip_test("Cannot find a valid hypervisor host to migrate "
                         "server to")
    server = self.migrate_server(server=server, host=target_hypervisor)
    final_hypervisor = nova.get_server_hypervisor(server)
    self.assertEqual(target_hypervisor, final_hypervisor)
def _test_migrate_server_with_host(self, live: bool):
    """Migrate the server to a random different hypervisor and verify it.

    :param live: whether to perform a live migration
    """
    server = self.ensure_server(status='ACTIVE')
    source_host = nova.get_server_hypervisor(server)
    candidates = nova.list_hypervisors(
        status='enabled', state='up').select(
            lambda hypervisor: hypervisor.hypervisor_hostname != source_host)
    if not candidates:
        tobiko.skip_test("Cannot find a valid hypervisor host to migrate "
                         "server to")
    target_host = random.choice(candidates).hypervisor_hostname
    server = self.migrate_server(host=target_host, live=live)
    final_host = nova.get_server_hypervisor(server)
    self.assertNotEqual(source_host, final_host)
    self.assertEqual(target_host, final_host)
def test_gateway_network(self):
    """Check the gateway router's external network matches the stack's."""
    if not self.stack.has_gateway:
        tobiko.skip_test(f"Stack {self.stack.stack_name} has no gateway")
    external_info = self.stack.gateway_details['external_gateway_info']
    self.assertEqual(self.stack.gateway_network_id,
                     external_info['network_id'])
def test_ssh_proxy_hostname(self):
    """Run the hostname check through the configured SSH proxy client."""
    proxy_client = ssh.ssh_proxy_client()
    if proxy_client is None:
        tobiko.skip_test('SSH proxy server is not configured')
    self.test_ssh_hostname(ssh_client=proxy_client)
def test_ping_fixed_ipv6(self):
    """Always skipped: the guest image ships without the ping tool."""
    tobiko.skip_test("ping not installed on image")
def test_ncat_command(self):
    """Always skipped: the guest image ships without the ncat tool."""
    tobiko.skip_test("ncat not installed on image")
def test_get_router(self):
    """Fetch the gateway router from Neutron and verify its ID."""
    if not self.stack.has_gateway:
        tobiko.skip_test(f"Stack {self.stack.stack_name} has no gateway "
                         "router")
    gateway_router = neutron.get_router(self.stack.gateway_id)
    self.assertEqual(self.stack.gateway_id, gateway_router['id'])
def ipv6_subnet_gateway_ip(self):
    """Return the network stack's IPv6 subnet gateway IP (skip if absent)."""
    network_stack = self.stack.network_stack
    if not network_stack.has_ipv6:
        tobiko.skip_test(f"Stack {network_stack.stack_name} "
                         "has no ipv6 subnet")
    return network_stack.ipv6_subnet_gateway_ip
def setUp(self):
    """Skip the whole test case when the network stack has no gateway."""
    super(RouterTest, self).setUp()
    network_stack = self.stack.network_stack
    if not network_stack.has_gateway:
        tobiko.skip_test(
            f"Stack {network_stack.stack_name} has no gateway")
def ssh_client(self) -> ssh.SSHClientType:
    """Return False (presumably meaning local execution — confirm against
    ssh.SSHClientType) when Docker runs locally; skip otherwise.
    """
    docker_running = docker.is_docker_running(ssh_client=False,
                                              sudo=self.sudo)
    if not docker_running:
        tobiko.skip_test('Docker is not running')
    return False
def test_ipv6_subnet_gateway_ip(self):
    """Check the IPv6 subnet gateway IP is one of the gateway addresses."""
    stack = self.stack
    if not (stack.has_ipv6 and stack.has_gateway):
        tobiko.skip_test(f"Stack {stack.stack_name} has no IPv6 "
                         "gateway")
    self.assertIn(stack.ipv6_subnet_gateway_ip,
                  stack.ipv6_gateway_addresses)
def cleanup_fixture(self):
    """Always skip during fixture cleanup (intentional test behavior)."""
    tobiko.skip_test('some-reason')
def setup_fixture(self):
    """Always skip during fixture setup (intentional test behavior)."""
    tobiko.skip_test('some-reason')
def setUp(self):
    """Skip the whole test case unless the router is distributed (DVR)."""
    super(DVRTest, self).setUp()
    gateway_details = self.router_stack.gateway_details
    if not gateway_details.get('distributed'):
        tobiko.skip_test('No DVR enabled')
def get_ssh_client(self) -> ssh.SSHClientFixture:
    """Return the first SSH client whose host runs Docker; skip if none."""
    for candidate in self.iter_ssh_clients():
        if docker.is_docker_running(ssh_client=candidate, sudo=True):
            return candidate
    tobiko.skip_test('Docker is not running')