    def test_destroy_primary_controller(self, router, prepare_openstack,
                                        devops_env):
        """Destroy primary controller (l3 agent on it should be
            with ACTIVE ha_state)

        Scenario:
            1. Create network1, network2
            2. Create router1 and connect it with network1, network2 and
                external net
            3. Boot vm1 in network1
            4. Boot vm2 in network2 and associate floating ip
            5. Add rules for ping
            6. Find the node with the active ha_state for the router
            7. If the node from step 6 isn't the primary controller,
                reschedule router1 to the primary controller by banning
                the l3 agent on all other controllers and then clearing
                the bans
            8. Destroy primary controller
            9. Wait while the environment stabilizes
            10. Check ping
        """
        router_id = router['router']['id']
        agents = self.get_active_l3_agents_for_router(router_id)
        l3_agent_controller = self.env.find_node_by_fqdn(agents[0]['host'])
        primary_controller = self.env.primary_controller
        other_controllers = [x for x
                             in self.env.get_nodes_by_role('controller')
                             if x != primary_controller]

        # Reschedule the active l3 agent to the primary controller if needed
        if primary_controller != l3_agent_controller:
            with primary_controller.ssh() as remote:
                for node in other_controllers:
                    remote.check_call(
                        'pcs resource ban neutron-l3-agent {}'.format(
                            node.data['fqdn']))
                from_node = l3_agent_controller.data['fqdn']
                self.wait_router_rescheduled(router_id=router_id,
                                             from_node=from_node,
                                             timeout_seconds=5 * 60)
                for node in other_controllers:
                    remote.check_call(
                        'pcs resource clear neutron-l3-agent {}'.format(
                            node.data['fqdn']))

        server1 = self.os_conn.nova.servers.find(name="server01")
        server2 = self.os_conn.nova.servers.find(name="server02")
        server2_ip = self.os_conn.get_nova_instance_ips(server2)['floating']

        logger.info("Destroy primary controller {}".format(
            primary_controller.data['fqdn']))
        devops_node = devops_env.get_node_by_fuel_node(primary_controller)
        devops_node.destroy()

        self.wait_router_rescheduled(router_id=router['router']['id'],
                                     from_node=primary_controller.data['fqdn'],
                                     timeout_seconds=5 * 60)

        network_checks.check_ping_from_vm(
            self.env, self.os_conn, vm=server1,
            vm_keypair=self.instance_keypair, ip_to_ping=server2_ip)
    def test_reset_primary_controller(self, router,
                                      prepare_openstack, devops_env):
        """Reset primary controller (l3 agent on it should be
            with ACTIVE ha_state)

        Scenario:
            1. Create network1, network2
            2. Create router1 and connect it with network1, network2 and
                external net
            3. Boot vm1 in network1
            4. Boot vm2 in network2 and associate floating ip
            5. Add rules for ping
            6. Find the node with the active ha_state for the router
            7. If the node from step 6 isn't the primary controller,
                reschedule router1 to the primary controller by banning
                the l3 agent on all other controllers and then clearing
                the bans
            8. Reset primary controller
            9. Start pinging vm2 from vm1 by floating ip
            10. Check that the ping lost no more than 10 packets
            11. Check that one agent has the ACTIVE ha_state and the other
                two have the STANDBY ha_state

        """
        router_id = router['router']['id']
        agents = self.get_active_l3_agents_for_router(router_id)
        l3_agent_controller = self.env.find_node_by_fqdn(agents[0]['host'])
        primary_controller = self.env.primary_controller
        for node in self.env.get_nodes_by_role('controller'):
            if node != primary_controller:
                proxy_node = node.data['fqdn']
                break
        else:
            raise Exception("Can't find non primary controller")
        server1 = self.os_conn.nova.servers.find(name="server01")
        server2 = self.os_conn.nova.servers.find(name="server02")
        server2_ip = self.os_conn.get_nova_instance_ips(server2)['floating']

        # Reschedule active l3 agent to primary if needed
        self.reschedule_active_l3_agt(router_id, primary_controller,
                                      l3_agent_controller)

        from_node = l3_agent_controller.data['fqdn']
        self.wait_router_rescheduled(router_id=router_id,
                                     from_node=from_node,
                                     timeout_seconds=5 * 60)

        logger.info("Reset primary controller {}".format(
            primary_controller.data['fqdn']))
        devops_node = devops_env.get_node_by_fuel_node(primary_controller)
        devops_node.reset()

        # Ensure that the l3 agent is moved away from the affected controller
        self.wait_router_rescheduled(router_id=router_id,
                                     from_node=primary_controller.data['fqdn'],
                                     timeout_seconds=5 * 60)

        network_checks.check_ping_from_vm(
            self.env, self.os_conn, vm=server1,
            vm_keypair=self.instance_keypair, ip_to_ping=server2_ip)

        self.check_l3_ha_agent_states(router_id)
    def test_vms_page_size_any_no_hp(self, env, os_conn, networks, keypair,
                                     computes_with_mixed_hp, flavors,
                                     security_group, cleanup):
        """This test checks vms with any/large hw:mem_page_size when both 2Mb
        and 1Gb huge pages are unavailable

            Steps:
            1. Create net1 with subnet and router1 with interface to net1
            2. Boot vms in order to allocate all huge pages
            3. Boot vm with the required mem_page_size and check the result:
                for 'large' the vm should be in error state; for 'any' the
                vm is active and 4Kb pages are used (i.e. no huge pages)
        """
        host = computes_with_mixed_hp[0]
        zone = 'nova:{}'.format(host)
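        # Consume all 2Mb and 1Gb huge pages on the target host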
        self.boot_vms_to_allocate_hp(os_conn, env, host, page_2mb, networks[0])
        self.boot_vms_to_allocate_hp(os_conn, env, host, page_1gb, networks[0])

        flavors[0].set_keys({'hw:mem_page_size': 'any'})
        vm = os_conn.create_server(name='vm', flavor=flavors[0].id,
                                   nics=[{'net-id': networks[0]}],
                                   key_name=keypair.name,
                                   security_groups=[security_group.id],
                                   availability_zone=zone)
        assert self.get_instance_page_size(os_conn, vm) is None
        network_checks.check_ping_from_vm(env, os_conn, vm, vm_keypair=keypair)
    def test_vms_connectivity_sriov_numa_after_resize(self, env, os_conn,
                                                      sriov_hosts, aggregate,
                                                      ubuntu_image_id, keypair,
                                                      vf_ports, flavors):
        """This test checks vms between VMs launched on vf port after resizing
            Steps:
            1. Create net1 with subnet, router1 with interface to net1
            2. Create vm1 on vf port with m1.small.performance on 1 NUMA-node
            3. Resize vm1 to m1.medium flavor
            4. Wait and ping 8.8.8.8 from vm1
            5. Resize vm1 to m1.small.performance flavor
            6. Wait and ping 8.8.8.8 from vm1
            7. Resize vm1 to m1.small
            8. Wait and ping 8.8.8.8 from vm1
        """
        hosts = list(set(sriov_hosts) & set(aggregate.hosts))
        if len(hosts) < 1:
            pytest.skip(
                "At least one host is required with SR-IOV and 2 numa nodes")
        m1_cpu_flavor = flavors[0]
        m1_medium = os_conn.nova.flavors.find(name='m1.medium')
        m1_large = os_conn.nova.flavors.find(name='m1.large')

        vm = self.create_vm(os_conn, hosts[0], m1_cpu_flavor, keypair,
                            vf_ports[0], ubuntu_image_id)

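        # Resize the vm through each flavor and check connectivity after each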
        for flavor in [m1_medium, m1_cpu_flavor, m1_large]:
            self.resize(os_conn, vm, flavor)
            network_checks.check_ping_from_vm(
                env, os_conn, vm, vm_keypair=keypair, vm_login='******')
    def test_vms_page_size_less_hp_count(self, env, os_conn, networks,
                                         computes_with_mixed_hp, flavors,
                                         security_group, keypair, scarce_page,
                                         expected_size, cleanup):
        """This test checks vms with hw:mem_page_size=large when count of
        2Mb huge pages is not enough to boot vm while count of free 1Gb huge
        page allows it (and vice versa)

            Steps:
            1. Create net1 with subnet and router1 with interface to net1
            2. Check that the huge page count of the 1st type is not enough
                for the vm
            3. Boot the vm and check that it uses huge pages of the 2nd type
        """
        host = computes_with_mixed_hp[0]
        flavors[0].set_keys({'hw:mem_page_size': 'large'})

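        # Leave less free memory of the scarce page size than the flavor needs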
        self.boot_vms_to_allocate_hp(os_conn, env, host, scarce_page,
                                     networks[0],
                                     ram_left_free=flavors[0].ram - 1024)

        vm = os_conn.create_server(name='vm', flavor=flavors[0].id,
                                   nics=[{'net-id': networks[0]}],
                                   key_name=keypair.name,
                                   security_groups=[security_group.id],
                                   availability_zone='nova:{}'.format(host))
        assert self.get_instance_page_size(os_conn, vm) == expected_size
        network_checks.check_ping_from_vm(env, os_conn, vm, vm_keypair=keypair)
    def test_ban_all_l3_agents_and_clear_them(self, router, prepare_openstack):
        """Disable all l3 agents and enable them

        Scenario:
            1. Create network1, network2
            2. Create router1 and connect it with network1, network2 and
                external net
            3. Boot vm1 in network1
            4. Boot vm2 in network2 and associate floating ip
            5. Add rules for ping
            6. Disable all neutron-l3-agent
            7. Wait until all agents are down
            8. Enable all neutron-l3-agent
            9. Wait until all agents are alive
            10. Check ping vm2 from vm1 by floating ip
        """
        server1 = self.os_conn.nova.servers.find(name="server01")
        server2 = self.os_conn.nova.servers.find(name="server02")
        server2_ip = self.os_conn.get_nova_instance_ips(server2)['floating']

        agents = self.os_conn.get_l3_for_router(router['router']['id'])
        agent_ids = [x['id'] for x in agents['agents']]
        controller = self.env.get_nodes_by_role('controller')[0]
        with controller.ssh() as remote:
            logger.info('disable all l3 agents')
            remote.check_call('pcs resource disable neutron-l3-agent')
            self.os_conn.wait_agents_down(agent_ids)
            logger.info('enable all l3 agents')
            remote.check_call('pcs resource enable neutron-l3-agent')
            self.os_conn.wait_agents_alive(agent_ids)

        network_checks.check_ping_from_vm(
            self.env, self.os_conn, vm=server1,
            vm_keypair=self.instance_keypair, ip_to_ping=server2_ip)
    def test_vms_page_size_one_type_hps_available_only(
            self, env, os_conn, networks, computes_with_mixed_hp, flavors,
            security_group, keypair, mem_page_size, vm_page_size,
            size_to_allocate, cleanup):
        """This test checks that vms with any/large hw:mem_page_size uses 2Mb
        huge pages in case when only 2Mb pages are available

            Steps:
            1. Create net1 with subnet and router1 with interface to net1
            2. Allocate all 1Gb huge pages for each numa node
            3. Boot vm with any or large hw:mem_page_size
            4. Check that 2Mb huge pages are used for vm
        """
        host = computes_with_mixed_hp[0]
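        # Allocate all huge pages of one size so only the other size remains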
        self.boot_vms_to_allocate_hp(os_conn, env, host, size_to_allocate,
                                     networks[0])

        flavors[0].set_keys({'hw:mem_page_size': mem_page_size})
        vm = os_conn.create_server(name='vm', flavor=flavors[0].id,
                                   nics=[{'net-id': networks[0]}],
                                   key_name=keypair.name,
                                   security_groups=[security_group.id],
                                   availability_zone='nova:{}'.format(host))
        assert self.get_instance_page_size(os_conn, vm) == vm_page_size
        network_checks.check_ping_from_vm(env, os_conn, vm, vm_keypair=keypair)
def test_basic_operation_with_fixed_ips(env, os_conn, instances, keypair,
                                        network):
    """Basic operations with fixed IPs on an instance

    Scenario:
        1. Create net01, net01__subnet
        2. Boot instances vm1 and vm2 in net01
        3. Check that they ping each other by their fixed IPs
        4. Add a fixed IP to vm1
            nova add-fixed-ip vm1 $NET_ID
        5. Remove old fixed IP from vm1
            nova remove-fixed-ip vm1 <old_fixed_ip>
        6. Wait some time
        7. Check that vm2 can send pings to vm1 by its new fixed IP
    """
    for instance1, instance2 in zip(instances, instances[::-1]):
        ip = os_conn.get_nova_instance_ips(instance2)["fixed"]
        network_checks.check_ping_from_vm(env, os_conn, instance1,
                                          vm_keypair=keypair, ip_to_ping=ip)

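    # Add a new fixed IP to vm1 and remove the old one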
    instance1, instance2 = instances
    old_ip = os_conn.get_nova_instance_ips(instance1)["fixed"]
    instance1.add_fixed_ip(network["network"]["id"])

    instance1.remove_fixed_ip(old_ip)
    new_ip = os_conn.get_nova_instance_ips(instance1)["fixed"]

    network_checks.check_ping_from_vm(env, os_conn, instance2,
                                      vm_keypair=keypair, ip_to_ping=new_ip)
    def _prepare_openstack(self):
        """Prepare OpenStack for scenarios run

        Steps:
            1. Update default security group
            2. Create networks net01: net01__subnet, 192.168.1.0/24
            3. Launch vm1 and vm2 in net01 network on a single compute node
            4. Go to vm1 console and send pings to vm2
        """
        self.instance_keypair = self.os_conn.create_key(key_name='instancekey')
        zone = self.os_conn.nova.availability_zones.find(zoneName="nova")
        host = zone.hosts.keys()[0]

        self.setup_rules_for_default_sec_group()

        # create 1 network and 2 instances
        net, subnet = self.create_internal_network_with_subnet()

        self.os_conn.create_server(
            name='server01',
            availability_zone='{}:{}'.format(zone.zoneName, host),
            key_name=self.instance_keypair.name,
            nics=[{'net-id': net['network']['id']}],
            max_count=2,
            wait_for_avaliable=False,
            wait_for_active=False)

        self.server1 = self.os_conn.nova.servers.find(name="server01-1")
        server2 = self.os_conn.nova.servers.find(name="server01-2")
        servers = [self.server1, server2]
        self.os_conn.wait_servers_active(servers)
        self.os_conn.wait_servers_ssh_ready(servers)

        # check pings
        self.server2_ip = self.os_conn.get_nova_instance_ips(server2)['fixed']

        network_checks.check_ping_from_vm(env=self.env,
                                          os_conn=self.os_conn,
                                          vm=self.server1,
                                          vm_keypair=self.instance_keypair,
                                          ip_to_ping=self.server2_ip,
                                          timeout=3 * 60,
                                          vm_login='******',
                                          vm_password='******')

        # make a list of all ovs agent ids
        self.ovs_agent_ids = [
            agt['id'] for agt in
            self.os_conn.neutron.list_agents(
                binary='neutron-openvswitch-agent')['agents']]
        # make a list of ovs agents that reside only on controllers
        controllers = [node.data['fqdn']
                       for node in self.env.get_nodes_by_role('controller')]
        ovs_agts = self.os_conn.neutron.list_agents(
            binary='neutron-openvswitch-agent')['agents']
        self.ovs_conroller_agents = [agt['id'] for agt in ovs_agts
                                     if agt['host'] in controllers]
    def test_ovs_restart_pcs_disable_enable(self, count):
        """Restart openvswitch-agents with pcs disable/enable on controllers

        Steps:
            1. Update default security group
            2. Create router01, create networks net01: net01__subnet,
                192.168.1.0/24, net02: net02__subnet, 192.168.2.0/24 and
                attach them to router01.
            3. Launch vm1 in net01 network and vm2 in net02 network
                on different computes
            4. Go to vm1 console and send pings to vm2
            5. Disable ovs-agents on a controller, restart service
                neutron-plugin-openvswitch-agent on all computes, and enable
                them back. To do this, launch the script against master node.
            6. Wait 30 seconds, send pings from vm1 to vm2 and check that
                it is successful.
            7. Repeat steps 5-6 'count' times

        Duration 10m

        """
        self._prepare_openstack()
        for _ in range(count):
            # Check that all ovs agents are alive
            self.os_conn.wait_agents_alive(self.ovs_agent_ids)

            # Disable ovs agent on a controller
            self.disable_ovs_agents_on_controller()

            # Then check that all ovs went down
            self.os_conn.wait_agents_down(self.ovs_conroller_agents)

            # Restart ovs agent service on all computes
            self.restart_ovs_agents_on_computes()

            # Enable ovs agent on a controller
            self.enable_ovs_agents_on_controllers()

            # Then check that all ovs agents are alive
            self.os_conn.wait_agents_alive(self.ovs_agent_ids)

            # sleep is used to check that system will be stable for some time
            # after restarting service
            time.sleep(30)

            network_checks.check_ping_from_vm(self.env,
                                              self.os_conn,
                                              self.server1,
                                              self.instance_keypair,
                                              self.server2_ip,
                                              timeout=3 * 60)

            # check all agents are alive
            assert all([
                agt['alive']
                for agt in self.os_conn.neutron.list_agents()['agents']
            ])
    def _prepare_openstack(self):
        """Prepare OpenStack for scenarios run

        Steps:
            1. Update default security group
            2. Create router01, create networks net01: net01__subnet,
                192.168.1.0/24, net02: net02__subnet, 192.168.2.0/24 and
                attach them to router01.
            3. Launch vm1 in net01 network and vm2 in net02 network
                on different computes
            4. Go to vm1 console and send pings to vm2
        """
        self.instance_keypair = self.os_conn.create_key(key_name='instancekey')
        zone = self.os_conn.nova.availability_zones.find(zoneName="nova")
        vm_hosts = zone.hosts.keys()[:2]

        self.setup_rules_for_default_sec_group()

        # create router
        router = self.os_conn.create_router(name="router01")

        # create 2 networks and 2 instances
        for i, hostname in enumerate(vm_hosts, 1):
            net, subnet = self.create_internal_network_with_subnet(suffix=i)
            self.os_conn.router_interface_add(
                router_id=router['router']['id'],
                subnet_id=subnet['subnet']['id'])
            self.os_conn.create_server(
                name='server%02d' % i,
                availability_zone='{}:{}'.format(zone.zoneName, hostname),
                key_name=self.instance_keypair.name,
                nics=[{'net-id': net['network']['id']}])

        # check pings
        self.server1 = self.os_conn.nova.servers.find(name="server01")
        self.server2_ip = self.os_conn.get_nova_instance_ips(
            self.os_conn.nova.servers.find(name="server02")
        ).values()[0]

        network_checks.check_ping_from_vm(
            self.env, self.os_conn, self.server1, self.instance_keypair,
            self.server2_ip, timeout=3 * 60)

        # make a list of all ovs agent ids
        self.ovs_agent_ids = [
            agt['id'] for agt in
            self.os_conn.neutron.list_agents(
                binary='neutron-openvswitch-agent')['agents']]
        # make a list of ovs agents that reside only on controllers
        controllers = [node.data['fqdn']
                       for node in self.env.get_nodes_by_role('controller')]
        ovs_agts = self.os_conn.neutron.list_agents(
            binary='neutron-openvswitch-agent')['agents']
        self.ovs_conroller_agents = [agt['id'] for agt in ovs_agts
                                     if agt['host'] in controllers]
    def _prepare_openstack(self):
        """Prepare OpenStack for scenarios run

        Steps:
            1. Update default security group
            2. Create networks net01: net01__subnet, 192.168.1.0/24
            3. Launch vm1 and vm2 in net01 network on a single compute node
            4. Go to vm1 console and send pings to vm2
        """
        self.instance_keypair = self.os_conn.create_key(key_name='instancekey')
        zone = self.os_conn.nova.availability_zones.find(zoneName="nova")
        host = zone.hosts.keys()[0]

        self.setup_rules_for_default_sec_group()

        # create 1 network and 2 instances
        net, subnet = self.create_internal_network_with_subnet()

        self.os_conn.create_server(name='server01',
                                   availability_zone='{}:{}'.format(
                                       zone.zoneName, host),
                                   key_name=self.instance_keypair.name,
                                   nics=[{
                                       'net-id': net['network']['id']
                                   }],
                                   max_count=2)

        # check pings
        self.server1 = self.os_conn.nova.servers.find(name="server01-1")
        self.server2_ip = self.os_conn.get_nova_instance_ips(
            self.os_conn.nova.servers.find(name="server01-2")).values()[0]

        network_checks.check_ping_from_vm(self.env,
                                          self.os_conn,
                                          self.server1,
                                          self.instance_keypair,
                                          self.server2_ip,
                                          timeout=3 * 60)

        # make a list of all ovs agent ids
        self.ovs_agent_ids = [
            agt['id'] for agt in self.os_conn.neutron.list_agents(
                binary='neutron-openvswitch-agent')['agents']
        ]
        # make a list of ovs agents that reside only on controllers
        controllers = [
            node.data['fqdn']
            for node in self.env.get_nodes_by_role('controller')
        ]
        ovs_agts = self.os_conn.neutron.list_agents(
            binary='neutron-openvswitch-agent')['agents']
        self.ovs_conroller_agents = [
            agt['id'] for agt in ovs_agts if agt['host'] in controllers
        ]
    def test_restore_deleted_instance(
            self, set_recl_inst_interv, instances, volumes):
        """Restore previously deleted instance.
        Actions:
        1. Update '/etc/nova/nova.conf' with 'reclaim_instance_interval=86400'
        and restart Nova on all nodes;
        2. Create net and subnet;
        3. Create and run two instances (vm1, vm2) inside same net;
        4. Check that pings are successful between vms;
        5. Create a volume and attach it to the instance vm1;
        6. Delete instance vm1 and check that it's in 'SOFT_DELETED' state;
        7. Restore the vm1 instance and check that it's in 'ACTIVE' state;
        8. Check that pings are successful between vms;
        """
        timeout = 60  # (sec) timeout to wait instance for status change

        # Create two vms
        vm1, vm2 = instances

        # Ping one vm from another
        vm1_ip = self.os_conn.get_nova_instance_ips(vm1).values()[0]
        vm2_ip = self.os_conn.get_nova_instance_ips(vm2).values()[0]
        network_checks.check_ping_from_vm(
            self.env, self.os_conn, vm1, ip_to_ping=vm2_ip, timeout=60)

        # Create a volume and attach it to an instance vm1
        volume = common_functions.create_volume(
            self.os_conn.cinder, image_id=None)
        self.os_conn.nova.volumes.create_server_volume(
            server_id=vm1.id, volume_id=volume.id, device='/dev/vdb')
        volumes.append(volume)

        # Delete instance vm1 and check that it's in "SOFT_DELETED" state
        common_functions.delete_instance(self.os_conn.nova, vm1.id)
        assert vm1 not in self.os_conn.get_servers()
        common_functions.wait(
            lambda: self.os_conn.server_status_is(vm1, 'SOFT_DELETED'),
            timeout_seconds=timeout, sleep_seconds=5,
            waiting_for='instance {0} changes status to SOFT_DELETED'.format(
                vm1.name))

        # Restore vm1 instance and check that it's in "ACTIVE" state now
        resp = self.os_conn.nova.servers.restore(vm1.id)
        assert resp[0].ok
        common_functions.wait(
            lambda: self.os_conn.is_server_active(vm1.id),
            timeout_seconds=timeout, sleep_seconds=5,
            waiting_for='instance {0} changes status to ACTIVE'.format(
                vm1.name))

        # Ping one vm from another
        network_checks.check_ping_from_vm(
            self.env, self.os_conn, vm2, ip_to_ping=vm1_ip, timeout=60)
    def test_ovs_restart_pcs_vms_on_single_compute_in_single_network(self):
        """Check connectivity for instances scheduled on a single compute in
         a single private network

        Steps:
            1. Update default security group
            2. Create networks net01: net01__subnet, 192.168.1.0/24
            3. Launch vm1 and vm2 in net01 network on a single compute node
            4. Go to vm1 console and send pings to vm2
            5. Disable ovs-agents on all controllers, restart service
                neutron-plugin-openvswitch-agent on all computes, and enable
                them back. To do this, launch the script against master node.
            6. Wait 30 seconds, send pings from vm1 to vm2 and check that
                it is successful.

        Duration 10m

        """
        self._prepare_openstack()
        # Check that all ovs agents are alive
        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        # Disable ovs agent on all controllers
        self.disable_ovs_agents_on_controller()

        # Then check that all ovs went down
        self.os_conn.wait_agents_down(self.ovs_conroller_agents)

        # Restart ovs agent service on all computes
        self.restart_ovs_agents_on_computes()

        # Enable ovs agent on all controllers
        self.enable_ovs_agents_on_controllers()

        # Then check that all ovs agents are alive
        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        # sleep is used to check that system will be stable for some time
        # after restarting service
        time.sleep(30)

        network_checks.check_ping_from_vm(self.env,
                                          self.os_conn,
                                          self.server1,
                                          self.instance_keypair,
                                          self.server2_ip,
                                          timeout=3 * 60)

        # check all agents are alive
        assert all([
            agt['alive']
            for agt in self.os_conn.neutron.list_agents()['agents']
        ])
    def test_ovs_restart_pcs_ban_clear(self):
        """Restart openvswitch-agents with pcs ban/clear on controllers

        Steps:
            1. Update default security group
            2. Create router01, create networks.
            3. Launch vm1 in net01 network and vm2 in net02 network
                on different computes.
            4. Go to vm1 console and send pings to vm2
            5. Ban ovs-agents on all controllers, clear them and restart
                service neutron-plugin-openvswitch-agent on all computes.
                To do this, launch the script against master node.
            6. Wait 30 seconds, send pings from vm1 to vm2 and
                check that it is successful.

        Duration 10m

        """
        self._prepare_openstack()
        # Check that all ovs agents are alive
        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        # Ban ovs agents on all controllers
        self.ban_ovs_agents_controllers()

        # Then check that all ovs went down
        self.os_conn.wait_agents_down(self.ovs_conroller_agents)

        # Clear ovs agents on all controllers
        self.clear_ovs_agents_controllers()

        # Restart ovs agent service on all computes
        self.restart_ovs_agents_on_computes()

        # Then check that all ovs agents are alive
        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        # sleep is used to check that system will be stable for some time
        # after restarting service
        time.sleep(30)

        network_checks.check_ping_from_vm(self.env,
                                          self.os_conn,
                                          self.server1,
                                          self.instance_keypair,
                                          self.server2_ip,
                                          timeout=3 * 60)

        # check all agents are alive
        assert all([
            agt['alive']
            for agt in self.os_conn.neutron.list_agents()['agents']
        ])
    def test_ovs_restart_pcs_disable_enable(self, count):
        """Restart openvswitch-agents with pcs disable/enable on controllers

        Steps:
            1. Update default security group
            2. Create router01, create networks net01: net01__subnet,
                192.168.1.0/24, net02: net02__subnet, 192.168.2.0/24 and
                attach them to router01.
            3. Launch vm1 in net01 network and vm2 in net02 network
                on different computes
            4. Go to vm1 console and send pings to vm2
            5. Disable ovs-agents on a controller, restart service
                neutron-plugin-openvswitch-agent on all computes, and enable
                them back. To do this, launch the script against master node.
            6. Wait 30 seconds, send pings from vm1 to vm2 and check that
                it is successful.
            7. Repeat steps 5-6 'count' times

        Duration 10m

        """
        self._prepare_openstack()
        for _ in range(count):
            # Check that all ovs agents are alive
            self.os_conn.wait_agents_alive(self.ovs_agent_ids)

            # Disable ovs agent on a controller
            common.disable_ovs_agents_on_controller(self.env)

            # Then check that all ovs went down
            self.os_conn.wait_agents_down(self.ovs_conroller_agents)

            # Restart ovs agent service on all computes
            common.restart_ovs_agents_on_computes(self.env)

            # Enable ovs agent on a controller
            common.enable_ovs_agents_on_controllers(self.env)

            # Then check that all ovs agents are alive
            self.os_conn.wait_agents_alive(self.ovs_agent_ids)

            # sleep is used to check that system will be stable for some time
            # after restarting service
            time.sleep(30)

            network_checks.check_ping_from_vm(
                self.env, self.os_conn, self.server1, self.instance_keypair,
                self.server2_ip, timeout=10 * 60)

            # check all agents are alive
            assert all([agt['alive'] for agt in
                        self.os_conn.neutron.list_agents()['agents']])
    def test_destroy_non_primary_controller(self, router,
                                            prepare_openstack, devops_env):
        """Reset primary controller (l3 agent on it should be
            with ACTIVE ha_state)

        Scenario:
            1. Create network1, network2
            2. Create router1 and connect it with network1, network2 and
                external net
            3. Boot vm1 in network1
            4. Boot vm2 in network2 and associate floating ip
            5. Add rules for ping
            6. Find node with active ha_state for router
            7. If node from step 6 isn't primary controller,
                reschedule router1 to primary by banning all another
                and then clear them
            8. Destroy primary controller
            9. Wait time while env is unstable
            10. Check ping
            11. One agent has ACTIVE ha_state, others (2) has STAND BY ha_state

        """
        router_id = router['router']['id']
        agents = self.get_active_l3_agents_for_router(router_id)
        l3_agent_controller = self.env.find_node_by_fqdn(agents[0]['host'])
        controller = self.env.non_primary_controllers[0]
        server1 = self.os_conn.nova.servers.find(name="server01")
        server2 = self.os_conn.nova.servers.find(name="server02")
        server2_ip = self.os_conn.get_nova_instance_ips(server2)['floating']

        # Reschedule the active l3 agent to the non-primary if needed
        self.reschedule_active_l3_agt(router_id, controller,
                                      l3_agent_controller)

        logger.info("Destroy non primary controller {}".format(
            controller.data['fqdn']))
        devops_node = devops_env.get_node_by_fuel_node(controller)
        self.env.destroy_nodes([devops_node])

        # Ensure that the l3 agent is moved away from the affected controller
        self.wait_router_rescheduled(router_id=router_id,
                                     from_node=controller.data['fqdn'],
                                     timeout_seconds=5 * 60)
        network_checks.check_ping_from_vm(
            self.env, self.os_conn, vm=server1,
            vm_keypair=self.instance_keypair, ip_to_ping=server2_ip)

        self.check_l3_ha_agent_states(router_id)
    def test_ovs_restart_pcs_ban_clear(self):
        """Restart openvswitch-agents with pcs ban/clear on controllers

        Steps:
            1. Update default security group
            2. Create router01, create networks.
            3. Launch vm1 in net01 network and vm2 in net02 network
                on different computes.
            4. Go to vm1 console and send pings to vm2
            5. Ban ovs-agents on all controllers, clear them and restart
                service neutron-plugin-openvswitch-agent on all computes.
                To do this, launch the script against master node.
            6. Wait 30 seconds, send pings from vm1 to vm2 and
                check that it is successful.

        Duration 10m

        """
        self._prepare_openstack()
        # Check that all ovs agents are alive
        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        # Ban ovs agents on all controllers
        common.ban_ovs_agents_controllers(self.env)

        # Then check that all ovs went down
        self.os_conn.wait_agents_down(self.ovs_conroller_agents)

        # Clear ovs agents on all controllers
        common.clear_ovs_agents_controllers(self.env)

        # Restart ovs agent service on all computes
        common.restart_ovs_agents_on_computes(self.env)

        # Then check that all ovs agents are alive
        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        # sleep is used to check that system will be stable for some time
        # after restarting service
        time.sleep(30)

        network_checks.check_ping_from_vm(
            self.env, self.os_conn, self.server1, self.instance_keypair,
            self.server2_ip, timeout=3 * 60)

        # check all agents are alive
        assert all([agt['alive'] for agt in
                    self.os_conn.neutron.list_agents()['agents']])
    def test_ovs_restart_pcs_vms_on_single_compute_in_single_network(self):
        """Check connectivity for instances scheduled on a single compute in
         a single private network

        Steps:
            1. Update default security group
            2. Create networks net01: net01__subnet, 192.168.1.0/24
            3. Launch vm1 and vm2 in net01 network on a single compute node
            4. Go to vm1 console and send pings to vm2
            5. Disable ovs-agents on all controllers, restart service
                neutron-plugin-openvswitch-agent on all computes, and enable
                them back. To do this, launch the script against master node.
            6. Wait 30 seconds, send pings from vm1 to vm2 and check that
                it is successful.

        Duration 10m

        """
        self._prepare_openstack()
        # Check that all ovs agents are alive
        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        # Disable ovs agent on all controllers
        common.disable_ovs_agents_on_controller(self.env)

        # Then check that all ovs went down
        self.os_conn.wait_agents_down(self.ovs_conroller_agents)

        # Restart ovs agent service on all computes
        common.restart_ovs_agents_on_computes(self.env)

        # Enable ovs agent on all controllers
        common.enable_ovs_agents_on_controllers(self.env)

        # Then check that all ovs agents are alive
        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        # sleep is used to check that system will be stable for some time
        # after restarting service
        time.sleep(30)

        network_checks.check_ping_from_vm(
            self.env, self.os_conn, self.server1, self.instance_keypair,
            self.server2_ip, timeout=3 * 60)

        # check all agents are alive
        assert all([agt['alive'] for agt in
                    self.os_conn.neutron.list_agents()['agents']])
    def check_no_ping_from_vm(self,
                              vm,
                              vm_keypair=None,
                              ip_to_ping=None,
                              timeout=None,
                              vm_login='******',
                              vm_password='******'):
        logger.info('Expecting that ping from VM should fail')
        # The ping is expected to fail and raise an AssertionError
        with pytest.raises(AssertionError):
            network_checks.check_ping_from_vm(env=self.env,
                                              os_conn=self.os_conn,
                                              vm=vm,
                                              vm_keypair=vm_keypair,
                                              ip_to_ping=ip_to_ping,
                                              timeout=timeout)
    def check_vm_connectivity_ubuntu(
            self, env, os_conn, keypair, vms, inactive_ips=()):
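        # Collect the reachable IPs of each vm, excluding inactive ones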
        vm_ips = {}
        for vm in vms:
            ips = [ip for ip in self.get_instance_ips(os_conn, vm) if
                   ip not in inactive_ips]
            vm_ips[vm] = ips
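        # From every vm, ping 8.8.8.8 and the IPs of all other vms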
        for vm in vms:
            ips = ['8.8.8.8']
            for vm_1 in vms:
                if vm != vm_1:
                    ips.extend(vm_ips[vm_1])

            network_checks.check_ping_from_vm(
                env, os_conn, vm, vm_keypair=keypair, ip_to_ping=ips,
                vm_login='******', vm_password='******', vm_ip=vm_ips[vm][0])
def test_live_evacuate_instances(instances, os_conn, env, keypair,
                                 nova_client):
    """Live evacuate all instances of the specified host to other available
    hosts without shared storage

    Scenario:
        1. Create net01, net01__subnet
        2. Boot instances vm1 and vm2 in net01 on compute node1
        3. Run the 'nova host-evacuate-live' command to live-migrate
            vm1 and vm2 instances from compute node1 to compute node2:
            nova host-evacuate-live --target-host node-2.domain.tld \
            --block-migrate node-1.domain.tld
        4. Check that all live-migrated instances are hosted on target host
            and are in ACTIVE status
        5. Check pings between vm1 and vm2
    """
    old_host = getattr(instances[0], 'OS-EXT-SRV-ATTR:host')
    new_host = [x.hypervisor_hostname
                for x in os_conn.nova.hypervisors.list()
                if x.hypervisor_hostname != old_host][0]

    nova_client(
        'host-evacuate-live',
        params='--target-host {new_host} --block-migrate {old_host}'.format(
            old_host=old_host,
            new_host=new_host))

    common.wait(lambda: all([os_conn.is_server_active(x) for x in instances]),
                timeout_seconds=2 * 60,
                waiting_for='instances to become ACTIVE')

    for instance in instances:
        instance.get()
        assert getattr(instance, 'OS-EXT-SRV-ATTR:host') == new_host

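    # Check that the evacuated instances can still ping each other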
    for instance1, instance2 in zip(instances, instances[::-1]):
        ip = os_conn.get_nova_instance_ips(instance2)['fixed']
        network_checks.check_ping_from_vm(env,
                                          os_conn,
                                          instance1,
                                          vm_keypair=keypair,
                                          ip_to_ping=ip)
def test_migrate_instances(instances, os_conn, env, keypair, nova_client):
    """Migrate all instances of the specified host to other available hosts

    Scenario:
        1. Create net01, net01__subnet
        2. Boot instances vm1, vm2 and vm3 in net01 on compute node1
        3. Run the 'nova host-servers-migrate <compute node1>' command
        4. Check that every instance is rescheduled to other computes
        5. Check that the status of every rescheduled instance is VERIFY_RESIZE
        6. Confirm resize for every instance:
            nova resize-confirm vm1 (vm2, vm3)
        7. Check that every migrated instance has an ACTIVE status now
        8. Send pings between vm1, vm2 and vm3 to check network connectivity
    """
    old_host = getattr(instances[0], 'OS-EXT-SRV-ATTR:host')
    nova_client('host-servers-migrate', params=old_host)

    common.wait(lambda: all([os_conn.server_status_is(x, 'VERIFY_RESIZE')
                             for x in instances]),
                timeout_seconds=2 * 60,
                waiting_for='instances to reach VERIFY_RESIZE status')

    for instance in instances:
        instance.get()
        assert getattr(instance, 'OS-EXT-SRV-ATTR:host') != old_host

    for instance in instances:
        instance.confirm_resize()

    common.wait(lambda: all([os_conn.is_server_active(x) for x in instances]),
                timeout_seconds=2 * 60,
                waiting_for='instances to become ACTIVE')

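    # Check connectivity between every pair of migrated instances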
    for instance in instances:
        ips = [os_conn.get_nova_instance_ips(x)['fixed']
               for x in instances if x != instance]
        network_checks.check_ping_from_vm(env,
                                          os_conn,
                                          instance,
                                          vm_keypair=keypair,
                                          ip_to_ping=ips)
    def test_vms_connectivity_sriov_numa(self, env, os_conn, sriov_hosts,
                                         aggregate, vf_ports, flavors,
                                         ubuntu_image_id, keypair):
        """This test checks vms connectivity with all features
            Steps:
            1. Create net1 with subnet, router1 with interface to net1
            2. Create vm1 on vf port with m1.small.performance on 1 NUMA-node
            3. Check that vm is on one numa-node
            4. Check Ping 8.8.8.8 from vm1
        """
        hosts = list(set(sriov_hosts) & set(aggregate.hosts))
        if len(hosts) < 1:
            pytest.skip(
                "At least one host is required with SR-IOV and 2 numa nodes")
        vm = self.create_vm(os_conn, hosts[0], flavors[0], keypair,
                            vf_ports[0], ubuntu_image_id)
        cpus = get_cpu_distribition_per_numa_node(env)
        self.check_cpu_for_vm(os_conn, vm, 1, cpus[hosts[0]])
        network_checks.check_ping_from_vm(env, os_conn, vm,
                                          vm_keypair=keypair,
                                          vm_login='******')
    def test_vms_with_custom_threading_policy(self, env, os_conn,
                                              hosts_hyper_threading,
                                              flavors, networks, keypair,
                                              security_group, policy,
                                              expected_count):
        """This test checks vcpu allocation for vms with different values of
        flavor cpu_thread_policy

        Steps:
            1. Create net1 with subnet and router1 with interface to net1
            2. Create cpu pinning flavor with hw:numa_nodes=1 and required
            cpu_thread_policy
            3. Boot vm
            4. Check that both cpus are on different cores in case of
                cpu_thread_policy=isolate and on the same core in case of
                prefer or require
            5. Check ping 8.8.8.8 from vm
        """

        host = hosts_hyper_threading[0]
        cpus = get_cpu_distribition_per_numa_node(env)[host]

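        # Boot a vm with the requested cpu_thread_policy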
        flavors[0].set_keys({'hw:cpu_thread_policy': policy})
        vm = os_conn.create_server(name='vm', flavor=flavors[0].id,
                                   key_name=keypair.name,
                                   nics=[{'net-id': networks[0]}],
                                   security_groups=[security_group.id],
                                   availability_zone='nova:{}'.format(host),
                                   wait_for_avaliable=False)
        self.check_cpu_for_vm(os_conn, vm, 1, cpus)

        used_ts = self.get_vm_thread_siblings_lists(os_conn, vm)
        assert len(used_ts) == expected_count, (
            "Unexpected count of used cores. It should be {0} for '{1}' "
            "threading policy, but actual it's {2}").format(
            expected_count, policy, len(used_ts))

        network_checks.check_ping_from_vm(env, os_conn, vm, vm_keypair=keypair)
    def test_vms_page_size_2mb_and_1gb_available(self, env, os_conn, networks,
                                                 computes_with_mixed_hp,
                                                 flavors, security_group,
                                                 keypair, page_size,
                                                 allowed_sizes, cleanup):
        """This test checks vms with any/large hw:mem_page_size when both 2Mb
        and 1Gb huge pages are available

            Steps:
            1. Create net1 with subnet and router1 with interface to net1
            2. Check that both 2Mb and 1Gb huge pages are available
            3. Boot vm and check page size: should be 1Gb in case of 'large'
            and any (1Gb or 2Mb) for 'any' mem_page_size
        """
        host = computes_with_mixed_hp[0]
        flavors[0].set_keys({'hw:mem_page_size': page_size})
        vm = os_conn.create_server(name='vm', flavor=flavors[0].id,
                                   nics=[{'net-id': networks[0]}],
                                   key_name=keypair.name,
                                   security_groups=[security_group.id],
                                   availability_zone='nova:{}'.format(host))
        assert self.get_instance_page_size(os_conn, vm) in allowed_sizes
        network_checks.check_ping_from_vm(env, os_conn, vm, vm_keypair=keypair)
def check_vm_connectivity_cirros_ubuntu(env, os_conn, keypair, cirros, ubuntu):
    """This method checks vms connectivity for mixed vms (cirros and ubuntu).
    To check: ping from cirros to ubuntu and 8.8.8.8 and ping from ubuntu to
    cirros and 8.8.8.8
    """
    ips = {cirros: os_conn.get_nova_instance_ips(cirros)['fixed'],
           ubuntu: os_conn.get_nova_instance_ips(ubuntu)['fixed']}
    network_checks.check_ping_from_vm(env, os_conn, cirros, timeout=None,
                                      ip_to_ping=ips[ubuntu])
    network_checks.check_ping_from_vm(env, os_conn, cirros, timeout=None)
    network_checks.check_ping_from_vm(env, os_conn, ubuntu,
                                      vm_keypair=keypair, timeout=None,
                                      ip_to_ping=ips[cirros],
                                      vm_login='******')
    network_checks.check_ping_from_vm(env, os_conn, ubuntu, timeout=None,
                                      vm_keypair=keypair,
                                      vm_login='******')
    def test_cpu_and_memory_distribution(self, env, os_conn, networks, flavors,
                                         security_group, aggregate, keypair):
        """This test checks distribution of cpu for vm with cpu pinning
        Steps:
            1. Create flavor with custom numa_cpu and numa_mem distribution
            2. Create net1 with subnet, net2 with subnet and router1 with
                interfaces to both nets
            3. Launch vm using created flavor
            4. Check memory allocation per numa node
            5. Check CPU allocation
            6. Ping 8.8.8.8 from vm1
        """

        host = aggregate.hosts[0]
        numa_count = 2
        cpus = get_cpu_distribition_per_numa_node(env)

        flavors[0].set_keys({'hw:numa_nodes': numa_count,
                             'hw:numa_cpus.0': self.cpu_numa0,
                             'hw:numa_cpus.1': self.cpu_numa1,
                             'hw:numa_mem.0': self.mem_numa0,
                             'hw:numa_mem.1': self.mem_numa1})

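        # Expected memory and CPU pinning per NUMA node, from the flavor keys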
        exp_mem = {'0': self.mem_numa0, '1': self.mem_numa1}
        exp_pin = {'numa0': [int(cpu) for cpu in self.cpu_numa0.split(',')],
                   'numa1': [int(cpu) for cpu in self.cpu_numa1.split(',')]}
        vm = os_conn.create_server(name='vm', flavor=flavors[0].id,
                                   nics=[{'net-id': networks[0]}],
                                   key_name=keypair.name,
                                   security_groups=[security_group.id],
                                   availability_zone='nova:{}'.format(host))

        self.check_cpu_for_vm(os_conn, vm, numa_count, cpus[host], exp_pin)
        act_mem = self.get_memory_allocation_per_numa(os_conn, vm, numa_count)
        assert act_mem == exp_mem, "Actual memory allocation is not OK"
        network_checks.check_ping_from_vm(env, os_conn, vm, vm_keypair=keypair)
    def test_ban_all_l3_agents_and_clear_them(self, router, prepare_openstack):
        """Disable all l3 agents and enable them

        Scenario:
            1. Create network1, network2
            2. Create router1 and connect it with network1, network2 and
                external net
            3. Boot vm1 in network1
            4. Boot vm2 in network2 and associate floating ip
            5. Add rules for ping
            6. Disable all neutron-l3-agent
            7. Wait until all agents are down
            8. Enable all neutron-l3-agent
            9. Wait until all agents are alive
            10. Check ping vm2 from vm1 by floating ip
        """
        server1 = self.os_conn.nova.servers.find(name="server01")
        server2 = self.os_conn.nova.servers.find(name="server02")
        server2_ip = self.os_conn.get_nova_instance_ips(server2)['floating']

        agents = self.os_conn.get_l3_for_router(router['router']['id'])
        agent_ids = [x['id'] for x in agents['agents']]
        controller = self.env.get_nodes_by_role('controller')[0]
        with controller.ssh() as remote:
            logger.info('disable all l3 agents')
            remote.check_call('pcs resource disable neutron-l3-agent')
            self.os_conn.wait_agents_down(agent_ids)
            logger.info('enable all l3 agents')
            remote.check_call('pcs resource enable neutron-l3-agent')
            self.os_conn.wait_agents_alive(agent_ids)

        network_checks.check_ping_from_vm(self.env,
                                          self.os_conn,
                                          vm=server1,
                                          vm_keypair=self.instance_keypair,
                                          ip_to_ping=server2_ip)
    def test_restart_all_neutron_services(self, env, os_conn):
        """Check that neutron services works fine after restart

        Scenario:
            1. Boot "vm1" in exists internal net
            2. Vefiry ping public IP from "vm1"
            3. Restart all neutron services on all nodes
            4. Boot "vm2" in same net as "vm1"
            5. Verify pings betwheen vms and to public ip
            6. Create new network "net01" and router between "net01" and
                external net
            7. Boot "vm3" on "net01"
            8. Vefiry ping public IP from "vm3"
            9. Delete all created servers
            10. Delete created router and network
        """

        security_group = os_conn.create_sec_group_for_ssh()
        int_net = os_conn.int_networks[0]
        vm1 = os_conn.create_server('vm1',
                                    nics=[{'net-id': int_net['id']}],
                                    security_groups=[security_group.name])

        network_checks.check_ping_from_vm(env, os_conn, vm1)

        controllers = env.get_nodes_by_role('controller')
        computes = env.get_nodes_by_role('compute')

        # Restart pacemaker resources
        neutron_psc_ids = []
        with controllers[0].ssh() as remote:
            # Get pcs resources info
            result = remote.check_call("crm_mon -1 -X", verbose=False)
            root = ElementTree.fromstring(result.stdout_string)
            for node in root.findall('.//clone'):
                clone_id = node.attrib.get('id', '')
                if 'neutron' in clone_id:
                    neutron_psc_ids.append(clone_id)
                    # Restart pcs resource
                    remote.check_call('pcs resource restart '
                                      '{0}'.format(clone_id))

        # Restart all upstart services
        for node in controllers + computes:
            with node.ssh() as remote:
                result = remote.check_call("initctl list | grep running | "
                                           "grep neutron | awk '{ print $1 }'")
                for service in result.stdout_string.splitlines():
                    if len(get_close_matches(service, neutron_psc_ids)) == 0:
                        remote.check_call('restart {}'.format(service))

        wait(os_conn.neutron.list_networks,
             expected_exceptions=ServiceUnavailable,
             timeout_seconds=60,
             sleep_seconds=5,
             waiting_for='neutron services to be up')

        vm2 = os_conn.create_server('vm2',
                                    nics=[{'net-id': int_net['id']}],
                                    security_groups=[security_group.name])

        network_checks.check_vm_connectivity(env, os_conn)

        net01, subnet01 = self.create_internal_network_with_subnet(suffix=1)
        router = self.create_router_between_nets(os_conn.ext_network, subnet01,
                                                 suffix=1)

        vm3 = os_conn.create_server('vm3',
                                    nics=[{'net-id': net01['network']['id']}],
                                    security_groups=[security_group.name])

        network_checks.check_ping_from_vm(env, os_conn, vm3)

        for vm in vm1, vm2, vm3:
            vm.delete()
        os_conn.wait_servers_deleted([vm1, vm2, vm3])

        os_conn.delete_router(router['router']['id'])

        os_conn.delete_network(net01['network']['id'])
    def test_vni_for_icmp_between_instances(self, router):
        """Check VNI and segmentation_id for icmp traffic between instances
        on different compute nodes

        Scenario:
            1. Create private network net01, subnet 10.1.1.0/24
            2. Create private network net02, subnet 10.1.2.0/24
            3. Create router01_02 and connect net01 and net02 with it
            4. Boot instances vm1 and vm2 on different compute nodes
            5. Check that net02 got a new segmentation_id, different from net01
            6. Ping vm1 from vm2
            7. On the compute with vm_1, start listening on vxlan port 4789
            8. On the compute with vm_2, start listening on vxlan port 4789
            9. Ping vm2 from vm1
            10. Check that when traffic goes through net02 tunnel
                (from vm2 to router01_02) all packets have VNI of net02
                and when they travel through net01 tunnel
                (from router to vm1) they have VNI of net01
        """
        # Create network and instance
        compute_nodes = self.zone.hosts.keys()[:2]
        for i, compute_node in enumerate(compute_nodes, 1):
            network = self.os_conn.create_network(name='net%02d' % i)
            subnet = self.os_conn.create_subnet(
                network_id=network['network']['id'],
                name='net%02d__subnet' % i,
                cidr="10.1.%d.0/24" % i)
            self.os_conn.router_interface_add(router_id=router['router']['id'],
                                              subnet_id=subnet['subnet']['id'])
            self.os_conn.create_server(
                name='server%02d' % i,
                availability_zone='{}:{}'.format(self.zone.zoneName,
                                                 compute_node),
                key_name=self.instance_keypair.name,
                nics=[{
                    'net-id': network['network']['id']
                }],
                security_groups=[self.security_group.id])

        net1, net2 = [
            x for x in self.os_conn.list_networks()['networks']
            if x['name'] in ("net01", "net02")
        ]

        # Check that the networks have different segmentation_ids
        assert (net1['provider:segmentation_id'] !=
                net2['provider:segmentation_id'])

        # Check ping from server1 to server2
        server1 = self.os_conn.nova.servers.find(name="server01")
        server2 = self.os_conn.nova.servers.find(name="server02")
        server2_ip = self.os_conn.get_nova_instance_ips(server2).values()[0]
        network_checks.check_ping_from_vm(self.env, self.os_conn, server1,
                                          self.instance_keypair, server2_ip)

        # Start tcpdump
        compute1 = self.env.find_node_by_fqdn(compute_nodes[0])
        compute2 = self.env.find_node_by_fqdn(compute_nodes[1])
        log_file1 = gen_temp_file(prefix='vxlan', suffix='.log')
        log_file2 = gen_temp_file(prefix='vxlan', suffix='.log')
        with tcpdump_vxlan(ip=compute1.data['ip'],
                           env=self.env,
                           log_path=log_file1.name), tcpdump_vxlan(
                               ip=compute2.data['ip'],
                               env=self.env,
                               log_path=log_file2.name):
            # Ping server1 from server2
            server1_ip = self.os_conn.get_nova_instance_ips(
                server1).values()[0]
            network_checks.check_ping_from_vm(self.env, self.os_conn, server2,
                                              self.instance_keypair,
                                              server1_ip)

        # Check traffic
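        # For VXLAN networks the provider:segmentation_id is the VNI carried
        # in the VXLAN header, so every packet captured on a tunnel should
        # show the VNI of the network it belongs to.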
        check_all_traffic_has_vni(net1['provider:segmentation_id'],
                                  log_file1.name)
        check_all_traffic_has_vni(net2['provider:segmentation_id'],
                                  log_file2.name)
    def _prepare_openstack(self):
        """Prepare OpenStack for scenarios run

        Steps:
            1. Update default security group
            2. Upload the image with pre-installed iperf
            3. Create router01, create networks net01: net01__subnet,
            192.168.1.0/24, net02: net02__subnet, 192.168.2.0/24 and
            attach them to router01.
            4. Create keypair
            5. Launch vm1 in net01 network and vm2 in net02 network
            on different computes
            6. Go to vm1 console and send pings to vm2
        """

        self.setup_rules_for_default_sec_group()
        vm_image = self.create_image()

        self.instance_keypair = self.os_conn.create_key(
            key_name='instancekey')
        zone = self.os_conn.nova.availability_zones.find(zoneName="nova")
        hosts = zone.hosts.keys()[:2]

        # create router
        router = self.os_conn.create_router(name="router01")

        ext_network = self.os_conn.ext_network

        self.os_conn.router_gateway_add(router_id=router['router']['id'],
                                        network_id=ext_network['id'])

        ready_marker = 'ironic_iperf_boot_complete'

        userdata = '\n'.join([
            '#!/bin/bash -v',
            'apt-get install -yq iperf',
            'iperf -u -s -p 5002 <&- > /tmp/iperf_udp.log 2>&1 &',
            'echo "{marker}"',
        ]).format(marker=ready_marker)
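        # The userdata script installs iperf, starts a UDP iperf server on
        # port 5002 and prints the ready marker that
        # wait_marker_in_servers_log() waits for below.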

        # create 2 networks and 2 instances
        instances = []
        for i, hostname in enumerate(hosts, 1):
            net, subnet = self.create_internal_network_with_subnet(suffix=i)
            self.os_conn.router_interface_add(
                router_id=router['router']['id'],
                subnet_id=subnet['subnet']['id'])
            instance = self.os_conn.create_server(
                name='server%02d' % i,
                availability_zone='{}:{}'.format(zone.zoneName, hostname),
                image_id=vm_image.id,
                userdata=userdata,
                flavor=2,
                timeout=60 * 10,
                key_name=self.instance_keypair.name,
                nics=[{'net-id': net['network']['id']}],
                wait_for_active=False,
                wait_for_avaliable=False)
            instances.append(instance)

        self.os_conn.wait_servers_active(instances)

        self.os_conn.wait_marker_in_servers_log(instances, marker=ready_marker)

        # check pings
        self.server1 = self.os_conn.nova.servers.find(name="server01")
        self.server2 = self.os_conn.nova.servers.find(name="server02")
        self.server2_ip = self.os_conn.get_nova_instance_ips(
            self.server2).values()[0]
        network_checks.check_ping_from_vm(
            self.env, self.os_conn, self.server1, self.instance_keypair,
            self.server2_ip, vm_login='******', vm_password='******',
            timeout=4 * 60)

        # make a list of ovs agents that reside only on controllers
        controllers = [node.data['fqdn']
                       for node in self.env.get_nodes_by_role('controller')]
        ovs_agts = self.os_conn.neutron.list_agents(
            binary='neutron-openvswitch-agent')['agents']

        # make a list of all ovs agent ids
        self.ovs_agent_ids = [agt['id'] for agt in ovs_agts]
        self.ovs_conroller_agents = [agt['id'] for agt in ovs_agts
                                     if agt['host'] in controllers]
    def ping_public_ip_from_vms(self, vms, key, username='******'):
        for vm in vms:
            network_checks.check_ping_from_vm(
                self.env, self.os_conn, vm, key, vm_login=username)
    def test_vms_with_custom_cpu_thread_policy_less_resources(
            self, env, os_conn, hosts_hyper_threading, flavors, networks,
            keypair, security_group, policy):
        """This test checks vms with cpu_thread_policy prefer/require parameter
         with less resources

        Steps:
            1. Create net1 with subnet and router1 with interface to net1
            2. Create cpu pinning flavor with hw:numa_nodes=1 and required
            cpu_thread_policy
            3. Boot vms to have no ability to create vm on one core
                Steps are:
                1) boot M + N - 1 vms with flavor_require
                    N = count of thread siblings lists on numa0
                    M = count of thread siblings lists on numa1
                    As a result 1 core is left free
                2) create 2 vms with 1 vcpu and 'prefer' policy
                3) delete 1 vm with 2 vcpu from step 1
                4) create 1 vm with 1 vcpu and 'prefer' policy
                5) delete 1 vm with 1 vcpu from step 2
            4. Boot vm with cpu pinning flavor with required cpu_thread_policy
            5. For 'require' policy check that vm is in error state, for
            policy 'prefer' vm should be active and available
        """

        host = hosts_hyper_threading[0]
        cpus = get_cpu_distribition_per_numa_node(env)[host]
        zone = 'nova:{}'.format(host)
        flv_prefer, _, flv_require, flv_prefer_1_vcpu = flavors

        numa_count = len(cpus.keys())
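        # ts_lists[i] holds the unique thread-sibling groups (physical cores
        # with their hyper-threads) available on NUMA node i.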
        ts_lists = [list(set(self.get_thread_siblings_lists(os_conn, host, i)))
                    for i in range(numa_count)]
        if len(ts_lists[0]) <= 1 and len(ts_lists[1]) <= 1:
            pytest.skip("Configuration is NOK since instance should be on the "
                        "one numa node and use cpus from the different cores")

        def create_server_with_flavor(prefix, flavor):
            return os_conn.create_server(
                name='vm{0}_{1}'.format(prefix, flavor.name),
                flavor=flavor.id,
                key_name=keypair.name,
                nics=[{'net-id': networks[0]}],
                security_groups=[security_group.id],
                wait_for_avaliable=False,
                availability_zone=zone)

        # Boot vms to have no ability to create vm on one core
        for i in range(len(ts_lists[0]) + len(ts_lists[1]) - 1):
            vm_2_vcpu = create_server_with_flavor(prefix=i, flavor=flv_require)

        for i in range(2):
            vm_1_vcpu = create_server_with_flavor(prefix="{0}_vcpu1".format(i),
                                                  flavor=flv_prefer_1_vcpu)
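        # Delete the last 2-vcpu 'require' vm to free one whole core, boot
        # another 1-vcpu vm into it, then delete a 1-vcpu vm so that the
        # remaining free pCPUs end up on different cores.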
        vm_2_vcpu.delete()
        os_conn.wait_servers_deleted([vm_2_vcpu])
        create_server_with_flavor(prefix="_vcpu1_prefer",
                                  flavor=flv_prefer_1_vcpu)
        vm_1_vcpu.delete()
        os_conn.wait_servers_deleted([vm_1_vcpu])

        # Boot vm with cpu pinning flavor with required cpu_thread_policy
        if policy == 'prefer':
            vm = os_conn.create_server(name='vm_{0}'.format(flv_prefer.name),
                                       flavor=flv_prefer.id,
                                       key_name=keypair.name,
                                       nics=[{'net-id': networks[0]}],
                                       security_groups=[security_group.id],
                                       wait_for_avaliable=False,
                                       availability_zone=zone)
            os_conn.wait_servers_ssh_ready(os_conn.get_servers())
            network_checks.check_ping_from_vm(env, os_conn, vm,
                                              vm_keypair=keypair)
        else:
            with pytest.raises(InstanceError) as e:
                os_conn.create_server(name='vm', flavor=flv_require.id,
                                      nics=[{'net-id': networks[0]}],
                                      key_name=keypair.name,
                                      security_groups=[security_group.id],
                                      availability_zone='nova:{}'.format(host),
                                      wait_for_avaliable=False)
            expected_message = ("Insufficient compute resources: "
                                "Requested instance NUMA topology cannot fit "
                                "the given host NUMA topology")
            logger.info("Instance status is error:\n{0}".format(str(e.value)))
            assert expected_message in str(e.value), (
                "Unexpected reason of instance error")
    def test_vni_for_icmp_between_instances(self, router):
        """Check VNI and segmentation_id for icmp traffic between instances
        on different compute nodes

        Scenario:
            1. Create private network net01, subnet 10.1.1.0/24
            2. Create private network net02, subnet 10.1.2.0/24
            3. Create router01_02 and connect net01 and net02 with it
            4. Boot instances vm1 and vm2 on different compute nodes
            5. Check that net02 got a new segmentation_id, different from net01
            6. Ping vm1 from vm2
            7. On the compute with vm_1, start listening on vxlan port 4789
            8. On the compute with vm_2, start listening on vxlan port 4789
            9. Ping vm2 from vm1
            10. Check that when traffic goes through net02 tunnel
                (from vm2 to router01_02) all packets have VNI of net02
                and when they travel through net01 tunnel
                (from router to vm1) they have VNI of net01
        """
        # Create network and instance
        compute_nodes = self.zone.hosts.keys()[:2]
        for i, compute_node in enumerate(compute_nodes, 1):
            network = self.os_conn.create_network(name='net%02d' % i)
            subnet = self.os_conn.create_subnet(
                network_id=network['network']['id'],
                name='net%02d__subnet' % i,
                cidr="10.1.%d.0/24" % i)
            self.os_conn.router_interface_add(
                router_id=router['router']['id'],
                subnet_id=subnet['subnet']['id'])
            self.os_conn.create_server(
                name='server%02d' % i,
                availability_zone='{}:{}'.format(self.zone.zoneName,
                                                 compute_node),
                key_name=self.instance_keypair.name,
                nics=[{'net-id': network['network']['id']}],
                security_groups=[self.security_group.id])

        net1, net2 = [x for x in self.os_conn.list_networks()['networks']
                      if x['name'] in ("net01", "net02")]

        # Check that the networks have different segmentation_ids
        assert (net1['provider:segmentation_id'] !=
                net2['provider:segmentation_id'])

        # Check ping from server1 to server2
        server1 = self.os_conn.nova.servers.find(name="server01")
        server2 = self.os_conn.nova.servers.find(name="server02")
        server2_ip = self.os_conn.get_nova_instance_ips(server2).values()[0]
        network_checks.check_ping_from_vm(self.env, self.os_conn, server1,
                                          self.instance_keypair, server2_ip)

        # Start tcpdump
        compute1 = self.env.find_node_by_fqdn(compute_nodes[0])
        compute2 = self.env.find_node_by_fqdn(compute_nodes[1])
        log_file1 = gen_temp_file(prefix='vxlan', suffix='.log')
        log_file2 = gen_temp_file(prefix='vxlan', suffix='.log')
        with tcpdump_vxlan(
                ip=compute1.data['ip'], env=self.env,
                log_path=log_file1.name), tcpdump_vxlan(
                ip=compute2.data['ip'], env=self.env, log_path=log_file2.name):
            # Ping server1 from server2
            server1_ip = self.os_conn.get_nova_instance_ips(
                server1).values()[0]
            network_checks.check_ping_from_vm(
                self.env, self.os_conn, server2, self.instance_keypair,
                server1_ip)

        # Check traffic
        check_all_traffic_has_vni(net1['provider:segmentation_id'],
                                  log_file1.name)
        check_all_traffic_has_vni(net2['provider:segmentation_id'],
                                  log_file2.name)
    def _prepare_openstack(self):
        """Prepare OpenStack for scenarios run

        Steps:
            1. Update default security group
            2. Upload the image with pre-installed iperf
            3. Create router01, create networks net01: net01__subnet,
            192.168.1.0/24, net02: net02__subnet, 192.168.2.0/24 and
            attach them to router01.
            4. Create keypair
            5. Launch vm1 in net01 network and vm2 in net02 network
            on different computes
            6. Go to vm1 console and send pings to vm2
        """

        self.setup_rules_for_default_sec_group()
        vm_image = self.create_image(self.ubuntu_iperf_image())

        self.instance_keypair = self.os_conn.create_key(key_name='instancekey')
        zone = self.os_conn.nova.availability_zones.find(zoneName="nova")
        hosts = zone.hosts.keys()[:2]

        # create router
        router = self.os_conn.create_router(name="router01")

        # create 2 networks and 2 instances
        for i, hostname in enumerate(hosts, 1):
            net, subnet = self.create_internal_network_with_subnet(suffix=i)
            self.os_conn.router_interface_add(router_id=router['router']['id'],
                                              subnet_id=subnet['subnet']['id'])
            self.os_conn.create_server(name='server%02d' % i,
                                       availability_zone='{}:{}'.format(
                                           zone.zoneName, hostname),
                                       image_id=vm_image.id,
                                       flavor=2,
                                       timeout=60 * 10,
                                       key_name=self.instance_keypair.name,
                                       nics=[{
                                           'net-id': net['network']['id']
                                       }])

        # check pings
        self.server1 = self.os_conn.nova.servers.find(name="server01")
        self.server2 = self.os_conn.nova.servers.find(name="server02")
        self.server2_ip = self.os_conn.get_nova_instance_ips(
            self.server2).values()[0]
        network_checks.check_ping_from_vm(self.env,
                                          self.os_conn,
                                          self.server1,
                                          self.instance_keypair,
                                          self.server2_ip,
                                          vm_login='******',
                                          vm_password='******',
                                          timeout=4 * 60)

        # make a list of ovs agents that reside only on controllers
        controllers = [
            node.data['fqdn']
            for node in self.env.get_nodes_by_role('controller')
        ]
        ovs_agts = self.os_conn.neutron.list_agents(
            binary='neutron-openvswitch-agent')['agents']

        # make a list of all ovs agent ids
        self.ovs_agent_ids = [agt['id'] for agt in ovs_agts]
        self.ovs_conroller_agents = [
            agt['id'] for agt in ovs_agts if agt['host'] in controllers
        ]
    def test_vms_with_custom_cpu_thread_policy_less_resources(
            self, env, os_conn, hosts_hyper_threading, flavors, networks,
            keypair, security_group, policy):
        """This test checks vms with cpu_thread_policy prefer/require parameter
         with less resources

        Steps:
            1. Create net1 with subnet and router1 with interface to net1
            2. Create cpu pinning flavor with hw:numa_nodes=1 and required
            cpu_thread_policy
            3. Boot vms to have no ability to create vm on one core
            4. Boot vm with cpu pinning flavor with required cpu_thread_policy
            5. For 'require' policy check that vm is in error state, for
            policy 'prefer' vm should be active and available
        """

        host = hosts_hyper_threading[0]
        cpus = get_cpu_distribition_per_numa_node(env)[host]
        zone = 'nova:{}'.format(host)
        flv_prefer, flv_isolate, flv_require = flavors

        numa_count = len(cpus.keys())
        ts_lists = [list(set(self.get_thread_siblings_lists(os_conn, host, i)))
                    for i in range(numa_count)]
        if len(ts_lists[0]) <= 1 and len(ts_lists[1]) <= 1:
            pytest.skip("Configuration is NOK since instance should be on the "
                        "one numa node and use cpus from the different cores")
        # Vms boot order depends on the current environment
        #
        # If only 1 thread siblings list is on numa0 => we're not able to boot
        # vm on different cores anyway. Steps are:
        # 1) allocate whole numa0 by vm with flavor_require => numa0 is busy
        # 2) boot N-2 vms with the same flavor (N = count of thread siblings
        #  lists on numa1) => 2 cores are free
        # 3) Boot 1 vm with flavor_isolate => 2 vcpus from different cores to
        #  be allocated => 2 vcpus from different cores are free
        #
        # If 2 thread siblings lists are on numa0, steps are:
        # 1) Boot 1 vm with flavor_isolate => 2 vcpus from different cores to
        #  be allocated => 2 vcpus from different cores are free
        # 2) Boot N vms with flavor_require (N = count of thread siblings
        #  lists on numa1)
        #
        # If more than 2 thread siblings lists are on numa0, steps are:
        # 1) boot N-2 vms with flavor_require (N = count of thread siblings
        #  lists on numa0) => 2 cores are free
        # 2) boot vm with flavor_isolate => 2 vcpus from different cores are
        #  free
        # 3) Boot N vms with flavor_require (N = count of thread siblings
        #  lists on numa1)
        if len(ts_lists[0]) == 1:
            boot_order = [(flv_require, 1),
                          (flv_require, len(ts_lists[1]) - 2),
                          (flv_isolate, 1)]
        elif len(ts_lists[0]) == 2:
            boot_order = [(flv_isolate, 1),
                          (flv_require, len(ts_lists[1]))]
        else:
            boot_order = [(flv_require, len(ts_lists[0]) - 2),
                          (flv_isolate, 1),
                          (flv_require, len(ts_lists[1]))]
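        # In every case above the free pCPUs left after booting sit on
        # different cores, which the 'require' policy cannot use while
        # 'prefer' still can.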

        for (flavor, count) in boot_order:
            for i in range(count):
                os_conn.create_server(name='vm{0}_{1}'.format(i, flavor.name),
                                      flavor=flavor.id,
                                      key_name=keypair.name,
                                      nics=[{'net-id': networks[0]}],
                                      security_groups=[security_group.id],
                                      wait_for_avaliable=False,
                                      availability_zone=zone)
        if policy == 'prefer':
            vm = os_conn.create_server(name='vm_{0}'.format(flv_prefer.name),
                                       flavor=flv_prefer.id,
                                       key_name=keypair.name,
                                       nics=[{'net-id': networks[0]}],
                                       security_groups=[security_group.id],
                                       wait_for_avaliable=False,
                                       availability_zone=zone)
            network_checks.check_ping_from_vm(env, os_conn, vm,
                                              vm_keypair=keypair)
        else:
            with pytest.raises(InstanceError) as e:
                os_conn.create_server(name='vm', flavor=flv_require.id,
                                      nics=[{'net-id': networks[0]}],
                                      key_name=keypair.name,
                                      security_groups=[security_group.id],
                                      availability_zone='nova:{}'.format(host),
                                      wait_for_avaliable=False)
            expected_message = ("Insufficient compute resources: "
                                "Requested instance NUMA topology cannot fit "
                                "the given host NUMA topology")
            logger.info("Instance status is error:\n{0}".format(str(e.value)))
            assert expected_message in str(e.value), (
                "Unexpected reason of instance error")
    def test_destroy_primary_controller(self, router, prepare_openstack,
                                        env_name):
        """Destroy primary controller (l3 agent on it should be
            with ACTIVE ha_state)

        Scenario:
            1. Create network1, network2
            2. Create router1 and connect it with network1, network2 and
                external net
            3. Boot vm1 in network1
            4. Boot vm2 in network2 and associate floating ip
            5. Add rules for ping
            6. Find node with active ha_state for router
            7. If node from step 6 isn't primary controller,
                reschedule router1 to primary by banning all another
                and then clear them
            8. Start ping vm2 from vm1 by floating ip
            9. Destroy primary controller
            10. Stop ping
            11. Check that ping lost no more than 10 packets
        """
        router_id = router['router']['id']
        agents = self.get_active_l3_agents_for_router(router_id)
        l3_agent_controller = self.env.find_node_by_fqdn(agents[0]['host'])
        primary_controller = self.env.primary_controller
        other_controllers = [
            x for x in self.env.get_nodes_by_role('controller')
            if x != primary_controller
        ]

        # Reschedule the active l3 agent to the primary controller if needed
        if primary_controller != l3_agent_controller:
            with primary_controller.ssh() as remote:
                for node in other_controllers:
                    remote.check_call(
                        'pcs resource ban neutron-l3-agent {}'.format(
                            node.data['fqdn']))
                self.wait_router_migrate(router_id,
                                         primary_controller.data['fqdn'])
                for node in other_controllers:
                    remote.check_call(
                        'pcs resource clear neutron-l3-agent {}'.format(
                            node.data['fqdn']))

        server1 = self.os_conn.nova.servers.find(name="server01")
        server2 = self.os_conn.nova.servers.find(name="server02")
        server2_ip = self.os_conn.get_nova_instance_ips(server2)['floating']

        logger.info("Destroy primary controller {}".format(
            primary_controller.data['fqdn']))
        devops_node = DevopsClient.get_node_by_mac(
            env_name=env_name, mac=primary_controller.data['mac'])
        devops_node.destroy()
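        # Destroying the devops node simulates an abrupt controller failure;
        # the router is expected to be rescheduled to another l3 agent.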

        self.wait_router_rescheduled(router_id=router['router']['id'],
                                     from_node=primary_controller.data['fqdn'],
                                     timeout_seconds=5 * 60)

        network_checks.check_ping_from_vm(self.env,
                                          self.os_conn,
                                          vm=server1,
                                          vm_keypair=self.instance_keypair,
                                          ip_to_ping=server2_ip)