def test_vms_with_cpu_thread_policy_wo_hyper_threading(
            self, env, os_conn, hosts_without_hyper_threading, flavors,
            networks, keypair, security_group):
        """This test checks vms with cpu_thread_policy parameter in case of
        disabled hyper-threading

        Steps:
            1. Create net1 with subnet and router1 with interface to net1
            2. Create cpu pinning flavors with hw:numa_nodes=1 and
            cpu_thread_policy
            3. Boot vm and check that all vcpus are on different cores
            4. Redo for all flavors
            5. Check vms connectivity
        """

        host = hosts_without_hyper_threading[0]
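        # 'nova:<host>' pins scheduling to that specific compute host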
        zone = 'nova:{}'.format(host)

        for flv in flavors:
            vm = os_conn.create_server(name='vm{}'.format(flv.name),
                                       flavor=flv.id,
                                       key_name=keypair.name,
                                       nics=[{'net-id': networks[0]}],
                                       security_groups=[security_group.id],
                                       availability_zone=zone)

            used_ts_list = self.get_vm_thread_siblings_lists(os_conn, vm)
            assert len(used_ts_list) == flv.vcpus, (
                "vcpus should be on different cores")

        network_checks.check_vm_connectivity(env, os_conn)
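
    # A minimal sketch of what collecting thread siblings could look like on
    # the compute host; the get_vm_thread_siblings_lists helper above is
    # assumed to gather something similar for the instance's pinned pCPUs.
    @staticmethod
    def thread_siblings_list(cpu_id):
        # Entries like "0,8" group the hyper-threads sharing one physical
        # core; with hyper-threading disabled each list holds a single id.
        path = ('/sys/devices/system/cpu/cpu{}/topology/'
                'thread_siblings_list'.format(cpu_id))
        with open(path) as f:
            return frozenset(int(cpu) for cpu in f.read().strip().split(','))
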
    def test_shutdown_primary_controller_with_l3_agt(self):
        """[Neutron VLAN and VXLAN] Shut down primary controller
           and check l3-agent

        Steps:
            1. Check which agents router1 is hosted on:
                neutron l3-agent-list-hosting-router router1
            2. If there is no agent on the primary controller:
                neutron l3-agent-router-remove non_on_primary_agent_id router1
                neutron l3-agent-router-add on_primary_agent_id router1
            3. Destroy primary controller
                virsh destroy <primary_controller>
            4. Wait some time until all agents are up
                neutron-agent-list
            5. Check that all routers reschedule from the primary controller:
                neutron router-list-on-l3-agent <on_primary_agent_id>
            6. Boot vm3 in network1
            7. ping 8.8.8.8 from vm3
            8. ping between vm1 and vm3 by internal ip
            9. ping between vm1 and vm2 by floating ip
        """

        self._prepare_openstack()
        # Get current L3 agent on router01
        l3_agent = self.os_conn.neutron.list_l3_agent_hosting_routers(
            self.router['id'])['agents'][0]
        # Check if the agent is not on the primary controller
        # Reschedule if needed
        if l3_agent['host'] != self.primary_host:
            self.os_conn.reschedule_router_to_primary_host(self.router['id'],
                                                           self.primary_host)
            l3_agent = self.os_conn.neutron.list_l3_agent_hosting_routers(
                self.router['id'])['agents'][0]

        # virsh destroy of the primary controller
        self.env.destroy_nodes([self.primary_node])

        # Excluding the id of the l3_agent from the list
        # since it will stay on the destroyed controller
        # and remain disabled
        self.l3_agent_ids.remove(l3_agent['id'])

        # Then check that the rest l3 agents are alive
        self.os_conn.wait_agents_alive(self.l3_agent_ids)

        # Check that there are no routers on the first agent
        self.check_no_routers_on_l3_agent(l3_agent['id'])

        # Waiting for messaging layer recovery
        # https://bugs.launchpad.net/mos/+bug/1592312
        logger.debug('Waiting 5 minutes for messaging layer recovery')
        time.sleep(5 * 60)

        self.os_conn.add_server(self.networks[0],
                                self.instance_keypair.name,
                                self.hosts[0],
                                self.security_group.id)
        # Create one more server and check connectivity
        network_checks.check_vm_connectivity(self.env, self.os_conn)
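
# A rough sketch (assumed, not necessarily the project's actual helper) of
# what reschedule_router_to_primary_host could do with python-neutronclient,
# mirroring the CLI commands from steps 1-2 of the docstring above:
def reschedule_router_to_host(neutron, router_id, target_host):
    # take the router away from the l3 agent currently hosting it
    current = neutron.list_l3_agent_hosting_routers(router_id)['agents'][0]
    neutron.remove_router_from_l3_agent(current['id'], router_id)
    # and hand it to the l3 agent that lives on the desired host
    target = [agt for agt
              in neutron.list_agents(binary='neutron-l3-agent')['agents']
              if agt['host'] == target_host][0]
    neutron.add_router_to_l3_agent(target['id'], {'router_id': router_id})
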
Example #3
    def test_l3_agent_after_drop_rabbit_port(self):
        """Drop rabbit port and check l3-agent work

        Scenario:
            1. Revert snapshot with neutron cluster
            2. Create network1, network2
            3. Create router1 and connect it with network1, network2 and
               external net
            4. Boot vm1 in network1 and associate floating ip
            5. Boot vm2 in network2
            6. Add rules for ping
            7. ping 8.8.8.8, vm1 (both ip) and vm2 (fixed ip) from each other
            8. With iptables, drop rabbit's port 5673 on the node hosting
                router1
            9. Wait for router rescheduling
            10. Check that the router moved to a healthy l3-agent
            11. Boot one more VM (vm3) in network1
            12. ping 8.8.8.8, vm1 (both ip), vm2 (fixed ip) and vm3 (fixed ip)
                from each other

        Duration 10m

        """
        # drop rabbit port
        self.drop_rabbit_port(router_name="router01")

        # check pings
        network_checks.check_vm_connectivity(self.env, self.os_conn)
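
    # Assumed shape of the drop_rabbit_port helper used above: insert an
    # iptables rule on the node hosting router1 so the l3-agent loses its
    # messaging bus (RabbitMQ listens on port 5673 in this deployment).
    def drop_rabbit_port_on(self, remote, port=5673):
        result = remote.execute(
            'iptables -I OUTPUT 1 -p tcp --dport {0} -j DROP'.format(port))
        assert result['exit_code'] == 0, "iptables failed: {}".format(result)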
Example #4
    def _prepare_openstack(self):
        """Prepare OpenStack for scenarios run

        Steps:
            1. Create network1
            2. Create router1 and connect it with network1 and external net
            3. Boot vm1 in network1 and associate floating ip
            4. Boot vm2 in network2
            5. Add rules for ping
            6. ping 8.8.8.8 from vm2
            7. ping vm1 from vm2 and vm2 from vm1
        """

        # init variables
        exist_networks = self.os_conn.list_networks()['networks']
        ext_network = [x for x in exist_networks
                       if x.get('router:external')][0]
        self.zone = self.os_conn.nova.availability_zones.find(zoneName="nova")
        self.hosts = self.zone.hosts.keys()
        self.instance_keypair = self.os_conn.create_key(key_name='instancekey')
        self.setup_rules_for_default_sec_group()

        # create router
        self.router = self.os_conn.create_router(name="router01")['router']
        self.os_conn.router_gateway_add(router_id=self.router['id'],
                                        network_id=ext_network['id'])
        logger.info('router {} was created'.format(self.router['id']))

        # create one network plugged into the router
        self.net_id = self.os_conn.add_net(self.router['id'])

        # create two instances in that network,
        # each on its own compute host
        for i, hostname in enumerate(self.hosts, 1):
            self.os_conn.create_server(name='server%02d' % i,
                                       availability_zone='{}:{}'.format(
                                           self.zone.zoneName, hostname),
                                       key_name=self.instance_keypair.name,
                                       nics=[{
                                           'net-id': self.net_id
                                       }])

        # check pings
        network_checks.check_vm_connectivity(self.env, self.os_conn)

        # make a list of all ovs agent ids
        self.ovs_agent_ids = [
            agt['id'] for agt in self.os_conn.neutron.list_agents(
                binary='neutron-openvswitch-agent')['agents']
        ]
        # make a list of ovs agents that reside only on controllers
        controllers = [
            node.data['fqdn']
            for node in self.env.get_nodes_by_role('controller')
        ]
        ovs_agts = self.os_conn.neutron.list_agents(
            binary='neutron-openvswitch-agent')['agents']
        self.ovs_conroller_agents = [
            agt['id'] for agt in ovs_agts if agt['host'] in controllers
        ]
    def test_vms_connectivity_after_ovs_restart_on_computes(
            self, env, os_conn, computes_with_dpdk_hp, flavors, networks,
            keypair, security_group):
        """This test checks connectivity between VMs with DPDK after ovs
        restart on computes. Please note we're not able to count DPDK huge
        pages only, they're added to count of 2Mb huge pages.
            Steps:
            1. Create net1 with subnet, net2 with subnet and router1 with
            interfaces to both nets
            2. Create flavor for huge pages with 512Mb ram, 1 vcpu and 1Gb disk
            3. Launch vm1, vm2, vm3 on compute-1 and vm4 on compute-2, vm1 and
            vm2 in net1, vm3 and vm4 in net2
            4. Check that neutron port has binding:vif_type = vhostuser
            5. Check instance page size
            6. Restart ovs on computes
            7. Check vms connectivity after ovs restart
        """

        hosts = computes_with_dpdk_hp
        vms_param = [(hosts[0], networks[0], None),
                     (hosts[0], networks[0], None),
                     (hosts[0], networks[1], None),
                     (hosts[1], networks[1], None)]
        self.create_vms(os_conn, hosts, networks, flavors[0], keypair,
                        security_group, vms_param)

        network_checks.check_vm_connectivity(env, os_conn, vm_keypair=keypair)
        self.restart_ovs_on_computes(env, os_conn)
        network_checks.check_vm_connectivity(env, os_conn, vm_keypair=keypair)
    def test_cpu_pinning_one_numa_cell(
            self, env, os_conn, networks, flavors, security_group,
            aggregate):
        """This test checks that cpu pinning executed successfully for
        instances created on computes with 1 NUMA
        Steps:
            1. Create net1 with subnet, net2 with subnet and router1 with
            interfaces to both nets
            2. Launch instances vm1, vm3 in net1 with m1.small.performance on
            compute-1, vm2 on compute-2.
            3. Check numa nodes for all vms
            4. Check parameter in /etc/defaults/grub
            5. Check vms connectivity
        """
        hosts = aggregate.hosts
        vms = []
        network_for_instances = [networks[0], networks[1], networks[0]]
        hosts_for_instances = [hosts[0], hosts[1], hosts[0]]
        cpus = get_cpu_distribition_per_numa_node(env)

        # three vms: two on hosts[0], one on hosts[1] (see the lists above)
        for i in range(3):
            vms.append(os_conn.create_server(
                name='vm{}'.format(i),
                flavor=flavors[0].id,
                nics=[{'net-id': network_for_instances[i]}],
                availability_zone='nova:{}'.format(hosts_for_instances[i]),
                security_groups=[security_group.id]))

        for vm in vms:
            host = getattr(vm, "OS-EXT-SRV-ATTR:host")
            assert host in hosts
            self.check_cpu_for_vm(os_conn, vm, 1, cpus[host])

        network_checks.check_vm_connectivity(env, os_conn)
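
    # check_cpu_for_vm is assumed to read the instance's <cputune> pinning
    # from its libvirt XML (e.g. `virsh dumpxml <instance_name>`) and verify
    # that the pinned pCPUs belong to the expected NUMA cell(s) of the host.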
    def test_cpu_pinning_resize(
            self, env, os_conn, networks, flavors, security_group,
            aggregate, aggregate_n):
        """This test checks that cpu pinning executed successfully for
        instances created on computes with 1 NUMA
        Steps:
            1. Create net1 with subnet, net2 with subnet and router1 with
            interfaces to both nets
            2. Launch vm1 using m1.small.performance-1 flavor on compute-1 and
            vm2 on compute-2 with m1.small.old flavor.
            3. Resize vm1 to m1.small.performance-2
            4. Ping vm1 from vm2
            5. Resize vm1 to m1.small.performance-3
            6. Ping vm1 from vm2
            7. Resize vm1 to m1.small.performance-1
            8. Ping vm1 from vm2
            9. Resize vm1 to m1.small.old
            10. Ping vm1 from vm2
            11. Resize vm1 to m1.small.performance-4
            12. Ping vm1 from vm2
            13. Resize vm1 to m1.small.performance-1
            14. Ping vm1 from vm2
        """
        hosts = aggregate.hosts
        vms = []
        cpus = get_cpu_distribition_per_numa_node(env)
        flavors_for_resize = ['m1.small.perfomance-2',
                              'm1.small.perfomance-3',
                              'm1.small.perfomance-1',
                              'm1.small.old', 'm1.small.perfomance-4',
                              'm1.small.perfomance-1']

        for i in range(2):
            vms.append(os_conn.create_server(
                name='vm{}'.format(i),
                flavor=flavors[i].id,
                nics=[{'net-id': networks[i]}],
                availability_zone='nova:{}'.format(hosts[i]),
                security_groups=[security_group.id]))
        vm = vms[0]

        for flavor in flavors_for_resize:
            numas = 2
            for object_flavor in flavors:
                if object_flavor.name == flavor:
                    vm = self.resize(os_conn, vm, object_flavor.id)
                    break
            if flavor != 'm1.small.old':
                if flavor in ['m1.small.perfomance-4',
                              'm1.small.perfomance-1']:
                    numas = 1
                host = getattr(vm, "OS-EXT-SRV-ATTR:host")
                assert host in hosts
                self.check_cpu_for_vm(os_conn,
                                      os_conn.get_instance_detail(vm),
                                      numas, cpus[host])
            os_conn.wait_servers_ssh_ready(vms)
            network_checks.check_vm_connectivity(env, os_conn)
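
    # A minimal sketch of the assumed resize helper called above: trigger the
    # resize, wait for VERIFY_RESIZE, then confirm it (common.wait is the
    # same waiting helper used in test_evacuate below).
    def resize(self, os_conn, vm, flavor_id):
        os_conn.nova.servers.resize(vm, flavor_id)
        common.wait(
            lambda: os_conn.server_status_is(vm, 'VERIFY_RESIZE'),
            timeout_seconds=3 * 60,
            waiting_for='instance to reach VERIFY_RESIZE status')
        os_conn.nova.servers.confirm_resize(vm)
        return os_conn.get_instance_detail(vm)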
Example #8
    def test_VLAN_Allo_2M_HP_vms_req_HP_vms_with_old_flavor(
            self, env, os_conn, networks, nfv_flavor, keypair, security_group,
            volume, aggregate):
        """
        This test checks that huge pages are allocated only for vm1, while vm2
        and vm3 don't use huge pages, and that connectivity works properly
        Steps:
        1. Create net01, subnet.
        2. Create net 02, subnet.
        3. Create router, set gateway and add interfaces to both networks
        4. Launch vms using the m1.small.hpgs flavor: vm1, vm3 on compute-1,
        vm2 on compute-2. Use the new flavor for vm1 and the old one for vm2
        and vm3
        5. Locate the part of all instances configuration that is relevant to
        huge pages (on a controller):
            hypervisor=`nova show hpgs-test | grep OS-EXT-SRV-ATTR:host | cut -d\| -f3`
            instance=`nova show hpgs-test | grep OS-EXT-SRV-ATTR:instance_name | cut -d\| -f3`
        and on the compute:
            virsh dumpxml $instance | awk '/memoryBacking/ {p=1}; p; /\/numatune/ {p=0}'
        6. ping vm2 from vm1
        7. ping vm2 from vm3
        8. ping vm3 from vm1
        9. Check that it was allocated only HP for vm1
        """

        hosts = aggregate.hosts

        vm_0 = os_conn.create_server(name='vm1',
                                     flavor=nfv_flavor[0].id,
                                     key_name=keypair.name,
                                     nics=[{
                                         'net-id': networks[0]
                                     }],
                                     availability_zone='nova:{}'.format(
                                         hosts[0]),
                                     security_groups=[security_group.id])
        vm_1 = os_conn.create_server(name='vm2',
                                     flavor=nfv_flavor[0].id,
                                     key_name=keypair.name,
                                     availability_zone='nova:{}'.format(
                                         hosts[1]),
                                     security_groups=[security_group.id],
                                     nics=[{
                                         'net-id': networks[1]
                                     }])
        vm_2 = os_conn.create_server(name='vm3',
                                     flavor=nfv_flavor[0].id,
                                     key_name=keypair.name,
                                     nics=[{
                                         'net-id': networks[1]
                                     }],
                                     availability_zone='nova:{}'.format(
                                         hosts[1]),
                                     security_groups=[security_group.id])
        vms = [vm_0, vm_1, vm_2]

        self.check_pages(os_conn, hosts[0], total_pages=1024, free_pages=768)
        self.check_pages(os_conn, hosts[1], total_pages=1024, free_pages=512)
        for vm in vms:
            self.check_instance_page_size(os_conn, vm, size=2048)
        network_checks.check_vm_connectivity(env, os_conn)
Example #9
    def test_ban_l3_agents_and_clear_last(self):
        """Ban all l3-agents, clear last of them and check health of l3-agent

        Scenario:
            1. Revert snapshot with neutron cluster
            2. Create network1, network2
            3. Create router1 and connect it with network1, network2 and
               external net
            4. Boot vm1 in network1 and associate floating ip
            5. Boot vm2 in network2
            6. Add rules for ping
            7. ping 8.8.8.8, vm1 (both ip) and vm2 (fixed ip) from each other
            8. Ban the l3-agent hosting router1
            9. Wait for router rescheduling
            10. Repeat steps 7-8 twice
            11. Clear the last banned L3 agent
            12. Check that the router moved to a healthy l3-agent
            13. Boot one more VM (vm3) in network1
            14. ping 8.8.8.8, vm1 (both ip), vm2 (fixed ip) and vm3 (fixed ip)
                from each other

        Duration 10m

        """
        net_id = self.os_conn.neutron.list_networks(
            name="net01")['networks'][0]['id']
        devops_node = self.get_node_with_dhcp(net_id)
        ip = devops_node.data['ip']

        # ban l3 agents
        for _ in range(2):
            self.ban_l3_agent(router_name="router01", _ip=ip)
        last_banned_node = self.ban_l3_agent(router_name="router01",
                                             _ip=ip,
                                             wait_for_migrate=False)

        # clear last banned l3 agent
        self.clear_l3_agent(_ip=ip,
                            router_name="router01",
                            node=last_banned_node,
                            wait_for_alive=True)

        # create another server on net01
        net01 = self.os_conn.nova.networks.find(label="net01")
        self.os_conn.create_server(
            name='server03',
            availability_zone='{}:{}'.format(self.zone.zoneName,
                                             self.hosts[0]),
            key_name=self.instance_keypair.name,
            nics=[{'net-id': net01.id}],
            security_groups=[self.security_group.id])

        # check pings
        network_checks.check_vm_connectivity(self.env, self.os_conn)
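
    # ban_l3_agent is assumed to run `pcs resource ban neutron-l3-agent
    # <node-fqdn>` over SSH on the node at _ip and, unless
    # wait_for_migrate=False, wait until the router lands on another agent.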
def test_evacuate(devops_env, env, os_conn, instances, keypair):
    """Evacuate instances from failed compute node

    Scenario:
        1. Create net01, net01__subnet:
            neutron net-create net01
            neutron subnet-create net01 192.168.1.0/24 --enable-dhcp \
            --name net01__subnet
        2. Boot instances vm1 and vm2 in net01 on a single compute node:
        3. Destroy a compute node where instances are scheduled
        4. Evacuate instances vm1 and vm2:
            nova evacuate vm1 && nova evacuate vm2
        5. Check that they are rescheduled onto another compute node and
            are in ACTIVE state:
        6. Check that pings between vm1 and vm2 are successful
    """
    compute_host = getattr(instances[0], 'OS-EXT-SRV-ATTR:hypervisor_hostname')
    compute_node = env.find_node_by_fqdn(compute_host)
    devops_node = devops_env.get_node_by_mac(compute_node.data['mac'])
    devops_node.destroy()

    def is_hypervisor_down():
        hypervisor = os_conn.nova.hypervisors.find(
            hypervisor_hostname=compute_host)
        return hypervisor.state == 'down'

    common.wait(
        is_hypervisor_down,
        timeout_seconds=5 * 60,
        waiting_for='hypervisor {0} to be in down state'.format(compute_host))

    for instance in instances:
        os_conn.nova.servers.evacuate(instance)

    def is_instances_migrate():
        for instance in os_conn.nova.servers.list():
            if instance not in instances:
                continue
            if instance.status == 'ERROR':
                raise Exception('Instance {0.name} is in ERROR status\n'
                                '{0.fault[message]}\n'
                                '{0.fault[details]}'.format(instance))
            if not os_conn.server_status_is(instance, 'ACTIVE'):
                return False
            if getattr(instance,
                       'OS-EXT-SRV-ATTR:hypervisor_hostname') == compute_host:
                return False
        return True

    common.wait(is_instances_migrate,
                timeout_seconds=5 * 60,
                waiting_for='instances to migrate to another compute')

    network_checks.check_vm_connectivity(env, os_conn, vm_keypair=keypair)
Example #11
    def test_cold_migration_for_huge_pages_2m(self, env, os_conn, networks,
                                              nfv_flavor, security_group,
                                              aggregate):
        """This test checks that cold migration executed successfully for
            instances created on computes with huge pages 2M
            Steps:
            1. Create net1 with subnet, net2 with subnet and router1 with
            interfaces to both nets
            2. Launch instance vm1 in net1 with m1.small.hpgs
            3. Check that vm1 is created on compute with huge pages
            4. Launch instance vm2 in net2 with m1.small.hpgs
            5. Check that vm2 is created on compute with huge pages
            6. Check vms connectivity
            7. Cold migrate vm1 and check that vm moved to other compute with
            huge pages
            8. Check vms connectivity
        """
        free_pages = {0: 1024, 1: 768, 2: 512}
        hosts = aggregate.hosts
        vms = []
        vm_hosts = []
        for i in range(2):
            vm = os_conn.create_server(name='vm{}'.format(i),
                                       flavor=nfv_flavor[0].id,
                                       security_groups=[security_group.id],
                                       nics=[{
                                           'net-id': networks[i]
                                       }])
            vms.append(vm)
        for vm in vms:
            host = getattr(vm, "OS-EXT-SRV-ATTR:host")
            assert host in hosts
            vm_hosts.append(host)
        for host in hosts:
            self.check_pages(os_conn,
                             host,
                             total_pages=1024,
                             free_pages=free_pages[vm_hosts.count(host)])
        for vm in vms:
            self.check_instance_page_size(os_conn, vm, size=2048)
        network_checks.check_vm_connectivity(env, os_conn)

        vm_0_new = self.migrate(os_conn, vms[0])
        vm_host_0_new = getattr(vm_0_new, "OS-EXT-SRV-ATTR:host")
        assert vm_host_0_new in hosts
        assert vm_host_0_new != vm_hosts.pop(0)
        vm_hosts.append(vm_host_0_new)
        for host in hosts:
            self.check_pages(os_conn,
                             host,
                             total_pages=1024,
                             free_pages=free_pages[vm_hosts.count(host)])
        self.check_instance_page_size(os_conn, vm_0_new, size=2048)
        network_checks.check_vm_connectivity(env, os_conn)
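
    # The migrate helper above is assumed to wrap a cold migration much like
    # a resize: nova.servers.migrate(vm), wait for VERIFY_RESIZE, then
    # confirm_resize and re-fetch the server object.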
    def _prepare_openstack(self):
        """Prepare OpenStack for scenarios run

        Steps:
            1. Create network1
            2. Create router1 and connect it with network1 and external net
            3. Boot vm1 in network1 and associate floating ip
            4. Boot vm2 in network2
            5. Add rules for ping
            6. ping 8.8.8.8 from vm2
            7. ping vm1 from vm2 and vm2 from vm1
        """

        # init variables
        exist_networks = self.os_conn.list_networks()['networks']
        ext_network = [x for x in exist_networks
                       if x.get('router:external')][0]
        self.zone = self.os_conn.nova.availability_zones.find(zoneName="nova")
        self.hosts = self.zone.hosts.keys()
        self.instance_keypair = self.os_conn.create_key(key_name='instancekey')
        self.setup_rules_for_default_sec_group()

        # create router
        self.router = self.os_conn.create_router(name="router01")['router']
        self.os_conn.router_gateway_add(router_id=self.router['id'],
                                        network_id=ext_network['id'])
        logger.info('router {} was created'.format(self.router['id']))

        # create one network plugged into the router
        self.net_id = self.os_conn.add_net(self.router['id'])

        # create two instances in that network,
        # each on its own compute host
        for i, hostname in enumerate(self.hosts, 1):
            self.os_conn.create_server(
                name='server%02d' % i,
                availability_zone='{}:{}'.format(self.zone.zoneName, hostname),
                key_name=self.instance_keypair.name,
                nics=[{'net-id': self.net_id}])

        # check pings
        network_checks.check_vm_connectivity(self.env, self.os_conn)

        # make a list of all ovs agent ids
        self.ovs_agent_ids = [
            agt['id'] for agt in self.os_conn.neutron.list_agents(
                binary='neutron-openvswitch-agent')['agents']]
        # make a list of ovs agents that reside only on controllers
        controllers = [node.data['fqdn']
                       for node in self.env.get_nodes_by_role('controller')]
        ovs_agts = self.os_conn.neutron.list_agents(
            binary='neutron-openvswitch-agent')['agents']
        self.ovs_conroller_agents = [agt['id'] for agt in ovs_agts
                                     if agt['host'] in controllers]
Example #14
    def test_shutdown_primary_controller_with_l3_agt(self):
        """[Neutron VLAN and VXLAN] Shut down primary controller
           and check l3-agent

        Steps:
            1. Check which agents router1 is hosted on:
                neutron l3-agent-list-hosting-router router1
            2. If there is no agent on the primary controller:
                neutron l3-agent-router-remove non_on_primary_agent_id router1
                neutron l3-agent-router-add on_primary_agent_id router1
            3. Destroy primary controller
                virsh destroy <primary_controller>
            4. Wait some time until all agents are up
                neutron-agent-list
            5. Check that all routers reschedule from the primary controller:
                neutron router-list-on-l3-agent <on_primary_agent_id>
            6. Boot vm3 in network1
            7. ping 8.8.8.8 from vm3
            8. ping between vm1 and vm3 by internal ip
            9. ping between vm1 and vm2 by floating ip
        """

        self._prepare_openstack()
        # Get current L3 agent on router01
        l3_agent = self.os_conn.neutron.list_l3_agent_hosting_routers(
            self.router['id'])['agents'][0]
        # Check if the agent is not on the primary controller
        # Reschedule if needed
        if l3_agent['host'] != self.primary_host:
            self.os_conn.reschedule_router_to_primary_host(
                self.router['id'], self.primary_host)
            l3_agent = self.os_conn.neutron.list_l3_agent_hosting_routers(
                self.router['id'])['agents'][0]

        # virsh destroy of the primary controller
        self.env.destroy_nodes([self.primary_node])

        # Excluding the id of the l3_agent from the list
        # since it will stay on the destroyed controller
        # and remain disabled
        self.l3_agent_ids.remove(l3_agent['id'])

        # Then check that the rest l3 agents are alive
        self.os_conn.wait_agents_alive(self.l3_agent_ids)

        # Check that there are no routers on the first agent
        self.check_no_routers_on_l3_agent(l3_agent['id'])

        self.os_conn.add_server(self.networks[0], self.instance_keypair.name,
                                self.hosts[0], self.security_group.id)
        # Create one more server and check connectivity
        network_checks.check_vm_connectivity(self.env, self.os_conn)
    def test_vms_connectivity_after_evacuation(self, env, os_conn, networks,
                                               flavors, aggregate,
                                               security_group, devops_env):
        """This test checks vms connectivity for vms with cpu pinning with 1
        NUMA after evacuation

        Steps:
            1. Create net1 with subnet, net2 with subnet and router1 with
            interfaces to both nets
            2. Boot vm0 with cpu flavor on host0 and net0
            3. Boot vm1 with old flavor on host1 and net1
            4. Check vms connectivity
            5. Kill compute0 and evacuate vm0 to compute1 with
            --on-shared-storage parameter
            6. Check vms connectivity
            7. Check numa nodes for vm0
            8. Make compute0 alive
            9. Check that resources for vm0 were deleted from compute0
        """
        cpus = get_cpu_distribition_per_numa_node(env)
        hosts = aggregate.hosts
        vms = []

        for i in range(2):
            vm = os_conn.create_server(
                name='vm{}'.format(i), flavor=flavors[i].id,
                nics=[{'net-id': networks[i]}],
                availability_zone='nova:{}'.format(hosts[i]),
                security_groups=[security_group.id])
            vms.append(vm)
        network_checks.check_vm_connectivity(env, os_conn)
        self.check_cpu_for_vm(os_conn, vms[0], 1, cpus[hosts[0]])

        self.compute_change_state(os_conn, devops_env, hosts[0], state='down')
        vm0_new = self.evacuate(os_conn, devops_env, vms[0])
        vm0_new.get()
        new_host = getattr(vm0_new, "OS-EXT-SRV-ATTR:host")
        assert new_host in hosts, "Unexpected host after evacuation"
        assert new_host != hosts[0], "Host didn't change after evacuation"
        os_conn.wait_servers_ssh_ready(vms)
        network_checks.check_vm_connectivity(env, os_conn)
        self.check_cpu_for_vm(os_conn, vm0_new, 1, cpus[new_host])

        self.compute_change_state(os_conn, devops_env, hosts[0], state='up')
        old_hv = os_conn.nova.hypervisors.find(hypervisor_hostname=hosts[0])
        assert old_hv.running_vms == 0, (
            "Old hypervisor {0} shouldn't have running vms").format(hosts[0])

        instance_name = getattr(vm0_new, "OS-EXT-SRV-ATTR:instance_name")
        assert instance_name in self.get_instances(os_conn, new_host), (
            "Instance should be in the list of instances on the new host")
        assert instance_name not in self.get_instances(os_conn, hosts[0]), (
            "Instance shouldn't be in the list of instances on the old host")
    def test_ban_l3_agents_many_times(self):
        """Ban l3-agent many times and check health of l3-agent

        Scenario:
            1. Revert snapshot with neutron cluster
            2. Create network1, network2
            3. Create router1 and connect it with network1, network2 and
               external net
            4. Boot vm1 in network1 and associate floating ip
            5. Boot vm2 in network2
            6. Add rules for ping
            7. ping 8.8.8.8, vm1 (both ip) and vm2 (fixed ip) from each other
            8. Ban the l3-agent hosting router1
            9. Wait for router rescheduling
            10. Repeat steps 7-8
            11. Ban the l3-agent hosting router1
            12. Wait until the L3 agent dies
            13. Clear the last banned L3 agent
            14. Wait until the L3 agent is alive
            15. Repeat steps 11-14 40 times
            16. Boot one more VM (vm3) in network1
            17. ping 8.8.8.8, vm1 (both ip), vm2 (fixed ip) and vm3 (fixed ip)
                from each other

        Duration 30m

        """
        net_id = self.os_conn.neutron.list_networks(
            name="net01")['networks'][0]['id']
        devops_node = self.get_node_with_dhcp(net_id)
        ip = devops_node.data['ip']

        # ban 2 l3 agents
        for _ in range(2):
            self.ban_l3_agent(router_name="router01", _ip=ip)

        for i in range(40):
            # ban l3 agent
            logger.info('Ban/clear L3 agent. Iteration #{} from 40'.format(i))
            last_banned_node = self.ban_l3_agent(router_name="router01",
                                                 _ip=ip,
                                                 wait_for_migrate=False,
                                                 wait_for_die=True)
            # clear last banned l3 agent
            self.clear_l3_agent(_ip=ip,
                                router_name="router01",
                                node=last_banned_node,
                                wait_for_alive=True)

        # check pings
        network_checks.check_vm_connectivity(self.env, self.os_conn,
                                             timeout=10 * 60)
Example #17
    def test_ban_l3_agents_many_times(self):
        """Ban l3-agent many times and check health of l3-agent

        Scenario:
            1. Revert snapshot with neutron cluster
            2. Create network1, network2
            3. Create router1 and connect it with network1, network2 and
               external net
            4. Boot vm1 in network1 and associate floating ip
            5. Boot vm2 in network2
            6. Add rules for ping
            7. ping 8.8.8.8, vm1 (both ip) and vm2 (fixed ip) from each other
            8. Ban the l3-agent hosting router1
            9. Wait for router rescheduling
            10. Repeat steps 7-8
            11. Ban the l3-agent hosting router1
            12. Wait until the L3 agent dies
            13. Clear the last banned L3 agent
            14. Wait until the L3 agent is alive
            15. Repeat steps 11-14 40 times
            16. Boot one more VM (vm3) in network1
            17. ping 8.8.8.8, vm1 (both ip), vm2 (fixed ip) and vm3 (fixed ip)
                from each other

        Duration 30m

        """
        net_id = self.os_conn.neutron.list_networks(
            name="net01")['networks'][0]['id']
        devops_node = self.get_node_with_dhcp(net_id)
        ip = devops_node.data['ip']

        # ban 2 l3 agents
        for _ in range(2):
            self.ban_l3_agent(router_name="router01", _ip=ip)

        for _ in range(40):
            # ban l3 agent
            last_banned_node = self.ban_l3_agent(router_name="router01",
                                                 _ip=ip,
                                                 wait_for_migrate=False,
                                                 wait_for_die=True)
            # clear last banned l3 agent
            self.clear_l3_agent(_ip=ip,
                                router_name="router01",
                                node=last_banned_node,
                                wait_for_alive=True)

        # check pings
        network_checks.check_vm_connectivity(self.env, self.os_conn)
    def test_hp_distribution_1g_2m_for_vms(self, env, os_conn,
                                           computes_with_mixed_hp, networks,
                                           flavors, security_group):
        """This test checks huge pages 1Gb and 2Mb distribution for vms
            Steps:
            1. Create net1 with subnet, net2 with subnet and router1 with
            interfaces to both nets
            2. Create vm1 in net1 on compute1 with 1Gb flavor
            3. Create vm2 in net2 on compute2 with 2Mb flavor
            4. Create vm3 in net2 on compute1 with 2Mb flavor
            5. Check instances configuration (about huge pages)
            6. Check quantity of HP on computes
            7. Check pings from all vms to all vms by all ips
        """
        small_nfv_flavor, medium_nfv_flavor = flavors[0], flavors[1]
        count_to_allocate_2mb = small_nfv_flavor.ram * 1024 / page_2mb
        count_to_allocate_1gb = medium_nfv_flavor.ram * 1024 / page_1gb

        initial_conf = computes_configuration(env)

        hosts = computes_with_mixed_hp
        vms_params = [
            (hosts[0], networks[0], medium_nfv_flavor, page_1gb),
            (hosts[1], networks[1], small_nfv_flavor, page_2mb),
            (hosts[0], networks[1], small_nfv_flavor, page_2mb), ]

        vms = {}

        for i, (host, network, flavor, size) in enumerate(vms_params):
            vm = os_conn.create_server(
                name='vm{}'.format(i), flavor=flavor.id,
                nics=[{'net-id': network}],
                availability_zone='nova:{}'.format(host),
                security_groups=[security_group.id])
            vms.update({vm: size})

        for vm, exp_size in vms.items():
            assert self.get_instance_page_size(os_conn, vm) == exp_size

        vms_distribution = [(hosts[0], 1, 1), (hosts[1], 0, 1), ]
        final_conf = computes_configuration(env)
        for (host, nr_1gb, nr_2mb) in vms_distribution:
            exp_free_1g = (initial_conf[host][page_1gb]['total'] -
                           nr_1gb * count_to_allocate_1gb)
            exp_free_2m = (initial_conf[host][page_2mb]['total'] -
                           nr_2mb * count_to_allocate_2mb)
            assert exp_free_1g == final_conf[host][page_1gb]['free']
            assert exp_free_2m == final_conf[host][page_2mb]['free']

        os_conn.wait_servers_ssh_ready(vms.keys())
        network_checks.check_vm_connectivity(env, os_conn)
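
    # computes_configuration is assumed to collect per-host huge page stats
    # (total and free counts per page size, e.g. from /sys/kernel/mm/
    # hugepages/hugepages-<size>kB/), keyed by host and page size in KiB
    # (page_2mb = 2048, page_1gb = 1048576).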
    def test_kill_active_l3_agt(self):
        """[Neutron VLAN and VXLAN] Kill l3-agent process

            8. get the node with the l3 agent hosting router1:
                neutron l3-agent-hosting-router router1
            9. on this node find l3-agent process:
                ps aux | grep l3-agent
            10. Kill it:
                kill -9 <pid>
            11. Wait some time until all agents are up
                neutron-agent-list
            12. Boot vm3 in network1
            13. ping 8.8.8.8 from vm3
            14. ping between vm1 and vm3 by internal ip
            15. ping between vm1 and vm2 by floating ip
        """

        self._prepare_openstack()
        # Get current L3 agent on router01
        router_agt = self.os_conn.neutron.list_l3_agent_hosting_routers(
                self.router['id'])['agents'][0]

        # Find the current controller ip with the router01
        controller_ip = ''
        for node in self.env.get_all_nodes():
            if node.data['fqdn'] == router_agt['host']:
                controller_ip = node.data['ip']
                break

        # If ip is empty then no controller with the router was found
        assert controller_ip, "No controller with the router was found"

        with self.env.get_ssh_to_node(controller_ip) as remote:
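            # the [n] in the pattern keeps grep from matching its own process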
            cmd = "ps -aux | grep [n]eutron-l3-agent | awk '{print $2}'"
            result = remote.execute(cmd)
            pid = result['stdout'][0]
            logger.info('Got l3 agent pid {}'.format(pid))
            logger.info('Now going to kill it on the controller {}'.format(
                        controller_ip))
            result = remote.execute('kill -9 {}'.format(pid))
            assert result['exit_code'] == 0, "kill failed {}".format(result)

        self.os_conn.wait_agents_alive(self.l3_agent_ids)

        # Create one more server and check connectivity
        self.os_conn.add_server(self.networks[0],
                                self.instance_keypair.name,
                                self.hosts[0],
                                self.security_group.id)
        network_checks.check_vm_connectivity(self.env, self.os_conn)
Example #20
    def test_huge_pages_distribution(self, env, os_conn, networks, keypair,
                                     nfv_flavor, security_group, aggregate):
        """This test checks huge pages' distribution with flavor for 2M
            and 1G Huge Pages
            Steps:
            1. Create flavors m1.small.hpgs and m1.small.hpgs-1
            2. Create net01 with subnet, net02 with subnet and  router01 with
            interfaces to both nets
            3. Launch instance vm1 on compute-1 in net01 with m1.small.hpgs-1
            4. Launch instance vm2 on compute-2 in net02 with m1.small.hpgs
            5. Launch instance vm3 on compute-1 in net02 with m1.small.hpgs
            6. Check vms connectivity
        """
        hosts = aggregate.hosts

        vm_1 = os_conn.create_server(name='vm1',
                                     flavor=nfv_flavor[1].id,
                                     key_name=keypair.name,
                                     nics=[{
                                         'net-id': networks[0]
                                     }],
                                     availability_zone='nova:{}'.format(
                                         hosts[0]),
                                     security_groups=[security_group.id])
        vm_2 = os_conn.create_server(name='vm2',
                                     flavor=nfv_flavor[0].id,
                                     key_name=keypair.name,
                                     nics=[{
                                         'net-id': networks[1]
                                     }],
                                     availability_zone='nova:{}'.format(
                                         hosts[1]),
                                     security_groups=[security_group.id])
        vm_3 = os_conn.create_server(name='vm3',
                                     flavor=nfv_flavor[0].id,
                                     key_name=keypair.name,
                                     nics=[{
                                         'net-id': networks[1]
                                     }],
                                     availability_zone='nova:{}'.format(
                                         hosts[0]),
                                     security_groups=[security_group.id])

        self.check_pages(os_conn, hosts[0], total_pages=1024, free_pages=512)
        self.check_pages(os_conn, hosts[1], total_pages=1024, free_pages=512)

        self.check_instance_page_size(os_conn, vm_1, size=1048576)
        self.check_instance_page_size(os_conn, vm_2, size=2048)
        self.check_instance_page_size(os_conn, vm_3, size=2048)
        network_checks.check_vm_connectivity(env, os_conn)
    def test_allocation_huge_pages_2m_for_vms(self, env, os_conn, networks,
                                              nfv_flavor, security_group,
                                              aggregate):
        """This test checks allocation 2M HugePages for instances
            Steps:
            1. Create net1 with subnet, net2 with subnet and router1 with
            interfaces to both nets
            2. Launch vm1 and vm2 using net1 on the first compute
            3. Launch vm3 using net1 on the second compute
            4. Launch vm4 using net2 on the second compute
            5. Check instances configuration (about huge pages)
            6. Check quantity of HP on computes
            7. Associate floating to vm1
            8. Check pings from all vms to all vms by all ips
        """
        free_pages = {1: 768, 3: 256}
        hosts = aggregate.hosts
        vms = []

        for i in range(2):
            vm_name = 'vm{}'.format(i)
            vm = os_conn.create_server(
                name=vm_name, flavor=nfv_flavor[0].id,
                nics=[{'net-id': networks[0]}],
                availability_zone='nova:{}'.format(hosts[0]),
                security_groups=[security_group.id])
            vms.append(vm)
        vm2 = os_conn.create_server(
            name='vm2', flavor=nfv_flavor[0].id,
            nics=[{'net-id': networks[1]}],
            availability_zone='nova:{}'.format(hosts[0]),
            security_groups=[security_group.id])
        vm3 = os_conn.create_server(
            name='vm3', flavor=nfv_flavor[0].id,
            nics=[{'net-id': networks[1]}],
            availability_zone='nova:{}'.format(hosts[1]),
            security_groups=[security_group.id])
        vms.extend([vm2, vm3])

        for vm in vms:
            self.check_instance_page_size(os_conn, vm, size=2048)

        self.check_pages(os_conn, hosts[0], total_pages=1024,
                         free_pages=free_pages[3])
        self.check_pages(os_conn, hosts[1], total_pages=1024,
                         free_pages=free_pages[1])

        os_conn.assign_floating_ip(vms[0])
        network_checks.check_vm_connectivity(env, os_conn)
    def test_allocation_huge_pages_2m_for_vms(self, env, os_conn, networks,
                                              computes_with_hp_2mb,
                                              flavors, security_group):
        """This test checks allocation 2M HugePages for instances
            Steps:
            1. Create net1 with subnet, net2 with subnet and router1 with
            interfaces to both nets
            2. Launch vm1 and vm2 using net1 on the first compute
            3. Launch vm3 using net1 on the second compute
            4. Launch vm4 using net2 on the second compute
            5. Check instances configuration (about huge pages)
            6. Check quantity of HP on computes
            7. Associate floating to vm1
            8. Check pings from all vms to all vms by all ips
        """
        small_nfv_flavor = flavors[0]
        count_to_allocate_2mb = small_nfv_flavor.ram * 1024 / page_2mb
        initial_conf = computes_configuration(env)
        hosts = computes_with_hp_2mb

        vms = []
        vms_params = [
            (hosts[0], networks[0]),
            (hosts[0], networks[0]),
            (hosts[0], networks[1]),
            (hosts[1], networks[1]),
        ]
        for i, (host, network) in enumerate(vms_params):
            vm = os_conn.create_server(
                name='vm{}'.format(i), flavor=small_nfv_flavor.id,
                nics=[{'net-id': network}],
                availability_zone='nova:{}'.format(host),
                security_groups=[security_group.id])
            vms.append(vm)

        for vm in vms:
            assert self.get_instance_page_size(os_conn, vm) == page_2mb

        vms_distribution = [(hosts[0], 3), (hosts[1], 1), ]
        final_conf = computes_configuration(env)
        for (host, nr_2mb) in vms_distribution:
            exp_free_2m = (initial_conf[host][page_2mb]['total'] -
                           nr_2mb * count_to_allocate_2mb)
            assert exp_free_2m == final_conf[host][page_2mb]['free']

        os_conn.assign_floating_ip(vms[0])
        os_conn.wait_servers_ssh_ready(vms)
        network_checks.check_vm_connectivity(env, os_conn)
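
    # get_instance_page_size is assumed to read the backing page size from
    # the instance's libvirt XML (<memoryBacking>/<hugepages><page size=...>)
    # and return it in KiB, e.g. 2048 for 2M pages and 1048576 for 1G pages.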
    def test_cold_migration_for_huge_pages_2m(
            self, env, os_conn, networks, nfv_flavor, security_group,
            aggregate):
        """This test checks that cold migration executed successfully for
            instances created on computes with huge pages 2M
            Steps:
            1. Create net1 with subnet, net2 with subnet and router1 with
            interfaces to both nets
            2. Launch instance vm1 in net1 with m1.small.hpgs
            3. Check that vm1 is created on compute with huge pages
            4. Launch instance vm2 in net2 with m1.small.hpgs
            5. Check that vm2 is created on compute with huge pages
            6. Check vms connectivity
            7. Cold migrate vm1 and check that vm moved to other compute with
            huge pages
            8. Check vms connectivity
        """
        free_pages = {0: 1024, 1: 768, 2: 512}
        hosts = aggregate.hosts
        vms = []
        vm_hosts = []
        for i in range(2):
            vm = os_conn.create_server(
                name='vm{}'.format(i), flavor=nfv_flavor[0].id,
                security_groups=[security_group.id],
                nics=[{'net-id': networks[i]}])
            vms.append(vm)
        for vm in vms:
            host = getattr(vm, "OS-EXT-SRV-ATTR:host")
            assert host in hosts
            vm_hosts.append(host)
        for host in hosts:
            self.check_pages(os_conn, host, total_pages=1024,
                             free_pages=free_pages[vm_hosts.count(host)])
        for vm in vms:
            self.check_instance_page_size(os_conn, vm, size=2048)
        network_checks.check_vm_connectivity(env, os_conn)

        vm_0_new = self.migrate(os_conn, vms[0])
        vm_host_0_new = getattr(vm_0_new, "OS-EXT-SRV-ATTR:host")
        assert vm_host_0_new in hosts
        assert vm_host_0_new != vm_hosts.pop(0)
        vm_hosts.append(vm_host_0_new)
        for host in hosts:
            self.check_pages(os_conn, host, total_pages=1024,
                             free_pages=free_pages[vm_hosts.count(host)])
        self.check_instance_page_size(os_conn, vm_0_new, size=2048)
        network_checks.check_vm_connectivity(env, os_conn)
Example #24
    def test_kill_active_l3_agt(self):
        """[Neutron VLAN and VXLAN] Kill l3-agent process

            8. get the node with the l3 agent hosting router1:
                neutron l3-agent-hosting-router router1
            9. on this node find l3-agent process:
                ps aux | grep l3-agent
            10. Kill it:
                kill -9 <pid>
            11. Wait some time until all agents are up
                neutron-agent-list
            12. Boot vm3 in network1
            13. ping 8.8.8.8 from vm3
            14. ping between vm1 and vm3 by internal ip
            15. ping between vm1 and vm2 by floating ip
        """

        self._prepare_openstack()
        # Get current L3 agent on router01
        router_agt = self.os_conn.neutron.list_l3_agent_hosting_routers(
            self.router['id'])['agents'][0]

        # Find the current controller ip with the router01
        controller_ip = ''
        for node in self.env.get_all_nodes():
            if node.data['fqdn'] == router_agt['host']:
                controller_ip = node.data['ip']
                break

        # If ip is empty then no controller with the router was found
        assert controller_ip, "No controller with the router was found"

        with self.env.get_ssh_to_node(controller_ip) as remote:
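            # the [n] in the pattern keeps grep from matching its own process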
            cmd = "ps -aux | grep [n]eutron-l3-agent | awk '{print $2}'"
            result = remote.execute(cmd)
            pid = result['stdout'][0]
            logger.info('Got l3 agent pid {}'.format(pid))
            logger.info('Now going to kill it on the controller {}'.format(
                controller_ip))
            result = remote.execute('kill -9 {}'.format(pid))
            assert result['exit_code'] == 0, "kill failed {}".format(result)

        self.os_conn.wait_agents_alive(self.l3_agent_ids)

        # Create one more server and check connectivity
        self.os_conn.add_server(self.networks[0], self.instance_keypair.name,
                                self.hosts[0], self.security_group.id)
        network_checks.check_vm_connectivity(self.env, self.os_conn)
Example #25
    def prepare_openstack(self, init):
        """Prepare OpenStack for scenarios run

        Steps:
            1. Create network1, network2
            2. Create router1 and connect it with network1, network2 and
                external net
            3. Boot vm1 in network1 and associate floating ip
            4. Boot vm2 in network2
            5. Add rules for ping
            6. Ping 8.8.8.8, vm1 (both ip) and vm2 (fixed ip) from each other
        """
        # init variables
        self.zone = self.os_conn.nova.availability_zones.find(zoneName="nova")
        self.security_group = self.os_conn.create_sec_group_for_ssh()
        self.hosts = self.zone.hosts.keys()[:2]
        self.instance_keypair = self.os_conn.create_key(key_name='instancekey')

        # create router
        router = self.os_conn.create_router(name="router01")
        self.os_conn.router_gateway_add(
            router_id=router['router']['id'],
            network_id=self.os_conn.ext_network['id'])

        # create 2 networks and 2 instances
        for i, hostname in enumerate(self.hosts, 1):
            network = self.os_conn.create_network(name='net%02d' % i)
            subnet = self.os_conn.create_subnet(
                network_id=network['network']['id'],
                name='net%02d__subnet' % i,
                cidr="192.168.%d.0/24" % i)
            self.os_conn.router_interface_add(
                router_id=router['router']['id'],
                subnet_id=subnet['subnet']['id'])
            self.os_conn.create_server(
                name='server%02d' % i,
                availability_zone='{}:{}'.format(self.zone.zoneName, hostname),
                key_name=self.instance_keypair.name,
                nics=[{'net-id': network['network']['id']}],
                security_groups=[self.security_group.id])

        # add floating ip to first server
        server1 = self.os_conn.nova.servers.find(name="server01")
        self.os_conn.assign_floating_ip(server1)

        # check pings
        network_checks.check_vm_connectivity(self.env, self.os_conn)
Example #26
    def test_ban_one_l3_agent(self, ban_count):
        """Check l3-agent rescheduling after l3-agent dies on vlan

        Scenario:
            1. Revert snapshot with neutron cluster
            2. Create network1, network2
            3. Create router1 and connect it with network1, network2 and
               external net
            4. Boot vm1 in network1 and associate floating ip
            5. Boot vm2 in network2
            6. Add rules for ping
            7. ping 8.8.8.8, vm1 (both ip) and vm2 (fixed ip) from each other
            8. get the node with the l3 agent hosting router1
            9. ban this l3 agent on the node with pcs
                (e.g. pcs resource ban neutron-l3-agent
                node-3.test.domain.local)
            10. wait some time (about 20-30 seconds) until pcs resource and
                neutron agent-list show that it is dead
            11. Check that router1 was rescheduled
            12. Boot vm3 in network1
            13. ping 8.8.8.8, vm1 (both ip), vm2 (fixed ip) and vm3 (fixed ip)
                from each other

        Duration 10m

        """
        net_id = self.os_conn.neutron.list_networks(
            name="net01")['networks'][0]['id']
        devops_node = self.get_node_with_dhcp(net_id)
        ip = devops_node.data['ip']

        # ban l3 agent
        for _ in range(ban_count):
            self.ban_l3_agent(_ip=ip, router_name="router01")

        # create another server on net01
        net01 = self.os_conn.nova.networks.find(label="net01")
        self.os_conn.create_server(
            name='server03',
            availability_zone='{}:{}'.format(self.zone.zoneName,
                                             self.hosts[0]),
            key_name=self.instance_keypair.name,
            nics=[{'net-id': net01.id}],
            security_groups=[self.security_group.id])

        # check pings
        network_checks.check_vm_connectivity(self.env, self.os_conn)
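    # A minimal sketch of what the ban_l3_agent() helper used above could
    # look like. This is an assumption, not the suite's actual code:
    # run_on_node() is a hypothetical function that executes a shell command
    # on the given node over SSH, and the pcs resource name mirrors the one
    # quoted in the docstring.
    def ban_l3_agent_sketch(self, _ip, router_name):
        from waiting import wait  # assumed to be the wait() used in this file

        router = self.os_conn.neutron.list_routers(
            name=router_name)['routers'][0]
        agent = self.os_conn.neutron.list_l3_agent_hosting_routers(
            router['id'])['agents'][0]
        # Ban the l3 agent on its current node via pacemaker
        run_on_node(_ip, 'pcs resource ban neutron-l3-agent {}'.format(
            agent['host']))  # run_on_node is hypothetical
        # Wait until neutron reports the banned agent as dead
        wait(lambda: not self.os_conn.neutron.show_agent(
                 agent['id'])['agent']['alive'],
             timeout_seconds=60 * 5,
             waiting_for='the banned l3 agent to die')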
    def test_restart_primary_controller_with_l3_agt(self):
        """[Neutron VLAN and VXLAN] Reset primary controller and check l3-agent

        Steps:
            1. Check on what agents is router1:
                neutron l3-agent-list-hosting-router router1
            2. If there isn't an agent on the primary controller:
                neutron l3-agent-router-remove non_on_primary_agent_id router1
                neutron l3-agent-router-add on_primary_agent_id router1
            3. Restart primary controller
            4. Wait some time until all agents are up
                neutron-agent-list
            5. Check that all routers reschedule from primary controller:
                neutron router-list-on-l3-agent <on_primary_agent_id>
            6. Boot vm3 in network1
            7. ping 8.8.8.8 from vm3
            8. ping between vm1 and vm3 by internal ip
            9. ping between vm1 and vm2 by floating ip
        """

        self._prepare_openstack()
        # Get current L3 agent on router01
        router_agt = self.os_conn.neutron.list_l3_agent_hosting_routers(
                        self.router['id'])['agents'][0]
        # Check if the agent is not on the primary controller
        # Reschedule if needed
        if router_agt['host'] != self.primary_host:
            self.os_conn.reschedule_router_to_primary_host(self.router['id'],
                                                           self.primary_host)
            router_agt = self.os_conn.neutron.list_l3_agent_hosting_routers(
                            self.router['id'])['agents'][0]

        # Restart the primary controller
        self.env.warm_restart_nodes([self.primary_node])

        # Check that all l3 agents are alive
        self.os_conn.wait_agents_alive(self.l3_agent_ids)

        # Check that there are no routers on the first agent
        self.check_no_routers_on_l3_agent(router_agt['id'])

        # Create one more server and check connectivity
        self.os_conn.add_server(self.networks[0],
                                self.instance_keypair.name,
                                self.hosts[0],
                                self.security_group.id)
        network_checks.check_vm_connectivity(self.env, self.os_conn)
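    # A possible shape of the check_no_routers_on_l3_agent() helper called
    # above: a sketch built from the same neutron call this file uses
    # further down (list_routers_on_l3_agent), not the suite's exact code.
    def check_no_routers_on_l3_agent_sketch(self, agent_id):
        from waiting import wait  # assumed to be the wait() used in this file

        wait(lambda: len(self.os_conn.neutron.list_routers_on_l3_agent(
                 agent_id)['routers']) == 0,
             timeout_seconds=60 * 5, sleep_seconds=(1, 60, 5),
             waiting_for='all routers to leave agent {}'.format(agent_id))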
    def test_vms_connectivity_after_evacuation(self, env, os_conn, volume,
                                               computes_with_dpdk_hp, flavors,
                                               networks, keypair, devops_env,
                                               security_group):
        """This test checks connectivity between VMs with DPDK after
        evacuation. Please note we're not able to count DPDK huge pages only,
        they're added to count of 2Mb huge pages.
            Steps:
            1. Create net1 with subnet, net2 with subnet and  router1 with
            interfaces to both nets
            2. Create flavor for huge pages with 512Mb ram, 1 vcpu and 1Gb disk
            3. Launch vm1 (from not empty volume), vm2 on compute-1,
            vm3 - on compute-2, vm1 in net1, vm2 and vm3 in net2
            4. Kill compute2 and evacuate vm3
            5. Check vms connectivity
            6. Start compute2
            7. Check instance page size
            8. Check that neutron port has binding:vif_type = vhostuser
            9. Check that count of free 2Mb huge pages is expected one for
            each host
        """
        hosts = computes_with_dpdk_hp
        initial_conf = computes_configuration(env)
        vms_param = [(hosts[0], networks[0], {'vda': volume.id}),
                     (hosts[0], networks[1], None),
                     (hosts[1], networks[1], None)]
        vms = self.create_vms(os_conn, hosts, networks, flavors[0], keypair,
                              security_group, vms_param=vms_param)
        network_checks.check_vm_connectivity(env, os_conn, vm_keypair=keypair)

        self.compute_change_state(os_conn, devops_env, hosts[1], state='down')
        vm_new = self.evacuate(os_conn, devops_env, vms[2],
                               on_shared_storage=False)
        vm_new_host = getattr(os_conn.nova.servers.get(vm_new),
                              "OS-EXT-SRV-ATTR:host")
        assert vm_new_host in hosts
        assert vm_new_host != hosts[1]
        network_checks.check_vm_connectivity(env, os_conn, vm_keypair=keypair)

        self.compute_change_state(os_conn, devops_env, hosts[1], state='up')

        final_conf = computes_configuration(env)
        exp_hosts_usage = [(hosts[0], 3), (hosts[1], 0)]
        for (host, nr_2mb) in exp_hosts_usage:
            exp_free_2m = (initial_conf[host][page_2mb]['free'] -
                           nr_2mb * flavors[0].ram * 1024 / page_2mb)
            assert exp_free_2m == final_conf[host][page_2mb]['free']
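    # Worked example of the accounting above, assuming (per step 2) a 512Mb
    # flavor and page_2mb == 2048 (the page size in Kb): each VM consumes
    # 512 * 1024 / 2048 = 256 huge pages. After evacuation all three VMs run
    # on hosts[0], so its free count drops by 3 * 256 = 768 pages, while
    # hosts[1] returns to its initial free count, which is exactly what
    # exp_hosts_usage = [(hosts[0], 3), (hosts[1], 0)] encodes.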
    def test_allocation_huge_pages_2m_for_vms_with_old_flavor(
            self, env, os_conn, networks, computes_with_hp_2mb, flavors,
            computes_without_hp, security_group):
        """This test checks that Huge pages set for vm1, vm2 and vm3 shouldn't
            use Huge pages, connectivity works properly
            Steps:
            1. Create net1 with subnet, net2 with subnet and  router1 with
            interfaces to both nets
            2. Create vm1 in net1 on compute1 with 2Mb flavor
            3. Create vm2 in net2 on compute2 with old flavor
            4. Create vm3 in net1 on compute1 with old flavor
            5. Check huge pages: verify that HP were allocated only for vm1
            6. Check pings from all vms to all vms by all ips
        """
        small_nfv_flavor, old_flavor = flavors[0], flavors[2]
        count_to_allocate_2mb = small_nfv_flavor.ram * 1024 / page_2mb
        initial_conf = computes_configuration(env)
        hosts_hp = computes_with_hp_2mb
        hosts_no_hp = computes_without_hp

        vms_params = [
            (hosts_hp[0], networks[0], small_nfv_flavor, page_2mb),
            (hosts_no_hp[0], networks[1], old_flavor, None),
            (hosts_hp[0], networks[0], old_flavor, None)]
        vms = {}

        for i, (host, network, flavor, size) in enumerate(vms_params):
            vm = os_conn.create_server(
                name='vm{}'.format(i), flavor=flavor.id,
                nics=[{'net-id': network}],
                availability_zone='nova:{}'.format(host),
                security_groups=[security_group.id])
            vms.update({vm: size})

        for vm, exp_size in vms.items():
            assert self.get_instance_page_size(os_conn, vm) == exp_size

        vms_distribution = [(hosts_hp[0], 1), (hosts_no_hp[0], 0), ]
        final_conf = computes_configuration(env)
        for (host, nr_2mb) in vms_distribution:
            exp_free_2m = (initial_conf[host][page_2mb]['free'] -
                           nr_2mb * count_to_allocate_2mb)
            assert exp_free_2m == final_conf[host][page_2mb]['free']

        os_conn.wait_servers_ssh_ready(vms.keys())
        network_checks.check_vm_connectivity(env, os_conn)
    def test_resizing_of_vms_with_huge_pages(self, env, os_conn,
                                             computes_with_mixed_hp,
                                             networks, flavors,
                                             security_group):
        """This test checks resizing of VM with flavor for 2M to flavor
            for 1G flavor and on old flavor
            Steps:
            1. Create net1 with subnet, net2 with subnet and  router1 with
            interfaces to both nets
            2. Create vm1 in net1 on compute1 with 2Mb flavor
            3. Create vm2 in net2 on compute2 with old flavor
            4. Check instances configuration (about huge pages)
            5. Check pings from all vms to all vms by all ips
            6. Resize vm1 to 1Gb and check ping
            7. Resize vm1 to old and check ping
            8. Resize vm1 to 1Gb and check ping
            9. Resize vm1 to 2Mb and check ping
        """
        small_nfv_flv, medium_nfv_flv, old_flv = flavors
        hosts = computes_with_mixed_hp
        vms_params = [
            (hosts[0], networks[0], small_nfv_flv, page_2mb),
            (hosts[1], networks[1], old_flv, None), ]
        vms = {}
        for i, (host, network, flavor, size) in enumerate(vms_params):
            vm = os_conn.create_server(
                name='vm{}'.format(i), flavor=flavor.id,
                nics=[{'net-id': network}],
                availability_zone='nova:{}'.format(host),
                security_groups=[security_group.id])
            vms.update({vm: size})

        for vm, exp_size in vms.items():
            assert self.get_instance_page_size(os_conn, vm) == exp_size

        params = [(medium_nfv_flv, page_1gb),
                  (old_flv, None),
                  (medium_nfv_flv, page_1gb),
                  (small_nfv_flv, page_2mb), ]

        for (flavor, size) in params:
            self.resize(os_conn, vms.keys()[0], flavor_to_resize=flavor)
            assert self.get_instance_page_size(os_conn, vms.keys()[0]) == size
            os_conn.wait_servers_ssh_ready(vms.keys())
            network_checks.check_vm_connectivity(env, os_conn)
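    # A minimal sketch of what the resize() helper above could do, assuming
    # stock novaclient calls (resize + confirm_resize) rather than the
    # suite's real implementation:
    def resize_sketch(self, os_conn, vm, flavor_to_resize):
        from waiting import wait  # assumed to be the wait() used in this file

        os_conn.nova.servers.resize(vm, flavor_to_resize.id)
        # nova parks the server in VERIFY_RESIZE once the resize is staged
        wait(lambda: os_conn.nova.servers.get(vm).status == 'VERIFY_RESIZE',
             timeout_seconds=60 * 5, waiting_for='resize to be staged')
        os_conn.nova.servers.confirm_resize(vm)
        wait(lambda: os_conn.nova.servers.get(vm).status == 'ACTIVE',
             timeout_seconds=60 * 5, waiting_for='server to become ACTIVE')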
    def test_VLAN_Allo_2M_HP_vms_req_HP_vms_with_old_flavor(
            self, env, os_conn, networks, nfv_flavor, keypair,
            security_group, volume, aggregate):
        """
        This test checks that Huge pages set for vm1, vm2 and vm3 shouldn't use Huge pages, connectivity works properly
        Steps:
        1. Create net01, subnet.
        2. Create net 02, subnet.
        3. Create router, set gateway and add interfaces to both networks
        4. Launch vms using m1.small.hpgs flavor: vm1, vm3 on compute-1, vm2 on compute-2. For vm1 use new flavor,
        for vm2 and vm3 - old
        5. Locate the part of all instances configuration that is relevant to huge pages: #on controller
        hypervisor=nova show hpgs-test | grep OS-EXT-SRV-ATTR:host | cut -d\| -f3 instance=nova show hpgs-test |
        grep OS-EXT-SRV-ATTR:instance_name | cut -d\| -f3 # on compute virsh dumpxml $instance |awk '/memoryBacking/
        {p=1}; p; /\/numatune/ {p=0}'
        6. ping vm2 from vm1
        7. ping vm2 from vm3
        8. ping vm3 from vm1
        9. Check that it was allocated only HP for vm1
        """

        hosts = aggregate.hosts

        vm_0 = os_conn.create_server(
            name='vm1', flavor=nfv_flavor[0].id, key_name=keypair.name,
            nics=[{'net-id': networks[0]}],
            availability_zone='nova:{}'.format(hosts[0]),
            security_groups=[security_group.id])
        vm_1 = os_conn.create_server(
            name='vm2', flavor=nfv_flavor[0].id, key_name=keypair.name,
            availability_zone='nova:{}'.format(hosts[1]),
            security_groups=[security_group.id],
            nics=[{'net-id': networks[1]}])
        vm_2 = os_conn.create_server(
            name='vm3', flavor=nfv_flavor[0].id, key_name=keypair.name,
            nics=[{'net-id': networks[1]}],
            availability_zone='nova:{}'.format(hosts[1]),
            security_groups=[security_group.id])
        vms = [vm_0, vm_1, vm_2]

        self.check_pages(os_conn, hosts[0], total_pages=1024, free_pages=768)
        self.check_pages(os_conn, hosts[1], total_pages=1024, free_pages=512)
        for vm in vms:
            self.check_instance_page_size(os_conn, vm, size=2048)
        network_checks.check_vm_connectivity(env, os_conn)
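    # One plausible implementation of check_instance_page_size(), following
    # the virsh dumpxml recipe quoted in the docstring above. run_on_compute()
    # is a hypothetical remote shell runner; the memoryBacking/hugepages/page
    # element is standard libvirt domain XML.
    def check_instance_page_size_sketch(self, os_conn, vm, size):
        from xml.etree import ElementTree

        server = os_conn.nova.servers.get(vm)
        host = getattr(server, 'OS-EXT-SRV-ATTR:host')
        instance = getattr(server, 'OS-EXT-SRV-ATTR:instance_name')
        # run_on_compute is hypothetical
        xml = run_on_compute(host, 'virsh dumpxml {}'.format(instance))
        page = ElementTree.fromstring(xml).find(
            'memoryBacking/hugepages/page')
        assert page is not None and int(page.get('size')) == size, (
            'vm should be backed by {}Kb huge pages'.format(size))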
    def test_cpu_pinning_migration(
            self, env, os_conn, networks, flavors, security_group,
            aggregate):
        """This test checks that cpu pinning executed successfully for
        instances created on computes with 1 NUMA
        Steps:
            1. Create net1 with subnet, net2 with subnet and router1 with
            interfaces to both nets
            2. Launch vm1 using m1.small.performance flavor on compute-1 and
            vm2 on compute-2.
            3. Migrate vm1 from compute-1
            4. Check CPU Pinning
        """
        hosts = aggregate.hosts

        vms = []
        cpus = get_cpu_distribition_per_numa_node(env)

        for i in range(2):
            vms.append(os_conn.create_server(
                name='vm{}'.format(i),
                flavor=flavors[0].id,
                nics=[{'net-id': networks[0]}],
                availability_zone='nova:{}'.format(hosts[i]),
                security_groups=[security_group.id]))
        for i in range(5):
            vm_host = getattr(vms[0], "OS-EXT-SRV-ATTR:host")

            vm_0_new = self.migrate(os_conn, vms[0])
            vm_host_0_new = getattr(vm_0_new, "OS-EXT-SRV-ATTR:host")

            assert vm_host_0_new != vm_host

            # refresh the migrated server object so the host checks below
            # and the next iteration don't read a stale attribute
            vms[0] = vm_0_new

            for vm in vms:
                host = getattr(vm, "OS-EXT-SRV-ATTR:host")
                self.check_cpu_for_vm(os_conn,
                                      os_conn.get_instance_detail(vm), 2,
                                      cpus[host])

            os_conn.wait_servers_ssh_ready(vms)
            network_checks.check_vm_connectivity(env, os_conn)
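    # A sketch of what check_cpu_for_vm() might verify: all pinned vCPUs of
    # the instance land on pCPUs of the expected number of NUMA nodes.
    # get_domain_xml() is hypothetical, and the sketch assumes strict 1:1
    # pinning (each <vcpupin> cpuset holds a single cpu id).
    def check_cpu_for_vm_sketch(self, os_conn, vm, numa_count, host_cpus):
        from xml.etree import ElementTree

        xml = get_domain_xml(os_conn, vm)  # hypothetical XML getter
        pins = ElementTree.fromstring(xml).findall('cputune/vcpupin')
        pinned = {int(pin.get('cpuset')) for pin in pins}
        # host_cpus maps a numa node name to the list of its physical cpus
        used_nodes = [node for node, cpus in host_cpus.items()
                      if pinned & set(cpus)]
        assert len(used_nodes) == numa_count, (
            'vcpus should be pinned to {} numa node(s)'.format(numa_count))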
    def test_base_vms_connectivity(self, env, os_conn, computes_with_dpdk_hp,
                                   networks, keypair, flavors, security_group):
        """This test checks base connectivity between VMs with DPDK. Please
        note we're not able to count DPDK huge pages only, they're added to
        count of 2Mb huge pages.
            Steps:
            1. Create net1 with subnet, net2 with subnet and  router1 with
            interfaces to both nets
            2. Create flavor for huge pages with 512Mb ram, 1 vcpu and 1Gb disk
            3. Launch vm1, vm2, vm3 on compute-1 and vm4 on compute-2, vm1 and
            vm2 in net1, vm3 and vm4 in net2
            4. Check vms connectivity
            5. Check instance page size
            6. Check that neutron port has binding:vif_type = vhostuser
            7. Check that count of 2Mb huge pages is expected for each host
        """
        hosts = computes_with_dpdk_hp
        initial_conf = computes_configuration(env)

        vms_param = [(hosts[0], networks[0], None),
                     (hosts[0], networks[0], None),
                     (hosts[0], networks[1], None),
                     (hosts[1], networks[1], None)]
        vms = self.create_vms(os_conn, hosts, networks, flavors[0], keypair,
                              security_group, vms_param)
        network_checks.check_vm_connectivity(env, os_conn, vm_keypair=keypair)

        for vm in vms:
            self.check_vif_type_for_vm(vm, os_conn)
            act_size = self.get_instance_page_size(os_conn, vm)
            assert act_size == page_2mb, (
                "Unexpected page size: should be {0} instead of {1}".format(
                    page_2mb, act_size))

        final_conf = computes_configuration(env)
        exp_hosts_usage = [(hosts[0], 3), (hosts[1], 1)]
        for (host, nr_2mb) in exp_hosts_usage:
            exp_free_2m = (initial_conf[host][page_2mb]['free'] -
                           nr_2mb * flavors[0].ram * 1024 / page_2mb)
            assert exp_free_2m == final_conf[host][page_2mb]['free']
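    # A likely shape of check_vif_type_for_vm(), built on the standard
    # neutron ports API (a sketch, not necessarily the suite's exact code):
    # the DPDK vhost-user datapath shows up as binding:vif_type = vhostuser.
    def check_vif_type_for_vm_sketch(self, vm, os_conn):
        ports = os_conn.neutron.list_ports(device_id=vm.id)['ports']
        assert ports, 'vm should have at least one neutron port'
        for port in ports:
            assert port['binding:vif_type'] == 'vhostuser', (
                'port {0} has vif_type {1} instead of vhostuser'.format(
                    port['id'], port['binding:vif_type']))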
    def test_huge_pages_distribution(
            self, env, os_conn, networks, keypair, nfv_flavor,
            security_group, aggregate):
        """This test checks huge pages' distribution with flavor for 2M
            and 1G Huge Pages
            Steps:
            1. Create flavors m1.small.hpgs and m1.small.hpgs-1
            2. Create net01 with subnet, net02 with subnet and  router01 with
            interfaces to both nets
            3. Launch instance vm1 on compute-1 in net01 with m1.small.hpgs-1
            4. Launch instance vm2 on compute-2 in net02 with m1.small.hpgs
            5. Launch instance vm3 on compute-1 in net02 with m1.small.hpgs
            6. Check vms connectivity
        """
        hosts = aggregate.hosts

        vm_1 = os_conn.create_server(
            name='vm1', flavor=nfv_flavor[1].id, key_name=keypair.name,
            nics=[{'net-id': networks[0]}],
            availability_zone='nova:{}'.format(hosts[0]),
            security_groups=[security_group.id])
        vm_2 = os_conn.create_server(
            name='vm2', flavor=nfv_flavor[0].id, key_name=keypair.name,
            nics=[{'net-id': networks[1]}],
            availability_zone='nova:{}'.format(hosts[1]),
            security_groups=[security_group.id])
        vm_3 = os_conn.create_server(
            name='vm3', flavor=nfv_flavor[0].id, key_name=keypair.name,
            nics=[{'net-id': networks[1]}],
            availability_zone='nova:{}'.format(hosts[0]),
            security_groups=[security_group.id])

        self.check_pages(os_conn, hosts[0], total_pages=1024, free_pages=512)
        self.check_pages(os_conn, hosts[1], total_pages=1024, free_pages=512)

        self.check_instance_page_size(os_conn, vm_1, size=1048576)
        self.check_instance_page_size(os_conn, vm_2, size=2048)
        self.check_instance_page_size(os_conn, vm_3, size=2048)
        network_checks.check_vm_connectivity(env, os_conn)
    def test_ssh_connection_after_ovs_restart(self, env, os_conn,
                                              computes_with_dpdk_hp, flavors,
                                              networks, security_group,
                                              keypair, restart_point):
        """This test checks ssh connection between VMs with DPDK after ovs
        restart on computes/controllers.
            Steps:
            1. Create net1 with subnet, net2 with subnet and router1 with
            interfaces to both nets
            2. Create flavor for huge pages with 512Mb ram, 1 vcpu and 1Gb disk
            3. Launch vm1, vm2, vm3 on compute-1 and vm4 on compute-2, vm1 and
            vm2 in net1, vm3 and vm4 in net2
            4. Check that neutron port has binding:vif_type = vhostuser
            5. Check instance page size
            6. Open ssh connection to vm1 and vm4
            7. Restart ovs on computes/controllers
            8. Check that both ssh connections are still alive
            9. Check vms connectivity
        """
        hosts = computes_with_dpdk_hp
        vms_param = [(hosts[0], networks[0], None),
                     (hosts[0], networks[0], None),
                     (hosts[0], networks[1], None),
                     (hosts[1], networks[1], None)]
        vms = self.create_vms(os_conn, hosts, networks, flavors[0], keypair,
                              security_group, vms_param)

        vm1_remote = os_conn.ssh_to_instance(env, vms[0], keypair)
        vm4_remote = os_conn.ssh_to_instance(env, vms[3], keypair)

        with vm1_remote, vm4_remote:
            if restart_point == 'computes':
                self.restart_ovs_on_computes(env, os_conn)
            elif restart_point == 'controllers':
                self.restart_ovs_on_controllers(env, os_conn)
            vm1_remote.check_call("uname")
            vm4_remote.check_call("uname")
        network_checks.check_vm_connectivity(env, os_conn, vm_keypair=keypair)
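    # A minimal sketch of restart_ovs_on_computes(); run_on_node() is a
    # hypothetical remote shell runner, and the service name comes from the
    # steps of test_restart_openvswitch_agent_under_bat later in this file:
    def restart_ovs_on_computes_sketch(self, env, os_conn):
        for node in env.get_nodes_by_role('compute'):  # assumed env helper
            run_on_node(node,
                        'service neutron-plugin-openvswitch-agent restart')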
    def test_vms_connectivity_after_cold_migration(self, env, os_conn,
                                                   computes_with_dpdk_hp,
                                                   flavors, networks, keypair,
                                                   security_group):
        """This test checks connectivity between VMs with DPDK after cold
        migration. Please note we're not able to count DPDK huge pages only,
        they're added to count of 2Mb huge pages.
            Steps:
            1. Create net1 with subnet, net2 with subnet and  router1 with
            interfaces to both nets
            2. Create flavor for huge pages with 512Mb ram, 1 vcpu and 1Gb disk
            3. Launch vm1, vm2 on compute-1, vm3 - on compute-2, vm1 in net1,
            vm2 and vm3 in net2
            4. Migrate vm1 and check that vm moved to other compute with
            huge pages
            5. Check instance page size
            6. Check that neutron port has binding:vif_type = vhostuser
            7. Check vms connectivity
        """
        hosts = computes_with_dpdk_hp
        vms_param = [(hosts[0], networks[0], None),
                     (hosts[0], networks[1], None),
                     (hosts[1], networks[1], None)]
        vms = self.create_vms(os_conn, hosts, networks, flavors[0], keypair,
                              security_group, vms_param=vms_param)
        network_checks.check_vm_connectivity(env, os_conn, vm_keypair=keypair)

        vm0_new = self.migrate(os_conn, vms[0])
        vm0_host = getattr(os_conn.nova.servers.get(vm0_new),
                           "OS-EXT-SRV-ATTR:host")
        assert vm0_host in hosts, ("Unexpected host {0}, "
                                   "should be in {1}".format(vm0_host, hosts))
        assert vm0_host != hosts[0], ("A new host is expected instead of {0} "
                                      "after cold migration".format(hosts[0]))

        network_checks.check_vm_connectivity(env, os_conn, vm_keypair=keypair)
    def test_vms_connectivity_after_live_migration(self, env, os_conn,
                                                   computes_with_dpdk_hp,
                                                   flavors, networks, keypair,
                                                   security_group):
        """This test checks connectivity between VMs with DPDK after live
        migration. Please note we're not able to count DPDK huge pages only,
        they're added to count of 2Mb huge pages.
            Steps:
            1. Create net1 with subnet, net2 with subnet and  router1 with
            interfaces to both nets
            2. Create flavor for huge pages with 512Mb ram, 1 vcpu and 1Gb disk
            3. Launch vm1, vm2 on compute-1, vm3 - on compute-2, vm1 in net1,
            vm2 and vm3 in net2
            4. Live migrate vm1 to compute2
            5. Check instance page size
            6. Check that neutron port has binding:vif_type = vhostuser
            7. Check that count of free 2Mb huge pages is expected one for
            each host
            8. Check vms connectivity
        """
        hosts = computes_with_dpdk_hp
        initial_conf = computes_configuration(env)
        vms_param = [(hosts[0], networks[0], None),
                     (hosts[0], networks[1], None),
                     (hosts[1], networks[1], None)]
        vms = self.create_vms(os_conn, hosts, networks, flavors[0], keypair,
                              security_group, vms_param=vms_param)
        self.live_migrate(os_conn, vms[0], hosts[1])
        network_checks.check_vm_connectivity(env, os_conn, vm_keypair=keypair)

        final_conf = computes_configuration(env)
        exp_hosts_usage = [(hosts[0], 1), (hosts[1], 2)]
        for (host, nr_2mb) in exp_hosts_usage:
            exp_free_2m = (initial_conf[host][page_2mb]['free'] -
                           nr_2mb * flavors[0].ram * 1024 / page_2mb)
            assert exp_free_2m == final_conf[host][page_2mb]['free']
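    # What live_migrate() plausibly does under the hood: a sketch assuming
    # stock novaclient (servers.live_migrate) rather than the suite's real
    # helper:
    def live_migrate_sketch(self, os_conn, vm, host, block_migration=True):
        from waiting import wait  # assumed to be the wait() used in this file

        os_conn.nova.servers.live_migrate(vm, host,
                                          block_migration=block_migration,
                                          disk_over_commit=False)
        wait(lambda: (getattr(os_conn.nova.servers.get(vm),
                              'OS-EXT-SRV-ATTR:host') == host and
                      os_conn.nova.servers.get(vm).status == 'ACTIVE'),
             timeout_seconds=60 * 10,
             waiting_for='vm to arrive on {} in ACTIVE state'.format(host))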
    def _prepare_openstack(self):
        """Prepare OpenStack for scenarios run

        Steps:
            1. Create network1, network2
            2. Create router1 and connect it with network1, network2 and
                external net
            3. Boot vm1 in network1 and associate floating ip
            4. Boot vm2 in network2
            5. Add rules for ping
            6. ping 8.8.8.8 from vm2
            7. ping vm1 from vm2 and vm2 from vm1
        """

        # init variables
        exist_networks = self.os_conn.list_networks()['networks']
        ext_network = [x for x in exist_networks
                       if x.get('router:external')][0]
        self.zone = self.os_conn.nova.availability_zones.find(zoneName="nova")
        self.hosts = self.zone.hosts.keys()[:2]
        self.instance_keypair = self.os_conn.create_key(key_name='instancekey')
        self.security_group = self.os_conn.create_sec_group_for_ssh()
        self.networks = []

        # create router
        self.router = self.os_conn.create_router(name="router01")['router']
        self.os_conn.router_gateway_add(router_id=self.router['id'],
                                        network_id=ext_network['id'])
        logger.info(
            'router {name}({id}) was created'.format(**self.router))

        # create networks by amount of the compute hosts
        for hostname in self.hosts:
            net_id = self.os_conn.add_net(self.router['id'])
            self.networks.append(net_id)
            self.os_conn.add_server(net_id,
                                    self.instance_keypair.name,
                                    hostname,
                                    self.security_group.id)

        # add floating ip to first server
        self.server1 = self.os_conn.nova.servers.find(name="server01")
        self.os_conn.assign_floating_ip(self.server1)

        # check pings
        network_checks.check_vm_connectivity(self.env, self.os_conn)

        # Find a primary controller
        primary_controller = self.env.primary_controller
        self.primary_node = self.devops_env.get_node_by_fuel_node(
            primary_controller)
        self.primary_host = primary_controller.data['fqdn']

        # Find a non-primary controller
        non_primary_controller = self.env.non_primary_controllers[0]
        self.non_primary_node = self.devops_env.get_node_by_fuel_node(
            non_primary_controller)
        self.non_primary_host = non_primary_controller.data['fqdn']

        # make a list of all l3 agent ids
        self.l3_agent_ids = [agt['id'] for agt in
                             self.os_conn.neutron.list_agents(
                                binary='neutron-l3-agent')['agents']]

        self.dhcp_agent_ids = [agt['id'] for agt in
                               self.os_conn.neutron.list_agents(
                                   binary='neutron-dhcp-agent')['agents']]
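    # A sketch of reschedule_router_to_primary_host(), assembled from the
    # same neutron calls used further down in this file
    # (remove_router_from_l3_agent / add_router_to_l3_agent); an assumption,
    # not the real method:
    def reschedule_router_to_primary_host_sketch(self, router_id, host):
        current = self.os_conn.neutron.list_l3_agent_hosting_routers(
            router_id)['agents'][0]
        target = [agt for agt in self.os_conn.neutron.list_agents(
                      binary='neutron-l3-agent')['agents']
                  if agt['host'] == host][0]
        self.os_conn.neutron.remove_router_from_l3_agent(
            current['id'], router_id=router_id)
        self.os_conn.neutron.add_router_to_l3_agent(
            target['id'], body={'router_id': router_id})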
    def test_lm_ceph_for_huge_pages(self, env, os_conn, networks, volume,
                                    keypair, nfv_flavor, security_group,
                                    aggregate):
        """This test checks that live migration executed successfully for
            instances created on computes with ceph and huge pages
            Steps:
            1. Create net1 with subnet, net2 with subnet and  router1 with
            interfaces to both nets
            2. Launch instance vm1 with volume vol1 on compute-1 in net1 with
            m1.small.hpgs
            3. Launch instance vm2 on compute-2 in net2 with m1.small.hpgs
            4. Make volume from vm2 volume_vm
            5. Launch instance vm3 on compute-2 in net2 with volume_vm
            with m1.small.hpgs
            6. Check vms connectivity
            7. Live migrate vm1 on compute-2 and check that vm moved to
            compute-2 with Active state
            8. Check vms connectivity
            9. Live migrate vm2 with block-migrate parameter on compute-1 and
            check that vm moved to compute-1 with Active state
            10. Check vms connectivity
            11. Live migrate vm3 on compute-1 and check that vm moved to
            compute-1 with Active state
            12. Check vms connectivity
        """
        hosts = aggregate.hosts

        vm_0 = os_conn.create_server(
            name='vm1', flavor=nfv_flavor[0].id, key_name=keypair.name,
            nics=[{'net-id': networks[0]}],
            availability_zone='nova:{}'.format(hosts[0]),
            security_groups=[security_group.id],
            block_device_mapping={'vda': volume.id})
        vm_1 = os_conn.create_server(
            name='vm2', flavor=nfv_flavor[0].id, key_name=keypair.name,
            availability_zone='nova:{}'.format(hosts[1]),
            security_groups=[security_group.id],
            nics=[{'net-id': networks[1]}])
        volume_vm = self.create_volume_from_vm(os_conn, vm_1)
        vm_2 = os_conn.create_server(
            name='vm3', flavor=nfv_flavor[0].id, key_name=keypair.name,
            nics=[{'net-id': networks[1]}],
            availability_zone='nova:{}'.format(hosts[1]),
            security_groups=[security_group.id],
            block_device_mapping={'vda': volume_vm})
        vms = [vm_0, vm_1, vm_2]

        self.check_pages(os_conn, hosts[0], total_pages=1024, free_pages=768)
        self.check_pages(os_conn, hosts[1], total_pages=1024, free_pages=512)
        for vm in vms:
            self.check_instance_page_size(os_conn, vm, size=2048)
        network_checks.check_vm_connectivity(env, os_conn)

        self.live_migrate(os_conn, vms[0], hosts[1], block_migration=False)
        self.check_pages(os_conn, hosts[0], total_pages=1024, free_pages=1024)
        self.check_pages(os_conn, hosts[1], total_pages=1024, free_pages=256)
        network_checks.check_vm_connectivity(env, os_conn)

        self.live_migrate(os_conn, vms[1], hosts[0])
        self.check_pages(os_conn, hosts[0], total_pages=1024, free_pages=768)
        self.check_pages(os_conn, hosts[1], total_pages=1024, free_pages=512)
        network_checks.check_vm_connectivity(env, os_conn)

        self.live_migrate(os_conn, vms[2], hosts[0], block_migration=False)
        self.check_pages(os_conn, hosts[0], total_pages=1024, free_pages=512)
        self.check_pages(os_conn, hosts[1], total_pages=1024, free_pages=768)
        for vm in vms:
            self.check_instance_page_size(os_conn, vm, size=2048)
        network_checks.check_vm_connectivity(env, os_conn)
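    # Worked accounting for the check_pages() expectations above, assuming
    # 1024 total 2Mb pages per host and a 512Mb m1.small.hpgs flavor,
    # i.e. 512 * 1024 / 2048 = 256 pages per vm:
    #   start:        host0 runs vm1           -> 1024 - 256 = 768 free
    #                 host1 runs vm2, vm3      -> 1024 - 512 = 512 free
    #   vm1 -> host1: host0 is empty           -> 1024 free
    #                 host1 runs vm1, vm2, vm3 -> 1024 - 768 = 256 free
    #   vm2 -> host0: back to 768/512, and after vm3 -> host0 the mirror
    #                 image: host0 512 free, host1 768 free.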
    def test_shutdown_not_primary_controller(self, env_name):
        """Shut down non-primary controller and check l3-agent work

        Scenario:
            1. Revert snapshot with neutron cluster
            2. Create network1, network2
            3. Create router1 and connect it with network1, network2 and
               external net
            4. Boot vm1 in network1 and associate floating ip
            5. Boot vm2 in network2
            6. Add rules for ping
            7. ping 8.8.8.8, vm1 (both ip) and vm2 (fixed ip) from each other
            8. Check on what agents is router1
            9. If the agent is on the primary controller, move it to any
               other controller
            10. Destroy the non-primary controller
            11. Wait until the l3 agent dies
            12. Check that all routers are rescheduled from the non-primary
                controller
            13. Boot vm3 in network1
            14. ping 8.8.8.8, vm1 (both ip), vm2 (fixed ip) and vm3 (fixed ip)
                from each other vm

        Duration 10m

        """
        router = self.os_conn.neutron.list_routers(
            name='router01')['routers'][0]
        l3_agent = self.os_conn.get_l3_for_router(router['id'])['agents'][0]
        leader_node = self.env.leader_controller

        # Move router to slave l3 agent, if needed
        if leader_node.data['fqdn'] == l3_agent['host']:
            l3_agents = self.os_conn.list_l3_agents()
            leader_l3_agent = [x for x in l3_agents
                               if x['host'] == leader_node.data['fqdn']][0]
            self.os_conn.neutron.remove_router_from_l3_agent(
                leader_l3_agent['id'],
                router_id=router['id'])
            slave_l3_agents = [x for x in l3_agents if x != leader_l3_agent]
            l3_agent = slave_l3_agents[0]
            self.os_conn.neutron.add_router_to_l3_agent(
                l3_agent['id'],
                body={'router_id': router['id']})

        # Destroy node with l3 agent
        node = self.env.find_node_by_fqdn(l3_agent['host'])
        devops_node = DevopsClient.get_node_by_mac(env_name=env_name,
                                                   mac=node.data['mac'])
        if devops_node is not None:
            devops_node.destroy()
        else:
            raise Exception("Can't find devops controller node to destroy it")

        # Wait for the l3 agent to die
        wait(
            lambda: self.os_conn.get_l3_for_router(
                router['id'])['agents'][0]['alive'] is False,
            expected_exceptions=NeutronClientException,
            timeout_seconds=60 * 5, sleep_seconds=(1, 60, 5),
            waiting_for="L3 agent is died")

        # Wait for all routers to migrate from the dead l3 agent
        wait(
            lambda: len(self.os_conn.neutron.list_routers_on_l3_agent(
                l3_agent['id'])['routers']) == 0,
            timeout_seconds=60 * 5, sleep_seconds=(1, 60, 5),
            waiting_for="migrating all routers from died L3 agent"
        )

        # create another server on net01
        net01 = self.os_conn.nova.networks.find(label="net01")
        self.os_conn.create_server(
            name='server03',
            availability_zone='{}:{}'.format(self.zone.zoneName,
                                             self.hosts[0]),
            key_name=self.instance_keypair.name,
            nics=[{'net-id': net01.id}],
            security_groups=[self.security_group.id])

        # check pings
        network_checks.check_vm_connectivity(self.env, self.os_conn)
    def _prepare_openstack(self):
        """Prepare OpenStack for scenarios run

        Steps:
            1. Create network1, network2
            2. Create router1 and connect it with network1, network2 and
                external net
            3. Boot vm1 in network1 and associate floating ip
            4. Boot vm2 in network2
            5. Add rules for ping
            6. ping 8.8.8.8 from vm2
            7. ping vm1 from vm2 and vm2 from vm1
        """

        # init variables
        exist_networks = self.os_conn.list_networks()['networks']
        ext_network = [x for x in exist_networks
                       if x.get('router:external')][0]
        self.zone = self.os_conn.nova.availability_zones.find(zoneName="nova")
        self.hosts = self.zone.hosts.keys()[:2]
        self.instance_keypair = self.os_conn.create_key(key_name='instancekey')
        self.security_group = self.os_conn.create_sec_group_for_ssh()
        self.networks = []

        # create router
        self.router = self.os_conn.create_router(name="router01")['router']
        self.os_conn.router_gateway_add(router_id=self.router['id'],
                                        network_id=ext_network['id'])
        logger.info('router {name}({id}) was created'.format(**self.router))

        # create networks by amount of the compute hosts
        for hostname in self.hosts:
            net_id = self.os_conn.add_net(self.router['id'])
            self.networks.append(net_id)
            self.os_conn.add_server(net_id, self.instance_keypair.name,
                                    hostname, self.security_group.id)

        # add floating ip to first server
        self.server1 = self.os_conn.nova.servers.find(name="server01")
        self.os_conn.assign_floating_ip(self.server1)

        # check pings
        network_checks.check_vm_connectivity(self.env, self.os_conn)

        # Find a primary controller
        primary_controller = self.env.primary_controller
        mac = primary_controller.data['mac']
        self.primary_node = DevopsClient.get_node_by_mac(
            env_name=self.env_name, mac=mac)
        self.primary_host = primary_controller.data['fqdn']

        # Find a non-primary controller
        non_primary_controller = self.env.non_primary_controllers[0]
        mac = non_primary_controller.data['mac']
        self.non_primary_node = DevopsClient.get_node_by_mac(
            env_name=self.env_name, mac=mac)
        self.non_primary_host = non_primary_controller.data['fqdn']

        # make a list of all l3 agent ids
        self.l3_agent_ids = [
            agt['id'] for agt in self.os_conn.neutron.list_agents(
                binary='neutron-l3-agent')['agents']
        ]

        self.dhcp_agent_ids = [
            agt['id'] for agt in self.os_conn.neutron.list_agents(
                binary='neutron-dhcp-agent')['agents']
        ]
    def test_restart_openvswitch_agent_under_bat(self):
        """Restart openvswitch-agents with broadcast traffic background

        Steps:
            1. Go to vm1's console and run arping
               to initiate broadcast traffic:
                    sudo arping -I eth0 <vm2_fixed_ip>
            2. Disable ovs-agents on all controllers
            3. Restart service 'neutron-plugin-openvswitch-agent'
               on all computes
            4. Enable ovs-agents back.
            5. Check that pings between vm1 and vm2 aren't interrupted,
               or that no more than 2 packets are lost
        """
        self._prepare_openstack()
        # Run arping in background on server01 towards server02
        srv_list = self.os_conn.nova.servers.list()
        srv1 = srv_list.pop()
        srv2 = srv_list.pop()
        vm_ip = self.os_conn.get_nova_instance_ips(
            self.os_conn.nova.servers.find(name=srv2.name))['fixed']

        arping_cmd = 'sudo arping -I eth0 {}'.format(vm_ip)
        cmd = ' '.join((arping_cmd, '< /dev/null > ~/arp.log 2>&1 &'))
        result = network_checks.run_on_vm(self.env, self.os_conn, srv1,
                                          self.instance_keypair, cmd)
        err_msg = 'Failed to start the arping on vm result: {}'.format(result)
        assert not result['exit_code'], err_msg

        # Then check that all ovs agents are alive
        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        # Disable ovs agent on all controllers
        self.disable_ovs_agents_on_controller()

        # Then check that all ovs agents on controllers went down
        self.os_conn.wait_agents_down(self.ovs_conroller_agents)

        # Restart ovs agent service on all computes
        self.restart_ovs_agents_on_computes()

        # Enable ovs agent on all controllers
        self.enable_ovs_agents_on_controllers()

        # Then check that all ovs agents are alive
        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        # Check that arping is still executing
        cmd = 'ps'
        result = network_checks.run_on_vm(self.env, self.os_conn, srv1,
                                          self.instance_keypair, cmd)
        arping_is_run = False
        for line in result['stdout']:
            if arping_cmd in line:
                arping_is_run = True
                break
        err_msg = 'arping was not found in stdout: {}'.format(result['stdout'])
        assert arping_is_run, err_msg

        # Read the arping execution log for possible future debugging
        cmd = 'cat ~/arp.log'
        result = network_checks.run_on_vm(self.env, self.os_conn, srv1,
                                          self.instance_keypair, cmd)
        logger.debug(result)

        # Check connectivity
        network_checks.check_vm_connectivity(self.env, self.os_conn)
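    # Sketches of the pacemaker-driven helpers used above; the clone resource
    # name and the run_on_node() runner are assumptions in the spirit of the
    # pcs commands quoted elsewhere in this file:
    def disable_ovs_agents_on_controller_sketch(self):
        # disabling the cloned resource stops the agent on every controller
        run_on_node(self.primary_host,  # run_on_node is hypothetical
                    'pcs resource disable neutron-openvswitch-agent')

    def enable_ovs_agents_on_controllers_sketch(self):
        run_on_node(self.primary_host,
                    'pcs resource enable neutron-openvswitch-agent')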