Code example #1
    def reset_computes(self, hostnames, env_name):

        def get_hypervisors():
            return [x for x in self.os_conn.nova.hypervisors.list()
                    if x.hypervisor_hostname in hostnames]

        node_states = defaultdict(list)

        def is_nodes_started():
            for hypervisor in get_hypervisors():
                state = hypervisor.state
                prev_states = node_states[hypervisor.hypervisor_hostname]
                if len(prev_states) == 0 or state != prev_states[-1]:
                    prev_states.append(state)

            return all(x[-2:] == ['down', 'up'] for x in node_states.values())

        logger.info('Resetting computes {}'.format(hostnames))
        for hostname in hostnames:
            node = self.env.find_node_by_fqdn(hostname)
            devops_node = DevopsClient.get_node_by_mac(env_name=env_name,
                                                       mac=node.data['mac'])
            devops_node.reset()

        wait(is_nodes_started, timeout_seconds=10 * 60)
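The `is_nodes_started` predicate above does more than poll for 'up': it records each hypervisor's state history and succeeds only once the last two recorded states are `['down', 'up']`, which distinguishes a completed reset from an 'up' reading taken before the node went down. A minimal, self-contained sketch of the same transition-tracking idea, with `poll_state` as a stand-in for the Nova hypervisor query:

    from collections import defaultdict

    def make_reset_checker(poll_state, names):
        """Return a predicate that is True once every named node has
        been observed going down and then coming back up."""
        histories = defaultdict(list)

        def has_reset():
            for name in names:
                state = poll_state(name)  # e.g. 'up' or 'down'
                history = histories[name]
                # Record state *changes* only, so the history stays compact
                if not history or state != history[-1]:
                    history.append(state)
            # A completed reset leaves a trailing ['down', 'up'] pair
            return all(h[-2:] == ['down', 'up']
                       for h in histories.values())

        return has_reset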
Code example #2
    def test_destroy_primary_controller(self, router, prepare_openstack,
                                        env_name):
        """Destroy primary controller (l3 agent on it should be
            with ACTIVE ha_state)

        Scenario:
            1. Create network1, network2
            2. Create router1 and connect it with network1, network2 and
                external net
            3. Boot vm1 in network1
            4. Boot vm2 in network2 and associate floating ip
            5. Add rules for ping
            6. Find node with active ha_state for router
            7. If node from step 6 isn't the primary controller,
                reschedule router1 to primary by banning all the others
                and then clearing them
            8. Start ping vm2 from vm1 by floating ip
            9. Destroy primary controller
            10. Stop ping
            11. Check that ping lost no more than 10 packets
        """
        router_id = router['router']['id']
        agents = self.get_active_l3_agents_for_router(router_id)
        l3_agent_controller = self.env.find_node_by_fqdn(agents[0]['host'])
        primary_controller = self.env.primary_controller
        other_controllers = [x for x
                             in self.env.get_nodes_by_role('controller')
                             if x != primary_controller]

        # Reschedule active l3 agent to primary if needed
        if primary_controller != l3_agent_controller:
            with primary_controller.ssh() as remote:
                for node in other_controllers:
                    remote.check_call(
                        'pcs resource ban neutron-l3-agent {}'.format(
                            node.data['fqdn']))
                self.wait_router_migrate(router_id,
                                         primary_controller.data['fqdn'])
                for node in other_controllers:
                    remote.check_call(
                        'pcs resource clear neutron-l3-agent {}'.format(
                            node.data['fqdn']))

        server1 = self.os_conn.nova.servers.find(name="server01")
        server2 = self.os_conn.nova.servers.find(name="server02")
        server2_ip = self.os_conn.get_nova_instance_ips(server2)['floating']

        logger.info("Destroy primary controller {}".format(
            primary_controller.data['fqdn']))
        devops_node = DevopsClient.get_node_by_mac(
            env_name=env_name, mac=primary_controller.data['mac'])
        devops_node.destroy()

        self.wait_router_rescheduled(router_id=router['router']['id'],
                                     from_node=primary_controller.data['fqdn'],
                                     timeout_seconds=5 * 60)

        network_checks.check_ping_from_vm(
            self.env, self.os_conn, vm=server1,
            vm_keypair=self.instance_keypair, ip_to_ping=server2_ip)
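`wait_router_migrate` and `wait_router_rescheduled` are helper methods not shown in this excerpt. Assuming the same `waiting.wait` utility used elsewhere in these tests and the standard neutronclient `list_l3_agent_hosting_routers` call, `wait_router_rescheduled` might look roughly like this sketch:

    from waiting import wait  # assumed; matches the wait() in example #1

    def wait_router_rescheduled(self, router_id, from_node,
                                timeout_seconds=5 * 60):
        """Hypothetical sketch: block until the router's L3 agent is
        hosted somewhere other than `from_node`."""

        def rescheduled():
            agents = self.os_conn.neutron.list_l3_agent_hosting_routers(
                router_id)['agents']
            hosts = [agent['host'] for agent in agents if agent['alive']]
            return bool(hosts) and from_node not in hosts

        wait(rescheduled, timeout_seconds=timeout_seconds, sleep_seconds=10,
             waiting_for='router {0} to leave {1}'.format(router_id,
                                                          from_node))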
Code example #3
    def test_east_west_connectivity_after_destroy_controller(
            self, prepare_openstack, env_name):
        """Check East-West connectivity after destroy controller

        Scenario:
            1. Create net01, subnet net01__subnet for it
            2. Create net02, subnet net02__subnet for it
            3. Create router01_02 with router type Distributed and
                with gateway to external network
            4. Add interfaces to the router01_02 with net01_subnet and
                net02_subnet
            5. Boot vm_1 in the net01
            6. Boot vm_2 in the net02 on another compute
            7. Go to the vm_1
            8. Ping vm_2
            9. Destroy one controller
            10. Go to the vm_2 with internal ip from namespace on compute
            11. Ping vm_1 with internal IP

        Duration 10m

        """
        self.check_ping_from_vm(vm=self.server1,
                                vm_keypair=self.instance_keypair,
                                ip_to_ping=self.server2_ip)

        # destroy controller
        controller = self.env.get_nodes_by_role('controller')[0]
        devops_node = DevopsClient.get_node_by_mac(env_name=env_name,
                                                   mac=controller.data['mac'])
        self.env.destroy_nodes([devops_node])

        self.check_ping_from_vm(vm=self.server2,
                                vm_keypair=self.instance_keypair,
                                ip_to_ping=self.server1_ip)
Code example #4
    def prepare_openstack(self, setup, env_name):
        """Prepare OpenStack for scenarios run

        Steps:
            1. Create network1, network2
            2. Create router1 and connect it with network1, network2 and
                external net
            3. Boot vm1 in network1 and associate floating ip
            4. Boot vm2 in network2
            5. Add rules for ping
            6. ping 8.8.8.8 from vm2
            7. ping vm1 from vm2 and vm2 from vm1
        """

        # init variables
        exist_networks = self.os_conn.list_networks()['networks']
        ext_network = [x for x in exist_networks
                       if x.get('router:external')][0]
        self.zone = self.os_conn.nova.availability_zones.find(zoneName="nova")
        self.hosts = list(self.zone.hosts.keys())[:2]
        self.instance_keypair = self.os_conn.create_key(key_name='instancekey')
        self.security_group = self.os_conn.create_sec_group_for_ssh()
        self.networks = []

        # create router
        self.router = self.os_conn.create_router(name="router01")['router']
        self.os_conn.router_gateway_add(router_id=self.router['id'],
                                        network_id=ext_network['id'])
        logger.info('router {} was created'.format(self.router['id']))

        # create networks by amount of the compute hosts
        for hostname in self.hosts:
            net_id = self.os_conn.add_net(self.router['id'])
            self.networks.append(net_id)
            self.os_conn.add_server(net_id,
                                    self.instance_keypair.name,
                                    hostname,
                                    self.security_group.id)

        # add floating ip to first server
        server1 = self.os_conn.nova.servers.find(name="server01")
        self.os_conn.assign_floating_ip(server1)

        # check pings
        self.check_vm_connectivity()

        # Find a primary controller
        mac = self.env.primary_controller.data['mac']
        self.primary_node = DevopsClient.get_node_by_mac(env_name=env_name,
                                                         mac=mac)
        self.primary_host = self.env.primary_controller.data['fqdn']

        # make a list of all l3 agent ids
        self.l3_agent_ids = [agt['id'] for agt in
                             self.os_conn.neutron.list_agents(
                                binary='neutron-l3-agent')['agents']]

        self.dhcp_agent_ids = [agt['id'] for agt in
                               self.os_conn.neutron.list_agents(
                                   binary='neutron-dhcp-agent')['agents']]
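`add_net` and `add_server` are project helpers not shown here. Based on the scenario steps elsewhere in this collection (create netNN with a netNN__subnet, plug it into the router, boot a server on a given compute), `add_net` could plausibly be sketched with the plain neutronclient API like this; the naming and CIDR scheme are assumptions:

    def add_net(self, router_id):
        """Hypothetical sketch: create a network plus subnet, attach the
        subnet to the router, and return the new network's id."""
        index = len(self.networks) + 1
        net = self.os_conn.neutron.create_network(
            {'network': {'name': 'net%02d' % index}})['network']
        subnet = self.os_conn.neutron.create_subnet(
            {'subnet': {'network_id': net['id'],
                        'name': 'net%02d__subnet' % index,
                        'cidr': '192.168.%d.0/24' % index,
                        'ip_version': 4}})['subnet']
        self.os_conn.neutron.add_interface_router(
            router_id, {'subnet_id': subnet['id']})
        return net['id']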
Code example #5
def fuel_master_ip(request, env_name, snapshot_name):
    """Get fuel master ip"""
    fuel_ip = request.config.getoption("--fuel-ip")
    if not fuel_ip:
        fuel_ip = DevopsClient.get_admin_node_ip(env_name=env_name)
    if not fuel_ip:
        fuel_ip = SERVER_ADDRESS
    return fuel_ip
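The fixture resolves the Fuel master IP in three steps: an explicit `--fuel-ip` command-line option, then a devops lookup, then the `SERVER_ADDRESS` default. The option itself has to be registered in a `conftest.py` hook; a minimal sketch of that registration:

    # conftest.py -- sketch of the option registration the fixture assumes
    def pytest_addoption(parser):
        parser.addoption('--fuel-ip', action='store', default=None,
                         help='IP of the Fuel master node; when omitted it '
                              'is resolved via DevopsClient or falls back '
                              'to SERVER_ADDRESS')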
Code example #6
    def test_reset_primary_controller(self, router, prepare_openstack,
                                      env_name):
        """Reset primary controller (l3 agent on it should be
            with ACTIVE ha_state)

        Scenario:
            1. Create network1, network2
            2. Create router1 and connect it with network1, network2 and
                external net
            3. Boot vm1 in network1
            4. Boot vm2 in network2 and associate floating ip
            5. Add rules for ping
            6. Find node with active ha_state for router
            7. If node from step 6 isn't the primary controller,
                reschedule router1 to primary by banning all the others
                and then clearing them
            8. Start ping vm2 from vm1 by floating ip
            9. Reset primary controller
            10. Stop ping
            11. Check that ping lost no more than 10 packets
            12. One agent has ACTIVE ha_state, the others (2) have
                STANDBY ha_state

        """
        router_id = router["router"]["id"]
        agents = self.get_active_l3_agents_for_router(router_id)
        l3_agent_controller = self.env.find_node_by_fqdn(agents[0]["host"])
        primary_controller = self.env.primary_controller
        for node in self.env.get_nodes_by_role("controller"):
            if node != primary_controller:
                proxy_node = node.data["fqdn"]
                break
        else:
            raise Exception("Can't find non primary controller")
        server1 = self.os_conn.nova.servers.find(name="server01")
        server2 = self.os_conn.nova.servers.find(name="server02")
        server2_ip = self.os_conn.get_nova_instance_ips(server2)["floating"]

        # Reschedule active l3 agent to primary if needed
        self.reschedule_active_l3_agt(router_id, primary_controller,
                                      l3_agent_controller)

        from_node = l3_agent_controller.data["fqdn"]
        self.wait_router_rescheduled(router_id=router_id,
                                     from_node=from_node,
                                     timeout_seconds=5 * 60)

        # Start ping in background and reset the node
        with self.background_ping(vm=server1,
                                  vm_keypair=self.instance_keypair,
                                  ip_to_ping=server2_ip,
                                  proxy_node=proxy_node) as ping_result:

            devops_node = DevopsClient.get_node_by_mac(
                env_name=env_name, mac=primary_controller.data["mac"])
            devops_node.reset()

        assert ping_result["sent"] - ping_result["received"] < 10

        # To ensure that the l3 agt is moved from the affected controller
        self.wait_router_rescheduled(
            router_id=router_id,
            from_node=primary_controller.data["fqdn"],
            timeout_seconds=5 * 60)

        self.check_l3_ha_agent_states(router_id)
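`background_ping` is used as a context manager that yields a result dict whose `sent`/`received` counters are filled in once the block exits. Its implementation is not part of this excerpt; a hypothetical sketch of the pattern, where `run` is assumed to be a callable that executes a shell command on the proxy node (e.g. over SSH) and returns its stdout as a string:

    import contextlib
    import re

    @contextlib.contextmanager
    def background_ping(run, ip_to_ping, log='/tmp/ping.log'):
        """Hypothetical sketch of a background-ping context manager."""
        result = {'sent': 0, 'received': 0}
        # Start ping detached so the test body can run while packets flow
        run('nohup ping {0} > {1} 2>&1 &'.format(ip_to_ping, log))
        try:
            yield result
        finally:
            # SIGINT makes ping print its transmitted/received summary
            run('pkill -INT -x ping; sleep 1')
            output = run('cat {0}'.format(log))
            match = re.search(
                r'(\d+) packets transmitted, (\d+)(?: packets)? received',
                output)
            if match:
                result['sent'], result['received'] = map(int,
                                                         match.groups())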
Code example #7
    def test_destroy_non_primary_controller(self, router, prepare_openstack,
                                            env_name):
        """Reset primary controller (l3 agent on it should be
            with ACTIVE ha_state)

        Scenario:
            1. Create network1, network2
            2. Create router1 and connect it with network1, network2 and
                external net
            3. Boot vm1 in network1
            4. Boot vm2 in network2 and associate floating ip
            5. Add rules for ping
            6. Find node with active ha_state for router
            7. If node from step 6 isn't a non-primary controller,
                reschedule router1 to one by banning all the others
                and then clearing them
            8. Start ping vm2 from vm1 by floating ip
            9. Destroy non-primary controller
            10. Stop ping
            11. Check that ping lost no more than 10 packets
            12. One agent has ACTIVE ha_state, the others (2) have
                STANDBY ha_state

        """
        router_id = router['router']['id']
        agents = self.get_active_l3_agents_for_router(router_id)
        l3_agent_controller = self.env.find_node_by_fqdn(agents[0]['host'])
        controller = self.env.non_primary_controllers[0]
        server1 = self.os_conn.nova.servers.find(name="server01")
        server2 = self.os_conn.nova.servers.find(name="server02")
        server2_ip = self.os_conn.get_nova_instance_ips(server2)['floating']

        # Reschedule active l3 agent to the non primary if needed
        self.reschedule_active_l3_agt(router_id, controller,
                                      l3_agent_controller)

        from_node = l3_agent_controller.data['fqdn']
        self.wait_router_rescheduled(router_id=router_id,
                                     from_node=from_node,
                                     timeout_seconds=5 * 60)

        # Start ping in background and destroy the node
        with self.background_ping(vm=server1,
                                  vm_keypair=self.instance_keypair,
                                  ip_to_ping=server2_ip) as ping_result:

            devops_node = DevopsClient.get_node_by_mac(
                env_name=env_name, mac=controller.data['mac'])
            self.env.destroy_nodes([devops_node])

        assert ping_result['sent'] - ping_result['received'] < 10

        # To ensure that the l3 agt is moved from the affected controller
        self.wait_router_rescheduled(router_id=router_id,
                                     from_node=controller.data['fqdn'],
                                     timeout_seconds=5 * 60)

        self.check_l3_ha_agent_states(router_id)
Code example #8
    def test_shutdown_active_controller_during_upload(self,
                                                      glance,
                                                      image_file,
                                                      suffix,
                                                      timeout=60):
        """Check that image is created successfully if during creating image
        to perform shutdown of active controller

        Steps:
            1. Create image using URL Link
            2. Shutdown active controller during creation of image
            3. Check that image is present in list and image status is `active`
            4. Delete created image
            5. Check that the image is deleted
        """

        # Find a primary controller
        primary_controller = self.env.primary_controller
        mac = primary_controller.data['mac']
        self.primary_node = DevopsClient.get_node_by_mac(
            env_name=self.env_name, mac=mac)

        name = 'Test_{}'.format(suffix[:6])
        image_url = settings.GLANCE_IMAGE_URL
        cmd = ('image-create --name {name} --container-format bare '
               '--disk-format qcow2 --location {image_url}'.format(
                   name=name, image_url=image_url))
        image = parser.details(glance(cmd))
        logger.info('Image upload started')

        # Shutdown primary controller
        self.env.warm_shutdown_nodes([self.primary_node])

        image_list = parser.listing(glance('image-list'))
        assert image['id'] in [x['ID'] for x in image_list]

        image_data = parser.details(glance('image-show {id}'.format(**image)))
        assert image_data['status'] == 'active'
        logger.info('Image is active')

        glance('image-delete {id}'.format(**image))

        images = parser.listing(glance('image-list'))
        assert image['id'] not in [x['ID'] for x in images]
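`parser.details` and `parser.listing` turn the glance CLI's bordered ASCII tables into a dict and a list of dicts respectively (a tempest-style output parser; the exact module is not shown in this excerpt). A rough sketch of what `listing` does, assuming the usual `+----+`-bordered table format:

    def listing(output):
        """Hypothetical sketch: parse a bordered CLI table such as

            +----+------+
            | ID | Name |
            +----+------+
            | 42 | img  |
            +----+------+

        into [{'ID': '42', 'Name': 'img'}].
        """
        rows = [line.strip() for line in output.splitlines()
                if line.strip().startswith('|')]
        if not rows:
            return []

        def cells(row):
            return [cell.strip() for cell in row.strip('|').split('|')]

        headers = cells(rows[0])
        return [dict(zip(headers, cells(row))) for row in rows[1:]]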
Code example #9
    def test_shutdown_snat_controller(self, env_name):
        """Shutdown controller with SNAT-namespace and check it reschedules.

        Scenario:
            1. Create net01, subnet net01__subnet for it
            2. Create router01 with external network and
                router type Distributed
            3. Add interfaces to the router01 with net01__subnet
            4. Boot vm_1 in the net01
            5. Go to the vm_1 and ping 8.8.8.8
            6. Find controller with SNAT-namespace
               and kill this controller with virsh:
               ``ip net | grep snat`` on all controllers
               ``virsh destroy <controller_with_snat>``
            7. Check SNAT is moved to another controller
            8. Go to the vm_1 and ping 8.8.8.8

        Duration 10m

        """
        self._prepare_openstack_env()
        self.check_ping_from_vm(self.server, vm_keypair=self.instance_keypair)
        # Get controller with SNAT and destroy it
        controller_with_snat = self.find_snat_controller()
        logger.info('Destroying controller with SNAT: {}'.format(
            controller_with_snat.data['fqdn']))
        devops_node = DevopsClient.get_node_by_mac(
            env_name=env_name, mac=controller_with_snat.data['mac'])
        self.env.destroy_nodes([devops_node])
        # Wait for SNAT reschedule
        wait_msg = "Waiting for snat is rescheduled"
        new_controller_with_snat = wait(
            lambda: self.find_snat_controller(
                excluded=[controller_with_snat.data['fqdn']]),
            timeout_seconds=60 * 3,
            sleep_seconds=(1, 60, 5),
            waiting_for=wait_msg)
        # Check external ping and proper SNAT rescheduling
        self.check_ping_from_vm(self.server, vm_keypair=self.instance_keypair)
        assert (
            controller_with_snat.data['fqdn'] !=
            new_controller_with_snat.data['fqdn'])
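`find_snat_controller` is not shown, but the docstring pins down the detection technique: run `ip net | grep snat` on every controller. A plausible sketch under that assumption (the `remote.execute` result shape is also an assumption):

    def find_snat_controller(self, excluded=()):
        """Hypothetical sketch: return the controller hosting the SNAT
        namespace, skipping any fqdn listed in `excluded`."""
        for node in self.env.get_nodes_by_role('controller'):
            if node.data['fqdn'] in excluded:
                continue
            with node.ssh() as remote:
                # DVR SNAT namespaces are named snat-<router_id>
                result = remote.execute('ip netns | grep snat')
                if result['exit_code'] == 0:
                    return node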
Code example #10
    def test_shutdown_primary_controller(self,
                                         glance,
                                         image_file,
                                         suffix,
                                         timeout=60):
        """Check creating image after shutdown primary controller

        Steps:
            1. Shutdown primary controller
            2. Create image from `image_file`
            3. Check that image is present in list and image status is `active`
            4. Delete created image
            5. Check that the image is deleted
        """
        # Find a primary controller
        primary_controller = self.env.primary_controller
        mac = primary_controller.data['mac']
        primary_node = DevopsClient.get_node_by_mac(env_name=self.env_name,
                                                    mac=mac)

        # Shutdown primary controller
        self.env.warm_shutdown_nodes([primary_node])

        name = 'Test_{}'.format(suffix[:6])
        cmd = ('image-create --name {name} --container-format bare '
               '--disk-format qcow2 --file {file}'.format(name=name,
                                                          file=image_file))
        image = parser.details(glance(cmd))
        logger.info('Image upload started')

        image_list = parser.listing(glance('image-list'))
        assert image['id'] in [x['ID'] for x in image_list]

        image_data = parser.details(glance('image-show {id}'.format(**image)))
        assert image_data['status'] == 'active'
        logger.info('Image is active')

        glance('image-delete {id}'.format(**image))

        images = parser.listing(glance('image-list'))
        assert image['id'] not in [x['ID'] for x in images]
Code example #11
    def test_shutdown_not_primary_controller(self, env_name):
        """Shut down non-primary controller and check l3-agent work

        Scenario:
            1. Revert snapshot with neutron cluster
            2. Create network1, network2
            3. Create router1 and connect it with network1, network2 and
               external net
            4. Boot vm1 in network1 and associate floating ip
            5. Boot vm2 in network2
            6. Add rules for ping
            7. ping 8.8.8.8, vm1 (both ip) and vm2 (fixed ip) from each other
            8. Check on what agents is router1
            9. If the agent is on the primary controller, move it to
                any other controller
            10. Destroy non primary controller
            11. Wait for L3 agent to die
            12. Check that all routers are rescheduled from the
                non-primary controller
            13. Boot one more vm (vm3) in network1
            14. ping 8.8.8.8, vm1 (both ip), vm2 (fixed ip) and vm3 (fixed ip)
                from each other vm

        Duration 10m

        """
        router = self.os_conn.neutron.list_routers(
            name='router01')['routers'][0]
        l3_agent = self.os_conn.get_l3_for_router(router['id'])['agents'][0]
        leader_node = self.env.leader_controller

        # Move router to slave l3 agent, if needed
        if leader_node.data['fqdn'] == l3_agent['host']:
            l3_agents = self.os_conn.list_l3_agents()
            leader_l3_agent = [x for x in l3_agents
                               if x['host'] == leader_node.data['fqdn']][0]
            self.os_conn.neutron.remove_router_from_l3_agent(
                leader_l3_agent['id'],
                router_id=router['id'])
            slave_l3_agents = [x for x in l3_agents if x != leader_l3_agent]
            l3_agent = slave_l3_agents[0]
            self.os_conn.neutron.add_router_to_l3_agent(
                l3_agent['id'],
                body={'router_id': router['id']})

        # Destroy node with l3 agent
        node = self.env.find_node_by_fqdn(l3_agent['host'])
        devops_node = DevopsClient.get_node_by_mac(env_name=env_name,
                                                   mac=node.data['mac'])
        if devops_node is not None:
            devops_node.destroy()
        else:
            raise Exception("Can't find devops controller node to destroy it")

        # Wait for the l3 agent to die
        wait(
            lambda: self.os_conn.get_l3_for_router(
                router['id'])['agents'][0]['alive'] is False,
            expected_exceptions=NeutronClientException,
            timeout_seconds=60 * 5, sleep_seconds=(1, 60, 5),
            waiting_for="L3 agent is died")

        # Wait for all routers to migrate from the dead L3 agent
        wait(
            lambda: len(self.os_conn.neutron.list_routers_on_l3_agent(
                l3_agent['id'])['routers']) == 0,
            timeout_seconds=60 * 5, sleep_seconds=(1, 60, 5),
            waiting_for="migrating all routers from died L3 agent"
        )

        # create another server on net01
        net01 = self.os_conn.nova.networks.find(label="net01")
        self.os_conn.create_server(
            name='server03',
            availability_zone='{}:{}'.format(self.zone.zoneName,
                                             self.hosts[0]),
            key_name=self.instance_keypair.name,
            nics=[{'net-id': net01.id}],
            security_groups=[self.security_group.id])

        # check pings
        network_checks.check_vm_connectivity(self.env, self.os_conn)
Code example #12
    def test_check_port_binding_after_restart_node(self):
        """[Neutron VLAN and VXLAN] Check that no redundant DHCP agents
        assigned to the network after DHCP agents restart.

        Steps:
            1. Update quotas for creation a lot of networks:
                neutron quota-update --network 1000 --subnet 1000
                                     --router 1000 --port 1000:
            2. Create 50 networks, subnets, launch and terminate instance
            3. Check port ids on networkX:
                neutron port-list --network_id=<your_network_id>
                --device_owner=network:dhcp
            4. Check host binding for all ports:
                Get binding:host_id from
                network port-show <port_id>
            5. Destroy one of controllers with dhcp agent for networkX:
                virsh destroy <node>
                Wait till the node is down
            6. Start destroyed controller with dhcp agent for networkX:
                virsh start <node>
                Wait till the node is up.
            7. Check port id's for networkX. They should be the same as before
                restart:
                neutron port-list --network_id=<your_network_id>
                --device_owner=network:dhcp
            8. Check host binding for all ports:
                Get binding:host_id from
                network port-show <port_id>
               Check that the network is rescheduled from one DHCP agent
                to another; only one host changed after restart.
        """
        self._prepare_openstack()

        # Create 50 networks, launch and terminate instances
        # According to the test requirements 50 networks should be created.
        # However, during implementation it was found that only about 34
        # nets can be created for one tenant; this needs clarification.
        self.create_networks(29, self.router, self.networks,
                             self.instance_keypair, self.security_group)

        # Get DHCP agents for the net9
        net_id = self.networks[8]
        ports_ids_before = [
            port['id'] for port in self.os_conn.list_ports_for_network(
                network_id=net_id, device_owner='network:dhcp')
        ]

        ports_binding_before = [
            port['binding:host_id']
            for port in self.os_conn.list_ports_for_network(
                network_id=net_id, device_owner='network:dhcp')
        ]

        # virsh destroy of the controller with dhcp agent
        for controller in self.env.non_primary_controllers:
            if controller.data['fqdn'] in ports_binding_before:
                controller_to_restart = controller
                break
        else:
            raise Exception("Can't find a non-primary controller "
                            "hosting a DHCP agent for the network")
        mac = controller_to_restart.data['mac']
        controller_with_dhcp = DevopsClient.get_node_by_mac(
            env_name=self.env_name, mac=mac)

        self.env.warm_restart_nodes([controller_with_dhcp])

        # Check ports_binding after restart node
        ports_ids_after = [
            port['id'] for port in self.os_conn.list_ports_for_network(
                network_id=net_id, device_owner='network:dhcp')
        ]
        err_msg = 'Ports ids are changed after restart'
        assert ports_ids_before == ports_ids_after, err_msg

        ports_binding_after = [
            port['binding:host_id']
            for port in self.os_conn.list_ports_for_network(
                network_id=net_id, device_owner='network:dhcp')
        ]

        # Exactly one host should be common to both sets: the network
        # keeps one of its two DHCP agents and is rescheduled away from
        # the other
        common_dhcp_hosts = (set(ports_binding_before) &
                             set(ports_binding_after))
        err_msg = 'DHCP agents rescheduled incorrectly after restart'
        assert len(common_dhcp_hosts) == 1, err_msg
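`list_ports_for_network` maps to the `neutron port-list` filtering shown in the steps; with the plain neutronclient it could plausibly be no more than this sketch:

    def list_ports_for_network(self, network_id, device_owner):
        """Hypothetical sketch: equivalent of
        neutron port-list --network_id=<id> --device_owner=network:dhcp
        """
        return self.os_conn.neutron.list_ports(
            network_id=network_id, device_owner=device_owner)['ports']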
Code example #13
    def _prepare_openstack(self):
        """Prepare OpenStack for scenarios run

        Steps:
            1. Create network1, network2
            2. Create router1 and connect it with network1, network2 and
                external net
            3. Boot vm1 in network1 and associate floating ip
            4. Boot vm2 in network2
            5. Add rules for ping
            6. ping 8.8.8.8 from vm2
            7. ping vm1 from vm2 and vm2 from vm1
        """

        # init variables
        exist_networks = self.os_conn.list_networks()['networks']
        ext_network = [x for x in exist_networks
                       if x.get('router:external')][0]
        self.zone = self.os_conn.nova.availability_zones.find(zoneName="nova")
        self.hosts = list(self.zone.hosts.keys())[:2]
        self.instance_keypair = self.os_conn.create_key(key_name='instancekey')
        self.security_group = self.os_conn.create_sec_group_for_ssh()
        self.networks = []

        # create router
        self.router = self.os_conn.create_router(name="router01")['router']
        self.os_conn.router_gateway_add(router_id=self.router['id'],
                                        network_id=ext_network['id'])
        logger.info('router {name}({id}) was created'.format(**self.router))

        # create networks by amount of the compute hosts
        for hostname in self.hosts:
            net_id = self.os_conn.add_net(self.router['id'])
            self.networks.append(net_id)
            self.os_conn.add_server(net_id, self.instance_keypair.name,
                                    hostname, self.security_group.id)

        # add floating ip to first server
        self.server1 = self.os_conn.nova.servers.find(name="server01")
        self.os_conn.assign_floating_ip(self.server1)

        # check pings
        network_checks.check_vm_connectivity(self.env, self.os_conn)

        # Find a primary controller
        primary_controller = self.env.primary_controller
        mac = primary_controller.data['mac']
        self.primary_node = DevopsClient.get_node_by_mac(
            env_name=self.env_name, mac=mac)
        self.primary_host = primary_controller.data['fqdn']

        # Find a non-primary controller
        non_primary_controller = self.env.non_primary_controllers[0]
        mac = non_primary_controller.data['mac']
        self.non_primary_node = DevopsClient.get_node_by_mac(
            env_name=self.env_name, mac=mac)
        self.non_primary_host = non_primary_controller.data['fqdn']

        # make a list of all l3 agent ids
        self.l3_agent_ids = [
            agt['id'] for agt in self.os_conn.neutron.list_agents(
                binary='neutron-l3-agent')['agents']
        ]

        self.dhcp_agent_ids = [
            agt['id'] for agt in self.os_conn.neutron.list_agents(
                binary='neutron-dhcp-agent')['agents']
        ]
Code example #14
def devops_env(env_name):
    return DevopsClient.get_env(env_name=env_name)
Code example #15
def devops_requirements(request, env_name):
    if request.node.get_marker('need_devops'):
        try:
            DevopsClient.get_env(env_name=env_name)
        except Exception:
            pytest.skip('requires devops env to be defined')
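The skip only triggers for tests that carry the `need_devops` marker; assuming standard pytest conventions, such a test opts in like this (the test name and body are hypothetical):

    import pytest

    @pytest.mark.need_devops  # checked by devops_requirements above
    def test_reset_primary_controller_smoke(env_name):
        pass  # hypothetical test body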
Code example #16
def revert_snapshot(env_name, snapshot_name):
    DevopsClient.revert_snapshot(env_name=env_name,
                                 snapshot_name=snapshot_name)
Code example #17
def revert_snapshot(request, env_name, snapshot_name):
    """Revert Fuel devops snapshot before test"""
    if getattr(request.node, 'do_revert', True):
        DevopsClient.revert_snapshot(env_name=env_name,
                                     snapshot_name=snapshot_name)
        setattr(request.node, 'do_revert', False)
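The `do_revert` attribute makes the revert run at most once per test: the first call reverts the snapshot and clears the flag, so any later call for the same test node is a no-op. A fixture could also opt a test out of reverting entirely by pre-setting the flag; a hypothetical sketch:

    import pytest

    @pytest.fixture
    def no_revert(request):
        """Hypothetical opt-out: make revert_snapshot a no-op."""
        request.node.do_revert = False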