def deploy_env(self):
    """Kick off deployment of the environment and block until it completes.

    Polls the deployment task once a minute, up to
    ``self.TIMEOUT_FOR_DEPLOY`` minutes.
    """
    task = self.env.deploy_changes()
    wait(lambda: is_task_ready(task),
         timeout_seconds=60 * self.TIMEOUT_FOR_DEPLOY,
         sleep_seconds=60,
         waiting_for='changes to be deployed')
    def test_delete_node(self, env, roles, ironic, make_image, flavors,
                         keypair, os_conn, ironic_nodes):
        """Delete one of multiple ironic nodes.

        Scenario:
            1. Remove created ironic node from cluster
            2. Boot new ironic instance
            3. Check ironic instance status is ACTIVE
        """
        # Pick the fuel node whose name matches the one this class targets
        matches = [n for n in env.get_all_nodes()
                   if n.data['name'] == self.node_name]
        target = matches[0]

        if 'ceph-osd' in roles:
            # Ceph OSD data has to be evacuated before the node is removed
            with target.ssh() as remote:
                remove_ceph_from_node(remote)

        env.unassign([target.id])

        # Apply the cluster change and wait until the task finishes
        deploy_task = env.deploy_changes()
        common.wait(lambda: common.is_task_ready(deploy_task),
                    timeout_seconds=60 * 60,
                    sleep_seconds=60,
                    waiting_for='changes to be deployed')

        # Provisioning on the remaining ironic node(s) must still work
        image = make_image(node_driver=ironic_nodes[0].driver)
        instance = ironic.boot_instance(image=image,
                                        flavor=flavors[0],
                                        keypair=keypair)

        assert os_conn.nova.servers.get(instance.id).status == 'ACTIVE'
def check_env_state_after_task(env, task, nodes):
    """Verify the environment is healthy once *task* has finished.

    Waits for the task itself, then for every node in *nodes* to report
    'ready', and finally asserts the env status is 'operational'.
    """
    def task_finished():
        return common.is_task_ready(task)

    def all_nodes_ready():
        return are_nodes_in_state(env, nodes, 'ready')

    common.wait(task_finished, timeout_seconds=60 * 120,
                sleep_seconds=30, waiting_for='deployment task to be finished')

    common.wait(all_nodes_ready, timeout_seconds=60, sleep_seconds=2,
                waiting_for='nodes in ready state')

    assert env.status == 'operational', (
        "Env should be operational after noop run of fuel task execution, "
        "but current state is {0}".format(env.status))
    def test_delete_node(self, env, roles, ironic, ubuntu_image, flavors,
                         keypair, os_conn, ironic_nodes):
        """Delete one of multiple ironic nodes.

        Scenario:
            1. Remove created ironic node from cluster
            2. Boot new ironic instance
            3. Check ironic instance status is ACTIVE
        """
        # Unassign the node kept on the class (presumably stored by the
        # add-node test — confirm ordering with the test class)
        env.unassign([self.fuel_node.id])

        # Redeploy the cluster without that node
        deploy_task = env.deploy_changes()
        common.wait(lambda: common.is_task_ready(deploy_task),
                    timeout_seconds=40 * 60,
                    sleep_seconds=60,
                    waiting_for='changes to be deployed')

        # Baremetal provisioning must still succeed afterwards
        server = ironic.boot_instance(image=ubuntu_image,
                                      flavor=flavors[0],
                                      keypair=keypair)

        assert os_conn.nova.servers.get(server.id).status == 'ACTIVE'
# Example #5
# 0
    def test_delete_node(self, env, roles, ironic, ubuntu_image, flavors,
                         keypair, os_conn, ironic_nodes):
        """Delete one of multiple ironic nodes.

        Scenario:
            1. Remove created ironic node from cluster
            2. Boot new ironic instance
            3. Check ironic instance status is ACTIVE
        """
        # Drop the node remembered on the class attribute from the cluster
        node_id = self.fuel_node.id
        env.unassign([node_id])

        # Push the change and block until the deployment task completes
        task = env.deploy_changes()

        def deploy_finished():
            return common.is_task_ready(task)

        common.wait(deploy_finished,
                    timeout_seconds=40 * 60,
                    sleep_seconds=60,
                    waiting_for='changes to be deployed')

        # A fresh baremetal instance should still reach ACTIVE
        instance = ironic.boot_instance(image=ubuntu_image,
                                        flavor=flavors[0],
                                        keypair=keypair)

        status = os_conn.nova.servers.get(instance.id).status
        assert status == 'ACTIVE'
    def test_add_node(self, env, env_name, suffix, os_conn, ubuntu_image,
                      flavors, keypair, ironic, ironic_nodes, roles):
        """Test ironic work after add new ironic-conductor node to cluster

        Scenario:
            1. Create fuel-slave devops node
            2. Add node to cluster with 'ironic' role
            3. Deploy changes
            4. Run network verification
            5. Run OSTF sanity tests
            6. Boot ironic instance
        """
        if 'ceph' in roles and not conftest.is_ceph_enabled(env):
            pytest.skip('This test requires CEPH')

        # Create a new slave VM in the devops environment
        devops_env = devops_client.DevopsClient.get_env(env_name)
        new_node = devops_env.add_node(
            name='new-ironic_{}'.format(suffix[:4]),
            memory=4096,
            disks=(50, 50, 50))

        # Wait until fuel discovers the freshly booted slave
        fuel_node = common.wait(
            lambda: env.get_node_by_devops_node(new_node),
            timeout_seconds=10 * 60,
            sleep_seconds=20,
            waiting_for='node to be discovered')

        # Rename the node and assign the requested roles
        fuel_node.set({'name': 'new_ironic'})
        env.assign([fuel_node], roles)

        # Build a devops network.id -> fuel networks mapping from a controller
        controller = env.get_nodes_by_role('controller')[0]
        net_map = {
            dev_if.network_id: f_if['assigned_networks']
            for f_if, dev_if in map_interfaces(devops_env, controller)}

        # Mirror that mapping onto the new node's interfaces
        new_interfaces = []
        for f_if, dev_if in map_interfaces(devops_env, fuel_node):
            f_if['assigned_networks'] = net_map[dev_if.network_id]
            new_interfaces.append(f_if)

        fuel_node.upload_node_attribute('interfaces', new_interfaces)

        # Network must verify cleanly before deployment
        verification = env.wait_network_verification()
        assert verification.status == 'ready'

        # Deploy the cluster change and wait for completion
        task = env.deploy_changes()
        common.wait(lambda: common.is_task_ready(task),
                    timeout_seconds=80 * 60,
                    sleep_seconds=60,
                    waiting_for='changes to be deployed')

        # Re-fetch the node object after deployment
        fuel_node = env.get_node_by_devops_node(new_node)

        verification = env.wait_network_verification()
        assert verification.status == 'ready'

        common.wait(lambda: env.is_ostf_tests_pass('sanity'),
                    timeout_seconds=5 * 60,
                    waiting_for='OSTF sanity tests to pass')

        # The new node must be running the ironic-conductor service
        with fuel_node.ssh() as remote:
            remote.check_call('service ironic-conductor status | grep running')

        # Baremetal provisioning should work with the enlarged cluster
        instance = ironic.boot_instance(image=ubuntu_image,
                                        flavor=flavors[0],
                                        keypair=keypair)

        assert os_conn.nova.servers.get(instance.id).status == 'ACTIVE'

        if 'ceph' in roles:
            # Ceph cluster health check on the new node
            with fuel_node.ssh() as remote:
                ceph_result = remote.check_call('ceph -s')
            out = ceph_result.stdout_string
            assert 'HEALTH_OK' in out or 'HEALTH_WARN' in out

        # Remember the node on the class for follow-up tests
        self.__class__.fuel_node = fuel_node
# Example #7
# 0
    def test_add_node(self, env, env_name, suffix, os_conn, ubuntu_image,
                      flavors, keypair, ironic, ironic_nodes, roles):
        """Test ironic work after add new ironic-conductor node to cluster

        Scenario:
            1. Create fuel-slave devops node
            2. Add node to cluster with 'ironic' role
            3. Deploy changes
            4. Run network verification
            5. Run OSTF sanity tests
            6. Boot ironic instance
        """
        # Skip unless the env actually has Ceph when a ceph role is requested
        if 'ceph' in roles and not conftest.is_ceph_enabled(env):
            pytest.skip('This test requires CEPH')

        # Create a new slave VM in the devops environment
        devops_env = devops_client.DevopsClient.get_env(env_name)
        devops_node = devops_env.add_node(name='new-ironic_{}'.format(
            suffix[:4]),
                                          memory=4096,
                                          disks=(50, 50, 50))

        # Wait until fuel discovers the freshly booted slave
        fuel_node = common.wait(
            lambda: env.get_node_by_devops_node(devops_node),
            timeout_seconds=10 * 60,
            sleep_seconds=20,
            waiting_for='node to be discovered')

        # Rename node
        fuel_node.set({'name': 'new_ironic'})

        env.assign([fuel_node], roles)

        # Make devops network.id -> fuel networks mapping
        controller = env.get_nodes_by_role('controller')[0]
        interfaces_map = {}
        for fuel_if, devop_if in map_interfaces(devops_env, controller):
            interfaces_map[devop_if.network_id] = fuel_if['assigned_networks']

        # Assign fuel networks to corresponding interfaces
        interfaces = []
        for fuel_if, devop_if in map_interfaces(devops_env, fuel_node):
            fuel_if['assigned_networks'] = interfaces_map[devop_if.network_id]
            interfaces.append(fuel_if)

        fuel_node.upload_node_attribute('interfaces', interfaces)

        # Verify network
        result = env.wait_network_verification()
        assert result.status == 'ready'

        # Deploy changes
        task = env.deploy_changes()

        common.wait(lambda: common.is_task_ready(task),
                    timeout_seconds=80 * 60,
                    sleep_seconds=60,
                    waiting_for='changes to be deployed')

        # Re-fetch the node object after deployment
        fuel_node = env.get_node_by_devops_node(devops_node)

        # Network verification must still pass with the new node deployed
        result = env.wait_network_verification()
        assert result.status == 'ready'

        common.wait(lambda: env.is_ostf_tests_pass('sanity'),
                    timeout_seconds=5 * 60,
                    waiting_for='OSTF sanity tests to pass')

        # The new node must be running the ironic-conductor service
        with fuel_node.ssh() as remote:
            remote.check_call('service ironic-conductor status | grep running')

        # Baremetal provisioning should work with the enlarged cluster
        instance = ironic.boot_instance(image=ubuntu_image,
                                        flavor=flavors[0],
                                        keypair=keypair)

        assert os_conn.nova.servers.get(instance.id).status == 'ACTIVE'

        if 'ceph' in roles:
            # Ceph health check on the new node; WARN is tolerated
            with fuel_node.ssh() as remote:
                result = remote.check_call('ceph -s')
            stdout = result.stdout_string
            assert 'HEALTH_OK' in stdout or 'HEALTH_WARN' in stdout

        # Remember the node on the class for follow-up tests (e.g. delete)
        self.__class__.fuel_node = fuel_node