Code Example #1
    def check_virtual_router(self):
        if not self.env.d_env.has_snapshot(self.snapshot_name):
            raise SkipTest()

        self.env.revert_snapshot(self.snapshot_name)
        cluster_id = self.fuel_web.get_last_created_cluster()
        for node in self.fuel_web.client.list_cluster_nodes(cluster_id):
            with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
                assert_true(
                    check_ping(remote, DNS, deadline=120, interval=10),
                    "No Internet access from {0}".format(node['fqdn'])
                )

        devops_node = self.fuel_web.get_nailgun_primary_node(
            self.env.d_env.nodes().slaves[0])
        file_name = DOWNLOAD_LINK.split('/')[-1]
        file_path = '/root/tmp'
        with self.fuel_web.get_ssh_for_node('slave-05') as remote:
            remote.execute(
                "screen -S download -d -m bash -c 'mkdir -p {0} &&"
                " cd {0} && wget --limit-rate=100k {1}'".format(file_path,
                                                                DOWNLOAD_LINK))

        with self.fuel_web.get_ssh_for_node('slave-05') as remote:
            try:
                wait(
                    lambda: remote.execute("ls -1 {0}/{1}".format(
                        file_path, file_name))['exit_code'] == 0, timeout=60)
            except TimeoutError:
                raise TimeoutError(
                    "File download was not started")

        with self.fuel_web.get_ssh_for_node('slave-05') as remote:
            file_size1 = get_file_size(remote, file_name, file_path)
            time.sleep(60)
            file_size2 = get_file_size(remote, file_name, file_path)
        assert_true(file_size2 > file_size1,
                    "File download was interrupted, size of downloading "
                    "does not change")
        devops_node.destroy()
        try:
            wait(
                lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
                    devops_node)['online'], timeout=60 * 6)
        except TimeoutError:
            raise TimeoutError(
                "Primary controller was not destroyed")
        with self.fuel_web.get_ssh_for_node('slave-05') as remote:
            assert_true(
                check_ping(remote, DNS, deadline=120, interval=10),
                "No Internet access from {0}".format(node['fqdn'])
            )
        if OPENSTACK_RELEASE == OPENSTACK_RELEASE_UBUNTU:
            with self.fuel_web.get_ssh_for_node('slave-05') as remote:
                file_size1 = get_file_size(remote, file_name, file_path)
                time.sleep(60)
                file_size2 = get_file_size(remote, file_name, file_path)
            assert_true(file_size2 > file_size1,
                        "File download was interrupted, size of downloading "
                        "does not change")
Code Example #2
    def check_virtual_router(self):
        if not self.env.d_env.has_snapshot(self.snapshot_name):
            raise SkipTest()

        self.env.revert_snapshot(self.snapshot_name)
        cluster_id = self.fuel_web.get_last_created_cluster()
        for node in self.fuel_web.client.list_cluster_nodes(cluster_id):
            with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
                assert_true(check_ping(remote, DNS, deadline=120, interval=10),
                            "No Internet access from {0}".format(node['fqdn']))

        devops_node = self.fuel_web.get_nailgun_primary_node(
            self.env.d_env.nodes().slaves[0])
        file_name = DOWNLOAD_LINK.split('/')[-1]
        file_path = '/root/tmp'
        with self.fuel_web.get_ssh_for_node('slave-05') as remote:
            remote.execute("screen -S download -d -m bash -c 'mkdir -p {0} &&"
                           " cd {0} && wget --limit-rate=100k {1}'".format(
                               file_path, DOWNLOAD_LINK))

        with self.fuel_web.get_ssh_for_node('slave-05') as remote:
            try:
                wait(lambda: remote.execute("ls -1 {0}/{1}".format(
                    file_path, file_name))['exit_code'] == 0,
                     timeout=60)
            except TimeoutError:
                raise TimeoutError("File download was not started")

        with self.fuel_web.get_ssh_for_node('slave-05') as remote:
            file_size1 = get_file_size(remote, file_name, file_path)
            time.sleep(60)
            file_size2 = get_file_size(remote, file_name, file_path)
        assert_true(
            file_size2 > file_size1,
            "File download was interrupted, size of downloading "
            "does not change")
        devops_node.destroy()
        try:
            wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
                devops_node)['online'],
                 timeout=60 * 6)
        except TimeoutError:
            raise TimeoutError("Primary controller was not destroyed")
        with self.fuel_web.get_ssh_for_node('slave-05') as remote:
            assert_true(check_ping(remote, DNS, deadline=120, interval=10),
                        "No Internet access from {0}".format(node['fqdn']))
        if OPENSTACK_RELEASE == OPENSTACK_RELEASE_UBUNTU:
            with self.fuel_web.get_ssh_for_node('slave-05') as remote:
                file_size1 = get_file_size(remote, file_name, file_path)
                time.sleep(60)
                file_size2 = get_file_size(remote, file_name, file_path)
            assert_true(
                file_size2 > file_size1,
                "File download was interrupted, size of downloading "
                "does not change")
Code Example #3
    def default_hostname(self):
        """Verify that the default hostnames (e.g. 'node-1') are applied

        Scenario:
            1. Create a cluster
            2. Add 3 nodes with controller role
            3. Add 1 node with compute role
            4. Deploy the cluster
            5. Verify network configuration on controller
            6. Run OSTF
            7. Verify that the default hostname is applied on cluster nodes

        Duration: 70m
        """
        self.env.revert_snapshot("ready_with_5_slaves")

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': settings.NEUTRON_SEGMENT_TYPE,
            })
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute']
            })
        self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)

        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])

        hostname_pattern = "node-\d{1,2}"
        admin_remote = self.env.d_env.get_admin_remote()

        for node in self.fuel_web.client.list_cluster_nodes(cluster_id):
            devops_node = self.fuel_web.get_devops_node_by_nailgun_node(node)

            # Get hostname of a node and compare it against
            # the default hostname format
            assert_true(
                match(hostname_pattern, node['hostname']),
                "Default host naming format ('node-#') has not been applied "
                "to '{0}' node. Current hostname is "
                "'{1}'".format(devops_node.name, node['hostname']))

            # Verify that a node is accessible by the default hostname
            assert_true(
                check_ping(admin_remote, node['hostname']),
                "{0} node is not accessible by its default "
                "hostname {1}".format(devops_node.name, node['hostname']))

        admin_remote.clear()

        self.env.make_snapshot("default_hostname")
Code Example #4
    def default_hostname(self):
        """Verify that the default hostnames (e.g. 'node-1') are applied

        Scenario:
            1. Create a cluster
            2. Add 3 nodes with controller role
            3. Add 1 node with compute role
            4. Deploy the cluster
            5. Verify network configuration on controller
            6. Run OSTF
            7. Verify that the default hostname is applied on cluster nodes

        Duration: 70m
        """
        self.env.revert_snapshot("ready_with_5_slaves")

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)

        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(
            cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])

        hostname_pattern = "node-\d{1,2}"
        admin_remote = self.env.d_env.get_admin_remote()

        for node in self.fuel_web.client.list_cluster_nodes(cluster_id):
            devops_node = self.fuel_web.get_devops_node_by_nailgun_node(node)

            # Get hostname of a node and compare it against
            # the default hostname format
            assert_true(
                match(hostname_pattern, node['hostname']),
                "Default host naming format ('node-#') has not been applied "
                "to '{0}' node. Current hostname is "
                "'{1}'".format(devops_node.name, node['hostname']))

            # Verify that a node is accessible by the default hostname
            assert_true(
                check_ping(admin_remote, node['hostname']),
                "{0} node is not accessible by its default "
                "hostname {1}".format(devops_node.name, node['hostname']))

        admin_remote.clear()

        self.env.make_snapshot("default_hostname")
Code Example #5
    def upgrade_control_plane(self):
        """Upgrade control plane

        Scenario:
            1. Revert snapshot upgrade_ceph
            2. Run octane upgrade-control <target_env_id> <seed_env_id>
            3. Run octane upgrade-node <seed_cluster_id> <node_id> <node_id>

        """

        if settings.OPENSTACK_RELEASE_UBUNTU not in settings.OPENSTACK_RELEASE:
            raise SkipTest()

        self.check_run('upgrade_control_plane')
        self.env.revert_snapshot("upgrade_ceph")

        target_cluster_id = self.fuel_web.client.get_cluster_id(
            'TestOSupgrade')
        seed_cluster_id = self.fuel_web.get_last_created_cluster()

        with self.env.d_env.get_admin_remote() as remote:
            octane_upgrade_control = remote.execute(
                "octane upgrade-control {0} {1}".format(
                    target_cluster_id, seed_cluster_id))

        assert_equal(
            0, octane_upgrade_control['exit_code'],
            "octane upgrade-control returns non zero status code,"
            "current result is {}".format(octane_upgrade_control))

        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            seed_cluster_id, ["controller"])
        old_computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            target_cluster_id, ["compute"])
        old_controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            target_cluster_id, ["controller"])

        ping_ips = []
        for node in controllers + old_computes:
            for data in node["network_data"]:
                if data["name"] == "management":
                    ping_ips.append(data["ip"].split("/")[0])
        ping_ips.append(self.fuel_web.get_mgmt_vip(seed_cluster_id))

        non_ping_ips = []
        for node in old_controllers:
            for data in node["network_data"]:
                if data["name"] == "management":
                    non_ping_ips.append(data["ip"].split("/")[0])

        for node in controllers + old_computes:
            self.ssh_manager.execute(node["ip"], "ip -s -s neigh flush all")

            for ip in ping_ips:
                assert_true(
                    checkers.check_ping(node["ip"], ip),
                    "Can not ping {0} from {1}"
                    "need to check network"
                    " connectivity".format(ip, node["ip"]))

            for ip in non_ping_ips:
                assert_false(
                    checkers.check_ping(node["ip"], ip),
                    "Patch ports from old controllers"
                    "isn't removed")

        time.sleep(180)  # TODO need to remove
        # after fix of https://bugs.launchpad.net/fuel/+bug/1499696

        with self.env.d_env.get_ssh_to_remote(controllers[0]["ip"]) as remote:
            stdout = remote.execute("crm resource status")["stdout"]

        while stdout:
            current = stdout.pop(0)
            if "vip" in current:
                assert_true("Started" in current)
            elif "master_p" in current:
                next_element = stdout.pop(0)
                assert_true("Masters: [ node-" in next_element)
            elif any(x in current for x in ["ntp", "mysql", "dns"]):
                next_element = stdout.pop(0)
                assert_true("Started" in next_element)
            elif any(x in current for x in ["nova", "cinder", "keystone",
                                            "heat", "neutron", "glance"]):
                next_element = stdout.pop(0)
                assert_true("Started" in next_element)

        with self.env.d_env.get_admin_remote() as remote:
            octane_upgrade_node = remote.execute(
                "octane upgrade-node {0} {1} {2}".format(
                    seed_cluster_id, old_controllers[0]["id"],
                    old_controllers[1]["id"]))
        assert_equal(
            0, octane_upgrade_node['exit_code'],
            "octane upgrade-node returns non zero"
            "status code,"
            "current result {}".format(octane_upgrade_node))
        tasks_started_by_octane = [
            task for task in self.fuel_web.client.get_tasks()
            if task['cluster'] == seed_cluster_id
        ]

        for task in tasks_started_by_octane:
            self.fuel_web.assert_task_success(task)

        self.env.make_snapshot("upgrade_control_plane", is_make=True)
Code Example #6
    def manual_cic_maintenance_mode(self):
        """Check manual maintenance mode for controller

        Scenario:
            1. Revert snapshot
            2. Switch to maintenance mode
            3. Wait until the controller reboots
            4. Exit maintenance mode
            5. Check that the controller becomes available

        Duration 155m
        """
        self.env.revert_snapshot('cic_maintenance_mode')

        cluster_id = self.fuel_web.get_last_created_cluster()

        n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['controller'])
        d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls)

        for devops_node in d_ctrls:
            _ip = self.fuel_web.get_nailgun_node_by_name(
                devops_node.name)['ip']
            logger.info('Maintenance mode for node {0}'.format(
                devops_node.name))
            asserts.assert_true('True' in checkers.check_available_mode(_ip),
                                "Maintenance mode is not available")
            self.ssh_manager.execute_on_remote(ip=_ip, cmd="umm on")

            logger.info('Wait for node {0} to go offline after switching '
                        'to maintenance mode'.format(devops_node.name))
            try:
                wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
                    devops_node)['online'],
                     timeout=60 * 10)
            except TimeoutError:
                asserts.assert_false(
                    self.fuel_web.get_nailgun_node_by_devops_node(devops_node)
                    ['online'], 'Node {0} has not become offline after '
                    'switching to maintenance mode'.format(devops_node.name))

            logger.info('Check that node {0} is in maintenance mode after '
                        'switching'.format(devops_node.name))

            _ip = self.fuel_web.get_nailgun_node_by_name(
                devops_node.name)['ip']
            asserts.assert_true(
                checkers.check_ping(self.env.get_admin_node_ip(),
                                    _ip,
                                    deadline=600),
                "Host {0} is not reachable by ping during 600 sec".format(_ip))

            asserts.assert_true('True' in checkers.check_auto_mode(_ip),
                                "Maintenance mode is not switch")

            self.ssh_manager.execute_on_remote(ip=_ip, cmd="umm off")

            logger.info('Wait for node {0} to come back online'.format(
                devops_node.name))
            try:
                wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
                    devops_node)['online'],
                     timeout=60 * 10)
            except TimeoutError:
                asserts.assert_true(
                    self.fuel_web.get_nailgun_node_by_devops_node(devops_node)
                    ['online'], 'Node {0} has not become online after '
                    'exiting maintenance mode'.format(devops_node.name))

            # Wait until MySQL Galera is UP on some controller
            self.fuel_web.wait_mysql_galera_is_up([n.name for n in d_ctrls])

            # Wait until Cinder services UP on a controller
            self.fuel_web.wait_cinder_is_up([n.name for n in d_ctrls])

            _wait(lambda: self.fuel_web.run_single_ostf_test(
                cluster_id,
                test_sets=['sanity'],
                test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
                    'Check that required services are running')),
                  timeout=1500)
            logger.debug("Required services are running")

            _wait(lambda: self.fuel_web.run_ostf(cluster_id, test_sets=['ha']),
                  timeout=1500)
            logger.debug("HA tests are pass now")

            try:
                self.fuel_web.run_ostf(cluster_id,
                                       test_sets=['smoke', 'sanity'])
            except AssertionError:
                logger.debug("Test failed from first probe,"
                             " we sleep 600 second try one more time"
                             " and if it fails again - test will fails ")
                time.sleep(600)
                self.fuel_web.run_ostf(cluster_id,
                                       test_sets=['smoke', 'sanity'])
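The trailing try/except AssertionError block, which re-runs OSTF once after a 600-second pause, recurs in every maintenance-mode test in this listing. If it were factored out, a helper could look roughly like the sketch below (the name run_ostf_with_retry is hypothetical, not an existing fuel-qa function):

import time

def run_ostf_with_retry(fuel_web, cluster_id, test_sets, retry_delay=600):
    # Run OSTF once; on failure, give the services time to settle and
    # retry a single time, letting a second failure propagate.
    try:
        fuel_web.run_ostf(cluster_id, test_sets=test_sets)
    except AssertionError:
        time.sleep(retry_delay)
        fuel_web.run_ostf(cluster_id, test_sets=test_sets)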
Code Example #7
    def negative_auto_cic_maintenance_mode(self):
        """Check negative scenario for auto maintenance mode

        Scenario:
            1. Revert snapshot
            2. Disable UMM
            3. Change UMM.CONF
            4. Unexpected reboot
            5. Check that the controller does not switch to maintenance mode
            6. Check that the controller becomes available

        Duration 85m
        """
        self.env.revert_snapshot('cic_maintenance_mode')

        cluster_id = self.fuel_web.get_last_created_cluster()

        n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['controller'])
        d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls)

        for devops_node in d_ctrls:
            _ip = self.fuel_web.get_nailgun_node_by_name(
                devops_node.name)['ip']
            asserts.assert_true('True' in checkers.check_available_mode(_ip),
                                "Maintenance mode is not available")
            logger.info('Change UMM.CONF on node {0}'.format(devops_node.name))

            command1 = ("echo -e 'UMM=yes\nREBOOT_COUNT=0\n"
                        "COUNTER_RESET_TIME=10' > /etc/umm.conf")
            self.ssh_manager.execute_on_remote(ip=_ip, cmd=command1)

            self.ssh_manager.execute_on_remote(ip=_ip, cmd="umm disable")

            asserts.assert_false('True' in checkers.check_available_mode(_ip),
                                 "Maintenance mode should not be available")

            command2 = 'reboot --force >/dev/null & '

            logger.info('Unexpected reboot on node {0}'.format(
                devops_node.name))

            self.ssh_manager.execute_on_remote(ip=_ip, cmd=command2)

            wait(lambda: not checkers.check_ping(self.env.get_admin_node_ip(),
                                                 _ip),
                 timeout=60 * 10)

            # The node doesn't have enough time to report offline status
            # after 'reboot --force', so just wait

            asserts.assert_true(
                checkers.check_ping(self.env.get_admin_node_ip(),
                                    _ip,
                                    deadline=600),
                "Host {0} is not reachable by ping during 600 sec".format(_ip))
            logger.info('Wait for node {0} to come back online after the '
                        'unexpected reboot'.format(devops_node.name))

            self.fuel_web.wait_nodes_get_online_state([devops_node])

            logger.info('Check that node {0} is not in maintenance mode after'
                        ' the unexpected reboot'.format(devops_node.name))

            asserts.assert_false('True' in checkers.check_auto_mode(_ip),
                                 "Maintenance mode should not switched")

            # Wait until MySQL Galera is UP on some controller
            self.fuel_web.wait_mysql_galera_is_up([n.name for n in d_ctrls])

            # Wait until Cinder services UP on a controller
            self.fuel_web.wait_cinder_is_up([n.name for n in d_ctrls])

            _wait(lambda: self.fuel_web.run_single_ostf_test(
                cluster_id,
                test_sets=['sanity'],
                test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
                    'Check that required services are running')),
                  timeout=1500)
            logger.debug("Required services are running")

            _wait(lambda: self.fuel_web.run_ostf(cluster_id, test_sets=['ha']),
                  timeout=1500)
            logger.debug("HA tests are pass now")

            try:
                self.fuel_web.run_ostf(cluster_id,
                                       test_sets=['smoke', 'sanity'])
            except AssertionError:
                logger.debug("Test failed from first probe,"
                             " we sleep 600 second try one more time"
                             " and if it fails again - test will fails ")
                time.sleep(600)
                self.fuel_web.run_ostf(cluster_id,
                                       test_sets=['smoke', 'sanity'])
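The wait(...) calls used throughout these tests come from the devops helpers and poll a predicate until it returns a truthy value or a timeout expires, raising TimeoutError otherwise. A simplified, self-contained stand-in (not the library code) behaves roughly like this:

import time

def wait(predicate, interval=5, timeout=60, timeout_msg='Timeout reached'):
    # Poll `predicate` every `interval` seconds; raise TimeoutError with
    # `timeout_msg` if it is still falsy after `timeout` seconds.
    end_time = time.time() + timeout
    while not predicate():
        if time.time() >= end_time:
            raise TimeoutError(timeout_msg)
        time.sleep(interval)
    return True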
Code Example #8
File: test_os_upgrade.py Project: avgoor/fuel-qa
    def upgrade_control_plane(self):
        """Upgrade control plane

        Scenario:
            1. Revert snapshot upgrade_ceph
            2. Run octane upgrade-control <target_env_id> <seed_env_id>
            3. Run octane upgrade-node <seed_cluster_id> <node_id> <node_id>

        """

        if settings.OPENSTACK_RELEASE_UBUNTU not in settings.OPENSTACK_RELEASE:
            raise SkipTest('Openstack release is not Ubuntu')

        self.check_run('upgrade_control_plane')
        self.env.revert_snapshot("upgrade_ceph")

        target_cluster_id = self.fuel_web.client.get_cluster_id(
            'TestOSupgrade'
        )
        seed_cluster_id = self.fuel_web.get_last_created_cluster()

        with self.env.d_env.get_admin_remote() as remote:
            octane_upgrade_control = remote.execute(
                "octane upgrade-control {0} {1}".format(
                    target_cluster_id, seed_cluster_id)
            )

        assert_equal(0, octane_upgrade_control['exit_code'],
                     "octane upgrade-control returns non zero status code,"
                     "current result is {}".format(octane_upgrade_control))

        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            seed_cluster_id, ["controller"]
        )
        old_computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            target_cluster_id, ["compute"]
        )
        old_controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            target_cluster_id, ["controller"]
        )

        ping_ips = []
        for node in controllers + old_computes:
            for data in node["network_data"]:
                if data["name"] == "management":
                    ping_ips.append(data["ip"].split("/")[0])
        ping_ips.append(self.fuel_web.get_mgmt_vip(seed_cluster_id))

        non_ping_ips = []
        for node in old_controllers:
            for data in node["network_data"]:
                if data["name"] == "management":
                    non_ping_ips.append(data["ip"].split("/")[0])

        for node in controllers + old_computes:
            self.ssh_manager.execute(node["ip"], "ip -s -s neigh flush all")

            for ip in ping_ips:
                assert_true(checkers.check_ping(node["ip"], ip),
                            "Can not ping {0} from {1}"
                            "need to check network"
                            " connectivity".format(ip, node["ip"]))

            for ip in non_ping_ips:
                assert_false(checkers.check_ping(node["ip"], ip),
                             "Patch ports from old controllers"
                             "isn't removed")

        time.sleep(180)  # TODO need to remove
        # after fix of https://bugs.launchpad.net/fuel/+bug/1499696

        with self.env.d_env.get_ssh_to_remote(controllers[0]["ip"]) as remote:
            stdout = remote.execute("crm resource status")["stdout"]

        while stdout:
            current = stdout.pop(0)
            if "vip" in current:
                assert_true("Started" in current)
            elif "master_p" in current:
                next_element = stdout.pop(0)
                assert_true("Masters: [ node-" in next_element)
            elif any(x in current for x in ["ntp", "mysql", "dns"]):
                next_element = stdout.pop(0)
                assert_true("Started" in next_element)
            elif any(x in current for x in ["nova", "cinder", "keystone",
                                            "heat", "neutron", "glance"]):
                next_element = stdout.pop(0)
                assert_true("Started" in next_element)

        with self.env.d_env.get_admin_remote() as remote:
            octane_upgrade_node = remote.execute(
                "octane upgrade-node {0} {1} {2}".format(
                    seed_cluster_id, old_controllers[0]["id"],
                    old_controllers[1]["id"])
            )
        assert_equal(0, octane_upgrade_node['exit_code'],
                     "octane upgrade-node returns non zero"
                     "status code,"
                     "current result {}".format(octane_upgrade_node))
        tasks_started_by_octane = [
            task for task in self.fuel_web.client.get_tasks()
            if task['cluster'] == seed_cluster_id
        ]

        for task in tasks_started_by_octane:
            self.fuel_web.assert_task_success(task)

        self.env.make_snapshot("upgrade_control_plane", is_make=True)
Code Example #9
    def add_custom_nodegroup_after_master_upgrade(self):
        """Add new nodegroup to existing operational environment after
        Fuel Master upgrade

        Scenario:
            1. Revert "upgrade_multirack_restore" snapshot
            2. Create new nodegroup for the environment and configure
               its networks
            3. Bootstrap slave node from custom-2 nodegroup
            4. Add node from new nodegroup to the environment with compute role
            5. Run network verification
            6. Deploy changes
            7. Run network verification
            8. Run OSTF
            9. Check that nodes from 'default' nodegroup can reach nodes
               from new nodegroup via management and storage networks

        Duration 50m
        Snapshot add_custom_nodegroup_after_master_upgrade
        """

        self.show_step(1)
        self.env.revert_snapshot(self.snapshot_name)
        cluster_id = self.fuel_web.get_last_created_cluster()
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        asserts.assert_true(not any(ng['name'] == NODEGROUPS[2]['name'] for ng
                                    in self.fuel_web.client.get_nodegroups()),
                            'Custom nodegroup {0} already '
                            'exists!'.format(NODEGROUPS[2]['name']))

        self.show_step(2)
        new_nodegroup = self.fuel_web.client.create_nodegroup(
            cluster_id, NODEGROUPS[2]['name'])
        logger.debug('Updating custom nodegroup ID in network configuration..')
        network_config_new = self.fuel_web.client.get_networks(cluster_id)
        with open(self.netgroup_description_file, "r") as file_obj:
            netconf_all_groups = yaml.load(file_obj)

        asserts.assert_true(netconf_all_groups is not None,
                            'Network configuration for nodegroups is empty!')

        for network in netconf_all_groups['networks']:
            if network['group_id'] is not None and \
                    not any(network['group_id'] == ng['id']
                            for ng in self.fuel_web.client.get_nodegroups()):
                network['group_id'] = new_nodegroup['id']
                for new_network in network_config_new['networks']:
                    if new_network['name'] == network['name'] and \
                       new_network['group_id'] == network['group_id']:
                        network['id'] = new_network['id']

        self.fuel_web.client.update_network(
            cluster_id,
            netconf_all_groups['networking_parameters'],
            netconf_all_groups['networks'])

        self.show_step(3)
        self.env.bootstrap_nodes([self.env.d_env.nodes().slaves[6]])

        self.show_step(4)
        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-07': [['compute'], new_nodegroup['name']]},
            True, False
        )

        self.show_step(5)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(6)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(8)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(9)
        primary_ctrl = self.fuel_web.get_nailgun_node_by_devops_node(
            self.fuel_web.get_nailgun_primary_node(
                slave=self.env.d_env.nodes().slaves[0]))

        with self.fuel_web.get_ssh_for_node('slave-07') as remote:
            new_node_networks = utils.get_net_settings(remote)

        for interface in ('br-storage', 'br-mgmt'):
            if interface in new_node_networks:
                logger.info("Checking new node is accessible from primary "
                            "controller via {0} interface.".format(interface))
                for ip in new_node_networks[interface]['ip_addresses']:
                    address = ip.split('/')[0]
                    result = check_ping(primary_ctrl['ip'],
                                        address,
                                        timeout=3)
                    asserts.assert_true(result,
                                        "New node isn't accessible from "
                                        "primary controller via {0} interface"
                                        ": {1}.".format(interface, result))

        self.env.make_snapshot("add_custom_nodegroup_after_master_upgrade")
Code Example #10
    def add_custom_nodegroup(self):
        """Add new nodegroup to operational environment

        Scenario:
            1. Revert snapshot with operational cluster
            2. Create new nodegroup for the environment and configure
               its networks
            3. Bootstrap slave node from custom-2 nodegroup
            4. Add node from new nodegroup to the environment with compute role
            5. Run network verification
            6. Deploy changes
            7. Run network verification
            8. Run OSTF
            9. Check that nodes from 'default' nodegroup can reach nodes
               from new nodegroup via management and storage networks

        Duration 50m
        Snapshot add_custom_nodegroup
        """

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("deploy_neutron_tun_ha_nodegroups")
        cluster_id = self.fuel_web.get_last_created_cluster()
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        asserts.assert_true(
            not any(ng["name"] == NODEGROUPS[2]["name"] for ng in self.fuel_web.client.get_nodegroups()),
            "Custom nodegroup {0} already " "exists!".format(NODEGROUPS[2]["name"]),
        )

        self.show_step(2)
        new_nodegroup = self.fuel_web.client.create_nodegroup(cluster_id, NODEGROUPS[2]["name"])
        logger.debug("Updating custom nodegroup ID in network configuration..")
        network_config_new = self.fuel_web.client.get_networks(cluster_id)
        asserts.assert_true(self.netconf_all_groups is not None, "Network configuration for nodegroups is empty!")

        for network in self.netconf_all_groups["networks"]:
            if network["group_id"] is not None and not any(
                network["group_id"] == ng["id"] for ng in self.fuel_web.client.get_nodegroups()
            ):
                network["group_id"] = new_nodegroup["id"]
                for new_network in network_config_new["networks"]:
                    if new_network["name"] == network["name"] and new_network["group_id"] == network["group_id"]:
                        network["id"] = new_network["id"]

        self.fuel_web.client.update_network(
            cluster_id, self.netconf_all_groups["networking_parameters"], self.netconf_all_groups["networks"]
        )

        self.show_step(3)
        self.env.bootstrap_nodes([self.env.d_env.nodes().slaves[6]])

        self.show_step(4)
        self.fuel_web.update_nodes(cluster_id, {"slave-07": [["compute"], new_nodegroup["name"]]}, True, False)

        self.show_step(5)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(6)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(8)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(9)
        primary_ctrl = self.fuel_web.get_nailgun_node_by_devops_node(
            self.fuel_web.get_nailgun_primary_node(slave=self.env.d_env.nodes().slaves[0])
        )

        with self.fuel_web.get_ssh_for_node("slave-07") as remote:
            new_node_networks = utils.get_net_settings(remote)

        for interface in ("br-storage", "br-mgmt"):
            if interface in new_node_networks:
                logger.info(
                    "Checking new node is accessible from primary " "controller via {0} interface.".format(interface)
                )
                for ip in new_node_networks[interface]["ip_addresses"]:
                    address = ip.split("/")[0]
                    result = check_ping(primary_ctrl["ip"], address, timeout=3)
                    asserts.assert_true(
                        result,
                        "New node isn't accessible from "
                        "primary controller via {0} interface"
                        ": {1}.".format(interface, result),
                    )

        self.env.make_snapshot("add_custom_nodegroup")
Code Example #11
    def manual_cic_maintenance_mode(self):
        """Check manual maintenance mode for controller

        Scenario:
            1. Revert snapshot
            2. Switch to maintenance mode
            3. Wait until the controller reboots
            4. Exit maintenance mode
            5. Check that the controller becomes available

        Duration 155m
        """
        self.env.revert_snapshot('cic_maintenance_mode')

        cluster_id = self.fuel_web.get_last_created_cluster()

        # Select a non-primary controller
        regular_ctrl = self.fuel_web.get_nailgun_node_by_name("slave-02")
        dregular_ctrl = self.fuel_web.get_devops_node_by_nailgun_node(
            regular_ctrl)
        _ip = regular_ctrl['ip']
        _id = regular_ctrl['id']
        logger.info('Maintenance mode for node-{0}'.format(_id))
        asserts.assert_true('True' in check_available_mode(_ip),
                            "Maintenance mode is not available")
        self.ssh_manager.check_call(ip=_ip, command="umm on", expected=[-1])

        self.fuel_web.wait_node_is_offline(dregular_ctrl)

        asserts.assert_true(
            checkers.check_ping(self.env.get_admin_node_ip(),
                                _ip,
                                deadline=600),
            "Host {0} is not reachable by ping during 600 sec".format(_ip))

        asserts.assert_true('True' in check_auto_mode(_ip),
                            "Maintenance mode is not switched on")

        self.ssh_manager.check_call(ip=_ip, command="umm off")

        self.fuel_web.wait_node_is_online(dregular_ctrl)

        # Wait until Cinder services UP on a controller
        self.fuel_web.wait_cinder_is_up([dregular_ctrl.name])

        # Wait until RabbitMQ cluster is UP
        wait_pass(lambda: self.fuel_web.run_single_ostf_test(
            cluster_id,
            test_sets=['ha'],
            test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
                'RabbitMQ availability')),
                  timeout=1500)
        logger.info('RabbitMQ cluster is available')

        wait_pass(lambda: self.fuel_web.run_single_ostf_test(
            cluster_id,
            test_sets=['sanity'],
            test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
                'Check that required services are running')),
                  timeout=1500)
        logger.info("Required services are running")

        # TODO(astudenov): add timeout_msg
        try:
            self.fuel_web.run_ostf(cluster_id,
                                   test_sets=['smoke', 'sanity', 'ha'])
        except AssertionError:
            logger.debug("Test failed from first probe,"
                         " we sleep 600 second try one more time"
                         " and if it fails again - test will fails ")
            time.sleep(600)
            self.fuel_web.run_ostf(cluster_id,
                                   test_sets=['smoke', 'sanity', 'ha'])
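wait_pass(...) differs from wait(...): it retries a callable that may raise, and succeeds as soon as one call completes without an exception. A simplified stand-in for the devops helper (not the actual implementation):

import time

def wait_pass(callable_, interval=5, timeout=60):
    # Keep calling `callable_` until it returns without raising; once
    # `timeout` seconds have elapsed, re-raise the last exception.
    end_time = time.time() + timeout
    while True:
        try:
            return callable_()
        except Exception:
            if time.time() >= end_time:
                raise
            time.sleep(interval)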
Code Example #12
    def negative_auto_cic_maintenance_mode(self):
        """Check negative scenario for auto maintenance mode

        Scenario:
            1. Revert snapshot
            2. Disable UMM
            3. Change UMM.CONF
            4. Unexpected reboot
            5. Check that the controller does not switch to maintenance mode
            6. Check that the controller becomes available

        Duration 85m
        """
        self.env.revert_snapshot('cic_maintenance_mode')

        cluster_id = self.fuel_web.get_last_created_cluster()

        # Select a non-primary controller
        regular_ctrl = self.fuel_web.get_nailgun_node_by_name("slave-02")
        dregular_ctrl = self.fuel_web.get_devops_node_by_nailgun_node(
            regular_ctrl)
        _ip = regular_ctrl['ip']
        _id = regular_ctrl['id']

        asserts.assert_true('True' in check_available_mode(_ip),
                            "Maintenance mode is not available")
        logger.info('Disable UMM on node-{0}'.format(_id))

        change_config(_ip, umm=False, reboot_count=0)

        asserts.assert_false('True' in check_available_mode(_ip),
                             "Maintenance mode should not be available")

        logger.info('Unexpected reboot on node-{0}'.format(_id))

        self.ssh_manager.check_call(ip=_ip, command='reboot >/dev/null & ')

        wait(
            lambda: not checkers.check_ping(self.env.get_admin_node_ip(), _ip),
            timeout=60 * 10,
            timeout_msg='Node {} still responds to ping'.format(
                dregular_ctrl.name))

        # The node doesn't have enough time to report offline status
        # after the reboot, so just wait

        asserts.assert_true(
            checkers.check_ping(self.env.get_admin_node_ip(),
                                _ip,
                                deadline=600),
            "Host {0} is not reachable by ping during 600 sec".format(_ip))

        self.fuel_web.wait_node_is_online(dregular_ctrl)

        logger.info('Check that node-{0} is not in maintenance mode after'
                    ' the unexpected reboot'.format(_id))

        wait(lambda: tcp_ping(_ip, 22),
             timeout=60 * 10,
             timeout_msg='Node {} still is not available by SSH'.format(
                 dregular_ctrl.name))

        asserts.assert_false('True' in check_auto_mode(_ip),
                             "Maintenance mode should not switched")

        # Wait until MySQL Galera is UP on some controller
        self.fuel_web.wait_mysql_galera_is_up([dregular_ctrl.name])

        # Wait until Cinder services UP on a controller
        self.fuel_web.wait_cinder_is_up([dregular_ctrl.name])

        # Wait until RabbitMQ cluster is UP
        wait_pass(lambda: self.fuel_web.run_single_ostf_test(
            cluster_id,
            test_sets=['ha'],
            test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
                'RabbitMQ availability')),
                  timeout=1500)
        logger.info('RabbitMQ cluster is available')

        # TODO(astudenov): add timeout_msg
        wait_pass(lambda: self.fuel_web.run_single_ostf_test(
            cluster_id,
            test_sets=['sanity'],
            test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
                'Check that required services are running')),
                  timeout=1500)
        logger.info("Required services are running")

        try:
            self.fuel_web.run_ostf(cluster_id,
                                   test_sets=['smoke', 'sanity', 'ha'])
        except AssertionError:
            logger.debug("Test failed from first probe,"
                         " we sleep 600 second try one more time"
                         " and if it fails again - test will fails ")
            time.sleep(600)
            self.fuel_web.run_ostf(cluster_id,
                                   test_sets=['smoke', 'sanity', 'ha'])
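Code Examples #12, #13 and #15 call change_config(_ip, ...) to rewrite /etc/umm.conf, which is what Code Example #7 does with an inline echo. The sketch below is modelled on that echo command; unlike the helper used in the tests it takes the SSH manager explicitly so the snippet stays self-contained, and the default values are assumptions:

def change_config(ssh_manager, ip, umm=True, reboot_count=2,
                  counter_reset_time=10):
    # Rewrite /etc/umm.conf on the target node, mirroring the inline
    # `echo -e ... > /etc/umm.conf` command from Code Example #7.
    command = ("echo -e 'UMM={0}\nREBOOT_COUNT={1}\n"
               "COUNTER_RESET_TIME={2}' > /etc/umm.conf".format(
                   'yes' if umm else 'no', reboot_count, counter_reset_time))
    ssh_manager.execute_on_remote(ip=ip, cmd=command)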
Code Example #13
    def auto_cic_maintenance_mode(self):
        """Check auto maintenance mode for controller

        Scenario:
            1. Revert snapshot
            2. Unexpected reboot
            3. Wait until the controller switches to maintenance mode
            4. Exit maintenance mode
            5. Check that the controller becomes available

        Duration 155m
        """
        self.env.revert_snapshot('cic_maintenance_mode')

        cluster_id = self.fuel_web.get_last_created_cluster()

        # Select a non-primary controller
        regular_ctrl = self.fuel_web.get_nailgun_node_by_name("slave-02")
        dregular_ctrl = self.fuel_web.get_devops_node_by_nailgun_node(
            regular_ctrl)
        _ip = regular_ctrl['ip']
        _id = regular_ctrl['id']

        asserts.assert_true('True' in check_available_mode(_ip),
                            "Maintenance mode is not available")

        change_config(_ip, reboot_count=0)

        logger.info('Change UMM.CONF on node-{0}'.format(_id))

        logger.info('Unexpected reboot on node-{0}'.format(_id))

        command = 'reboot --force >/dev/null & '

        self.ssh_manager.execute_on_remote(ip=_ip, cmd=command)

        wait(
            lambda: not checkers.check_ping(self.env.get_admin_node_ip(), _ip),
            timeout=60 * 10,
            timeout_msg='Node {} still responds to ping'.format(
                dregular_ctrl.name))

        self.fuel_web.wait_node_is_offline(dregular_ctrl)

        logger.info('Check that node-{0} is in maintenance mode after'
                    ' the unexpected reboot'.format(_id))
        asserts.assert_true(
            checkers.check_ping(self.env.get_admin_node_ip(),
                                _ip,
                                deadline=600),
            "Host {0} is not reachable by ping during 600 sec".format(_ip))

        asserts.assert_true('True' in check_auto_mode(_ip),
                            "Maintenance mode is not switched on")

        logger.info('Turn off maintenance mode')
        self.ssh_manager.execute_on_remote(ip=_ip, cmd="umm off")
        time.sleep(30)

        change_config(_ip)

        self.fuel_web.wait_node_is_online(dregular_ctrl)

        # Wait until MySQL Galera is UP on some controller
        self.fuel_web.wait_mysql_galera_is_up([dregular_ctrl.name])

        # Wait until Cinder services UP on a controller
        self.fuel_web.wait_cinder_is_up([dregular_ctrl.name])

        # Wait until RabbitMQ cluster is UP
        wait_pass(lambda: self.fuel_web.run_single_ostf_test(
            cluster_id,
            test_sets=['ha'],
            test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
                'RabbitMQ availability')),
                  timeout=1500)
        logger.info('RabbitMQ cluster is available')

        # Wait until all Openstack services are UP
        wait_pass(lambda: self.fuel_web.run_single_ostf_test(
            cluster_id,
            test_sets=['sanity'],
            test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
                'Check that required services are running')),
                  timeout=1500)
        logger.info("Required services are running")

        try:
            self.fuel_web.run_ostf(cluster_id,
                                   test_sets=['smoke', 'sanity', 'ha'])
        except AssertionError:
            logger.debug("Test failed from first probe,"
                         " we sleep 600 second try one more time"
                         " and if it fails again - test will fails ")
            time.sleep(600)
            self.fuel_web.run_ostf(cluster_id,
                                   test_sets=['smoke', 'sanity', 'ha'])
Code Example #14
    def manual_cic_maintenance_mode(self):
        """Check manual maintenance mode for controller

        Scenario:
            1. Revert snapshot
            2. Switch to maintenance mode
            3. Wait until the controller reboots
            4. Exit maintenance mode
            5. Check that the controller becomes available

        Duration 155m
        """
        self.env.revert_snapshot('cic_maintenance_mode')

        cluster_id = self.fuel_web.get_last_created_cluster()

        # Select a non-primary controller
        regular_ctrl = self.fuel_web.get_nailgun_node_by_name("slave-02")
        dregular_ctrl = self.fuel_web.get_devops_node_by_nailgun_node(
            regular_ctrl)
        _ip = regular_ctrl['ip']
        _id = regular_ctrl['id']
        logger.info('Maintenance mode for node-{0}'.format(_id))
        asserts.assert_true('True' in check_available_mode(_ip),
                            "Maintenance mode is not available")
        self.ssh_manager.execute_on_remote(
            ip=_ip,
            cmd="umm on")

        self.fuel_web.wait_node_is_offline(dregular_ctrl)

        asserts.assert_true(
            checkers.check_ping(self.env.get_admin_node_ip(),
                                _ip,
                                deadline=600),
            "Host {0} is not reachable by ping during 600 sec"
            .format(_ip))

        asserts.assert_true('True' in check_auto_mode(_ip),
                            "Maintenance mode is not switched on")

        self.ssh_manager.execute_on_remote(
            ip=_ip,
            cmd="umm off")

        self.fuel_web.wait_node_is_online(dregular_ctrl)

        # Wait until Cinder services UP on a controller
        self.fuel_web.wait_cinder_is_up(
            [dregular_ctrl.name])

        # Wait until RabbitMQ cluster is UP
        wait_pass(lambda:
                  self.fuel_web.run_single_ostf_test(
                      cluster_id, test_sets=['ha'],
                      test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
                          'RabbitMQ availability')),
                  timeout=1500)
        logger.info('RabbitMQ cluster is available')

        wait_pass(lambda:
                  self.fuel_web.run_single_ostf_test(
                      cluster_id, test_sets=['sanity'],
                      test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
                          'Check that required services are running')),
                  timeout=1500)
        logger.info("Required services are running")

        # TODO(astudenov): add timeout_msg
        try:
            self.fuel_web.run_ostf(cluster_id,
                                   test_sets=['smoke', 'sanity', 'ha'])
        except AssertionError:
            logger.debug("Test failed from first probe,"
                         " we sleep 600 second try one more time"
                         " and if it fails again - test will fails ")
            time.sleep(600)
            self.fuel_web.run_ostf(cluster_id,
                                   test_sets=['smoke', 'sanity', 'ha'])
Code Example #15
    def negative_auto_cic_maintenance_mode(self):
        """Check negative scenario for auto maintenance mode

        Scenario:
            1. Revert snapshot
            2. Disable UMM
            3. Change UMM.CONF
            4. Unexpected reboot
            5. Check that the controller does not switch to maintenance mode
            6. Check that the controller becomes available

        Duration 85m
        """
        self.env.revert_snapshot('cic_maintenance_mode')

        cluster_id = self.fuel_web.get_last_created_cluster()

        # Select a non-primary controller
        regular_ctrl = self.fuel_web.get_nailgun_node_by_name("slave-02")
        dregular_ctrl = self.fuel_web.get_devops_node_by_nailgun_node(
            regular_ctrl)
        _ip = regular_ctrl['ip']
        _id = regular_ctrl['id']

        asserts.assert_true('True' in check_available_mode(_ip),
                            "Maintenance mode is not available")
        logger.info('Disable UMM on node-{0}'.format(_id))

        change_config(_ip, umm=False, reboot_count=0)

        asserts.assert_false('True' in check_available_mode(_ip),
                             "Maintenance mode should not be available")

        command = 'reboot --force >/dev/null & '

        logger.info('Unexpected reboot on node-{0}'
                    .format(_id))

        self.ssh_manager.execute_on_remote(
            ip=_ip,
            cmd=command)

        wait(lambda:
             not checkers.check_ping(self.env.get_admin_node_ip(),
                                     _ip),
             timeout=60 * 10,
             timeout_msg='Node {} still responds to ping'.format(
                 dregular_ctrl.name))

        # The node doesn't have enough time to report offline status
        # after 'reboot --force', so just wait

        asserts.assert_true(
            checkers.check_ping(self.env.get_admin_node_ip(),
                                _ip,
                                deadline=600),
            "Host {0} is not reachable by ping during 600 sec"
            .format(_ip))

        self.fuel_web.wait_node_is_online(dregular_ctrl)

        logger.info('Check that node-{0} is not in maintenance mode after'
                    ' the unexpected reboot'.format(_id))

        wait(lambda: tcp_ping(_ip, 22),
             timeout=60 * 10,
             timeout_msg='Node {} still is not available by SSH'.format(
                 dregular_ctrl.name))

        asserts.assert_false('True' in check_auto_mode(_ip),
                             "Maintenance mode should not switched")

        # Wait until MySQL Galera is UP on some controller
        self.fuel_web.wait_mysql_galera_is_up(
            [dregular_ctrl.name])

        # Wait until Cinder services UP on a controller
        self.fuel_web.wait_cinder_is_up(
            [dregular_ctrl.name])

        # Wait until RabbitMQ cluster is UP
        wait_pass(lambda:
                  self.fuel_web.run_single_ostf_test(
                      cluster_id, test_sets=['ha'],
                      test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
                          'RabbitMQ availability')),
                  timeout=1500)
        logger.info('RabbitMQ cluster is available')

        # TODO(astudenov): add timeout_msg
        wait_pass(lambda:
                  self.fuel_web.run_single_ostf_test(
                      cluster_id, test_sets=['sanity'],
                      test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
                          'Check that required services are running')),
                  timeout=1500)
        logger.info("Required services are running")

        try:
            self.fuel_web.run_ostf(cluster_id,
                                   test_sets=['smoke', 'sanity', 'ha'])
        except AssertionError:
            logger.debug("Test failed from first probe,"
                         " we sleep 600 second try one more time"
                         " and if it fails again - test will fails ")
            time.sleep(600)
            self.fuel_web.run_ostf(cluster_id,
                                   test_sets=['smoke', 'sanity', 'ha'])
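Code Examples #12 and #15 wait on tcp_ping(_ip, 22) before checking the maintenance-mode flags, i.e. they probe whether the SSH port accepts TCP connections again. A minimal equivalent of such a probe (the helper used in the tests ships with the devops library):

import socket

def tcp_ping(host, port, timeout=5):
    # Return True if a TCP connection to host:port can be established
    # within `timeout` seconds, False otherwise.
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except (socket.timeout, OSError):
        return False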