Example #1
    def check_ceph_partitions_after_reboot(self):
        """Check that Ceph OSD partitions are remounted after reboot

        Scenario:
            1. Create cluster
            2. Add 1 node with controller role
            3. Add 1 node with compute and Ceph OSD roles
            4. Add 1 node with Ceph OSD role
            5. Deploy the cluster
            6. Check Ceph status
            7. Read current partitions
            8. Warm-reboot Ceph nodes
            9. Read partitions again
            10. Check Ceph health
            11. Cold-reboot Ceph nodes
            12. Read partitions again
            13. Check Ceph health

        Snapshot check_ceph_partitions_after_reboot

        """
        if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_REDHAT:
            raise proboscis.SkipTest()

        self.env.revert_snapshot("ready_with_3_slaves")

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_SIMPLE,
            settings={
                'volumes_ceph': True,
                'images_ceph': True,
                'ephemeral_ceph': True,
                'volumes_lvm': False
            }
        )

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute', 'ceph-osd'],
                'slave-03': ['ceph-osd']
            }
        )
        # Deploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)
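        # Run the checks on both nodes that carry the ceph-osd role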
        for node in ["slave-02", "slave-03"]:
            logger.info("Get partitions for {node}".format(node=node))
            before_reboot_partitions = [checkers.get_ceph_partitions(
                self.env.get_ssh_to_remote_by_name(node),
                "/dev/vd{p}".format(p=part)) for part in ["b", "c"]]

            logger.info("Warm-restart nodes")
            self.fuel_web.warm_restart_nodes(
                [self.fuel_web.environment.get_virtual_environment().
                    node_by_name(node)])

            logger.info("Get partitions for {node} once again".format(
                node=node
            ))
            after_reboot_partitions = [checkers.get_ceph_partitions(
                self.env.get_ssh_to_remote_by_name(node),
                "/dev/vd{p}".format(p=part)) for part in ["b", "c"]]

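            # The partition layout must survive the warm reboot unchanged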
            if before_reboot_partitions != after_reboot_partitions:
                logger.info("Partitions don't match")
                logger.info("Before reboot: %s" % before_reboot_partitions)
                logger.info("After reboot: %s" % after_reboot_partitions)
                raise Exception("Partitions on {0} changed after "
                                "warm reboot".format(node))

            logger.info("Check Ceph health is ok after reboot")
            check_ceph_health(
                self.env.get_ssh_to_remote_by_name(node))

            logger.info("Cold-restart nodes")
            self.fuel_web.cold_restart_nodes(
                [self.fuel_web.environment.get_virtual_environment().
                    node_by_name(node)])

            after_reboot_partitions = [checkers.get_ceph_partitions(
                self.env.get_ssh_to_remote_by_name(node),
                "/dev/vd{p}".format(p=part)) for part in ["b", "c"]]

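            # The partition layout must also survive the cold reboot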
            if before_reboot_partitions != after_reboot_partitions:
                logger.info("Partitions don't match")
                logger.info("Before reboot: %s" % before_reboot_partitions)
                logger.info("After reboot: %s" % after_reboot_partitions)
                raise Exception("Partitions on {0} changed after "
                                "cold reboot".format(node))

            logger.info("Check Ceph health is ok after reboot")
            check_ceph_health(
                self.env.get_ssh_to_remote_by_name(node))
Example #2
    def check_ceph_partitions_after_reboot(self):
        """Check that Ceph OSD partitions are remounted after reboot

        Scenario:
            1. Create cluster
            2. Add 1 node with controller role
            3. Add 1 node with compute and Ceph OSD roles
            4. Add 1 node with Ceph OSD role
            5. Deploy the cluster
            6. Check Ceph status
            7. Read current partitions
            8. Warm-reboot Ceph nodes
            9. Read partitions again
            10. Check Ceph health
            11. Cold-reboot Ceph nodes
            12. Read partitions again
            13. Check Ceph health

        Snapshot check_ceph_partitions_after_reboot

        """
        if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_REDHAT:
            raise proboscis.SkipTest()

        self.env.revert_snapshot("ready_with_3_slaves")

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_SIMPLE,
            settings={
                'volumes_ceph': True,
                'images_ceph': True,
                'ephemeral_ceph': True,
                'volumes_lvm': False
            }
        )

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute', 'ceph-osd'],
                'slave-03': ['ceph-osd']
            }
        )
        # Deploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)
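        # Run the checks on both nodes that carry the ceph-osd role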
        for node in ["slave-02", "slave-03"]:
            logger.info("Get partitions for {node}".format(node=node))
            before_reboot_partitions = [checkers.get_ceph_partitions(
                self.env.get_ssh_to_remote_by_name(node),
                "/dev/vd{p}".format(p=part)) for part in ["b", "c"]]

            logger.info("Warm-restart nodes")
            self.fuel_web.warm_restart_nodes(
                [self.fuel_web.environment.get_virtual_environment().
                    node_by_name(node)])

            logger.info("Get partitions for {node} once again".format(
                node=node
            ))
            after_reboot_partitions = [checkers.get_ceph_partitions(
                self.env.get_ssh_to_remote_by_name(node),
                "/dev/vd{p}".format(p=part)) for part in ["b", "c"]]

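            # The partition layout must survive the warm reboot unchanged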
            if before_reboot_partitions != after_reboot_partitions:
                logger.info("Partitions don't match")
                logger.info("Before reboot: %s" % before_reboot_partitions)
                logger.info("After reboot: %s" % after_reboot_partitions)
                raise Exception("Partitions on {0} changed after "
                                "warm reboot".format(node))

            logger.info("Check Ceph health is ok after reboot")
            self.fuel_web.check_ceph_status(cluster_id)

            logger.info("Cold-restart nodes")
            self.fuel_web.cold_restart_nodes(
                [self.fuel_web.environment.get_virtual_environment().
                    node_by_name(node)])

            after_reboot_partitions = [checkers.get_ceph_partitions(
                self.env.get_ssh_to_remote_by_name(node),
                "/dev/vd{p}".format(p=part)) for part in ["b", "c"]]

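            # The partition layout must also survive the cold reboot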
            if before_reboot_partitions != after_reboot_partitions:
                logger.info("Partitions don't match")
                logger.info("Before reboot: %s" % before_reboot_partitions)
                logger.info("After reboot: %s" % after_reboot_partitions)
                raise Exception("Partitions on {0} changed after "
                                "cold reboot".format(node))

            logger.info("Check Ceph health is ok after reboot")
            self.fuel_web.check_ceph_status(cluster_id)
Example #3
    def check_ceph_partitions_after_reboot(self):
        """Check that Ceph OSD partitions are remounted after reboot

        Scenario:
            1. Create cluster in HA mode with 1 controller
            2. Add 1 node with controller role
            3. Add 1 node with compute and Ceph OSD roles
            4. Add 1 node with Ceph OSD role
            5. Deploy the cluster
            6. Check Ceph status
            7. Read current partitions
            8. Warm-reboot Ceph nodes
            9. Read partitions again
            10. Check Ceph health
            11. Cold-reboot Ceph nodes
            12. Read partitions again
            13. Check Ceph health

        Duration 40m
        Snapshot check_ceph_partitions_after_reboot

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(1, initialize=True)

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings={
                'volumes_ceph': True,
                'images_ceph': True,
                'osd_pool_size': '2',
                'ephemeral_ceph': True,
                'volumes_lvm': False,
            }
        )

        self.show_step(2)
        self.show_step(3)
        self.show_step(4)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute', 'ceph-osd'],
                'slave-03': ['ceph-osd']
            }
        )

        self.show_step(5)
        # Deploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(6)
        for node in ["slave-02", "slave-03"]:

            self.show_step(7, node)
            logger.info("Get partitions for {node}".format(node=node))
            _ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
            with self.env.d_env.get_ssh_to_remote(_ip) as remote:
                before_reboot_partitions = [checkers.get_ceph_partitions(
                    remote,
                    "/dev/vd{p}".format(p=part)) for part in ["b", "c"]]

            self.show_step(8, node)
            logger.info("Warm-restart nodes")
            self.fuel_web.warm_restart_nodes(
                [self.fuel_web.environment.d_env.get_node(name=node)])

            self.show_step(9, node)
            logger.info("Get partitions for {node} once again".format(
                node=node
            ))
            _ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
            with self.env.d_env.get_ssh_to_remote(_ip) as remote:
                after_reboot_partitions = [checkers.get_ceph_partitions(
                    remote,
                    "/dev/vd{p}".format(p=part)) for part in ["b", "c"]]

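            # The partition layout must survive the warm reboot unchanged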
            if before_reboot_partitions != after_reboot_partitions:
                logger.info("Partitions don't match")
                logger.info("Before reboot: %s" % before_reboot_partitions)
                logger.info("After reboot: %s" % after_reboot_partitions)
                raise Exception("Partitions on {0} changed after "
                                "warm reboot".format(node))

            self.show_step(10, node)
            logger.info("Check Ceph health is ok after reboot")
            self.fuel_web.check_ceph_status(cluster_id)

            self.show_step(11, node)
            logger.info("Cold-restart nodes")
            self.fuel_web.cold_restart_nodes(
                [self.fuel_web.environment.d_env.get_node(name=node)])

            self.show_step(12, node)
            _ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
            with self.env.d_env.get_ssh_to_remote(_ip) as remote:
                after_reboot_partitions = [checkers.get_ceph_partitions(
                    remote,
                    "/dev/vd{p}".format(p=part)) for part in ["b", "c"]]

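            # The partition layout must also survive the cold reboot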
            if before_reboot_partitions != after_reboot_partitions:
                logger.info("Partitions don't match")
                logger.info("Before reboot: %s" % before_reboot_partitions)
                logger.info("After reboot: %s" % after_reboot_partitions)
                raise Exception("Partitions on {0} changed after "
                                "cold reboot".format(node))

            self.show_step(13, node)
            logger.info("Check Ceph health is ok after reboot")
            self.fuel_web.check_ceph_status(cluster_id)
Example #4
    def check_ceph_partitions_after_reboot(self):
        """Check that Ceph OSD partitions are remounted after reboot

        Scenario:
            1. Create cluster in HA mode with 1 controller
            2. Add 1 node with controller role
            3. Add 1 node with compute and Ceph OSD roles
            4. Add 1 node with Ceph OSD role
            5. Deploy the cluster
            6. Check Ceph status
            7. Read current partitions
            8. Warm-reboot Ceph nodes
            9. Read partitions again
            10. Check Ceph health
            11. Cold-reboot Ceph nodes
            12. Read partitions again
            13. Check Ceph health

        Duration 40m
        Snapshot check_ceph_partitions_after_reboot

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(1)

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings={
                'volumes_ceph': True,
                'images_ceph': True,
                'ephemeral_ceph': True,
                'volumes_lvm': False,
                'net_provider': 'neutron',
                'net_segment_type': NEUTRON_SEGMENT_TYPE,
            })

        self.show_step(2)
        self.show_step(3)
        self.show_step(4)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute', 'ceph-osd'],
                'slave-03': ['ceph-osd']
            })

        self.show_step(5)
        # Deploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(6)
        for node in ["slave-02", "slave-03"]:

            self.show_step(7, node)
            logger.info("Get partitions for {node}".format(node=node))
            _ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
            with self.env.d_env.get_ssh_to_remote(_ip) as remote:
                before_reboot_partitions = [
                    checkers.get_ceph_partitions(remote,
                                                 "/dev/vd{p}".format(p=part))
                    for part in ["b", "c"]
                ]

            self.show_step(8, node)
            logger.info("Warm-restart nodes")
            self.fuel_web.warm_restart_nodes(
                [self.fuel_web.environment.d_env.get_node(name=node)])

            self.show_step(9, node)
            logger.info(
                "Get partitions for {node} once again".format(node=node))
            _ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
            with self.env.d_env.get_ssh_to_remote(_ip) as remote:
                after_reboot_partitions = [
                    checkers.get_ceph_partitions(remote,
                                                 "/dev/vd{p}".format(p=part))
                    for part in ["b", "c"]
                ]

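            # The partition layout must survive the warm reboot unchanged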
            if before_reboot_partitions != after_reboot_partitions:
                logger.info("Partitions don't match")
                logger.info("Before reboot: %s" % before_reboot_partitions)
                logger.info("After reboot: %s" % after_reboot_partitions)
                raise Exception("Partitions on {0} changed after "
                                "warm reboot".format(node))

            self.show_step(10, node)
            logger.info("Check Ceph health is ok after reboot")
            self.fuel_web.check_ceph_status(cluster_id)

            self.show_step(11, node)
            logger.info("Cold-restart nodes")
            self.fuel_web.cold_restart_nodes(
                [self.fuel_web.environment.d_env.get_node(name=node)])

            self.show_step(12, node)
            _ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
            with self.env.d_env.get_ssh_to_remote(_ip) as remote:
                after_reboot_partitions = [
                    checkers.get_ceph_partitions(remote,
                                                 "/dev/vd{p}".format(p=part))
                    for part in ["b", "c"]
                ]

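            # The partition layout must also survive the cold reboot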
            if before_reboot_partitions != after_reboot_partitions:
                logger.info("Partitions don't match")
                logger.info("Before reboot: %s" % before_reboot_partitions)
                logger.info("After reboot: %s" % after_reboot_partitions)
                raise Exception("Partitions on {0} changed after "
                                "cold reboot".format(node))

            self.show_step(13, node)
            logger.info("Check Ceph health is ok after reboot")
            self.fuel_web.check_ceph_status(cluster_id)
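
Each variant above repeats the same compare-and-raise block after every warm and cold reboot. A small module-level helper along the lines of the sketch below would keep the comparison and its error message in one place; the helper name assert_partitions_unchanged is hypothetical, and the sketch assumes proboscis.asserts is importable in this test environment (the tests above already rely on proboscis):

from proboscis.asserts import assert_equal


def assert_partitions_unchanged(before, after, node, reboot_kind):
    # Hypothetical helper: fail the test if the Ceph OSD partition layout
    # read before and after a reboot of `node` differs.
    assert_equal(
        before, after,
        "Partitions on {0} changed after {1} reboot: "
        "before={2}, after={3}".format(node, reboot_kind, before, after))

With such a helper, each comparison in the examples reduces to a single call, e.g. assert_partitions_unchanged(before_reboot_partitions, after_reboot_partitions, node, "warm").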