Code example #1
    def run_instance_delete_wait(self, expected_states=['SHUTDOWN']):
        if self.has_do_not_delete_instance:
            self.report.log("TESTS_DO_NOT_DELETE_INSTANCE=True was "
                            "specified, skipping delete wait...")
            raise proboscis.SkipTest("TESTS_DO_NOT_DELETE_INSTANCE "
                                     "was specified.")
        # Wait until the instance reaches the last expected state, then
        # verify that its server group has been removed as well.
        self.assert_all_gone(self.instance_info.id, expected_states[-1])
        self.assert_server_group_gone(self.instance_info.srv_grp_id)
Code example #2
    def run_instance_delete(self, expected_http_code=202):
        if self.has_do_not_delete_instance:
            self.report.log("TESTS_DO_NOT_DELETE_INSTANCE=True was "
                            "specified, skipping delete...")
            raise proboscis.SkipTest("TESTS_DO_NOT_DELETE_INSTANCE "
                                     "was specified.")

        self.assert_instance_delete(self.instance_info.id, expected_http_code)
Code example #3
    def run_instance_delete(self,
                            expected_states=['SHUTDOWN'],
                            expected_http_code=202):
        if self.has_do_not_delete_instance:
            self.report.log("TESTS_DO_NOT_DELETE_INSTANCE=True was "
                            "specified, skipping delete...")
            raise proboscis.SkipTest("TESTS_DO_NOT_DELETE_INSTANCE "
                                     "was specified.")

        # make sure the server is cached before we delete it, or we
        # can't check that the server group is gone
        self.get_server(self.instance_info.id)
        self.assert_instance_delete(self.instance_info.id, expected_states,
                                    expected_http_code)
        self.assert_server_group(self.instance_info.id, False)
Code example #4
    def check_ceph_partitions_after_reboot(self):
        """Check that Ceph OSD partitions are remounted after reboot

        Scenario:
            1. Create cluster
            2. Add 1 node with controller role
            3. Add 1 node with compute and Ceph OSD roles
            4. Add 1 node with Ceph OSD role
            5. Deploy the cluster
            6. Check Ceph status
            7. Read current partitions
            8. Warm-reboot Ceph nodes
            9. Read partitions again
            10. Check Ceph health
            11. Cold-reboot Ceph nodes
            12. Read partitions again
            13. Check Ceph health

        Snapshot check_ceph_partitions_after_reboot

        """
        if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_REDHAT:
            raise proboscis.SkipTest("Skipped for RedHat OpenStack release")

        self.env.revert_snapshot("ready_with_3_slaves")

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_SIMPLE,
            settings={
                'volumes_ceph': True,
                'images_ceph': True,
                'ephemeral_ceph': True,
                'volumes_lvm': False
            }
        )

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute', 'ceph-osd'],
                'slave-03': ['ceph-osd']
            }
        )
        # Deploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)
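        # For each Ceph OSD node, verify that its Ceph partitions survive
        # both a warm and a cold reboot, checking Ceph health after each.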
        for node in ["slave-02", "slave-03"]:
            logger.info("Get partitions for {node}".format(node=node))
            before_reboot_partitions = [checkers.get_ceph_partitions(
                self.env.get_ssh_to_remote_by_name(node),
                "/dev/vd{p}".format(p=part)) for part in ["b", "c"]]

            logger.info("Warm-restart nodes")
            self.fuel_web.warm_restart_nodes(
                [self.fuel_web.environment.get_virtual_environment().
                    node_by_name(node)])

            logger.info("Get partitions for {node} once again".format(
                node=node
            ))
            after_reboot_partitions = [checkers.get_ceph_partitions(
                self.env.get_ssh_to_remote_by_name(node),
                "/dev/vd{p}".format(p=part)) for part in ["b", "c"]]

            if before_reboot_partitions != after_reboot_partitions:
                logger.info("Partitions don't match")
                logger.info("Before reboot: %s" % before_reboot_partitions)
                logger.info("After reboot: %s" % after_reboot_partitions)
                raise Exception("Ceph partitions changed after warm reboot")

            logger.info("Check Ceph health is ok after reboot")
            self.fuel_web.check_ceph_status(cluster_id)

            logger.info("Cold-restart nodes")
            self.fuel_web.cold_restart_nodes(
                [self.fuel_web.environment.get_virtual_environment().
                    node_by_name(node)])

            after_reboot_partitions = [checkers.get_ceph_partitions(
                self.env.get_ssh_to_remote_by_name(node),
                "/dev/vd{p}".format(p=part)) for part in ["b", "c"]]

            if before_reboot_partitions != after_reboot_partitions:
                logger.info("Partitions don't match")
                logger.info("Before reboot: %s" % before_reboot_partitions)
                logger.info("After reboot: %s" % after_reboot_partitions)
                raise Exception("Ceph partitions changed after cold reboot")

            logger.info("Check Ceph health is ok after reboot")
            self.fuel_web.check_ceph_status(cluster_id)
Code example #5
    def migrate_vm_backed_with_ceph(self):
        """Check VM backed with ceph migration in simple mode

        Scenario:
            1. Create cluster
            2. Add 1 node with controller and ceph OSD roles
            3. Add 2 nodes with compute and ceph OSD roles
            4. Deploy the cluster
            5. Check ceph status
            6. Run OSTF
            7. Create a new VM, assign floating ip
            8. Migrate VM
            9. Check cluster and server state after migration
            10. Terminate VM

        Snapshot vm_backed_with_ceph_live_migration

        """
        if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_REDHAT:
            raise proboscis.SkipTest("Skipped for RedHat OpenStack release")

        self.env.revert_snapshot("ready_with_3_slaves")

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_SIMPLE,
            settings={
                'volumes_ceph': True,
                'images_ceph': True,
                'ephemeral_ceph': True,
                'volumes_lvm': False
            }
        )

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller', 'ceph-osd'],
                'slave-02': ['compute', 'ceph-osd'],
                'slave-03': ['compute', 'ceph-osd']
            }
        )
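        # Credentials used to reach the test VM over its floating IP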
        creds = ("cirros", "test")

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        def _check():
            # Run the volume test; it is retried once below if it fails
            test_path = map_ostf.OSTF_TEST_MAPPING.get(
                'Create volume and attach it to instance')
            logger.debug('Start to run test {0}'.format(test_path))
            self.fuel_web.run_single_ostf_test(
                cluster_id, test_sets=['smoke'],
                test_name=test_path)
        try:
            _check()
        except AssertionError as err:
            logger.debug(err)
            logger.debug("Test failed on the first attempt; sleeping 60 "
                         "seconds and trying one more time. If it fails "
                         "again, the test fails.")
            time.sleep(60)
            _check()

        # Run ostf
        self.fuel_web.run_ostf(cluster_id)

        # Create new server
        os = os_actions.OpenStackActions(
            self.fuel_web.get_nailgun_node_by_name("slave-01")["ip"])

        logger.info("Create new server")
        srv = os.create_server_for_migration(
            scenario='./fuelweb_test/helpers/instance_initial_scenario')
        logger.info("Srv is currently in status: %s" % srv.status)

        logger.info("Assigning floating ip to server")
        floating_ip = os.assign_floating_ip(srv)
        srv_host = os.get_srv_host_name(srv)
        logger.info("Server is on host %s" % srv_host)

        time.sleep(100)
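        # Give the instance time to boot and run its initial scenario
        # (presumably the script that creates /home/test_file).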

        md5before = os.get_md5sum(
            "/home/test_file",
            self.env.get_ssh_to_remote_by_name("slave-01"),
            floating_ip.ip, creds)

        logger.info("Get available computes")
        avail_hosts = os.get_hosts_for_migr(srv_host)

        logger.info("Migrating server")
        new_srv = os.migrate_server(srv, avail_hosts[0], timeout=200)
        logger.info("Check cluster and server state after migration")

        md5after = os.get_md5sum(
            "/home/test_file",
            self.env.get_ssh_to_remote_by_name("slave-01"),
            floating_ip.ip, creds)

        assert_true(
            md5after in md5before,
            "Md5 checksums don't match. "
            "Before migration md5 was equal to: {bef}. "
            "Now it equals: {aft}".format(bef=md5before, aft=md5after))

        res = os.execute_through_host(
            self.env.get_ssh_to_remote_by_name("slave-01"),
            floating_ip.ip, "ping -q -c3 -w10 8.8.8.8 | grep 'received' |"
            " grep -v '0 packets received'", creds)
        logger.info("Ping 8.8.8.8 result on vm is: %s" % res)

        logger.info("Check Ceph health is ok after migration")
        self.fuel_web.check_ceph_status(cluster_id)

        logger.info("Server is now on host %s" %
                    os.get_srv_host_name(new_srv))

        logger.info("Terminate migrated server")
        os.delete_instance(new_srv)
        assert_true(os.verify_srv_deleted(new_srv),
                    "Verify server was deleted")

        # Create new server
        os = os_actions.OpenStackActions(
            self.fuel_web.get_nailgun_node_by_name("slave-01")["ip"])

        logger.info("Create new server")
        srv = os.create_server_for_migration(
            scenario='./fuelweb_test/helpers/instance_initial_scenario')
        logger.info("Srv is currently in status: %s" % srv.status)

        logger.info("Assigning floating ip to server")
        floating_ip = os.assign_floating_ip(srv)
        srv_host = os.get_srv_host_name(srv)
        logger.info("Server is on host %s" % srv_host)

        logger.info("Create volume")
        vol = os.create_volume()
        logger.info("Attach volume to server")
        os.attach_volume(vol, srv)

        time.sleep(100)
        logger.info("Create filesystem and mount volume")
        os.execute_through_host(
            self.env.get_ssh_to_remote_by_name('slave-01'),
            floating_ip.ip, 'sudo sh /home/mount_volume.sh', creds)

        os.execute_through_host(
            self.env.get_ssh_to_remote_by_name('slave-01'),
            floating_ip.ip, 'sudo touch /mnt/file-on-volume', creds)

        logger.info("Get available computes")
        avail_hosts = os.get_hosts_for_migr(srv_host)

        logger.info("Migrating server")
        new_srv = os.migrate_server(srv, avail_hosts[0], timeout=120)
        logger.info("Check cluster and server state after migration")

        logger.info("Mount volume after migration")
        out = os.execute_through_host(
            self.env.get_ssh_to_remote_by_name('slave-01'),
            floating_ip.ip, 'sudo mount /dev/vdb /mnt', creds)

        logger.info("out of mounting volume is: %s" % out)

        assert_true("file-on-volume" in os.execute_through_host(
                    self.env.get_ssh_to_remote_by_name('slave-01'),
                    floating_ip.ip, "sudo ls /mnt", creds),
                    "File is abscent in /mnt")

        logger.info("Check Ceph health is ok after migration")
        self.fuel_web.check_ceph_status(cluster_id)

        logger.info("Server is now on host %s" %
                    os.get_srv_host_name(new_srv))

        logger.info("Terminate migrated server")
        os.delete_instance(new_srv)
        assert_true(os.verify_srv_deleted(new_srv),
                    "Verify server was deleted")

        self.env.make_snapshot(
            "vm_backed_with_ceph_live_migration")