Example #1
0
def test_rbd_pvc(rook_cluster):
    """End-to-end check of the rbd CSI path.

    Applies the rbd StorageClass, a PVC and a consuming pod from the
    Rook example manifests, waits for each to become ready, then tears
    them down again.  Fails the test as soon as any ``kubectl apply``
    exits non-zero.

    :param rook_cluster: fixture exposing ``kubernetes`` (kubectl
        wrappers) and ``ceph_dir`` (path to the Ceph example manifests).
    """
    # create a CephBlockPool
    output = rook_cluster.kubernetes.kubectl_apply(
        os.path.join(rook_cluster.ceph_dir, 'csi/rbd/storageclass.yaml'))

    if output[0] != 0:
        pytest.fail("Could not create a CephBlockPool StorageClass")

    # check if StorageClass is up and available
    # NOTE: was r'.*rook-ceph-block*' — the '*' only quantified the final
    # 'k', so it also matched 'rook-ceph-bloc'; use a real wildcard.
    pattern = re.compile(r'.*rook-ceph-block.*')
    common.wait_for_result(rook_cluster.kubernetes.kubectl,
                           "get sc",
                           matcher=common.regex_matcher(pattern),
                           attempts=10,
                           interval=6)

    # create an rbd based PVC
    output = rook_cluster.kubernetes.kubectl_apply(
        os.path.join(rook_cluster.ceph_dir, 'csi/rbd/pvc.yaml'))

    if output[0] != 0:
        pytest.fail("Could not create a rbd-PVC")

    # wait for the claim to reach phase Bound
    # (was r'.*Bound*', which would also match 'Boun')
    pattern = re.compile(r'.*Bound.*')
    common.wait_for_result(rook_cluster.kubernetes.kubectl,
                           "get pvc rbd-pvc",
                           matcher=common.regex_matcher(pattern),
                           attempts=10,
                           interval=6)

    # create a pod using the PVC
    output = rook_cluster.kubernetes.kubectl_apply(
        os.path.join(rook_cluster.ceph_dir, 'csi/rbd/pod.yaml'))

    if output[0] != 0:
        pytest.fail("Could not create a rbd-pod")

    # wait for the pod to reach phase Running
    # (was r'.*Running*', which would also match 'Runnin')
    pattern = re.compile(r'.*Running.*')
    common.wait_for_result(rook_cluster.kubernetes.kubectl,
                           "get pod csirbd-demo-pod",
                           matcher=common.regex_matcher(pattern),
                           attempts=10,
                           interval=10)

    # clean up in reverse order of creation
    rook_cluster.kubernetes.kubectl('delete pod csirbd-demo-pod')
    rook_cluster.kubernetes.kubectl('delete pvc rbd-pvc')
    rook_cluster.kubernetes.kubectl('delete sc rook-ceph-block')
Example #2
0
    def install_rook(self):
        """Deploy Rook plus a Ceph cluster from the example manifests.

        Applies common/operator, then cluster/toolbox, adds the rbd
        StorageClass, and finally waits for the OSD prepare jobs, the
        two MDS pods and the 'myfs' filesystem to become ready.

        Raises:
            Exception: if :meth:`build_rook` has not been run first.
        """
        if not self._rook_built:
            raise Exception("Rook must be built before being installed")
        # TODO(jhesketh): We may want to provide ways for tests to override
        #                 these
        for manifest in ('common.yaml', 'operator.yaml'):
            self.kubernetes.kubectl_apply(
                os.path.join(self.ceph_dir, manifest))

        # TODO(jhesketh): Check if sleeping is necessary
        time.sleep(10)

        for manifest in ('cluster.yaml', 'toolbox.yaml'):
            self.kubernetes.kubectl_apply(
                os.path.join(self.ceph_dir, manifest))

        time.sleep(10)

        self.kubernetes.kubectl_apply(
            os.path.join(self.ceph_dir, 'csi/rbd/storageclass.yaml'))

        logger.info("Wait for OSD prepare to complete "
                    "(this may take a while...)")
        osd_prepared = re.compile(r'.*rook-ceph-osd-prepare.*Completed')
        common.wait_for_result(
            self.kubernetes.kubectl, "--namespace rook-ceph get pods",
            matcher=common.regex_count_matcher(osd_prepared, 3),
            attempts=90, interval=10)

        self.kubernetes.kubectl_apply(
            os.path.join(self.ceph_dir, 'filesystem.yaml'))

        logger.info("Wait for 2 mdses to start")
        mds_running = re.compile(r'.*rook-ceph-mds-myfs.*Running')
        common.wait_for_result(
            self.kubernetes.kubectl, "--namespace rook-ceph get pods",
            matcher=common.regex_count_matcher(mds_running, 2),
            attempts=20, interval=5)

        logger.info("Wait for myfs to be active")
        fs_active = re.compile(r'.*active')
        common.wait_for_result(
            self.execute_in_ceph_toolbox, "ceph fs status myfs",
            matcher=common.regex_matcher(fs_active),
            attempts=20, interval=5)
Example #3
0
    def install(self):
        """Install the Rook operator and bring a Ceph cluster to HEALTH_OK.

        Creates the rook-ceph namespace, installs the operator, applies a
        livenessProbe-adjusted cluster.yaml plus the toolbox, then waits
        for OSD preparation, the toolbox pod and finally ceph HEALTH_OK.
        """
        self.kubernetes.kubectl("create namespace rook-ceph")
        self._install_operator()

        # reduce wait time to discover devices
        self.kubernetes.kubectl(
            "-n rook-ceph set env "
            "deployment/rook-ceph-operator ROOK_DISCOVER_DEVICES_INTERVAL=2m")

        # cluster.yaml is rewritten (livenessProbe adjusted) before apply
        cluster_manifest = self._modify_liveness(
            os.path.join(self.ceph_dir, 'cluster.yaml'))
        self.kubernetes.kubectl_apply(cluster_manifest)
        self.kubernetes.kubectl_apply(
            os.path.join(self.ceph_dir, 'toolbox.yaml'))

        logger.info("Wait for OSD prepare to complete "
                    "(this may take a while...)")
        osd_prepared = re.compile(r'.*rook-ceph-osd-prepare.*Completed')
        common.wait_for_result(
            self.kubernetes.kubectl,
            "-n rook-ceph get pods -l app=rook-ceph-osd-prepare",
            matcher=common.regex_count_matcher(osd_prepared, 3),
            attempts=120, interval=15)

        logger.info("Wait for rook-ceph-tools running")
        tools_running = re.compile(r'.*rook-ceph-tools.*Running')
        common.wait_for_result(
            self.kubernetes.kubectl, "-n rook-ceph get pods",
            matcher=common.regex_count_matcher(tools_running, 1),
            attempts=30, interval=10)

        # As of Nautilus v14.2.20 and Octopus v15.2.11, clusters intentionally
        # come up in HEALTH_WARN (AUTH_INSECURE_GLOBAL_ID_RECLAIM) and to
        # achieve HEALTH_OK we have to disallow this insecure feature.
        self.execute_in_ceph_toolbox(
            "ceph config set mon auth_allow_insecure_global_id_reclaim false"
            " || true")

        logger.info("Wait for Ceph HEALTH_OK")
        health_ok = re.compile(r'.*HEALTH_OK')
        common.wait_for_result(
            self.execute_in_ceph_toolbox, "ceph status",
            matcher=common.regex_matcher(health_ok),
            attempts=60, interval=10)

        logger.info("Rook successfully installed and ready!")
Example #4
0
    def install(self):
        """Install the Rook operator and bring a Ceph cluster to HEALTH_OK.

        Creates the rook-ceph namespace, installs the operator, applies
        cluster.yaml and toolbox.yaml, then waits in turn for the
        operator, OSD preparation, the toolbox pod and ceph HEALTH_OK.
        """
        self.kubernetes.kubectl("create namespace rook-ceph")
        self._install_operator()

        for manifest in ('cluster.yaml', 'toolbox.yaml'):
            self.kubernetes.kubectl_apply(
                os.path.join(self.ceph_dir, manifest))

        # reduce wait time to discover devices
        self.kubernetes.kubectl(
            "-n rook-ceph set env "
            "deployment/rook-ceph-operator ROOK_DISCOVER_DEVICES_INTERVAL=2m")

        logger.info("Wait for rook-ceph-operator running")
        operator_running = re.compile(r'.*rook-ceph-operator.*Running')
        common.wait_for_result(
            self.kubernetes.kubectl, "-n rook-ceph get pods",
            matcher=common.regex_count_matcher(operator_running, 1),
            attempts=30, interval=10)

        logger.info("Wait for OSD prepare to complete "
                    "(this may take a while...)")
        osd_prepared = re.compile(r'.*rook-ceph-osd-prepare.*Completed')
        common.wait_for_result(
            self.kubernetes.kubectl,
            "-n rook-ceph get pods -l app=rook-ceph-osd-prepare",
            matcher=common.regex_count_matcher(osd_prepared, 3),
            attempts=120, interval=15)

        logger.info("Wait for rook-ceph-tools running")
        tools_running = re.compile(r'.*rook-ceph-tools.*Running')
        common.wait_for_result(
            self.kubernetes.kubectl, "-n rook-ceph get pods",
            matcher=common.regex_count_matcher(tools_running, 1),
            attempts=30, interval=10)

        logger.info("Wait for Ceph HEALTH_OK")
        health_ok = re.compile(r'.*HEALTH_OK')
        common.wait_for_result(
            self.execute_in_ceph_toolbox, "ceph status",
            matcher=common.regex_matcher(health_ok),
            attempts=60, interval=10)

        logger.info("Rook successfully installed and ready!")
Example #5
0
    def deploy_filesystem(self):
        """Create the CephFilesystem 'myfs' and wait until it is serving.

        Applies filesystem.yaml, then waits for two MDS pods to run and
        for the filesystem to report active in the Ceph toolbox.
        """
        self.kubernetes.kubectl_apply(
            os.path.join(self.ceph_dir, 'filesystem.yaml'))

        logger.info("Wait for 2 mdses to start")
        mds_running = re.compile(r'.*rook-ceph-mds-myfs.*Running')
        common.wait_for_result(self.kubernetes.kubectl,
                               "-n rook-ceph get pods",
                               log_stdout=False,
                               matcher=common.regex_count_matcher(
                                   mds_running, 2),
                               attempts=120,
                               interval=10)

        logger.info("Wait for myfs to be active")
        fs_active = re.compile(r'.*active')
        common.wait_for_result(self.execute_in_ceph_toolbox,
                               "ceph fs status myfs",
                               log_stdout=False,
                               matcher=common.regex_matcher(fs_active),
                               attempts=120,
                               interval=10)

        logger.info("Ceph FS successfully installed and ready!")
Example #6
0
    def install(self):
        """Deploy Rook plus a Ceph cluster and wait for HEALTH_OK.

        Applies common/operator, sleeps, applies cluster/toolbox, then
        waits for the OSD prepare jobs to complete and for ceph to
        report HEALTH_OK inside the toolbox.
        """
        # TODO(jhesketh): We may want to provide ways for tests to override
        #                 these
        for manifest in ('common.yaml', 'operator.yaml'):
            self.kubernetes.kubectl_apply(
                os.path.join(self.ceph_dir, manifest))

        # TODO(jhesketh): Check if sleeping is necessary
        time.sleep(10)

        for manifest in ('cluster.yaml', 'toolbox.yaml'):
            self.kubernetes.kubectl_apply(
                os.path.join(self.ceph_dir, manifest))

        logger.info("Wait for OSD prepare to complete "
                    "(this may take a while...)")
        osd_prepared = re.compile(r'.*rook-ceph-osd-prepare.*Completed')
        common.wait_for_result(
            self.kubernetes.kubectl, "--namespace rook-ceph get pods",
            matcher=common.regex_count_matcher(osd_prepared, 3),
            attempts=90, interval=10)

        logger.info("Wait for Ceph HEALTH_OK")
        health_ok = re.compile(r'.*HEALTH_OK')
        common.wait_for_result(
            self.execute_in_ceph_toolbox, "ceph status",
            matcher=common.regex_matcher(health_ok),
            attempts=20, interval=5)

        logger.info("Rook successfully installed and ready!")