def wait_for_pods_by_app_label(self, label, count=1, sleep=5, attempts=120,
                               namespace="rook-ceph"):
    """Block until `count` pods with the given app label are Running.

    Polls ``kubectl get pod -l app=<label>`` in *namespace* every *sleep*
    seconds, up to *attempts* times, until at least *count* listed pods
    report a Running status.
    """
    running = re.compile(r'.*Running')
    query = f'-n {namespace} get pod -l app="{label}" --no-headers'
    common.wait_for_result(
        self.kubectl,
        query,
        matcher=common.regex_count_matcher(running, count),
        attempts=attempts,
        interval=sleep,
    )
def install_kubernetes(self):
    """Install Kubernetes, join the workers, and wait for node readiness.

    Delegates base installation to the parent class, joins every worker
    node, then polls ``skuba cluster status`` until all nodes (masters and
    workers) report Ready.
    """
    super().install_kubernetes()
    self.join(self.hardware.workers)
    # Fixed: the two concatenated literals previously rendered as
    # "...'Ready' state(this may take a while...)" — missing space.
    logger.info("Wait for all nodes to be in 'Ready' state "
                "(this may take a while...)")
    pattern = re.compile(r' +Ready.*')
    common.wait_for_result(
        self.workspace.execute, f"{self._skuba} cluster status",
        matcher=common.regex_count_matcher(
            pattern, len(self.hardware.nodes)),
        attempts=60, interval=10,
        chdir=self._clusterpath, capture=True)
    # Give skuba-update/zypper some time to release the package lock
    time.sleep(5)
def deploy_filesystem(self):
    """Apply the CephFS manifest and wait until the filesystem is usable.

    Waits first for both MDS pods to reach Running, then for the
    filesystem 'myfs' to report an active state in the Ceph toolbox.
    """
    manifest = os.path.join(self.ceph_dir, 'filesystem.yaml')
    self.kubernetes.kubectl_apply(manifest)

    logger.info("Wait for 2 mdses to start")
    mds_running = re.compile(r'.*rook-ceph-mds-myfs.*Running')
    common.wait_for_result(
        self.kubernetes.kubectl,
        "-n rook-ceph get pods",
        log_stdout=False,
        matcher=common.regex_count_matcher(mds_running, 2),
        attempts=120,
        interval=10)

    logger.info("Wait for myfs to be active")
    fs_active = re.compile(r'.*active')
    common.wait_for_result(
        self.execute_in_ceph_toolbox,
        "ceph fs status myfs",
        log_stdout=False,
        matcher=common.regex_matcher(fs_active),
        attempts=120,
        interval=10)

    logger.info("Ceph FS successfully installed and ready!")
def install(self):
    """Install Rook: operator, cluster, toolbox; wait for HEALTH_OK.

    Creates the rook-ceph namespace, installs the operator, applies the
    cluster and toolbox manifests, then waits in turn for the operator,
    the OSD-prepare jobs (3 of them), the toolbox pod, and finally a
    HEALTH_OK report from Ceph itself.
    """
    self.kubernetes.kubectl("create namespace rook-ceph")
    self._install_operator()

    for manifest in ('cluster.yaml', 'toolbox.yaml'):
        self.kubernetes.kubectl_apply(os.path.join(self.ceph_dir, manifest))

    # reduce wait time to discover devices
    self.kubernetes.kubectl(
        "-n rook-ceph set env "
        "deployment/rook-ceph-operator ROOK_DISCOVER_DEVICES_INTERVAL=2m")

    logger.info("Wait for rook-ceph-operator running")
    operator_up = re.compile(r'.*rook-ceph-operator.*Running')
    common.wait_for_result(
        self.kubernetes.kubectl, "-n rook-ceph get pods",
        matcher=common.regex_count_matcher(operator_up, 1),
        attempts=30, interval=10)

    logger.info("Wait for OSD prepare to complete "
                "(this may take a while...)")
    prepare_done = re.compile(r'.*rook-ceph-osd-prepare.*Completed')
    common.wait_for_result(
        self.kubernetes.kubectl,
        "-n rook-ceph get pods -l app=rook-ceph-osd-prepare",
        matcher=common.regex_count_matcher(prepare_done, 3),
        attempts=120, interval=15)

    logger.info("Wait for rook-ceph-tools running")
    tools_up = re.compile(r'.*rook-ceph-tools.*Running')
    common.wait_for_result(
        self.kubernetes.kubectl, "-n rook-ceph get pods",
        matcher=common.regex_count_matcher(tools_up, 1),
        attempts=30, interval=10)

    logger.info("Wait for Ceph HEALTH_OK")
    healthy = re.compile(r'.*HEALTH_OK')
    common.wait_for_result(
        self.execute_in_ceph_toolbox, "ceph status",
        matcher=common.regex_matcher(healthy),
        attempts=60, interval=10)

    logger.info("Rook successfully installed and ready!")
def _install_operator(self):
    """Install the rook operator using either kubectl or helm.

    The installer is selected by ``settings.OPERATOR_INSTALLER``.  After
    installation, waits for the operator pod to be Running and raises its
    log level to DEBUG.
    """
    use_helm = settings.OPERATOR_INSTALLER == "helm"
    if use_helm:
        logger.info('Deploying rook operator - using Helm')
        self._install_operator_helm()
    else:
        logger.info('Deploying rook operator - using kubectl apply ...')
        self._install_operator_kubectl()

    logger.info("Wait for rook-ceph-operator running")
    operator_up = re.compile(r'.*rook-ceph-operator.*Running')
    common.wait_for_result(
        self.kubernetes.kubectl,
        "-n rook-ceph get pods",
        matcher=common.regex_count_matcher(operator_up, 1),
        attempts=30,
        interval=10)

    # set operator log level
    self.kubernetes.kubectl(
        "--namespace rook-ceph set env "
        "deployment/rook-ceph-operator ROOK_LOG_LEVEL=DEBUG")
def install(self):
    """Apply the Rook manifests and wait for the cluster to be healthy.

    Applies common/operator, pauses, then applies cluster/toolbox; waits
    for the 3 OSD-prepare jobs to complete and for Ceph to report
    HEALTH_OK.
    """
    # TODO(jhesketh): We may want to provide ways for tests to override
    # these
    for manifest in ('common.yaml', 'operator.yaml'):
        self.kubernetes.kubectl_apply(os.path.join(self.ceph_dir, manifest))

    # TODO(jhesketh): Check if sleeping is necessary
    time.sleep(10)

    for manifest in ('cluster.yaml', 'toolbox.yaml'):
        self.kubernetes.kubectl_apply(os.path.join(self.ceph_dir, manifest))

    logger.info("Wait for OSD prepare to complete "
                "(this may take a while...)")
    prepare_done = re.compile(r'.*rook-ceph-osd-prepare.*Completed')
    common.wait_for_result(
        self.kubernetes.kubectl,
        "--namespace rook-ceph get pods",
        matcher=common.regex_count_matcher(prepare_done, 3),
        attempts=90,
        interval=10)

    logger.info("Wait for Ceph HEALTH_OK")
    healthy = re.compile(r'.*HEALTH_OK')
    common.wait_for_result(
        self.execute_in_ceph_toolbox,
        "ceph status",
        matcher=common.regex_matcher(healthy),
        attempts=20,
        interval=5)

    logger.info("Rook successfully installed and ready!")
def install_rook(self):
    """Deploy a previously-built Rook onto the cluster and wait for it.

    Applies common/operator, cluster/toolbox, and the RBD storageclass
    manifests (with settle pauses between stages), then waits for the
    OSD-prepare jobs, deploys the CephFS filesystem, and waits for both
    MDS pods and an active 'myfs'.

    :raises RuntimeError: if :meth:`build_rook` has not been run first
        (``self._rook_built`` is falsy).
    """
    if not self._rook_built:
        # RuntimeError instead of bare Exception: more specific, and
        # still caught by any existing `except Exception` handlers.
        raise RuntimeError("Rook must be built before being installed")

    # TODO(jhesketh): We may want to provide ways for tests to override
    # these
    self.kubernetes.kubectl_apply(
        os.path.join(self.ceph_dir, 'common.yaml'))
    self.kubernetes.kubectl_apply(
        os.path.join(self.ceph_dir, 'operator.yaml'))

    # TODO(jhesketh): Check if sleeping is necessary
    time.sleep(10)

    self.kubernetes.kubectl_apply(
        os.path.join(self.ceph_dir, 'cluster.yaml'))
    self.kubernetes.kubectl_apply(
        os.path.join(self.ceph_dir, 'toolbox.yaml'))

    time.sleep(10)

    self.kubernetes.kubectl_apply(
        os.path.join(self.ceph_dir, 'csi/rbd/storageclass.yaml'))

    logger.info("Wait for OSD prepare to complete "
                "(this may take a while...)")
    pattern = re.compile(r'.*rook-ceph-osd-prepare.*Completed')
    common.wait_for_result(self.kubernetes.kubectl,
                           "--namespace rook-ceph get pods",
                           matcher=common.regex_count_matcher(pattern, 3),
                           attempts=90, interval=10)

    self.kubernetes.kubectl_apply(
        os.path.join(self.ceph_dir, 'filesystem.yaml'))

    logger.info("Wait for 2 mdses to start")
    pattern = re.compile(r'.*rook-ceph-mds-myfs.*Running')
    common.wait_for_result(self.kubernetes.kubectl,
                           "--namespace rook-ceph get pods",
                           matcher=common.regex_count_matcher(pattern, 2),
                           attempts=20, interval=5)

    logger.info("Wait for myfs to be active")
    pattern = re.compile(r'.*active')
    common.wait_for_result(self.execute_in_ceph_toolbox,
                           "ceph fs status myfs",
                           matcher=common.regex_matcher(pattern),
                           attempts=20, interval=5)
def install(self):
    """Install Rook with a patched liveness probe and wait for HEALTH_OK.

    Creates the namespace and operator, applies a liveness-modified
    cluster manifest plus the toolbox, waits for the OSD-prepare jobs and
    the toolbox pod, disables insecure global-id reclaim (required for
    HEALTH_OK on Nautilus >= 14.2.20 / Octopus >= 15.2.11), and finally
    waits for Ceph to report healthy.
    """
    self.kubernetes.kubectl("create namespace rook-ceph")
    self._install_operator()

    # reduce wait time to discover devices
    self.kubernetes.kubectl(
        "-n rook-ceph set env "
        "deployment/rook-ceph-operator ROOK_DISCOVER_DEVICES_INTERVAL=2m")

    # Load cluster.yaml as object
    # Modify livenessProbe
    # Save yaml somewhere
    # apply new yaml
    cluster_manifest = self._modify_liveness(
        os.path.join(self.ceph_dir, 'cluster.yaml'))
    self.kubernetes.kubectl_apply(cluster_manifest)
    self.kubernetes.kubectl_apply(
        os.path.join(self.ceph_dir, 'toolbox.yaml'))

    logger.info("Wait for OSD prepare to complete "
                "(this may take a while...)")
    prepare_done = re.compile(r'.*rook-ceph-osd-prepare.*Completed')
    common.wait_for_result(
        self.kubernetes.kubectl,
        "-n rook-ceph get pods -l app=rook-ceph-osd-prepare",
        matcher=common.regex_count_matcher(prepare_done, 3),
        attempts=120, interval=15)

    logger.info("Wait for rook-ceph-tools running")
    tools_up = re.compile(r'.*rook-ceph-tools.*Running')
    common.wait_for_result(
        self.kubernetes.kubectl,
        "-n rook-ceph get pods",
        matcher=common.regex_count_matcher(tools_up, 1),
        attempts=30, interval=10)

    # As of Nautilus v14.2.20 and Octopus v15.2.11, clusters intentionally
    # come up in HEALTH_WARN (AUTH_INSECURE_GLOBAL_ID_RECLAIM) and to
    # achieve HEALTH_OK we have to disallow this insecure feature.
    self.execute_in_ceph_toolbox(
        "ceph config set mon auth_allow_insecure_global_id_reclaim false"
        " || true")

    logger.info("Wait for Ceph HEALTH_OK")
    healthy = re.compile(r'.*HEALTH_OK')
    common.wait_for_result(
        self.execute_in_ceph_toolbox,
        "ceph status",
        matcher=common.regex_matcher(healthy),
        attempts=60, interval=10)

    logger.info("Rook successfully installed and ready!")
def test_rbd_pvc(rook_cluster):
    """End-to-end smoke test for RBD dynamic provisioning.

    Creates the CephBlockPool StorageClass, an RBD-backed PVC, and a pod
    that mounts it; waits for each to reach its ready state, then cleans
    everything up.
    """
    # create a CephBlockPool
    output = rook_cluster.kubernetes.kubectl_apply(
        os.path.join(rook_cluster.ceph_dir, 'csi/rbd/storageclass.yaml'))
    if output[0] != 0:
        pytest.fail("Could not create a CephBlockPool StorageClass")

    # check if StorageClass is up and available
    # Fixed: was r'.*rook-ceph-block*' — the trailing '*' quantified the
    # literal 'k' (matching "rook-ceph-bloc"), not "anything after".
    pattern = re.compile(r'.*rook-ceph-block')
    common.wait_for_result(rook_cluster.kubernetes.kubectl,
                           "get sc",
                           matcher=common.regex_matcher(pattern),
                           attempts=10, interval=6)

    # create an rbd based PVC
    output = rook_cluster.kubernetes.kubectl_apply(
        os.path.join(rook_cluster.ceph_dir, 'csi/rbd/pvc.yaml'))
    if output[0] != 0:
        pytest.fail("Could not create a rbd-PVC")

    # Fixed: was r'.*Bound*', which would also match e.g. "Boun".
    pattern = re.compile(r'.*Bound')
    common.wait_for_result(rook_cluster.kubernetes.kubectl,
                           "get pvc rbd-pvc",
                           matcher=common.regex_matcher(pattern),
                           attempts=10, interval=6)

    # create a pod using the PVC
    output = rook_cluster.kubernetes.kubectl_apply(
        os.path.join(rook_cluster.ceph_dir, 'csi/rbd/pod.yaml'))
    if output[0] != 0:
        pytest.fail("Could not create a rbd-pod")

    # Fixed: was r'.*Running*', which would also match e.g. "Runnin".
    pattern = re.compile(r'.*Running')
    common.wait_for_result(rook_cluster.kubernetes.kubectl,
                           "get pod csirbd-demo-pod",
                           matcher=common.regex_matcher(pattern),
                           attempts=10, interval=10)

    rook_cluster.kubernetes.kubectl('delete pod csirbd-demo-pod')
    rook_cluster.kubernetes.kubectl('delete pvc rbd-pvc')
    rook_cluster.kubernetes.kubectl('delete sc rook-ceph-block')