Example #1
    def teardown(self):
        """
        Delete objects created in roughly reverse order of how they were created.

        """
        self.cb_examples.delete()
        self.cb_worker.delete()
        self.cb_deploy.delete()
        self.pod_obj.exec_oc_cmd(
            command="delete rolebinding couchbase-operator-rolebinding")
        self.pod_obj.exec_oc_cmd(
            command="delete serviceaccount couchbase-operator")
        self.operator_role.delete()
        self.couchbase_obj.delete()
        switch_to_project('default')
        self.pod_obj.delete_project(constants.COUCHBASE_OPERATOR)
        for adm_yaml in self.admission_parts:
            adm_data = templating.load_yaml(adm_yaml)
            adm_obj = OCS(**adm_data)
            adm_obj.delete()
        # Before the code below was added, the teardown would sometimes fail
        # with leftover objects because it could still see one of the
        # couchbase pods.
        for admin_pod in TimeoutSampler(self.WAIT_FOR_TIME, 3,
                                        get_pod_name_by_pattern, 'couchbase',
                                        'default'):
            if not admin_pod:
                break
        PillowFight.cleanup(self)
        switch_to_default_rook_cluster_project()
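
A standalone sketch of the wait-until-gone pattern used above. It assumes, as these examples suggest, that TimeoutSampler(timeout, sleep, func, *args) yields func(*args) on every poll; the names poll_until_empty and remaining_couchbase_pods are illustrative, not part of ocs-ci:

import time

def poll_until_empty(timeout, sleep, func, *args):
    """Call func(*args) every `sleep` seconds until it returns a falsy
    result, mirroring the TimeoutSampler loop above that only breaks
    once no 'couchbase' pods remain."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if not func(*args):
            return
        time.sleep(sleep)
    raise TimeoutError(f"{func.__name__} still returned results after {timeout}s")

# Hypothetical stand-in for get_pod_name_by_pattern('couchbase', 'default'):
def remaining_couchbase_pods():
    return []  # pretend every pod is already gone

poll_until_empty(60, 3, remaining_couchbase_pods)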
Example #2
def svt_cleanup():
    """
    Removes the cloned SVT project, its virtual environment, and the
    projects created while running SVT

    Raises:
        BaseException: In case any errors occurred while removing the project and env.

    Returns:
        bool: True if No exceptions, False otherwise

    """
    ns_obj = ocp.OCP(kind='namespace')
    try:
        shutil.rmtree('/tmp/svt')
        shutil.rmtree('/tmp/venv')
    except BaseException:
        log.error("Error while cleaning SVT project")

    try:
        project_list = [
            "cakephp-mysql0", "dancer-mysql0", "django-postgresql0",
            "eap64-mysql0", "nodejs-mongodb0", "rails-postgresql0",
            "tomcat8-mongodb0"
        ]
        # Reset namespace to default
        ocp.switch_to_default_rook_cluster_project()
        for project in project_list:
            run_cmd(f'oc delete project {project}')
            ns_obj.wait_for_delete(resource_name=project)

        return True
    except Exception:
        return False
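
A side note on the /tmp cleanup above: shutil.rmtree accepts ignore_errors=True, which gives the same best-effort behaviour without the broad BaseException handler. A minimal sketch of that variant:

import shutil

# Best-effort removal: rmtree skips missing or unreadable paths
# instead of raising, so no try/except is needed.
shutil.rmtree('/tmp/svt', ignore_errors=True)
shutil.rmtree('/tmp/venv', ignore_errors=True)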
Example #3
    def cleanup(self, namespace=constants.AMQ_NAMESPACE):
        """
        Clean up function: deletes the AMQ cluster operator,
        then the AMQ connector, persistent cluster, and bridge,
        and finally deletes the created namespace

        Args:
            namespace (str): Created namespace for amq
        """
        if self.amq_is_setup:
            if self.messaging:
                self.consumer_pod.delete()
                self.producer_pod.delete()
                self.kafka_user.delete()
                self.kafka_topic.delete()
            self.kafka_persistent.delete()
            self.kafka_connect.delete()
            self.kafka_bridge.delete()
            run_cmd(f'oc delete -f {self.amq_dir}',
                    shell=True,
                    check=True,
                    cwd=self.dir)
        run_cmd(f'oc delete project {namespace}')

        # Reset namespace to default
        switch_to_default_rook_cluster_project()
        self.ns_obj.wait_for_delete(resource_name=namespace)
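
Teardowns like Examples #1 and #3 keep reverse-of-creation order by hand. A sketch of the same discipline using contextlib.ExitStack, which unwinds its callbacks LIFO automatically; the create_* callables stand in for the resource factories above and are hypothetical:

from contextlib import ExitStack

def deploy_and_clean(create_operator, create_cluster, create_topic, workload):
    """Register each delete right after its create; ExitStack runs the
    callbacks LIFO on exit, so teardown order cannot drift from
    creation order."""
    with ExitStack() as stack:
        operator = create_operator()
        stack.callback(operator.delete)
        cluster = create_cluster()
        stack.callback(cluster.delete)
        topic = create_topic()
        stack.callback(topic.delete)
        workload(topic)
        # leaving the with-block deletes topic, then cluster, then operator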
Example #4
        def finalizer():
            log.info("Clean up and remove namespace")
            ocp_obj.exec_oc_cmd(command=f"delete project {self.project_name}")

            # Reset namespace to default
            ocp.switch_to_default_rook_cluster_project()
            ocp_obj.wait_for_delete(resource_name=self.project_name)
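
finalizer() functions like the one above are typically registered from a pytest fixture via request.addfinalizer, so the cleanup runs whether the test passes or fails. A minimal sketch, with a hypothetical project fixture:

import pytest

@pytest.fixture
def project(request):
    project_name = "test-project"  # hypothetical name, for illustration only

    def finalizer():
        # Runs after the test completes, whether it passed or failed
        print(f"delete project {project_name}")

    request.addfinalizer(finalizer)
    # ... create the project here and hand it to the test ...
    return project_name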
Example #5
 def cleanup(self):
     run(f'oc delete -f {self.crd}', shell=True, cwd=self.dir)
     run(f'oc delete -f {self.operator}', shell=True, cwd=self.dir)
     run('oc delete -f deploy', shell=True, cwd=self.dir)
     run_cmd(f'oc delete project {self.namespace}')
     self.ns_obj.wait_for_delete(resource_name=self.namespace)
     # Reset namespace to default
     switch_to_default_rook_cluster_project()
Example #6
 def finalizer():
     """
     Delete the Ceph block pool
     """
     for instance in instances:
         ocp.switch_to_default_rook_cluster_project()
         instance.delete(resource_name=instance.namespace)
         instance.wait_for_delete(instance.namespace)
Example #7
 def finalizer():
     """
     Delete the project
     """
     ocp.switch_to_default_rook_cluster_project()
     class_instance.project_obj.delete(
         resource_name=class_instance.namespace)
     class_instance.project_obj.wait_for_delete(class_instance.namespace)
Example #8
 def cleanup(self):
     run(f"oc delete -f {self.crd}", shell=True, cwd=self.dir)
     run(f"oc delete -f {self.operator}", shell=True, cwd=self.dir)
     run("oc delete -f deploy", shell=True, cwd=self.dir)
     run_cmd(f"oc delete project {self.namespace}")
     self.ns_obj.wait_for_delete(resource_name=self.namespace, timeout=180)
     # Reset namespace to default
     switch_to_default_rook_cluster_project()
Example #9
    def cleanup(
        self,
        kafka_namespace=constants.AMQ_NAMESPACE,
        tiller_namespace=AMQ_BENCHMARK_NAMESPACE,
    ):
        """
        Clean up function: deletes the AMQ cluster operator,
        then the AMQ connector, persistent cluster, and bridge,
        and finally deletes the created namespaces

        Args:
            kafka_namespace (str): Created namespace for amq
            tiller_namespace (str): Created namespace for benchmark

        """
        if self.amq_is_setup:
            if self.messaging:
                self.consumer_pod.delete()
                self.producer_pod.delete()
                self.kafka_user.delete()
                self.kafka_topic.delete()
            if self.benchmark:
                # Delete the helm app
                try:
                    purge_cmd = f"linux-amd64/helm delete benchmark --purge --tiller-namespace {tiller_namespace}"
                    run(purge_cmd, shell=True, cwd=self.dir, check=True)
                except (CommandFailed, CalledProcessError) as cf:
                    log.error("Failed to delete help app")
                    raise cf

                # Delete the pods and namespace created
                self.sa_tiller.delete()
                self.crb_tiller.delete()
                run_cmd(f"oc delete project {tiller_namespace}")
                self.ns_obj.wait_for_delete(resource_name=tiller_namespace)

            self.kafka_persistent.delete()
            self.kafka_connect.delete()
            self.kafka_bridge.delete()
            run_cmd(f"oc delete -f {self.amq_dir}",
                    shell=True,
                    check=True,
                    cwd=self.dir)

        # Capture the PVC objects before deleting the namespace so their
        # backing PVs can be validated below even when amq_is_setup is False
        ocs_pvc_obj = get_all_pvc_objs(namespace=kafka_namespace)

        run_cmd(f"oc delete project {kafka_namespace}")

        self.ns_obj.wait_for_delete(resource_name=kafka_namespace, timeout=90)
        for pvc in ocs_pvc_obj:
            logging.info(pvc.name)
            validate_pv_delete(pvc.backed_pv)
        # Reset namespace to default
        switch_to_default_rook_cluster_project()
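
Example #9 captures the PVC objects before the namespace is deleted so their backing PVs can still be checked afterwards. A hedged sketch of such a check, polling oc get pv via subprocess; the helper name wait_for_pv_delete is illustrative, and ocs-ci's validate_pv_delete may be implemented differently:

import subprocess
import time

def wait_for_pv_delete(pv_name, timeout=120, sleep=5):
    """Poll `oc get pv <name>` until the PV is gone (non-zero exit code)."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = subprocess.run(["oc", "get", "pv", pv_name],
                                capture_output=True)
        if result.returncode != 0:  # the PV no longer exists
            return
        time.sleep(sleep)
    raise TimeoutError(f"PV {pv_name} still present after {timeout}s")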
Example #10
 def delete_test_project(self):
     """
     Deleting the performance test project (namespace)
     """
     log.info(f"Deleting the test namespace : {self.namespace}")
     switch_to_default_rook_cluster_project()
     try:
         self.proj.delete(resource_name=self.namespace)
         self.proj.wait_for_delete(
             resource_name=self.namespace, timeout=60, sleep=10
         )
     except CommandFailed:
         log.error(f"Cannot delete project {self.namespace}")
         raise CommandFailed(f"{self.namespace} was not deleted")
Example #11
 def cleanup(self):
     """
     Clean up function: deletes the AMQ cluster operator,
     then the AMQ connector, persistent cluster, and bridge,
     and finally deletes the created namespace
     """
     if self.amq_is_setup:
         self.kafka_persistent.delete()
         self.kafka_connect.delete()
         self.kafka_bridge.delete()
         run_cmd(f'oc delete -f {self.amq_dir}', shell=True, check=True, cwd=self.dir)
         run_cmd(f'oc delete -f {self.amq_dir_examples}', shell=True, check=True, cwd=self.dir)
     run_cmd(f'oc delete project {self.namespace}')
     # Reset namespace to default
     switch_to_default_rook_cluster_project()
     self.ns_obj.wait_for_delete(resource_name=self.namespace)
Example #12
 def cleanup(self):
     run(f"oc delete -f {self.crd}", shell=True, cwd=self.dir)
     run(f"oc delete -f {self.operator}", shell=True, cwd=self.dir)
     run("oc delete -f deploy", shell=True, cwd=self.dir)
     run_cmd(f"oc delete project {self.namespace}")
     run(
         "oc delete -f resources/kernel-cache-drop-clusterrole.yaml",
         shell=True,
         check=True,
         cwd=self.dir,
     )
     self.ns_obj.wait_for_delete(resource_name=self.namespace, timeout=180)
     # Reset namespace to default
     switch_to_default_rook_cluster_project()
     helpers.remove_label_from_worker_node(self.worker_nodes,
                                           label_key="kernel-cache-dropper")
Example #13
    def teardown(self):
        """
        Cleaning up the resources created during Couchbase deployment

        """
        if self.cb_create_cb_secret:
            self.cb_secrets.delete()
        if self.cb_create_cb_cluster:
            self.cb_example.delete()
        if self.cb_create_bucket:
            self.cb_bucket.delete()
        self.subscription_yaml.delete()
        switch_to_project("default")
        self.ns_obj.delete_project(constants.COUCHBASE_OPERATOR)
        self.ns_obj.wait_for_delete(resource_name=constants.COUCHBASE_OPERATOR,
                                    timeout=90)
        PillowFight.cleanup(self)
        switch_to_default_rook_cluster_project()
Example #14
        def finalizer():
            log.info("Clean up and remove namespace")
            ocp_obj.exec_oc_cmd(command=f"delete project {self.project_name}")

            # Reset namespace to default
            ocp.switch_to_default_rook_cluster_project()
            ocp_obj.wait_for_delete(resource_name=self.project_name)

            # Validate replica count is set to 2
            config_obj = ocp.OCP(
                kind=constants.IMAGE_REGISTRY_CONFIG,
                namespace=constants.OPENSHIFT_IMAGE_REGISTRY_NAMESPACE,
            )
            replica_count = config_obj.get().get("spec").get("replicas")
            if replica_count != 2:
                modify_registry_pod_count(count=2)

                # Validate image registry pods
                validate_registry_pod_status()
Example #15
    def cleanup(self):
        """
        Clean up the cluster from the benchmark operator project

        """
        # Reset namespace to default
        switch_to_default_rook_cluster_project()

        log.info("Delete the benchmark-operator project")
        run("make undeploy", shell=True, check=True, cwd=self.dir)
        # Wait until the benchmark-operator project deleted
        self.ns_obj.wait_for_delete(resource_name=self.namespace, timeout=180)

        # remove from workers the label used for cache dropping
        log.info("Remove labels from worker nodes.")
        helpers.remove_label_from_worker_node(self.worker_nodes, label_key=BMO_LABEL)

        # Wait another 10 seconds after cleanup is done.
        time.sleep(10)
Example #16
    def teardown():

        # Delete created app pods and pvcs
        assert pod.delete_pods(pod_objs)
        assert pvc.delete_pvcs(pvc_objs)

        # Switch to default project
        ret = ocp.switch_to_default_rook_cluster_project()
        assert ret, 'Failed to switch to default rook cluster project'

        # Delete created projects
        for prj in namespace_list:
            prj.delete(resource_name=prj.namespace)
Example #17
    def teardown(self):
        """
        Delete objects created in roughly reverse order of how they were created.

        """
        self.cb_examples.delete()
        self.cb_worker.delete()
        self.cb_deploy.delete()
        self.pod_obj.exec_oc_cmd(
            command=
            "delete rolebinding couchbase-operator-rolebinding -n couchbase-operator-namespace"
        )
        self.pod_obj.exec_oc_cmd(
            command=
            "delete serviceaccount couchbase-operator -n couchbase-operator-namespace"
        )
        self.operator_role.delete()
        self.couchbase_obj.delete()
        switch_to_project("default")
        self.ns_obj.delete_project(constants.COUCHBASE_OPERATOR)
        self.ns_obj.wait_for_delete(resource_name=constants.COUCHBASE_OPERATOR,
                                    timeout=90)
        for adm_obj in self.adm_objects:
            adm_obj.delete()

        # Before the code below was added, the teardown would sometimes fail
        # with leftover objects because it could still see one of the
        # couchbase pods.
        for admin_pod in TimeoutSampler(self.WAIT_FOR_TIME, 3,
                                        get_pod_name_by_pattern, "couchbase",
                                        "default"):
            if not admin_pod:
                break
        PillowFight.cleanup(self)
        switch_to_default_rook_cluster_project()
Example #18
def teardown(self):
    """
    Delete PVCs
    Delete project
    """
    # Delete newly created PVCs
    assert delete_pvcs(self.pvc_objs_new), 'Failed to delete PVCs'
    log.info(f'Newly created {self.number_of_pvc} PVCs are now deleted.')

    # Switch to default project
    ret = ocp.switch_to_default_rook_cluster_project()
    assert ret, 'Failed to switch to default rook cluster project'

    # Delete project created for the test case
    self.project_obj.delete(resource_name=self.namespace)
Example #19
    def teardown(self):
        """
        Cleaning up the environment :
            Delete all snapshot
            Delete the POD
            Delete the PVC and the PV
            Delete the StorageClass
            Delete the VolumeSnapshotClass
            Delete the data pool
            Switch to the default namespace
            Delete the tested namespace

        """
        log.info("Cleanup the test environment")

        if self.full_teardown:
            # Getting the name of the PVC's backing PV
            try:
                pv = self.pvc_obj.get("spec")["spec"]["volumeName"]
            except KeyError:
                pv = None
                log.error(
                    f"Cannot find key in the PVC object {json.dumps(self.pvc_obj.get('spec').get('spec'), indent=3)}"
                )

            # Getting the list of all snapshots
            try:
                snapshot_list = self.snapshot.get(all_namespaces=True)["items"]
            except Exception as err:
                log.error(f"Cannot get the list of snapshots : {err}")
                snapshot_list = []

            # Deleting all snapshots from the cluster
            log.info(f"Trying to delete all ({len(snapshot_list)}) Snapshots")
            log.debug(
                f"The list of all snapshots is : {json.dumps(snapshot_list, indent=3)}"
            )
            for vs in snapshot_list:
                snap_name = vs["metadata"]["name"]
                log.info(f"Try to delete {snap_name}")
                try:
                    self.snapshot.delete(resource_name=snap_name)
                except Exception as err:
                    log.error(f"Cannot delete {snap_name} : {err}")

            # Deleting the pod which wrote data to the pvc
            log.info(f"Deleting the test POD : {self.pod_obj.name}")
            try:
                self.pod_obj.delete()
                log.info("Wait until the pod is deleted.")
                self.pod_obj.ocp.wait_for_delete(
                    resource_name=self.pod_obj.name)
            except Exception as ex:
                log.error(f"Cannot delete the test pod : {ex}")

            # Deleting the PVC which used in the test.
            try:
                log.info(f"Delete the PVC : {self.pvc_obj.name}")
                self.pvc_obj.delete()
                log.info("Wait until the pvc is deleted.")
                self.pvc_obj.ocp.wait_for_delete(
                    resource_name=self.pvc_obj.name)
            except Exception as ex:
                log.error(f"Cannot delete the test pvc : {ex}")

            # Delete the backend PV of the PVC, if its name was found above
            if pv:
                log.info(f"Trying to delete the backend PV : {pv}")
                try:
                    run_oc_command(f"delete pv {pv}")
                except Exception as ex:
                    log.error(f"Cannot delete PV {pv} - [{ex}]")

            # Deleting the StorageClass used in the test
            log.info(f"Deleting the test StorageClass : {self.sc_obj.name}")
            try:
                self.sc_obj.delete()
                log.info("Wait until the SC is deleted.")
                self.sc_obj.ocp.wait_for_delete(resource_name=self.sc_obj.name)
            except Exception as ex:
                log.error(f"Cannot delete the test StorageClass : {ex}")

            # Deleting the VolumeSnapshotClass used in the test
            log.info(
                f"Deleting the test Snapshot Class : {self.snap_class.name}")
            try:
                self.snap_class.delete()
                log.info("Wait until the VSC is deleted.")
                self.snap_class.ocp.wait_for_delete(
                    resource_name=self.snap_class.name)
            except Exception as ex:
                log.error(f"Cannot delete the test VolumeSnapshotClass : {ex}")

            # Deleting the Data pool
            log.info(f"Deleting the test storage pool : {self.sc_name}")
            self.delete_ceph_pool(self.sc_name)
            # Verify deletion by checking the backend CEPH pools using the toolbox
            results = self.ceph_cluster.toolbox.exec_cmd_on_pod(
                "ceph osd pool ls")
            log.debug(f"Existing pools are : {results}")
            if self.sc_name in results.split():
                log.warning(
                    "The pool was not deleted by CSI, deleting it manually"
                )
                self.ceph_cluster.toolbox.exec_cmd_on_pod(
                    f"ceph osd pool delete {self.sc_name} {self.sc_name} "
                    "--yes-i-really-really-mean-it")
            else:
                log.info(f"The pool {self.sc_name} was deleted successfully")

            # Deleting the namespace used by the test
            log.info(f"Deleting the test namespace : {self.nss_name}")
            switch_to_default_rook_cluster_project()
            try:
                self.proj.delete(resource_name=self.nss_name)
                self.proj.wait_for_delete(resource_name=self.nss_name,
                                          timeout=60,
                                          sleep=10)
            except CommandFailed:
                log.error(f"Can not delete project {self.nss_name}")
                raise CommandFailed(f"{self.nss_name} was not created")

            # After deleting all data from the cluster, wait until it re-balances
            ceph_health_check(namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
                              tries=30,
                              delay=60)

        super(TestPvcMultiSnapshotPerformance, self).teardown()
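
Example #19 wraps each deletion in its own try/except so one failed step cannot abort the rest of the teardown. A compact sketch of that best-effort pattern, collecting failures and raising once at the end; the helper and step names are illustrative:

import logging

log = logging.getLogger(__name__)

def best_effort_teardown(steps):
    """Run every (name, callable) step, log failures, and raise once at
    the end so a single failed delete cannot abort the whole teardown."""
    failures = []
    for name, step in steps:
        try:
            step()
        except Exception as err:
            log.error(f"Teardown step {name!r} failed: {err}")
            failures.append(name)
    if failures:
        raise RuntimeError(f"Teardown steps failed: {failures}")

# Usage sketch:
# best_effort_teardown([
#     ("delete pod", pod_obj.delete),
#     ("delete pvc", pvc_obj.delete),
#     ("delete storageclass", sc_obj.delete),
# ])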