Example #4
def setup_params(get_kubeconfig):
    params_dict = {
        'project_api_obj': OcpProjects(kube_config_file=get_kubeconfig),
        'project1': {
            'name': 'test-project1',
            'label': {
                'test': '1'
            }
        },
        'project2': {
            'name': 'test-project2',
            'label': {
                'test': '2'
            }
        },
        'project3': {
            'name': 'openshift-test1',
            'label': {
                'test': '3'
            }
        },
        'project4': {
            'name': 'openshift-test2',
            'label': {
                'test': '4'
            }
        }
    }
    return params_dict
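
These setup_params helpers read like pytest fixtures that bundle API objects with test data. A hedged sketch of how a test might consume the dict above, assuming setup_params is registered as a pytest fixture (the test body itself is an assumption; create_a_project with labels_dict mirrors a call that appears later in these examples):

# Hypothetical test consuming setup_params.
def test_create_and_delete_project(setup_params):
    project = setup_params['project1']
    assert setup_params['project_api_obj'].create_a_project(
        project['name'], labels_dict=project['label'])
    setup_params['project_api_obj'].delete_a_project(project['name'])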
def setup_params(get_kubeconfig):
    params_dict = {}
    params_dict['test_project'] = 'template-project'
    params_dict['project_api_obj'] = OcpProjects(kube_config_file=get_kubeconfig)
    params_dict['template_api_obj'] = OcpTemplates(kube_config_file=get_kubeconfig)
    params_dict['ident'] = randint(0, 10)
    params_dict['app_name'] = 'httpd-example'
    return params_dict
def __init__(self, get_kubeconfig):
    self.op_hub_obj = OperatorhubPackages(kube_config_file=get_kubeconfig)
    self.sub_obj = Subscription(kube_config_file=get_kubeconfig)
    self.og_obj = OperatorGroup(kube_config_file=get_kubeconfig)
    self.csv_obj = ClusterServiceVersion(kube_config_file=get_kubeconfig)
    self.project_obj = OcpProjects(kube_config_file=get_kubeconfig)
    self.oi_obj = OperatorInstaller(kube_config_file=get_kubeconfig)
    self.cs_obj = CatalogSource(kube_config_file=get_kubeconfig)
Example #7
def setup_params(get_kubeconfig):
    params_dict = {
        "project_api_obj": OcpProjects(kube_config_file=get_kubeconfig),
        "project1": {"name": "test-project1", "label": {"test": "1"}},
        "project2": {"name": "test-project2", "label": {"test": "2"}},
        "project3": {"name": "openshift-test1", "label": {"test": "3"}},
        "project4": {"name": "openshift-test2", "label": {"test": "4"}},
    }
    return params_dict
def setup_params(get_kubeconfig):
    params_dict = {}
    params_dict["test_project"] = "template-project"
    params_dict["project_api_obj"] = OcpProjects(
        kube_config_file=get_kubeconfig)
    params_dict["template_api_obj"] = OcpTemplates(
        kube_config_file=get_kubeconfig)
    params_dict["ident"] = randint(0, 10)
    params_dict["app_name"] = "httpd-example"
    return params_dict
def setup_params(get_kubeconfig):
    params_dict = {}
    params_dict['app_api_obj'] = OcpApps(kube_config_file=get_kubeconfig)
    params_dict['project_api_obj'] = OcpProjects(kube_config_file=get_kubeconfig)
    params_dict['template_api_obj'] = OcpTemplates(kube_config_file=get_kubeconfig)
    params_dict['test_project'] = 'app-project'
    params_dict['ident'] = randint(0, 10)
    params_dict['test_app_params'] = {'MEMORY_LIMIT': '768Mi'}
    params_dict['template_name'] = 'httpd-example'
    base_api = OcpBase(kube_config_file=get_kubeconfig)
    dyn_client = base_api.dyn_client
    params_dict['v1_deploymentconfig'] = dyn_client.resources.get(api_version='v1', kind='DeploymentConfig')
    return params_dict
def setup_params(get_kubeconfig):
    params_dict = {}
    params_dict["app_api_obj"] = OcpApps(kube_config_file=get_kubeconfig)
    params_dict["project_api_obj"] = OcpProjects(
        kube_config_file=get_kubeconfig)
    params_dict["template_api_obj"] = OcpTemplates(
        kube_config_file=get_kubeconfig)
    params_dict["test_project"] = "app-project"
    params_dict["ident"] = randint(0, 10)
    params_dict["test_app_params"] = {"MEMORY_LIMIT": "768Mi"}
    params_dict["template_name"] = "httpd-example"
    base_api = OcpBase(kube_config_file=get_kubeconfig)
    dyn_client = base_api.dyn_client
    params_dict["v1_deploymentconfig"] = dyn_client.resources.get(
        api_version="v1", kind="DeploymentConfig")
    return params_dict
class OperatorInstaller(OcpBase):
    def __init__(self, kube_config_file):
        super(OperatorInstaller, self).__init__(kube_config_file=kube_config_file)
        self.og_obj = OperatorGroup(kube_config_file=self.kube_config_file)
        self.sub_obj = Subscription(kube_config_file=self.kube_config_file)
        self.os_obj = OperatorSource(kube_config_file=self.kube_config_file)
        self.csc_obj = CatalogSourceConfig(kube_config_file=self.kube_config_file)
        self.cs_obj = CatalogSource(kube_config_file=self.kube_config_file)
        self.ohp_obj = OperatorhubPackages(kube_config_file=self.kube_config_file)
        self.proj_obj = OcpProjects(kube_config_file=self.kube_config_file)

    def _source_processor(self, source):
        """
        Takes in a source as either a path to a JSON/YAML file, or the source in dict format.
        Currently supported are:
        For OCP 4.1: CatalogSourceConfig and OperatorSource
        For OCP 4.2: OperatorSource
        """
        def _source_path_processor(source_path):
            with open(source_path, 'r') as f:
                source = f.read()
            source_dict = dict()
            valid_source = True
            try:
                source_dict = yaml.safe_load(source)
                logger.debug("Successfully loaded the source file.")
            except yaml.YAMLError as e:
                logger.error("Could not load the source file: {}".format(e))
                valid_source = False
            if valid_source:
                return _source_dict_processor(source_dict)
            else:
                err_msg = "The provided Json/Yaml source file could not be loaded"
                logger.exception(err_msg)
                raise TypeError(err_msg)

        def _source_dict_processor(source_dict):
            """
            A helper method that takes in a source object in dict form. We invoke the
            appropriate create method based on the resource kind and then we finally
            check that a catalogsource object has been created as a result.
            """
            resource_kind = source_dict['kind']
            logger.info("Operator source is of kind: {}".format(resource_kind))
            if resource_kind == 'CatalogSourceConfig':
                if self.version != ('4', '1'):
                    err_msg = "Source type CatalogSourceConfig is only supported for OCP version 4.1"
                    logger.exception(err_msg)
                    raise OcpUnsupportedVersion(err_msg)
                else:
                    resp = self.csc_obj.create_catalog_source_config(body=source_dict)
            elif resource_kind == 'OperatorSource':
                resp = self.os_obj.create_operator_source(body=source_dict)
            else:
                err_msg = "Unsupported source kind: {}".format(resource_kind)
                logger.error(err_msg)
                raise TypeError(err_msg)
            if resp.kind == 'CatalogSourceConfig':
                cs_namespace = resp.spec.targetNamespace
            else:
                cs_namespace = resp.metadata.namespace
            logger.info("Checking if CatalogSource {} exists in namespace {}".format(resp.metadata.name, cs_namespace))
            assert self.cs_obj.is_catalog_source_present(resp.metadata.name, namespace=cs_namespace)
            cs = self.cs_obj.get_catalog_source(resp.metadata.name, namespace=cs_namespace)
            return cs.metadata.name, cs_namespace

        if isinstance(source, str):
            return _source_path_processor(source)
        elif isinstance(source, dict):
            return _source_dict_processor(source)
        else:
            err_msg = "source must be a file path (str) or a dict, got {}".format(type(source).__name__)
            logger.error(err_msg)
            raise TypeError(err_msg)

    def _derive_install_mode_from_target_namespaces(self, operator_name, target_namespaces):
        """
        Length of target_namespaces list enables us to derive the appropriate install mode
        Look at the length of the target_namespaces list:
            1. If it's zero, then we verify if 'AllNamespaces' is supported.
            2. If length is 1, then we verify if 'SingleNamespace' is supported.
            3. If length > 1, then we verify 'MultiNamespace' is supported.
        """
        target_namespaces_count = len(target_namespaces)
        if target_namespaces_count == 0:
            install_mode = 'AllNamespaces'
        elif target_namespaces_count == 1:
            install_mode = 'SingleNamespace'
        elif target_namespaces_count > 1:
            install_mode = 'MultiNamespace'
        return install_mode

    def _create_og(self, operator_name, install_mode, target_namespaces):
        """
        A helper method that creates a placeholder project that will contain the
        subscription and operator group objects necessary to install an operator.
        For OpenShift 4.1, the operator installation only succeeds if these objects
        are installed in the openshift-marketplace namespace. In OpenShift 4.2+, we
        create a project with the following naming convention:
        test-<operator name>-<install mode>-og-sub-project
        """
        og_name = operator_name + '-' + install_mode.lower() + '-og'
        if self.version == ('4', '1'):
            og_namespace = 'openshift-marketplace'
        else:
            og_namespace = 'test-' + operator_name + '-' + install_mode.lower() + '-og-sub-project'
        logger.info("Creating Project: {} that will hold the subscription and operator group".format(og_namespace))
        assert self.proj_obj.create_a_project(og_namespace)
        assert self.og_obj.create_operator_group(og_name, og_namespace, target_namespaces)
        return og_name, og_namespace

    def add_operator_to_cluster(self, operator_name, source=None, target_namespaces=[]):
        """
        Install an operator in a list of targeted namespaces
        :param operator_name: (required | str) The name of the operator to be installed
        :param source: (optional | str) The source of the operator to be installed. This parameter
                       can be in the form of a path to a source YAML or JSON file, or it can also be
                       passed as a dictionary. If not specified, the package is assumed to already
                       be visible through the operator hub, so the source can be discovered.
        :param target_namespaces: (optional | list) A list of namespaces/projects where we want the
                                   operator to be enabled. If left unspecified, the operator
                                   will be installed/enabled throughout the entire cluster.
        """
        if source:
            cs_name, cs_namespace = self._source_processor(source)
            if not self.ohp_obj.watch_package_manifest_present(operator_name):
                err_msg = "A package manifest for {} could not be found".format(operator_name)
                logger.exception(err_msg)
                raise ApiException(err_msg)
        else:
            pkg_obj = self.ohp_obj.get_package_manifest(operator_name)
            if pkg_obj:
                cs_name = pkg_obj.status.catalogSource
                cs_namespace = pkg_obj.metadata.namespace
            else:
                err_msg = "A package manifest for {} could not be found".format(operator_name)
                logger.exception(err_msg)
                raise ApiException(err_msg)
        install_mode = self._derive_install_mode_from_target_namespaces(operator_name, target_namespaces)
        og_name, og_namespace = self._create_og(operator_name, install_mode, target_namespaces)
        subscription = self.sub_obj.create_subscription(operator_name, install_mode, og_namespace)
        assert subscription.spec.source == cs_name
        assert subscription.spec.sourceNamespace == cs_namespace
        return True

    def delete_operator_from_cluster(self, operator_name):
        """
        Uninstall an operator from a cluster
        """
        pass
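
A minimal usage sketch for this class, assuming a kubeconfig path, an operator name, and a source manifest file that are purely illustrative (none of them come from the source):

# Hypothetical usage of the OCP 4.1/4.2 OperatorInstaller; the kubeconfig path,
# source file, operator name, and namespace below are assumptions.
installer = OperatorInstaller(kube_config_file='~/kubeconfig')
installer.add_operator_to_cluster(
    'example-operator',                    # hypothetical operator name
    source='operator_source.yaml',         # OperatorSource/CatalogSourceConfig manifest on disk
    target_namespaces=['test-namespace'],  # one entry derives the SingleNamespace mode
)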
class PopulateOcpCluster:
    cleanup_project_list = []

    def __init__(self, ocp_cluster_config, k8=None):
        self.is_populate_successful = False
        self.total_app_count = 0
        self.ocp_cluster_config = ocp_cluster_config
        # Create objects
        self.project_obj = OcpProjects(kube_config_file=k8)
        self.app_obj = OcpApps(kube_config_file=k8)
        self.dc_obj = OcpDeploymentconfigs(kube_config_file=k8)
        self.pod_obj = OcpPods(kube_config_file=k8)
        self.events_obj = OcpEvents(kube_config_file=k8)
        self.node_obj = OcpNodes(kube_config_file=k8)
        self.ocp_cluster_obj = PopulateOcpCluster.get_ocp_cluster_objects_from_template(self.ocp_cluster_config)
        self.python_version = tuple(sys.version[:5].split(".")[:2])
        self.lock = Lock()

    @staticmethod
    def get_ocp_cluster_objects_from_template(ocp_cluster_config):
        """
        Read the ocp_config.yaml config file and
        use the data to populate the cluster.
        :param ocp_cluster_config: config template to create ocp objects
        :return: ocp_cluster_object
        """
        try:
            return populate_ocp_cluster_config(ocp_cluster_config)
        except Exception as e:
            logger.exception("Failed to create ocp_cluster object: %s", e)
            sys.exit(1)
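
    # A sketch of the config structure populate_cluster() below expects from the
    # parsed template, inferred from the attribute accesses in this class; the
    # YAML key names are assumptions, not confirmed by the source:
    #
    #   projects:
    #     - project_name: test-project-1
    #       project_labels: {purpose: scale}
    #       apps:
    #         - app_template: httpd-example
    #           app_count: 2
    #           app_replicas: 3
    #           app_labels: {scalable: "True"}
    #           app_params: {MEMORY_LIMIT: 768Mi}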

    def populate_cluster(self, filter="all"):
        """
        filters can be used to select subsets of projects to be deployed
        for any specific run.
        :param filter: 'all' (default) or a list of label values used to
                       filter which projects to deploy.
        :return: True
        """
        start = time.time()
        if "all" in filter:
            filtered_projects = self.ocp_cluster_obj.projects
        else:
            filtered_projects = [
                p for p in self.ocp_cluster_obj.projects if bool(set(filter) & set(p.project_labels.values()))
            ]
        if not filtered_projects:
            logger.error(
                "None of the filters you provided matched any project labels."
                "Make sure you are entering the values correctly and try"
                "re-running the script."
            )
            raise ocp_exceptions.ConfigError(
                "None of the filters you provided matched any "
                "project labels. Make sure you are entering the values "
                "correctly and try re-running the script."
            )

        else:

            def populate(project):
                logger.info("Starting thread - %s", threading.currentThread().getName())
                current_project = project.project_name
                current_project_labels = project.project_labels
                logger.info("-" * 60)
                logger.info("%s - Current project is: %s", threading.currentThread().getName(), current_project)
                logger.info("-" * 60)

                self.project_obj.create_a_project(current_project, labels_dict=current_project_labels)

                # For every project in the outer loop,
                # loop through the apps to be deployed
                for app in project.apps:
                    with self.lock:
                        self.total_app_count += app.app_count

                    logger.info(
                        "****** %s - Current app template used is: %s",
                        threading.currentThread().getName(),
                        app.app_template,
                    )
                    logger.debug("App Labels : %s", app.app_labels)
                    logger.debug("APP_PARAM : %s", app.app_params)
                    # For every app, deploy the desired count
                    for i in range(app.app_count):
                        logger.info(
                            "----> %s - Now deploying: %s",
                            threading.currentThread().getName(),
                            app.app_template + "-" + str(i),
                        )
                        _, dc_names = self.app_obj.create_app_from_template(
                            current_project, app.app_template, i, app.app_params
                        )

                        if dc_names is None:
                            logger.error(
                                " %s -Failed to deploy template: %s",
                                threading.currentThread().getName(),
                                app.app_template + "-" + str(i),
                            )
                            continue

                        for dc in dc_names:
                            # For every Deployment Config (dc) in an app,
                            # Poll for readiness and list its Pod events
                            dc_available = self.dc_obj.check_dc_status_conditions_availability(
                                current_project, dc, timeout=600
                            )
                            if not dc_available:
                                logger.error(
                                    " %s - Timed out waiting for expected status conditions for " "deploymentconfig %s",
                                    threading.currentThread().getName(),
                                    dc,
                                )
                                sys.exit(1)

                            dc_ready = self.dc_obj.is_dc_ready(current_project, dc, timeout=1200)

                            if dc_ready:
                                # Show any deploymentconfig events
                                dc_events = self.events_obj.list_dc_events_in_a_namespace(current_project, dc)
                                if dc_events:
                                    logger.debug(
                                        "%s - Deploymentconfig events for %s:\n",
                                        threading.currentThread().getName(),
                                        dc,
                                    )
                                    for event in dc_events:
                                        logger.debug(
                                            " %s - \n\tProject: %s\n\tResource: %s\n\tFirstTimestamp: %s"
                                            "\n\tMessage: %s\n"
                                            % (
                                                threading.currentThread().getName(),
                                                event.involvedObject.namespace,
                                                event.involvedObject.name,
                                                event.firstTimestamp,
                                                event.message,
                                            )
                                        )
                                # Show any events from pods associated with this deploymentconfig
                                pod_events = self.events_obj.list_pod_events_in_a_namespace(current_project, dc)
                                if pod_events is not None:
                                    logger.debug("Pod events for %s:\n" % dc)
                                    for event in pod_events:
                                        logger.debug(
                                            "\tProject: %s\n\tResource: %s\n\tFirstTimestamp: %s\n\tMessage: %s\n"
                                            % (
                                                event.involvedObject.namespace,
                                                event.involvedObject.name,
                                                event.firstTimestamp,
                                                event.message,
                                            )
                                        )
                                # Check for currently existing pods associated with this deploymentconfig
                                dc_pod = self.pod_obj.list_pods_in_a_deployment(current_project, dc)
                                if len(dc_pod) == 0:
                                    logger.error(
                                        " %s - No pods for deploymentconfig %s were found in the cluster.",
                                        threading.currentThread().getName(),
                                        dc,
                                    )
                            else:
                                logger.error(
                                    "%s - Timed out waiting for the deploymentconfig %s to become ready."
                                    "Exiting now ...",
                                    threading.currentThread().getName(),
                                    dc,
                                )
                                raise ocp_exceptions.ExecutionError(
                                    "Timed out waiting for the deploymentconfig %s to become ready. Exiting now ..."
                                    % dc
                                )
                            # Update replicas as specified in config file
                            logger.info(
                                "%s - Now updating replicas for app %s", threading.currentThread().getName(), dc
                            )
                            self.dc_obj.update_deployment_replicas(current_project, dc, app.app_replicas)
                            # Label the deployment configs of this app
                            logger.info(
                                "%s - Now labeling deploymentconfig %s", threading.currentThread().getName(), dc
                            )
                            app_labels = app.app_labels
                            self.dc_obj.label_dc(current_project, dc, app_labels)

                    self.is_populate_successful = True

            """
            ThreadPoolExecutor with ContextManager is the recommended way to handle thread in Python3 but it's not
            supported in Python2. Hence we have to use traditional threading module. We have also used threading.Lock()
            class to implement primitive lock objects. Once a thread has acquired a lock, subsequent attempts
            to acquire it block, until it is released.

            For Python2 : We have used threading.Thread to create thread with thread_name and function as args.We will
            loop over all projects from config yaml specified in with --cluster-config option and creates a thread.
            Store all thread in lists and use join() method to complete all threads before it switch to main_thread.

            For Python3 : We have used ThreadPoolExecutor with thread_name_prefix and default process counts. Based on
            python3 doc, default number of thread created by ThreadPoolExecutor will be number processor on server * 5.
            if server has 8 processor, Total number of concurrent thread will be 40. If we have 100 projects, It will
            create 40 thread for first 40 projects and remaining 60 project will be in pool. As any of active thread is
            completed, It will pick next projects from pool. ThreadPoolExecutor with ContextManager will handle all
            threads before switching to main_thread.
            """

            if self.python_version <= ("2", "7"):
                logger.info("Python version is %s", self.python_version)
                threads = list()
                for project in filtered_projects:
                    thread = threading.Thread(
                        target=populate, name="Thread_{}".format(project.project_name), args=(project,)
                    )
                    threads.append(thread)
                    thread.start()
                for thread in threads:
                    thread.join()
            else:
                logger.info("Python version is %s", self.python_version)
                with ThreadPoolExecutor(thread_name_prefix="Thread") as executor:
                    executor.map(populate, filtered_projects)
                    # result_futures = list(map(lambda x: executor.submit(populate, x), filtered_projects))
                    # results = [f.result() for f in futures.as_completed(result_futures)]

            end = time.time()
            logger.info(
                "Time taken to populate %s projects and %s apps is : %s ",
                len(filtered_projects),
                self.total_app_count,
                round(end - start, 2),
            )

        return self.is_populate_successful

    def longevity(self, duration, scale_replicas=5):
        """
        This method scans all namespaces for deployments labeled 'scalable=True',
        loads them into a list and periodically picks one at random. Once a
        deployment is picked, it will update its replicas to
        random number in the range specified by the args.replicas param.
        The param args.longevity determines the duration of this method.
        :param duration: The duration of the longevity test in seconds.
                         default is 3600.
        :return: boolean True when successful, raise custom exceptions on failure.
        """
        logger.info("*** Starting longevity component. Duration will be: %d seconds ***", duration)
        end = time.time() + duration
        while time.time() < end:
            dc_list = self.dc_obj.list_deployments_in_all_namespaces(label_selector="scalable=True")
            if not dc_list.items:
                logger.error(
                    "No deployments labeled scalable=True were found "
                    "in the cluster. Longevity can only run when "
                    "scalable deployments are present in the cluster. "
                    "Exiting now ..."
                )
                raise ocp_exceptions.ExecutionError(
                    "No deployments labeled scalable=True were found "
                    "in the cluster. Longevity can only run when "
                    "scalable deployments are present in the cluster. "
                    "Exiting now ..."
                )
            else:
                bad_dcs = self.dc_obj.find_unhealthy_dcs_in_namespace_list(dc_list.items)
                if bad_dcs:
                    for dc in bad_dcs:
                        current_dc = dc.metadata.name
                        current_namespace = dc.metadata.namespace

                        logger.error(
                            "The DeploymentConfig %s in namespace %s "
                            "is in an invalid state."
                            "See log entries below:",
                            current_dc,
                            current_namespace,
                        )

                        dc_log = self.dc_obj.read_dc_log(current_namespace, current_dc)
                        for line in dc_log:
                            logger.error(line)

                    raise ocp_exceptions.OcpDeploymentConfigInvalidStateError(
                        "Some of the DeploymentConfigs are in invalid state. See log file for more details"
                    )
                else:
                    # # List mem and cpu usage for each node in the cluster # TODO
                    # node_list = node_obj.get_all_nodes().items
                    # # Separator for better log readability
                    # logger.info("="*70)
                    # logger.info("Listing mem and cpu usage for each node in cluster before modifying dc:")
                    # for node in node_list:
                    #     node_name = node.metadata.name
                    #     logger.info("\n\tAllocated resources for node %s :", node_name)
                    #     allocated_resources_list = node_obj.list_node_memory_cpu_usage(node_name)
                    #     for line in allocated_resources_list:
                    #         logger.info("\t%s:", line)

                    # Update replicas
                    rand_dc = random.choice(dc_list.items)
                    rand_dc_name = rand_dc.metadata.name
                    rand_dc_namespace = rand_dc.metadata.namespace
                    updated_replicas = randint(1, scale_replicas)
                    logger.info(
                        "Modifying dc %s in project %s to scale to : %d replicas",
                        rand_dc_name,
                        rand_dc_namespace,
                        updated_replicas,
                    )
                    self.dc_obj.update_deployment_replicas(rand_dc_namespace, rand_dc_name, updated_replicas)
                    # Buffer between replica updates
                    sleep(10)

        return True

    def cleanup(self, filter="all"):
        """ Cleanup projects created by this script """
        if filter == "all":
            filtered_projects = self.ocp_cluster_obj.projects
        else:
            filtered_projects = [
                p for p in self.ocp_cluster_obj.projects if bool(set(filter) & set(p.project_labels.values()))
            ]
        if not filtered_projects:
            logger.error(
                "None of the filters you provided matched any project labels. "
                "Make sure you are entering the values correctly and try "
                "re-running the script."
            )
            raise ocp_exceptions.ConfigError(
                "None of the filters you provided matched any project labels. "
                "Make sure you are entering the values correctly and try "
                "re-running the script."
            )
        else:
            logger.info("Starting cleanup ... now deleting" " specified projects")
            # TODO : Use watch() instead
            PopulateOcpCluster.cleanup_project_list = filtered_projects[:]
            for project in filtered_projects:
                logger.info("Now deleting Project %s", project.project_name)
                self.project_obj.delete_a_project(project.project_name)
                PopulateOcpCluster.cleanup_project_list.remove(project)
            return PopulateOcpCluster.cleanup_project_list
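
A minimal end-to-end driver sketch for PopulateOcpCluster, assuming a config file named ocp_config.yaml and a kubeconfig path (both file names are illustrative, not from the source):

# Hypothetical driver; the file paths below are assumptions.
populator = PopulateOcpCluster('ocp_config.yaml', k8='~/kubeconfig')
if populator.populate_cluster(filter='all'):
    # Scale randomly chosen scalable deployments for an hour
    populator.longevity(duration=3600, scale_replicas=5)
populator.cleanup(filter='all')  # delete the projects this run created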
def ocp_project(get_kubeconfig) -> OcpProjects:
    return OcpProjects(get_kubeconfig)
class OperatorInstaller(OcpBase):
    def __init__(self, kube_config_file: Optional[str] = None):
        super(OperatorInstaller,
              self).__init__(kube_config_file=kube_config_file)
        self.og_obj = OperatorGroup(kube_config_file=self.kube_config_file)
        self.sub_obj = Subscription(kube_config_file=self.kube_config_file)
        self.ohp_obj = OperatorhubPackages(
            kube_config_file=self.kube_config_file)
        self.proj_obj = OcpProjects(kube_config_file=self.kube_config_file)
        self.csv = ClusterServiceVersion(self.kube_config_file)

    def _derive_install_mode_from_target_namespaces(
            self, target_namespaces: Union[List[str], str]) -> str:
        """
        Length of target_namespaces list enables us to derive the appropriate install mode
        Look at the length of the target_namespaces list:
            1. If the wildcard '*' is used, it gets interpreted as'AllNamespaces'.
            2. If it's zero, we return 'OwnNamespace'.
            3. If length is 1, we return 'SingleNamespace'.
            4. If length > 1, we return 'MultiNamespace'.
        """
        if target_namespaces == '*':
            install_mode = "AllNamespaces"
        else:
            target_namespaces_count = len(target_namespaces)
            if target_namespaces_count == 0:
                install_mode = "OwnNamespace"
            elif target_namespaces_count == 1:
                install_mode = "SingleNamespace"
            elif target_namespaces_count > 1:
                install_mode = "MultiNamespace"
        return install_mode

    def _create_operator_namespace(self, operator_name: str,
                                   operator_namespace: str,
                                   channel_name: str) -> str:
        """
        A helper method that creates the namespace where both the subscription and operator
        group objects will be created to install an operator.
        """
        if not operator_namespace:
            operator_namespace = self.ohp_obj.get_channel_suggested_namespace(
                operator_name, channel_name)
        if not operator_namespace:
            operator_namespace = f"openshift-{operator_name}"
        assert self.proj_obj.create_a_namespace(operator_namespace)
        return operator_namespace

    def _create_og(self, operator_name: str, channel_name: str,
                   operator_namespace: str,
                   target_namespaces: Union[list, str]) -> Tuple[str, str]:
        """
        A helper method that creates the operator group in the generated operator namespace
        """
        derived_install_mode = self._derive_install_mode_from_target_namespaces(
            target_namespaces)
        if not self.ohp_obj.is_install_mode_supported_by_channel(
                operator_name, channel_name, derived_install_mode):
            err_msg = f"The specified channel doesn't support {channel_name} installs"
            logger.exception(err_msg)
            raise UnsupportedInstallMode(err_msg)
        else:
            operator_namespace = self._create_operator_namespace(
                operator_name, operator_namespace, channel_name)
            og_name = f"{operator_name}-og"
            assert self.og_obj.create_operator_group(og_name,
                                                     operator_namespace,
                                                     target_namespaces)
        return og_name, operator_namespace

    def add_operator_to_cluster(
            self,
            operator_name: str,
            channel_name: str = '',
            operator_namespace: str = '',
            target_namespaces: Union[List[str], str] = []) -> bool:
        """
        Install an operator in a list of target namespaces
        :param operator_name: (required | str) The name of the operator to be installed
        :param channel_name: (Optional | str) The name of the channel we want to subscribe to. This is what determines
                             what version of the operator we want to install. If left unspecified, the operator default
                             channel is selected.
        :param operator_namespace: (optional | str) The name of the namespace that will hold the subscription
                                   and operatorgroup objects. If left unspecified, a suggested namespace name
                                   will be searched for in the channel object. If not found, a namespace will be
                                   created with the naming convention 'openshift-<operator-name>'
        :param target_namespaces: (optional | list) A list of namespaces/projects where we want the
                                   operator to be enabled. If left unspecified, the operator
                                   will be installed/enabled throughout the entire cluster.
        :return: (bool) True if install is successful, False otherwise.
        """
        pkg_obj = self.ohp_obj.get_package_manifest(operator_name)
        if not pkg_obj:
            err_msg = f"A package manifest for {operator_name} could not be found"
            logger.exception(err_msg)
            raise ApiException(err_msg)
        if not channel_name:
            channel_name = self.ohp_obj.get_package_default_channel(
                operator_name)
        _, operator_namespace = self._create_og(operator_name, channel_name,
                                                operator_namespace,
                                                target_namespaces)
        sub_resp = self.sub_obj.create_subscription(operator_name,
                                                    channel_name,
                                                    operator_namespace)
        return sub_resp is not None

    def is_operator_installed(self, operator_name: str,
                              operator_namespace: str) -> bool:
        """
        Check whether an operator is installed.
        :param operator_name: name of the operator.
        :param operator_namespace: namespace of the operator.
        :return: True if installed, False otherwise.
        """
        return self.csv.get_cluster_service_version(
            operator_name, operator_namespace) is not None

    def delete_operator_from_cluster(self, operator_name: str,
                                     namespace: str) -> bool:
        """
        Uninstall an operator from a cluster.
        :param operator_name: name of the operator.
        :param namespace: name of the namespace the operator is installed in.
        :return: True on success, False on failure.
        """
        try:
            subscription = self.sub_obj.get_subscription(
                operator_name, namespace)
            csv_name = subscription.status.currentCSV
        except ApiException:
            logger.error("Failed to retrieve subscription")
            return False

        try:
            self.sub_obj.delete_subscription(operator_name, namespace)
            self.csv.delete_cluster_service_version(csv_name, namespace)
        except ApiException:
            logger.error(f"Failed to uninstall operator {operator_name}")
            return False

        return True
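
A minimal usage sketch for this channel-aware variant; the kubeconfig path, operator name, channel, and namespace below are illustrative assumptions:

# Hypothetical usage of the channel-aware OperatorInstaller; names are assumptions.
installer = OperatorInstaller(kube_config_file='~/kubeconfig')
installer.add_operator_to_cluster(
    'example-operator',        # hypothetical operator name
    channel_name='stable',     # omit to fall back to the package's default channel
    target_namespaces='*',     # '*' derives the AllNamespaces install mode
)
if installer.is_operator_installed('example-operator', 'openshift-example-operator'):
    installer.delete_operator_from_cluster('example-operator', 'openshift-example-operator')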
def project_resource(get_kubeconfig) -> OcpProjects:
    return OcpProjects(kube_config_file=get_kubeconfig)