    def _resize_cluster(self, cluster_name, node_count, **kwargs):
        """Resize the named cluster to the given number of worker nodes.

        :param str cluster_name: Name of the cluster
        :param int node_count: Desired number of worker nodes

        :return: response status

        :rtype: dict
        """
        result = {}
        cluster_api = ClusterApiV1(api_client=self.client_v1)
        LOGGER.debug(f"Sending request to PKS:{self.pks_host_uri} to resize "
                     f"the cluster with name: {cluster_name} to "
                     f"{node_count} worker nodes")

        resize_params = UpdateClusterParameters(
            kubernetes_worker_instances=node_count)
        try:
            cluster_api.update_cluster(cluster_name, body=resize_params)
        except v1Exception as err:
            LOGGER.debug(f"Resizing cluster {cluster_name} failed with "
                         f"error:\n {err}")
            raise PksServerError(err.status, err.body)

        LOGGER.debug(f"PKS: {self.pks_host_uri} accepted the request to resize"
                     f" the cluster: {cluster_name}")

        result['name'] = cluster_name
        result['task_status'] = 'in progress'

        return result
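
    # Minimal usage sketch (illustrative; 'broker' stands for an instance of
    # this class with client_v1 and pks_host_uri already initialized):
    #
    #   result = broker._resize_cluster('mycluster', node_count=5)
    #   # result == {'name': 'mycluster', 'task_status': 'in progress'}
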
    def get_cluster_config(self, data):
        """Get the configuration of the cluster with the given name in PKS.

        System administrator gets the given cluster config regardless of
        who is the owner of the cluster. Other users get config only on
        the cluster they own.

        :return: Configuration of the cluster.

        :rtype: str
        """
        cluster_name = data[RequestKey.CLUSTER_NAME]

        if self.tenant_client.is_sysadmin() or \
                is_org_admin(self.client_session):
            cluster_info = self.get_cluster_info(data)
            qualified_cluster_name = cluster_info['pks_cluster_name']
        else:
            qualified_cluster_name = self._append_user_id(cluster_name)

        self._check_cluster_isolation(cluster_name, qualified_cluster_name)

        cluster_api = ClusterApiV1(api_client=self.client_v1)

        LOGGER.debug(f"Sending request to PKS: {self.pks_host_uri} to get"
                     f" detailed configuration of cluster with name: "
                     f"{cluster_name}")
        config = cluster_api.create_user(cluster_name=qualified_cluster_name)
        LOGGER.debug(f"Received response from PKS: {self.pks_host_uri} on "
                     f"cluster: {cluster_name} with details: {config}")
        cluster_config = yaml.safe_dump(config, default_flow_style=False)

        return self.filter_traces_of_user_context(cluster_config)
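
    # The request payload is expected to carry the cluster name under
    # RequestKey.CLUSTER_NAME, e.g. (illustrative caller):
    #
    #   config_yaml = broker.get_cluster_config(
    #       {RequestKey.CLUSTER_NAME: 'mycluster'})
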
    def _delete_cluster(self, cluster_name):
        """Delete the cluster with a given name in PKS environment.

        Also deletes associated NSX-T Distributed Firewall rules that kept the
        cluster network isolated from other clusters.

        :param str cluster_name: Name of the cluster
        """
        result = {}

        cluster_api = ClusterApiV1(api_client=self.client_v1)

        LOGGER.debug(f"Sending request to PKS: {self.pks_host_uri} to delete "
                     f"the cluster with name: {cluster_name}")
        try:
            cluster_api.delete_cluster(cluster_name=cluster_name)
        except v1Exception as err:
            LOGGER.debug(f"Deleting cluster {cluster_name} failed with "
                         f"error:\n {err}")
            raise PksServerError(err.status, err.body)

        LOGGER.debug(f"PKS: {self.pks_host_uri} accepted the request to delete"
                     f" the cluster: {cluster_name}")

        result['name'] = cluster_name
        result['task_status'] = 'in progress'
        return result

    def delete_cluster(self, data):
        """Delete the cluster with a given name in PKS environment.

        System administrator can delete the given cluster regardless of
        who is the owner of the cluster. Other users can only delete
        the cluster they own.

        :param str cluster_name: Name of the cluster
        """
        cluster_name = data[RequestKey.CLUSTER_NAME]

        if self.tenant_client.is_sysadmin() \
                or is_org_admin(self.client_session):
            cluster_info = self.get_cluster_info(data)
            qualified_cluster_name = cluster_info['pks_cluster_name']
        else:
            qualified_cluster_name = self._append_user_id(cluster_name)

        result = {}
        cluster_api = ClusterApiV1(api_client=self.client_v1)
        LOGGER.debug(f"Sending request to PKS: {self.pks_host_uri} to delete "
                     f"the cluster with name: {qualified_cluster_name}")
        try:
            cluster_api.delete_cluster(cluster_name=qualified_cluster_name)
        except v1Exception as err:
            LOGGER.debug(f"Deleting cluster {qualified_cluster_name} failed"
                         f" with error:\n {err}")
            raise PksServerError(err.status, err.body)
        LOGGER.debug(f"PKS: {self.pks_host_uri} accepted the request to delete"
                     f" the cluster: {qualified_cluster_name}")
        result['name'] = qualified_cluster_name
        result['task_status'] = 'in progress'

        # remove cluster network isolation
        LOGGER.debug(f"Removing network isolation of cluster {cluster_name}.")
        try:
            cluster_network_isolater = ClusterNetworkIsolater(self.nsxt_client)
            cluster_network_isolater.remove_cluster_isolation(
                qualified_cluster_name)
        except Exception as err:
            # NSX-T operations are idempotent, so a missing NSGroup (or
            # similar) should not cause an error. Any other exception is
            # simply logged and ignored.
            LOGGER.debug(f"Error {err} occurred while deleting cluster "
                         f"isolation rules for cluster {cluster_name}")

        self._restore_original_name(result)
        self._filter_pks_properties(result)
        return result
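
    # Illustrative end-to-end call. Note that the NSX-T isolation cleanup
    # above is best effort: any failure there is only logged, and the PKS
    # delete result is still returned.
    #
    #   result = broker.delete_cluster(
    #       {RequestKey.CLUSTER_NAME: 'mycluster'})
    #   # result['task_status'] == 'in progress' while the delete is pending
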
    def resize_cluster(self, data):
        """Resize the cluster of a given name to given number of worker nodes.

        System administrator can resize the given cluster regardless of
        who is the owner of the cluster. Other users can only resize
        the cluster they own.


        :return: response status

        :rtype: dict

        """
        cluster_name = data[RequestKey.CLUSTER_NAME]
        num_workers = data[RequestKey.NUM_WORKERS]

        if self.tenant_client.is_sysadmin() \
                or is_org_admin(self.client_session):
            cluster_info = self.get_cluster_info(data)
            qualified_cluster_name = cluster_info['pks_cluster_name']
        else:
            qualified_cluster_name = self._append_user_id(cluster_name)

        self._check_cluster_isolation(cluster_name, qualified_cluster_name)

        result = {}
        cluster_api = ClusterApiV1(api_client=self.client_v1)
        LOGGER.debug(f"Sending request to PKS:{self.pks_host_uri} to resize "
                     f"the cluster with name: {qualified_cluster_name} to "
                     f"{num_workers} worker nodes")
        resize_params = \
            UpdateClusterParameters(kubernetes_worker_instances=num_workers)
        try:
            cluster_api.update_cluster(qualified_cluster_name,
                                       body=resize_params)
        except v1Exception as err:
            LOGGER.debug(f"Resizing cluster {qualified_cluster_name} failed"
                         f" with error:\n {err}")
            raise PksServerError(err.status, err.body)
        LOGGER.debug(f"PKS: {self.pks_host_uri} accepted the request to resize"
                     f" the cluster: {qualified_cluster_name}")

        result['name'] = qualified_cluster_name
        result['task_status'] = 'in progress'
        self._restore_original_name(result)
        self._filter_pks_properties(result)
        return result
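
    # Unlike the private _resize_cluster() helper above, this method resolves
    # the PKS-qualified cluster name based on the caller's role, restores the
    # original name and filters PKS-specific properties from the result before
    # returning it. Illustrative call:
    #
    #   result = broker.resize_cluster(
    #       {RequestKey.CLUSTER_NAME: 'mycluster',
    #        RequestKey.NUM_WORKERS: 5})
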
    def _get_cluster_config(self, cluster_name):
        """Get the configuration of the cluster with the given name in PKS.

        :param str cluster_name: Name of the cluster
        :return: Configuration of the cluster.

        :rtype: str
        """
        cluster_api = ClusterApiV1(api_client=self.client_v1)

        LOGGER.debug(f"Sending request to PKS: {self.pks_host_uri} to get"
                     f" detailed configuration of cluster with name: "
                     f"{cluster_name}")
        config = cluster_api.create_user(cluster_name=cluster_name)

        LOGGER.debug(f"Received response from PKS: {self.pks_host_uri} on "
                     f"cluster: {cluster_name} with details: {config}")
        cluster_config = yaml.safe_dump(config, default_flow_style=False)
        return cluster_config
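
    # For reference, yaml.safe_dump serializes the dict returned by PKS into
    # plain YAML text; a toy example (not an actual PKS response):
    #
    #   yaml.safe_dump({'user': 'u1'}, default_flow_style=False)
    #   # -> 'user: u1\n'
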
    def _list_clusters(self):
        """Get list of clusters in PKS environment.

        :return: a list of cluster-dictionaries

        :rtype: list
        """
        cluster_api = ClusterApiV1(api_client=self.client_v1)

        LOGGER.debug(f"Sending request to PKS: {self.pks_host_uri} "
                     f"to list all clusters")
        try:
            clusters = cluster_api.list_clusters()
        except v1Exception as err:
            LOGGER.debug(f"Listing PKS clusters failed with error:\n {err}")
            raise PksServerError(err.status, err.body)

        list_of_cluster_dicts = []
        for cluster in clusters:
            # TODO() Below is a temporary fix to retrieve compute_profile_name.
            #  Expensive _get_cluster_info() call must be removed once PKS team
            #  moves list_clusters to v1beta endpoint.
            v1_beta_cluster = self._get_cluster_info(cluster_name=cluster.name)
            v1_beta_cluster[K8S_PROVIDER_KEY] = K8sProvider.PKS
            # cluster_dict = {
            #     'name': cluster.name,
            #     'plan_name': cluster.plan_name,
            #     'uuid': cluster.uuid,
            #     'status': cluster.last_action_state,
            #     'last_action': cluster.last_action,
            #     'k8_master_ips': cluster.kubernetes_master_ips,
            #     'compute_profile_name': cluster.compute_profile_name,
            #     'worker_count':
            #     cluster.parameters.kubernetes_worker_instances
            # }
            # list_of_cluster_dicts.append(cluster_dict)
            list_of_cluster_dicts.append(v1_beta_cluster)

        LOGGER.debug(f"Received response from PKS: {self.pks_host_uri} on the"
                     f" list of clusters: {list_of_cluster_dicts}")
        return list_of_cluster_dicts
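
    # Shape of the returned list (keys are illustrative; each entry is the
    # raw v1beta cluster-info dict from _get_cluster_info() plus the provider
    # marker):
    #
    #   [{'name': 'mycluster', ..., K8S_PROVIDER_KEY: K8sProvider.PKS}]
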
    def _delete_cluster(self, cluster_name):
        """Delete the cluster with a given name in PKS environment.

        Also deletes associated NSX-T Distributed Firewall rules that kept the
        cluster network isolated from other clusters.

        :param str cluster_name: Name of the cluster
        """
        result = {}

        cluster_api = ClusterApiV1(api_client=self.client_v1)

        LOGGER.debug(f"Sending request to PKS: {self.pks_host_uri} to delete "
                     f"the cluster with name: {cluster_name}")
        try:
            cluster_api.delete_cluster(cluster_name=cluster_name)
        except v1Exception as err:
            LOGGER.debug(f"Deleting cluster {cluster_name} failed with "
                         f"error:\n {err}")
            raise PksServerError(err.status, err.body)

        LOGGER.debug(f"PKS: {self.pks_host_uri} accepted the request to delete"
                     f" the cluster: {cluster_name}")

        # remove cluster network isolation
        try:
            LOGGER.debug("Removing network isolation of cluster "
                         f"{cluster_name}.")
            cluster_network_isolater = ClusterNetworkIsolater(self.nsxt_client)
            cluster_network_isolater.remove_cluster_isolation(cluster_name)
        except Exception:
            # NSX-T operations are idempotent, so a missing NSGroup (or
            # similar) should not cause an error. Any other exception is
            # simply caught and ignored.
            pass

        result['cluster_name'] = cluster_name
        result['task_status'] = 'in progress'
        return result