    def delete_cluster(self, data):
        """Start the delete cluster operation.

        Common broker function that validates data for the 'delete cluster'
        operation. Deleting a cluster is an asynchronous task, so the returned
        `result['task_href']` can be polled to get updates on task progress.

        Required data: cluster_name
        Optional data and default values: org_name=None, ovdc_name=None
        """
        required = [RequestKey.CLUSTER_NAME]
        utils.ensure_keys_in_dict(required, data, dict_name='data')
        defaults = {RequestKey.ORG_NAME: None, RequestKey.OVDC_NAME: None}
        validated_data = {**defaults, **data}
        cluster_name = validated_data[RequestKey.CLUSTER_NAME]

        cluster = get_cluster(self.tenant_client,
                              cluster_name,
                              org_name=validated_data[RequestKey.ORG_NAME],
                              ovdc_name=validated_data[RequestKey.OVDC_NAME])
        cluster_id = cluster['cluster_id']
        # must _update_task here or else self.task_resource is None
        # do not logout of sys admin, or else in pyvcloud's session.request()
        # call, session becomes None
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Deleting cluster {cluster_name} ({cluster_id})")
        self._delete_cluster_async(cluster_name=cluster_name,
                                   cluster_vdc_href=cluster['vdc_href'])

        return {
            'cluster_name': cluster_name,
            'task_href': self.task_resource.get('href')
        }
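
    # Usage sketch (illustrative, not part of the broker): the returned
    # 'task_href' can be polled with a pyvcloud client until the delete task
    # reaches a terminal state. `broker` and `client` are assumed to already
    # exist and be authenticated.
    #
    #     import time
    #     result = broker.delete_cluster({RequestKey.CLUSTER_NAME: 'mycluster'})
    #     while True:
    #         task = client.get_resource(result['task_href'])
    #         if task.get('status').lower() in ('success', 'error', 'aborted'):
    #             break
    #         time.sleep(5)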
    def get_node_info(self, data):
        """Get node metadata as dictionary.

        Required data: cluster_name, node_name
        Optional data and default values: org_name=None, ovdc_name=None
        """
        required = [
            RequestKey.CLUSTER_NAME,
            RequestKey.NODE_NAME
        ]
        utils.ensure_keys_in_dict(required, data, dict_name='data')
        defaults = {
            RequestKey.ORG_NAME: None,
            RequestKey.OVDC_NAME: None
        }
        validated_data = {**defaults, **data}
        cluster_name = validated_data[RequestKey.CLUSTER_NAME]
        node_name = validated_data[RequestKey.NODE_NAME]

        cluster = get_cluster(self.tenant_client, cluster_name,
                              org_name=validated_data[RequestKey.ORG_NAME],
                              ovdc_name=validated_data[RequestKey.OVDC_NAME])

        vapp = VApp(self.tenant_client, href=cluster['vapp_href'])
        vms = vapp.get_all_vms()
        node_info = None
        for vm in vms:
            vm_name = vm.get('name')
            if node_name != vm_name:
                continue

            node_info = {
                'name': vm_name,
                'numberOfCpus': '',
                'memoryMB': '',
                'status': VCLOUD_STATUS_MAP.get(int(vm.get('status'))),
                'ipAddress': ''
            }
            if hasattr(vm, 'VmSpecSection'):
                node_info['numberOfCpus'] = vm.VmSpecSection.NumCpus.text
                node_info['memoryMB'] = vm.VmSpecSection.MemoryResourceMb.Configured.text # noqa: E501
            try:
                node_info['ipAddress'] = vapp.get_primary_ip(vm_name)
            except Exception:
                LOGGER.debug(f"Unable to get ip address of node {vm_name}")
            if vm_name.startswith(NodeType.MASTER):
                node_info['node_type'] = 'master'
            elif vm_name.startswith(NodeType.WORKER):
                node_info['node_type'] = 'worker'
            elif vm_name.startswith(NodeType.NFS):
                node_info['node_type'] = 'nfs'
                node_info['exports'] = self._get_nfs_exports(node_info['ipAddress'], vapp, vm_name) # noqa: E501
        if node_info is None:
            raise NodeNotFoundError(f"Node '{node_name}' not found in "
                                    f"cluster '{cluster_name}'")
        return node_info
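
# Illustrative only: the shape of the dictionary get_node_info returns for a
# worker node. All values below are invented, not real output.
example_node_info = {
    'name': 'node-abcd',           # VM name inside the cluster vApp
    'numberOfCpus': '2',           # from VmSpecSection, as text
    'memoryMB': '2048',            # from VmSpecSection, as text
    'status': 'Powered on',        # int status mapped via VCLOUD_STATUS_MAP
    'ipAddress': '10.150.200.14',  # primary IP; '' if the lookup failed
    'node_type': 'worker',         # derived from the VM name prefix
}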
def ovdc_info(request_data, tenant_auth_token):
    """Request handler for ovdc info operation.

    Required data: ovdc_id

    :return: Dictionary with org VDC k8s provider metadata.
    """
    required = [RequestKey.OVDC_ID]
    utils.ensure_keys_in_dict(required, request_data, dict_name='data')

    return ovdc_utils.get_ovdc_k8s_provider_metadata(
        ovdc_id=request_data[RequestKey.OVDC_ID])
def ovdc_compute_policy_list(request_data, tenant_auth_token):
    """Request handler for ovdc compute-policy list operation.

    Required data: ovdc_id

    :return: List of compute policies on the given org VDC.
    """
    required = [RequestKey.OVDC_ID]
    utils.ensure_keys_in_dict(required, request_data, dict_name='data')
    client, _ = vcd_utils.connect_vcd_user_via_token(tenant_auth_token)

    cpm = ComputePolicyManager(client)
    return cpm.list_compute_policies_on_vdc(request_data[RequestKey.OVDC_ID])
    def delete_nodes(self, data):
        """Start the delete nodes operation.

        Validates data for the 'delete nodes' operation. Deleting nodes is an
        asynchronous task, so the returned `result['task_href']` can be polled
        to get updates on task progress.

        Required data: cluster_name, node_names_list
        Optional data and default values: org_name=None, ovdc_name=None
        """
        required = [
            RequestKey.CLUSTER_NAME,
            RequestKey.NODE_NAMES_LIST
        ]
        utils.ensure_keys_in_dict(required, data, dict_name='data')
        defaults = {
            RequestKey.ORG_NAME: None,
            RequestKey.OVDC_NAME: None
        }
        validated_data = {**defaults, **data}
        cluster_name = validated_data[RequestKey.CLUSTER_NAME]
        node_names_list = validated_data[RequestKey.NODE_NAMES_LIST]
        # check that there are nodes to delete
        if len(node_names_list) == 0:
            LOGGER.debug("No nodes specified to delete")
            return {'body': {}}
        # check that master node is not in specified nodes
        for node in node_names_list:
            if node.startswith(NodeType.MASTER):
                raise CseServerError(f"Can't delete a master node: '{node}'.")

        cluster = get_cluster(self.tenant_client, cluster_name,
                              org_name=validated_data[RequestKey.ORG_NAME],
                              ovdc_name=validated_data[RequestKey.OVDC_NAME])
        cluster_id = cluster['cluster_id']
        # must _update_task here or else self.task_resource is None
        # do not logout of sys admin, or else in pyvcloud's session.request()
        # call, session becomes None
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Deleting {len(node_names_list)} node(s)"
                    f" from cluster {cluster_name}({cluster_id})")
        self._delete_nodes_async(
            cluster_name=cluster_name,
            cluster_vapp_href=cluster['vapp_href'],
            node_names_list=validated_data[RequestKey.NODE_NAMES_LIST])

        return {
            'cluster_name': cluster_name,
            'task_href': self.task_resource.get('href')
        }
    def resize_cluster(self, data):
        """Start the resize cluster operation.

        Common broker function that validates data for the 'resize cluster'
        operation. Native clusters cannot be resized down. Creating nodes is an
        asynchronous task, so the returned `result['task_href']` can be polled
        to get updates on task progress.

        Required data: cluster_name, network_name, num_workers
        Optional data and default values: org_name=None, ovdc_name=None,
            rollback=True, template_name=None, template_revision=None
        """
        # TODO default template for resizing should be master's template
        required = [
            RequestKey.CLUSTER_NAME, RequestKey.NUM_WORKERS,
            RequestKey.NETWORK_NAME
        ]
        utils.ensure_keys_in_dict(required, data, dict_name='data')
        defaults = {
            RequestKey.ORG_NAME: None,
            RequestKey.OVDC_NAME: None,
            RequestKey.ROLLBACK: True,
            RequestKey.TEMPLATE_NAME: None,
            RequestKey.TEMPLATE_REVISION: None
        }
        validated_data = {**defaults, **data}
        cluster_name = validated_data[RequestKey.CLUSTER_NAME]
        num_workers_wanted = validated_data[RequestKey.NUM_WORKERS]
        if num_workers_wanted < 1:
            raise CseServerError(f"Worker node count must be > 0 "
                                 f"(received {num_workers_wanted}).")

        # cluster_handler.py already makes a cluster info API call to vCD, but
        # that call does not return any node info, so this additional
        # cluster info call must be made
        cluster_info = self.get_cluster_info(validated_data)
        num_workers = len(cluster_info['nodes'])
        if num_workers > num_workers_wanted:
            raise CseServerError(f"Automatic scale down is not supported for "
                                 f"vCD powered Kubernetes clusters. Use "
                                 f"'vcd cse delete node' command.")
        elif num_workers == num_workers_wanted:
            raise CseServerError(f"Cluster '{cluster_name}' already has "
                                 f"{num_workers} worker nodes.")

        validated_data[RequestKey.NUM_WORKERS] = \
            num_workers_wanted - num_workers
        return self.create_nodes(validated_data)
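
    # A worked example of the resize arithmetic above (numbers invented):
    # with 2 current workers and a requested total of 5, NUM_WORKERS is
    # rewritten to the delta, 5 - 2 = 3, before delegating to create_nodes,
    # which then adds exactly the 3 missing workers.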
    def get_cluster_config(self, data):
        """Get the cluster's kube config contents.

        Common broker function that validates data for 'cluster config'
        operation and returns the cluster's kube config file contents
        as a string.

        Required data: cluster_name
        Optional data and default values: org_name=None, ovdc_name=None
        """
        required = [
            RequestKey.CLUSTER_NAME
        ]
        utils.ensure_keys_in_dict(required, data, dict_name='data')
        defaults = {
            RequestKey.ORG_NAME: None,
            RequestKey.OVDC_NAME: None
        }
        validated_data = {**defaults, **data}

        cluster_name = validated_data[RequestKey.CLUSTER_NAME]
        cluster = get_cluster(self.tenant_client, cluster_name,
                              org_name=validated_data[RequestKey.ORG_NAME],
                              ovdc_name=validated_data[RequestKey.OVDC_NAME])
        vapp = VApp(self.tenant_client, href=cluster['vapp_href'])
        node_names = get_node_names(vapp, NodeType.MASTER)

        all_results = []
        try:
            for node_name in node_names:
                LOGGER.debug(f"getting file from node {node_name}")
                password = vapp.get_admin_password(node_name)
                vs = vs_utils.get_vsphere(self.sys_admin_client, vapp,
                                          vm_name=node_name, logger=LOGGER)
                vs.connect()
                moid = vapp.get_vm_moid(node_name)
                vm = vs.get_vm_by_moid(moid)
                filename = '/root/.kube/config'
                result = vs.download_file_from_guest(vm, 'root',
                                                     password,
                                                     filename)
                all_results.append(result)
        finally:
            self.logout_sys_admin_client()

        if len(all_results) == 0 or all_results[0].status_code != requests.codes.ok: # noqa: E501
            raise ClusterOperationError("Couldn't get cluster configuration")
        return all_results[0].content.decode()
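
    # Hypothetical usage sketch: persisting the returned kube config text so
    # kubectl can consume it via --kubeconfig. `broker` is assumed to exist.
    #
    #     config_text = broker.get_cluster_config(
    #         {RequestKey.CLUSTER_NAME: 'mycluster'})
    #     with open('kubeconfig.yaml', 'w') as f:
    #         f.write(config_text)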
    def create_cluster(self, data):
        """Create cluster in PKS environment.

        To retain the user context, user-id of the logged-in user is appended
        to the original cluster name before the actual cluster creation.

        :param dict data: named parameters necessary to create the cluster
            (cluster_name, node_count, pks_plan, pks_ext_host,
            compute_profile_name)

        :return: Details of the cluster

        :rtype: dict
        """
        required = [
            RequestKey.CLUSTER_NAME,
            RequestKey.PKS_PLAN_NAME,
            'pks_ext_host',  # noqa: E501 TODO this should not be part of the request spec
            RequestKey.ORG_NAME,
            RequestKey.OVDC_NAME
        ]
        utils.ensure_keys_in_dict(required, data, dict_name='data')

        cluster_name = data[RequestKey.CLUSTER_NAME]
        qualified_cluster_name = self._append_user_id(cluster_name)
        data[RequestKey.CLUSTER_NAME] = qualified_cluster_name

        if not self.nsxt_server:
            raise CseServerError(
                "NSX-T server details not found for PKS server selected for "
                f"cluster : {cluster_name}. Aborting creation of cluster.")

        # this needs to be refactored
        cluster_info = self._create_cluster(
            cluster_name=data[RequestKey.CLUSTER_NAME],
            num_workers=data[RequestKey.NUM_WORKERS],
            pks_plan_name=data[RequestKey.PKS_PLAN_NAME],
            pks_ext_host=data['pks_ext_host'])

        self._isolate_cluster(cluster_name, qualified_cluster_name,
                              cluster_info.get('uuid'))

        self._restore_original_name(cluster_info)
        if not self.tenant_client.is_sysadmin():
            self._filter_pks_properties(cluster_info)

        return cluster_info
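
# Hedged sketch of the name-qualification idea described in the docstring
# above: the logged-in user's id is appended to the requested cluster name
# before creation and stripped again before results are returned. The actual
# format used by _append_user_id is internal to the broker; the '---'
# separator below is only a guess for illustration.
def append_user_id(cluster_name, user_id, sep='---'):
    return f"{cluster_name}{sep}{user_id}"

def restore_original_name(qualified_name, sep='---'):
    return qualified_name.rsplit(sep, 1)[0]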
def cluster_create(request_data, tenant_auth_token):
    """Request handler for cluster create operation.

    Required data: org_name, ovdc_name, cluster_name
    Conditional data and default values:
        if k8s_provider is 'native':
            network_name, num_workers=2, num_cpu=None, mb_memory=None,
            storage_profile_name=None, template_name=default,
            template_revision=default, ssh_key_filepath=None, enable_nfs=False,
            rollback=True

    (data validation handled in brokers)

    :return: Dict
    """
    required = [
        RequestKey.CLUSTER_NAME
    ]
    utils.ensure_keys_in_dict(required, request_data, dict_name='data')
    cluster_name = request_data[RequestKey.CLUSTER_NAME]
    # TODO HACK 'is_org_admin_search' is used here to prevent users from
    # creating clusters with the same name, including clusters in PKS.
    # True means that the cluster list is filtered by the org name of
    # the logged-in user to check that there are no duplicate clusters
    request_data['is_org_admin_search'] = True

    try:
        broker_manager.get_cluster_and_broker(request_data, tenant_auth_token)
        raise ClusterAlreadyExistsError(f"Cluster {cluster_name} "
                                        f"already exists.")
    except ClusterNotFoundError:
        pass

    k8s_metadata = \
        ovdc_utils.get_ovdc_k8s_provider_metadata(
            org_name=request_data[RequestKey.ORG_NAME],
            ovdc_name=request_data[RequestKey.OVDC_NAME],
            include_credentials=True,
            include_nsxt_info=True)
    if k8s_metadata.get(K8S_PROVIDER_KEY) == K8sProvider.PKS:
        request_data[RequestKey.PKS_PLAN_NAME] = k8s_metadata[PKS_PLANS_KEY][0]
        request_data['pks_ext_host'] = \
            f"{cluster_name}.{k8s_metadata[PKS_CLUSTER_DOMAIN_KEY]}"
    broker = broker_manager.get_broker_from_k8s_metadata(k8s_metadata,
                                                         tenant_auth_token)
    return broker.create_cluster(request_data)
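
# Illustrative request payload for a native cluster create (values invented);
# broker-side validation fills in the remaining defaults:
example_create_request = {
    RequestKey.CLUSTER_NAME: 'dev-cluster',
    RequestKey.ORG_NAME: 'my-org',
    RequestKey.OVDC_NAME: 'my-ovdc',
    RequestKey.NETWORK_NAME: 'ovdc-net',
    RequestKey.NUM_WORKERS: 2,
}
# cluster_create(example_create_request, tenant_auth_token)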
def ovdc_compute_policy_update(request_data, tenant_auth_token):
    """Request handler for ovdc compute-policy update operation.

    Required data: ovdc_id, compute_policy_action, compute_policy_name

    :return: Dictionary with task href.
    """
    required = [
        RequestKey.OVDC_ID, RequestKey.COMPUTE_POLICY_ACTION,
        RequestKey.COMPUTE_POLICY_NAME
    ]
    utils.ensure_keys_in_dict(required, request_data, dict_name='data')
    client, _ = vcd_utils.connect_vcd_user_via_token(tenant_auth_token)
    action = request_data[RequestKey.COMPUTE_POLICY_ACTION]
    cp_name = request_data[RequestKey.COMPUTE_POLICY_NAME]
    ovdc_id = request_data[RequestKey.OVDC_ID]

    cpm = ComputePolicyManager(client)
    cp = cpm.get_policy(cp_name)
    if cp is None:
        raise ValueError(f"Compute policy '{cp_name}' does not exist")
    cp_href = cp['href']

    if action == ComputePolicyAction.ADD:
        cpm.add_compute_policy_to_vdc(cp_href, vdc_id=ovdc_id)
        return f"Added compute policy '{cp_name}' to ovdc '{ovdc_id}'"

    if action == ComputePolicyAction.REMOVE:
        try:
            cpm.remove_compute_policy_from_vdc(cp_href, vdc_id=ovdc_id)
        except EntityNotFoundException:
            raise EntityNotFoundException(f"Compute policy '{cp_name}' not "
                                          f"found in ovdc '{ovdc_id}'")
        except Exception:
            # This ensures that the BadRequestException message is printed to
            # the console (this error occurs when the ovdc still has VMs or
            # vApp templates referencing the compute policy, so the policy
            # cannot be removed).
            raise Exception(f"Could not remove compute policy '{cp_name}' "
                            f"from ovdc '{ovdc_id}' (check that no vApp"
                            f" templates or VMs currently contain a reference"
                            f" to the compute policy).")
        return f"Removed compute policy '{cp_name}' from ovdc '{ovdc_id}'"

    raise ValueError("Unsupported compute policy action")
    def get_cluster_info(self, data):
        """Get cluster metadata as well as node data.

        Common broker function that validates data for the 'cluster info'
        operation and returns cluster/node metadata as dictionary.

        Required data: cluster_name
        Optional data and default values: org_name=None, ovdc_name=None
        """
        required = [
            RequestKey.CLUSTER_NAME
        ]
        utils.ensure_keys_in_dict(required, data, dict_name='data')
        defaults = {
            RequestKey.ORG_NAME: None,
            RequestKey.OVDC_NAME: None
        }
        validated_data = {**defaults, **data}
        cluster_name = validated_data[RequestKey.CLUSTER_NAME]
        cluster = get_cluster(self.tenant_client, cluster_name,
                              org_name=validated_data[RequestKey.ORG_NAME],
                              ovdc_name=validated_data[RequestKey.OVDC_NAME])

        cluster[K8S_PROVIDER_KEY] = K8sProvider.NATIVE
        vapp = VApp(self.tenant_client, href=cluster['vapp_href'])
        vms = vapp.get_all_vms()
        for vm in vms:
            node_info = {
                'name': vm.get('name'),
                'ipAddress': ''
            }
            try:
                node_info['ipAddress'] = vapp.get_primary_ip(vm.get('name'))
            except Exception:
                LOGGER.debug(f"Unable to get ip address of node "
                             f"{vm.get('name')}")
            if vm.get('name').startswith(NodeType.MASTER):
                cluster.get('master_nodes').append(node_info)
            elif vm.get('name').startswith(NodeType.WORKER):
                cluster.get('nodes').append(node_info)
            elif vm.get('name').startswith(NodeType.NFS):
                cluster.get('nfs_nodes').append(node_info)

        return cluster
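
# Illustrative only: get_cluster_info groups the vApp's VMs into three lists
# on the cluster dict, keyed by VM-name prefix. Example shape (names and
# addresses invented; the real prefixes come from NodeType):
example_cluster_nodes = {
    'master_nodes': [{'name': 'mstr-a1b2', 'ipAddress': '10.0.0.2'}],
    'nodes': [{'name': 'node-c3d4', 'ipAddress': '10.0.0.3'}],
    'nfs_nodes': [],
}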
def ovdc_update(request_data, tenant_auth_token):
    """Request handler for ovdc enable, disable operations.

    Required data: org_name, ovdc_name, k8s_provider, ovdc_id
    Conditional data:
        if k8s_provider is 'ent-pks': pks_plan_name, pks_cluster_domain

    :return: Dictionary with org VDC update task href.
    """
    # TODO the data flow here should be better understood.
    # org_name and ovdc_name seem redundant if we already have ovdc_id
    required = [
        RequestKey.ORG_NAME, RequestKey.OVDC_NAME, RequestKey.K8S_PROVIDER,
        RequestKey.OVDC_ID
    ]
    utils.ensure_keys_in_dict(required, request_data, dict_name='data')
    validated_data = request_data

    k8s_provider = validated_data[RequestKey.K8S_PROVIDER]
    k8s_provider_info = {K8S_PROVIDER_KEY: k8s_provider}

    if k8s_provider == K8sProvider.PKS:
        if not utils.is_pks_enabled():
            raise CseServerError('CSE is not configured to work with PKS.')
        required = [RequestKey.PKS_PLAN_NAME, RequestKey.PKS_CLUSTER_DOMAIN]
        utils.ensure_keys_in_dict(required, validated_data, dict_name='data')

        k8s_provider_info = ovdc_utils.construct_k8s_metadata_from_pks_cache(
            ovdc_id=validated_data[RequestKey.OVDC_ID],
            org_name=validated_data[RequestKey.ORG_NAME],
            pks_plans=validated_data[RequestKey.PKS_PLAN_NAME],
            pks_cluster_domain=validated_data[RequestKey.PKS_CLUSTER_DOMAIN],
            k8s_provider=k8s_provider)
        ovdc_utils.create_pks_compute_profile(k8s_provider_info,
                                              tenant_auth_token,
                                              validated_data)

    task = ovdc_utils.update_ovdc_k8s_provider_metadata(
        ovdc_id=validated_data[RequestKey.OVDC_ID],
        k8s_provider_data=k8s_provider_info,
        k8s_provider=k8s_provider)

    return {'task_href': task.get('href')}
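
# Illustrative enable request for a PKS-backed org VDC (all values invented):
example_ovdc_update_request = {
    RequestKey.ORG_NAME: 'my-org',
    RequestKey.OVDC_NAME: 'my-ovdc',
    RequestKey.OVDC_ID: 'urn:vcloud:vdc:00000000-0000-0000-0000-000000000000',
    RequestKey.K8S_PROVIDER: K8sProvider.PKS,
    RequestKey.PKS_PLAN_NAME: 'small',
    RequestKey.PKS_CLUSTER_DOMAIN: 'pks.example.com',
}
# ovdc_update(example_ovdc_update_request, tenant_auth_token)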
    def create_nodes(self, data):
        """Start the create nodes operation.

        Validates data for 'node create' operation. Creating nodes is an
        asynchronous task, so the returned `result['task_href']` can be polled
        to get updates on task progress.

        Required data: cluster_name, network_name
        Optional data and default values: num_workers=1, num_cpu=None,
            mb_memory=None, storage_profile_name=None, ssh_key_filepath=None,
            template_name=default, template_revision=default, enable_nfs=False,
            rollback=True
        """
        required = [
            RequestKey.CLUSTER_NAME,
            RequestKey.NETWORK_NAME
        ]
        utils.ensure_keys_in_dict(required, data, dict_name='data')
        cluster_name = data[RequestKey.CLUSTER_NAME]
        # check that requested/default template is valid
        template = get_template(
            name=data.get(RequestKey.TEMPLATE_NAME),
            revision=data.get(RequestKey.TEMPLATE_REVISION))
        defaults = {
            RequestKey.ORG_NAME: None,
            RequestKey.OVDC_NAME: None,
            RequestKey.NUM_WORKERS: 1,
            RequestKey.NUM_CPU: None,
            RequestKey.MB_MEMORY: None,
            RequestKey.STORAGE_PROFILE_NAME: None,
            RequestKey.SSH_KEY_FILEPATH: None,
            RequestKey.TEMPLATE_NAME: template[LocalTemplateKey.NAME],
            RequestKey.TEMPLATE_REVISION: template[LocalTemplateKey.REVISION],
            RequestKey.ENABLE_NFS: False,
            RequestKey.ROLLBACK: True,
        }
        validated_data = {**defaults, **data}

        # TODO HACK default dictionary combining needs to be fixed
        validated_data[RequestKey.TEMPLATE_NAME] = validated_data[RequestKey.TEMPLATE_NAME] or template[LocalTemplateKey.NAME] # noqa: E501
        validated_data[RequestKey.TEMPLATE_REVISION] = validated_data[RequestKey.TEMPLATE_REVISION] or template[LocalTemplateKey.REVISION] # noqa: E501

        template_name = validated_data[RequestKey.TEMPLATE_NAME]
        template_revision = validated_data[RequestKey.TEMPLATE_REVISION]

        num_workers = validated_data[RequestKey.NUM_WORKERS]
        if num_workers < 1:
            raise CseServerError(f"Worker node count must be > 0 "
                                 f"(received {num_workers}).")

        cluster = get_cluster(self.tenant_client, cluster_name,
                              org_name=validated_data[RequestKey.ORG_NAME],
                              ovdc_name=validated_data[RequestKey.OVDC_NAME])
        cluster_id = cluster['cluster_id']
        # must _update_task here or else self.task_resource is None
        # do not logout of sys admin, or else in pyvcloud's session.request()
        # call, session becomes None
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Creating {num_workers} node(s) from template "
                    f"'{template_name}' (revision {template_revision}) and "
                    f"adding to {cluster_name} ({cluster_id})")
        self._create_nodes_async(
            cluster_name=cluster_name,
            cluster_vdc_href=cluster['vdc_href'],
            cluster_vapp_href=cluster['vapp_href'],
            cluster_id=cluster_id,
            template_name=template_name,
            template_revision=template_revision,
            num_workers=validated_data[RequestKey.NUM_WORKERS],
            network_name=validated_data[RequestKey.NETWORK_NAME],
            num_cpu=validated_data[RequestKey.NUM_CPU],
            mb_memory=validated_data[RequestKey.MB_MEMORY],
            storage_profile_name=validated_data[RequestKey.STORAGE_PROFILE_NAME], # noqa: E501
            ssh_key_filepath=validated_data[RequestKey.SSH_KEY_FILEPATH],
            enable_nfs=validated_data[RequestKey.ENABLE_NFS],
            rollback=validated_data[RequestKey.ROLLBACK])

        return {
            'cluster_name': cluster_name,
            'task_href': self.task_resource.get('href')
        }
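
    # Why the "TODO HACK" fallback above is needed: {**defaults, **data}
    # lets an explicit None in the request clobber the template default, so
    # the `or` re-applies it afterwards. A small demonstration:
    #
    #     defaults = {'template_name': 'some-template'}  # invented value
    #     data = {'template_name': None}   # key present, but value is None
    #     merged = {**defaults, **data}
    #     merged['template_name'] is None  # True: the default was clobbered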
    def create_cluster(self, data):
        """Start the cluster creation operation.

        Common broker function that validates data for the 'create cluster'
        operation and returns a dictionary with cluster detail and task
        information. Calls the asynchronous cluster create function that
        actually performs the work. The returned `result['task_href']` can
        be polled to get updates on task progress.

        Required data: cluster_name, org_name, ovdc_name, network_name
        Optional data and default values: num_workers=2, num_cpu=None,
            mb_memory=None, storage_profile_name=None, ssh_key_filepath=None,
            template_name=default, template_revision=default, enable_nfs=False,
            rollback=True
        """
        required = [
            RequestKey.CLUSTER_NAME,
            RequestKey.ORG_NAME,
            RequestKey.OVDC_NAME,
            RequestKey.NETWORK_NAME
        ]
        utils.ensure_keys_in_dict(required, data, dict_name='data')
        cluster_name = data[RequestKey.CLUSTER_NAME]
        # check that cluster name is syntactically valid
        if not is_valid_cluster_name(cluster_name):
            raise CseServerError(f"Invalid cluster name '{cluster_name}'")
        # check that cluster name doesn't already exist
        try:
            get_cluster(self.tenant_client, cluster_name,
                        org_name=data[RequestKey.ORG_NAME],
                        ovdc_name=data[RequestKey.OVDC_NAME])
            raise ClusterAlreadyExistsError(f"Cluster {cluster_name} "
                                            f"already exists.")
        except ClusterNotFoundError:
            pass
        # check that requested/default template is valid
        template = get_template(
            name=data.get(RequestKey.TEMPLATE_NAME),
            revision=data.get(RequestKey.TEMPLATE_REVISION))
        defaults = {
            RequestKey.NUM_WORKERS: 2,
            RequestKey.NUM_CPU: None,
            RequestKey.MB_MEMORY: None,
            RequestKey.STORAGE_PROFILE_NAME: None,
            RequestKey.SSH_KEY_FILEPATH: None,
            RequestKey.TEMPLATE_NAME: template[LocalTemplateKey.NAME],
            RequestKey.TEMPLATE_REVISION: template[LocalTemplateKey.REVISION],
            RequestKey.ENABLE_NFS: False,
            RequestKey.ROLLBACK: True,
        }
        validated_data = {**defaults, **data}

        # TODO HACK default dictionary combining needs to be fixed
        validated_data[RequestKey.TEMPLATE_NAME] = validated_data[RequestKey.TEMPLATE_NAME] or template[LocalTemplateKey.NAME] # noqa: E501
        validated_data[RequestKey.TEMPLATE_REVISION] = validated_data[RequestKey.TEMPLATE_REVISION] or template[LocalTemplateKey.REVISION] # noqa: E501

        template_name = validated_data[RequestKey.TEMPLATE_NAME]
        template_revision = validated_data[RequestKey.TEMPLATE_REVISION]

        # check that requested number of worker nodes is at least 1
        num_workers = validated_data[RequestKey.NUM_WORKERS]
        if num_workers < 1:
            raise CseServerError(f"Worker node count must be > 0 "
                                 f"(received {num_workers}).")

        cluster_id = str(uuid.uuid4())
        # must _update_task or else self.task_resource is None
        # do not logout of sys admin, or else in pyvcloud's session.request()
        # call, session becomes None
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Creating cluster vApp '{cluster_name}' ({cluster_id})"
                    f" from template '{template_name}' "
                    f"(revision {template_revision})")
        self._create_cluster_async(
            org_name=validated_data[RequestKey.ORG_NAME],
            ovdc_name=validated_data[RequestKey.OVDC_NAME],
            cluster_name=cluster_name,
            cluster_id=cluster_id,
            template_name=template_name,
            template_revision=template_revision,
            num_workers=validated_data[RequestKey.NUM_WORKERS],
            network_name=validated_data[RequestKey.NETWORK_NAME],
            num_cpu=validated_data[RequestKey.NUM_CPU],
            mb_memory=validated_data[RequestKey.MB_MEMORY],
            storage_profile_name=validated_data[RequestKey.STORAGE_PROFILE_NAME], # noqa: E501
            ssh_key_filepath=validated_data[RequestKey.SSH_KEY_FILEPATH],
            enable_nfs=validated_data[RequestKey.ENABLE_NFS],
            rollback=validated_data[RequestKey.ROLLBACK])

        return {
            'name': cluster_name,
            'cluster_id': cluster_id,
            'task_href': self.task_resource.get('href')
        }
def ovdc_compute_policy_update(request_data, tenant_auth_token):
    """Request handler for ovdc compute-policy update operation.

    Required data: ovdc_id, compute_policy_action, compute_policy_name

    :return: Dictionary with task href.
    """
    required = [
        RequestKey.OVDC_ID, RequestKey.COMPUTE_POLICY_ACTION,
        RequestKey.COMPUTE_POLICY_NAME
    ]
    utils.ensure_keys_in_dict(required, request_data, dict_name='data')
    defaults = {
        RequestKey.REMOVE_COMPUTE_POLICY_FROM_VMS: False,
    }
    validated_data = {**defaults, **request_data}
    action = validated_data[RequestKey.COMPUTE_POLICY_ACTION]
    cp_name = validated_data[RequestKey.COMPUTE_POLICY_NAME]
    ovdc_id = validated_data[RequestKey.OVDC_ID]
    remove_compute_policy_from_vms = validated_data[
        RequestKey.REMOVE_COMPUTE_POLICY_FROM_VMS]  # noqa: E501

    client, _ = vcd_utils.connect_vcd_user_via_token(tenant_auth_token)

    cpm = ComputePolicyManager(client)
    cp_href = None
    cp_id = None
    for _cp in cpm.list_compute_policies_on_vdc(ovdc_id):
        if _cp['name'] == cp_name:
            cp_href = _cp['href']
            cp_id = _cp['id']
    if cp_href is None:
        raise ValueError(f"Compute policy '{cp_name}' not found.")

    if action == ComputePolicyAction.ADD:
        cpm.add_compute_policy_to_vdc(ovdc_id, cp_href)
        return f"Added compute policy '{cp_name}' ({cp_id}) to ovdc " \
               f"({ovdc_id})"

    if action == ComputePolicyAction.REMOVE:
        try:
            cpm.remove_compute_policy_from_vdc(
                ovdc_id,
                cp_href,
                remove_compute_policy_from_vms=remove_compute_policy_from_vms)
        except EntityNotFoundException:
            raise EntityNotFoundException(
                f"Compute policy '{cp_name}' not found in ovdc ({ovdc_id})")
        except Exception:
            # This ensures that the BadRequestException message is printed to
            # the console (this error occurs when the ovdc still has VMs or
            # vApp templates referencing the compute policy, so the policy
            # cannot be removed).
            msg = f"Could not remove compute policy '{cp_name}' " \
                  f"({cp_id}) from ovdc ({ovdc_id})"
            LOGGER.error(msg, exc_info=True)
            raise Exception(
                f"{msg} (check that no vApp templates or VMs currently "
                f"contain a reference to the compute policy)")
        return f"Removed compute policy '{cp_name}' ({cp_id}) " \
               f"from ovdc ({ovdc_id})"

    raise ValueError("Unsupported compute policy action")