def create_nodes(self):
     result = {'body': {}}
     self.cluster_name = self.body['name']
     LOGGER.debug(f"About to add {self.body['node_count']} nodes to cluster"
                  " {self.cluster_name} on VDC {self.body['vdc']}, "
                  "sp={self.body['storage_profile']}")
     if self.body['node_count'] < 1:
         raise CseServerError(f"Invalid node count: "
                              f"{self.body['node_count']}.")
     self._connect_tenant()
     self._connect_sys_admin()
     clusters = load_from_metadata(self.tenant_client,
                                   name=self.cluster_name)
     if len(clusters) != 1:
         raise CseServerError(f"Cluster '{self.cluster_name}' not found.")
     self.cluster = clusters[0]
     self.op = OP_CREATE_NODES
     self.cluster_id = self.cluster['cluster_id']
     self.update_task(
         TaskStatus.RUNNING,
         message=f"Adding {self.body['node_count']} node(s) to cluster "
         "{self.cluster_name}({self.cluster_id})")
     self.daemon = True
     self.start()
     response_body = {}
     response_body['cluster_name'] = self.cluster_name
     response_body['task_href'] = self.task_resource.get('href')
     result['body'] = response_body
     result['status_code'] = ACCEPTED
     return result
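
Every broker method in these examples follows the same asynchronous shape: record the operation, mark the thread as a daemon, call start(), and immediately return an ACCEPTED response whose task_href the caller can poll. A minimal standalone sketch of that shape (AsyncBroker, submit() and the task URL are illustrative placeholders, not the actual CSE classes):

import threading

class AsyncBroker(threading.Thread):
    def run(self):
        # the long-running work (e.g. adding nodes) would happen here
        pass

    def submit(self):
        self.daemon = True  # background thread should not block shutdown
        # placeholder task reference; the real code takes it from update_task()
        task_href = "https://vcd.example.com/api/task/placeholder"
        self.start()        # kicks off run() in the background
        return {'body': {'task_href': task_href}, 'status_code': 202}

print(AsyncBroker().submit()['status_code'])  # 202 == ACCEPTED
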
Example #2
    def resize_cluster(self, cluster_name, node_count, curr_cluster_info=None):
        """Resize the cluster of a given name to given number of worker nodes.

        :param str cluster_name: Name of the cluster
        :param int node_count: New size of the worker nodes
        (should be greater than the current number).
        :param dict curr_cluster_info: Current properties of the cluster

        :return response: response returned by create_nodes()
        :rtype: dict
        """
        # TODO(resize_cluster) Once VcdBroker.create_nodes() is hooked to
        #  broker_manager, modify this method to return only
        #  response['body'] not the entire response.
        if curr_cluster_info:
            curr_worker_count = len(curr_cluster_info['nodes'])
        else:
            cluster = self.get_cluster_info(cluster_name=cluster_name)
            curr_worker_count = len(cluster['nodes'])

        if curr_worker_count > node_count:
            raise CseServerError(f"Automatic scale down is not supported for "
                                 f"vCD powered Kubernetes clusters. Use "
                                 f"'vcd cse delete node' command.")
        elif curr_worker_count == node_count:
            raise CseServerError(f"Cluster - {cluster_name} is already at the "
                                 f"size of {curr_worker_count}.")

        self.req_spec['node_count'] = node_count - curr_worker_count
        response = self.create_nodes()
        return response
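
The resize logic above only scales a cluster up: it computes the difference between the requested worker count and the current one, rejects equal or smaller targets, and hands the delta to create_nodes(). A standalone sketch of that rule (the function name and use of ValueError are illustrative):

def compute_worker_delta(current_workers, desired_workers):
    """Return how many worker nodes must be added to reach the desired size."""
    if desired_workers < current_workers:
        raise ValueError("Scale down is not supported; delete nodes explicitly.")
    if desired_workers == current_workers:
        raise ValueError("Cluster is already at the requested size.")
    return desired_workers - current_workers

# e.g. a cluster with 2 workers resized to 5 needs 3 new nodes
assert compute_worker_delta(2, 5) == 3
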
Example #3
    def delete_nodes(self):
        result = {'body': {}}
        self.cluster_name = self.req_spec['name']
        LOGGER.debug(f"About to delete nodes from cluster with name: "
                     f"{self.req_spec['name']}")

        if len(self.req_spec['nodes']) < 1:
            raise CseServerError(f"Invalid list of nodes: "
                                 f"{self.req_spec['nodes']}.")
        for node in self.req_spec['nodes']:
            if node.startswith(TYPE_MASTER):
                raise CseServerError(f"Can't delete a master node: '{node}'.")
        self._connect_tenant()
        self._connect_sys_admin()
        clusters = load_from_metadata(self.tenant_client,
                                      name=self.cluster_name)
        if len(clusters) != 1:
            raise CseServerError(f"Cluster '{self.cluster_name}' not found.")
        self.cluster = clusters[0]
        self.op = OP_DELETE_NODES
        self.cluster_id = self.cluster['cluster_id']
        self.update_task(
            TaskStatus.RUNNING,
            message=f"Deleting {len(self.req_spec['nodes'])} node(s) from "
            f"cluster {self.cluster_name}({self.cluster_id})")
        self.daemon = True
        self.start()
        response_body = {}
        response_body['cluster_name'] = self.cluster_name
        response_body['task_href'] = self.task_resource.get('href')
        result['body'] = response_body
        result['status_code'] = ACCEPTED
        return result
    def create_cluster(self, cluster_name, vdc_name, node_count,
                       storage_profile, network_name, template, **kwargs):

        # TODO(ClusterSpec) Create an inner class "ClusterSpec"
        #  in abstract_broker.py and have subclasses define and use it
        #  as instance variable.
        #  Method 'create_cluster' in VcdBroker and PksBroker should take
        #  ClusterParams either as a param (or)
        #  read from instance variable (if needed only).

        if not network_name:
            raise CseServerError(f"Cluster cannot be created. "
                                 f"Please provide a valid value for org "
                                 f"vDC network param.")

        LOGGER.debug(f"About to create cluster {cluster_name} on {vdc_name} "
                     f"with {node_count} nodes, sp={storage_profile}")

        if not self._is_valid_name(cluster_name):
            raise CseServerError(f"Invalid cluster name '{cluster_name}'")
        self._connect_tenant()
        self._connect_sys_admin()
        self.cluster_name = cluster_name
        self.cluster_id = str(uuid.uuid4())
        self.op = OP_CREATE_CLUSTER
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Creating cluster {cluster_name}({self.cluster_id})")
        self.daemon = True
        self.start()
        result = {}
        result['name'] = self.cluster_name
        result['cluster_id'] = self.cluster_id
        result['task_href'] = self.task_resource.get('href')
        return result
 def create_nodes(self, headers, body):
     result = {'body': {}}
     self.cluster_name = body['name']
     LOGGER.debug('about to add %s nodes to cluster %s on VDC %s, sp=%s',
                  body['node_count'], self.cluster_name, body['vdc'],
                  body['storage_profile'])
     if body['node_count'] < 1:
         raise CseServerError('Invalid node count: %s.' %
                              body['node_count'])
     self.tenant_info = self._connect_tenant(headers)
     clusters = load_from_metadata(self.client_tenant,
                                   name=self.cluster_name)
     if len(clusters) != 1:
         raise CseServerError('Cluster \'%s\' not found.' %
                              self.cluster_name)
     self.cluster = clusters[0]
     self.headers = headers
     self.body = body
     self.op = OP_CREATE_NODES
     self._connect_sysadmin()
     self.cluster_id = self.cluster['cluster_id']
     self.update_task(
         TaskStatus.RUNNING,
         message='Adding %s node(s) to cluster %s(%s)' %
         (body['node_count'], self.cluster_name, self.cluster_id))
     self.daemon = True
     self.start()
     response_body = {}
     response_body['cluster_name'] = self.cluster_name
     response_body['task_href'] = self.task_resource.get('href')
     result['body'] = response_body
     result['status_code'] = ACCEPTED
     return result
    def resize_cluster(self, cluster_name, node_count, curr_cluster_info=None):
        """Resize the cluster of a given name to given number of worker nodes.

        :param str cluster_name: Name of the cluster
        :param int node_count: New size of the worker nodes
        (should be greater than the current number).
        :param dict curr_cluster_info: Current properties of the cluster

        :return response: response returned by create_nodes()
        :rtype: dict
        """
        if curr_cluster_info:
            curr_worker_count = len(curr_cluster_info['nodes'])
        else:
            cluster = self.get_cluster_info(cluster_name=cluster_name)
            curr_worker_count = len(cluster['nodes'])

        if curr_worker_count > node_count:
            raise CseServerError(f"Automatic scale down is not supported for "
                                 f"vCD powered Kubernetes clusters. Use "
                                 f"'vcd cse delete node' command.")
        elif curr_worker_count == node_count:
            raise CseServerError(f"Cluster - {cluster_name} is already at the "
                                 f"size of {curr_worker_count}.")

        self.req_spec[RequestKey.NUM_WORKERS] = node_count - curr_worker_count
        response = self.create_nodes()
        return response
    def get_node_info(self, cluster_name, node_name, headers):
        """Get the info of a given node in the cluster.

        :param cluster_name: (str): Name of the cluster
        :param node_name: (str): Name of the node
        :param headers: (str): Request headers

        :return: (dict): Info of the node.
        """
        result = {}

        result['body'] = []
        result['status_code'] = OK
        self._connect_tenant(headers)
        clusters = load_from_metadata(self.client_tenant, name=cluster_name)
        if len(clusters) == 0:
            raise CseServerError('Cluster \'%s\' not found.' % cluster_name)
        vapp = VApp(self.client_tenant, href=clusters[0]['vapp_href'])
        vms = vapp.get_all_vms()
        node_info = None
        for vm in vms:
            if node_name == vm.get('name'):
                node_info = {
                    'name': vm.get('name'),
                    'numberOfCpus': '',
                    'memoryMB': '',
                    'status': VCLOUD_STATUS_MAP.get(int(vm.get('status'))),
                    'ipAddress': ''
                }
                if hasattr(vm, 'VmSpecSection'):
                    node_info['numberOfCpus'] = vm.VmSpecSection.NumCpus.text
                    node_info['memoryMB'] = \
                        vm.VmSpecSection.MemoryResourceMb.Configured.text
                try:
                    node_info['ipAddress'] = vapp.get_primary_ip(
                        vm.get('name'))
                except Exception:
                    LOGGER.debug('cannot get ip address '
                                 'for node %s' % vm.get('name'))
                if vm.get('name').startswith(TYPE_MASTER):
                    node_info['node_type'] = 'master'
                elif vm.get('name').startswith(TYPE_NODE):
                    node_info['node_type'] = 'node'
                elif vm.get('name').startswith(TYPE_NFS):
                    node_info['node_type'] = 'nfsd'
                    exports = self._get_nfs_exports(node_info['ipAddress'],
                                                    vapp, vm)
                    node_info['exports'] = exports
        if node_info is None:
            raise CseServerError('Node \'%s\' not found in cluster \'%s\'' %
                                 (node_name, cluster_name))
        result['body'] = node_info
        return result
Example #8
    def resize_cluster(self, data):
        """Start the resize cluster operation.

        Common broker function that validates data for the 'resize cluster'
        operation. Native clusters cannot be resized down. Creating nodes is an
        asynchronous task, so the returned `result['task_href']` can be polled
        to get updates on task progress.

        Required data: cluster_name, network, num_nodes
        Optional data and default values: org_name=None, ovdc_name=None,
            rollback=True, template_name=None, template_revision=None
        """
        # TODO default template for resizing should be master's template
        required = [
            RequestKey.CLUSTER_NAME, RequestKey.NUM_WORKERS,
            RequestKey.NETWORK_NAME
        ]
        req_utils.validate_payload(data, required)

        defaults = {
            RequestKey.ORG_NAME: None,
            RequestKey.OVDC_NAME: None,
            RequestKey.ROLLBACK: True,
            RequestKey.TEMPLATE_NAME: None,
            RequestKey.TEMPLATE_REVISION: None
        }
        validated_data = {**defaults, **data}
        cluster_name = validated_data[RequestKey.CLUSTER_NAME]
        num_workers_wanted = validated_data[RequestKey.NUM_WORKERS]
        if num_workers_wanted < 1:
            raise CseServerError(f"Worker node count must be > 0 "
                                 f"(received {num_workers_wanted}).")

        # cluster_handler.py already makes a cluster info API call to vCD, but
        # that call does not return any node info, so this additional
        # cluster info call must be made
        cluster_info = self.get_cluster_info(validated_data)
        num_workers = len(cluster_info['nodes'])
        if num_workers > num_workers_wanted:
            raise CseServerError(f"Automatic scale down is not supported for "
                                 f"vCD powered Kubernetes clusters. Use "
                                 f"'vcd cse delete node' command.")
        elif num_workers == num_workers_wanted:
            raise CseServerError(f"Cluster '{cluster_name}' already has "
                                 f"{num_workers} worker nodes.")

        validated_data[RequestKey.NUM_WORKERS] = \
            num_workers_wanted - num_workers
        return self.create_nodes(validated_data)
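
The broker variants that take a data dict validate the payload the same way: assert the required keys are present, then merge the caller's data over a defaults dict so every optional key is guaranteed to exist. A minimal sketch of that pattern, with plain strings standing in for the RequestKey constants:

def validate_payload(data, required, defaults):
    missing = [key for key in required if key not in data]
    if missing:
        raise ValueError(f"Missing required keys: {missing}")
    # caller-supplied values win over defaults
    return {**defaults, **data}

payload = validate_payload(
    data={'cluster_name': 'c1', 'num_workers': 3, 'network': 'net1'},
    required=['cluster_name', 'num_workers', 'network'],
    defaults={'org_name': None, 'ovdc_name': None, 'rollback': True})
assert payload['rollback'] is True and payload['num_workers'] == 3
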
Example #9
def wait_until_ready_to_exec(vs, vm, password, tries=30):
    ready = False
    script = \
"""#!/usr/bin/env bash
uname -a
""" # NOQA
    for n in range(tries):
        try:
            result = vs.execute_script_in_guest(
                vm,
                'root',
                password,
                script,
                target_file=None,
                wait_for_completion=True,
                wait_time=5,
                get_output=True,
                delete_script=True,
                callback=wait_for_guest_execution_callback)
            if result[0] == 0:
                ready = True
                break
            raise Exception(f"script returned {result[0]}")
        except Exception:
            LOGGER.info("VM is not ready to execute scripts, yet")
            time.sleep(2)
    if not ready:
        raise CseServerError('VM is not ready to execute scripts')
    def ovdc_info_for_kubernetes(self):
        """Info on ovdc for k8s deployment on the given container provider.

        :return: result object

        :rtype: dict

        :raises CseServerError: if the user is not system administrator.
        """
        result = dict()
        self._connect_tenant()
        if self.tenant_client.is_sysadmin():
            ovdc_cache = OvdcCache(self.tenant_client)
            metadata = ovdc_cache.get_ovdc_container_provider_metadata(
                self.body.get('ovdc_name', None),
                ovdc_id=self.body.get('ovdc_id', None),
                org_name=self.body.get('org_name', None))
            # remove username, secret from sending to client
            metadata.pop('username', None)
            metadata.pop('secret', None)
            result = dict()
            result['status_code'] = OK
            result['body'] = metadata
            return result
        else:
            raise CseServerError("Unauthorized Operation")
    def enable_ovdc_for_kubernetes(self):
        """Enable ovdc for k8-cluster deployment on given container provider.

        :return: result object

        :rtype: dict

        :raises CseServerError: if the user is not system administrator.
        """
        result = dict()
        self._connect_tenant()
        if self.tenant_client.is_sysadmin():
            ovdc_cache = OvdcCache(self.tenant_client)
            task = ovdc_cache.set_ovdc_container_provider_metadata(
                self.body['ovdc_name'],
                ovdc_id=self.body.get('ovdc_id', None),
                container_provider=self.body.get('container_provider', None),
                pks_plans=self.body['pks_plans'],
                org_name=self.body.get('org_name', None))
            response_body = dict()
            response_body['ovdc_name'] = self.body['ovdc_name']
            response_body['task_href'] = task.get('href')
            result['body'] = response_body
            result['status_code'] = ACCEPTED
            return result
        else:
            raise CseServerError("Unauthorized Operation")
    def delete_cluster(self):
        result = {}
        result['body'] = {}
        LOGGER.debug(f"About to delete cluster with name: {self.body['name']}")

        self.cluster_name = self.body['name']
        self._connect_tenant()
        self._connect_sys_admin()
        self.op = OP_DELETE_CLUSTER
        clusters = load_from_metadata(self.tenant_client,
                                      name=self.cluster_name)
        if len(clusters) != 1:
            raise CseServerError(f"Cluster {self.cluster_name} not found.")
        self.cluster = clusters[0]
        self.cluster_id = self.cluster['cluster_id']
        self.update_task(TaskStatus.RUNNING,
                         message='Deleting cluster %s(%s)' %
                         (self.cluster_name, self.cluster_id))
        self.daemon = True
        self.start()
        response_body = {}
        response_body['cluster_name'] = self.cluster_name
        response_body['task_href'] = self.task_resource.get('href')
        result['body'] = response_body
        result['status_code'] = ACCEPTED
        return result
    def create_cluster(self):
        result = {}
        result['body'] = {}

        cluster_name = self.body['name']
        vdc_name = self.body['vdc']
        node_count = self.body['node_count']
        LOGGER.debug('About to create cluster %s on %s with %s nodes, sp=%s',
                     cluster_name, vdc_name, node_count,
                     self.body['storage_profile'])
        result['body'] = {
            'message': 'can\'t create cluster \'%s\'' % cluster_name
        }

        if not self.is_valid_name(cluster_name):
            raise CseServerError(f"Invalid cluster name \'{cluster_name}\'")
        self._connect_tenant()
        self._connect_sys_admin()
        self.cluster_name = cluster_name
        self.cluster_id = str(uuid.uuid4())
        self.op = OP_CREATE_CLUSTER
        self.update_task(TaskStatus.RUNNING,
                         message='Creating cluster %s(%s)' %
                         (cluster_name, self.cluster_id))
        self.daemon = True
        self.start()
        response_body = {}
        response_body['name'] = self.cluster_name
        response_body['cluster_id'] = self.cluster_id
        response_body['task_href'] = self.task_resource.get('href')
        result['body'] = response_body
        result['status_code'] = ACCEPTED
        return result
    def get_cluster_info(self, name):
        """Get the info of the cluster.

        :param name: (str): Name of the cluster

        :return: (dict): Info of the cluster.
        """
        result = {}
        result['body'] = []
        result['status_code'] = OK

        self._connect_tenant()
        clusters = load_from_metadata(self.tenant_client, name=name)
        if len(clusters) == 0:
            raise CseServerError('Cluster \'%s\' not found.' % name)
        vapp = VApp(self.tenant_client, href=clusters[0]['vapp_href'])
        vms = vapp.get_all_vms()
        for vm in vms:
            node_info = {'name': vm.get('name'), 'ipAddress': ''}
            try:
                node_info['ipAddress'] = vapp.get_primary_ip(vm.get('name'))
            except Exception:
                LOGGER.debug('cannot get ip address for node %s' %
                             vm.get('name'))
            if vm.get('name').startswith(TYPE_MASTER):
                clusters[0].get('master_nodes').append(node_info)
            elif vm.get('name').startswith(TYPE_NODE):
                clusters[0].get('nodes').append(node_info)
            elif vm.get('name').startswith(TYPE_NFS):
                clusters[0].get('nfs_nodes').append(node_info)
        result['body'] = clusters[0]
        return result
Example #15
    def delete_cluster(self, headers, body):
        result = {}
        result['body'] = {}
        LOGGER.debug('about to delete cluster with name: %s' % body['name'])
        result['status_code'] = INTERNAL_SERVER_ERROR

        self.cluster_name = body['name']
        self.tenant_info = self._connect_tenant(headers)
        self.headers = headers
        self.body = body
        self.op = OP_DELETE_CLUSTER
        self._connect_sysadmin()
        clusters = load_from_metadata(self.client_tenant,
                                      name=self.cluster_name)
        if len(clusters) != 1:
            raise CseServerError('Cluster %s not found.' % self.cluster_name)
        self.cluster = clusters[0]
        self.cluster_id = self.cluster['cluster_id']
        self.update_task(TaskStatus.RUNNING,
                         message='Deleting cluster %s(%s)' %
                         (self.cluster_name, self.cluster_id))
        self.daemon = True
        self.start()
        response_body = {}
        response_body['cluster_name'] = self.cluster_name
        response_body['task_href'] = self.task_resource.get('href')
        result['body'] = response_body
        result['status_code'] = ACCEPTED
        return result
    def create_cluster(self, **cluster_spec):
        """Create cluster in PKS environment.

        To retain the user context, user-id of the logged-in user is appended
        to the original cluster name before the actual cluster creation.

        :param dict cluster_spec: named parameters necessary to create
        cluster (cluster_name, node_count, pks_plan, pks_ext_host,
        compute_profile_name)

        :return: Details of the cluster

        :rtype: dict
        """
        cluster_name = cluster_spec['cluster_name']
        qualified_cluster_name = self._append_user_id(cluster_name)
        cluster_spec['cluster_name'] = qualified_cluster_name

        if not self.nsxt_server:
            raise CseServerError(
                "NSX-T server details not found for PKS server selected for "
                f"cluster : {cluster_name}. Aborting creation of cluster.")

        cluster_info = self._create_cluster(**cluster_spec)

        self._isolate_cluster(cluster_name, qualified_cluster_name,
                              cluster_info.get('uuid'))

        self._restore_original_name(cluster_info)
        if not self.tenant_client.is_sysadmin():
            self._filter_pks_properties(cluster_info)

        return cluster_info
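
The PKS broker keeps the user context by appending the caller's user id to the cluster name before creation and stripping it again from the returned info. A hedged sketch of that qualify/restore idea (the separator and helper names are assumptions, not the actual PksBroker internals):

def qualify_cluster_name(cluster_name, user_id, sep='---'):
    # append the user id so the PKS-side name stays unique per user
    return f"{cluster_name}{sep}{user_id}"

def restore_original_name(cluster_info, sep='---'):
    # strip the user-id suffix before returning the info to the client
    cluster_info['name'] = cluster_info['name'].rsplit(sep, 1)[0]
    return cluster_info

info = {'name': qualify_cluster_name('demo', 'user-42')}
assert restore_original_name(info)['name'] == 'demo'
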
Example #17
    def _get_ovdc_params(self):
        ovdc_id = self.req_spec.get('ovdc_id')
        org_name = self.req_spec.get('org_name')
        pks_plans = self.req_spec['pks_plans']
        ovdc = self.ovdc_cache.get_ovdc(ovdc_id=ovdc_id)
        pvdc_id = self.ovdc_cache.get_pvdc_id(ovdc)

        pks_context = None
        if self.req_spec[CONTAINER_PROVIDER_KEY] == CtrProvType.PKS.value:
            if not self.pks_cache:
                raise CseServerError('PKS config file does not exist')
            pvdc_info = self.pks_cache.get_pvdc_info(pvdc_id)
            pks_account_info = self.pks_cache.get_pks_account_info(
                org_name, pvdc_info.vc)
            nsxt_info = self.pks_cache.get_nsxt_info(pvdc_info.vc)

            pks_compute_profile_name = \
                self.ovdc_cache.get_compute_profile_name(
                    ovdc_id, ovdc.resource.get('name'))
            pks_context = OvdcCache.construct_pks_context(
                pks_account_info=pks_account_info,
                pvdc_info=pvdc_info,
                nsxt_info=nsxt_info,
                pks_compute_profile_name=pks_compute_profile_name,
                pks_plans=pks_plans,
                credentials_required=True)

        return pks_context, ovdc
def _wait_until_ready_to_exec(vs, vm, password, tries=30):
    ready = False
    script = "#!/usr/bin/env bash\n" \
             "uname -a\n"
    for _ in range(tries):
        result = vs.execute_script_in_guest(
            vm,
            'root',
            password,
            script,
            target_file=None,
            wait_for_completion=True,
            wait_time=5,
            get_output=True,
            delete_script=True,
            callback=_wait_for_guest_execution_callback)
        if result[0] == 0:
            ready = True
            break
        LOGGER.info(f"Script returned {result[0]}; VM is not "
                    f"ready to execute scripts, yet")
        time.sleep(2)

    if not ready:
        raise CseServerError('VM is not ready to execute scripts')
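
Both wait helpers above share one retry shape: attempt the guest-script call up to `tries` times, sleep between attempts, and raise if it never succeeds. A generic standalone sketch of that loop (wait_until and its arguments are illustrative):

import time

def wait_until(action, tries=30, delay=2):
    for _ in range(tries):
        try:
            if action():
                return True
        except Exception:
            pass  # treat errors the same as "not ready yet"
        time.sleep(delay)
    raise RuntimeError("Gave up waiting: action never succeeded")

# succeeds on the first attempt, so no retries are needed
wait_until(lambda: True, tries=3, delay=0)
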
Example #19
    def get_cluster_info(self, cluster_name):
        """Get the info of the cluster.

        :param cluster_name: (str): Name of the cluster

        :return: (dict): Info of the cluster.
        """
        self._connect_tenant()
        clusters = load_from_metadata(self.tenant_client, name=cluster_name)
        if len(clusters) == 0:
            raise CseServerError(f"Cluster '{cluster_name}' not found.")
        cluster = clusters[0]
        vapp = VApp(self.tenant_client, href=clusters[0]['vapp_href'])
        vms = vapp.get_all_vms()
        for vm in vms:
            node_info = {'name': vm.get('name'), 'ipAddress': ''}
            try:
                node_info['ipAddress'] = vapp.get_primary_ip(vm.get('name'))
            except Exception:
                LOGGER.debug(f"Unable to get ip address of node "
                             f"{vm.get('name')}")
            if vm.get('name').startswith(TYPE_MASTER):
                cluster.get('master_nodes').append(node_info)
            elif vm.get('name').startswith(TYPE_NODE):
                cluster.get('nodes').append(node_info)
            elif vm.get('name').startswith(TYPE_NFS):
                cluster.get('nfs_nodes').append(node_info)
        return cluster
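
get_cluster_info sorts the vApp's VMs into master, worker and NFS buckets purely by node-name prefix. A standalone sketch of that grouping, with literal prefixes standing in for TYPE_MASTER / TYPE_NODE / TYPE_NFS:

def group_nodes_by_prefix(names, master_prefix='mstr', node_prefix='node',
                          nfs_prefix='nfsd'):
    groups = {'master_nodes': [], 'nodes': [], 'nfs_nodes': []}
    for name in names:
        if name.startswith(master_prefix):
            groups['master_nodes'].append(name)
        elif name.startswith(node_prefix):
            groups['nodes'].append(name)
        elif name.startswith(nfs_prefix):
            groups['nfs_nodes'].append(name)
    return groups

grouped = group_nodes_by_prefix(['mstr-a1', 'node-b2', 'node-c3', 'nfsd-d4'])
assert len(grouped['nodes']) == 2
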
Example #20
 def _create_cluster(self, **cluster_spec):
     cluster_name = cluster_spec['cluster_name']
     cluster = self._find_cluster_in_org(cluster_name)[0]
     if not cluster:
         broker = self.get_broker_based_on_vdc()
         return broker.create_cluster(**cluster_spec)
     else:
         raise CseServerError(f'Cluster with name: {cluster_name} '
                              f'already found')
Example #21
 def get_cluster_config(self, cluster_name):
     self._connect_tenant()
     clusters = load_from_metadata(self.tenant_client, name=cluster_name)
     if len(clusters) != 1:
         raise CseServerError(f"Cluster '{cluster_name}' not found")
     vapp = VApp(self.tenant_client, href=clusters[0]['vapp_href'])
     template = self.get_template(name=clusters[0]['template'])
     server_config = get_server_runtime_config()
     result = get_cluster_config(server_config, vapp,
                                 template['admin_password'])
     return result
def construct_ctr_prov_ctx_from_pks_cache(ovdc_id, org_name, pks_plans,
                                          pks_cluster_domain,
                                          container_provider):
    client = None
    try:
        ctr_prov_context = {}
        ctr_prov_context[K8S_PROVIDER_KEY] = container_provider
        if container_provider == K8sProviders.PKS:
            if not is_pks_enabled():
                raise CseServerError('CSE is not configured to work with PKS.')

            client = get_sys_admin_client()
            ovdc = get_vdc(client=client, vdc_id=ovdc_id,
                           is_admin_operation=True)
            pks_cache = get_pks_cache()
            pvdc_id = get_pvdc_id(ovdc)
            pvdc_info = pks_cache.get_pvdc_info(pvdc_id)
            if not pvdc_info:
                LOGGER.debug(f"pvdc '{pvdc_id}' is not backed "
                             f"by PKS-managed-vSphere resources")
                raise CseServerError(f"VDC '{ovdc.get_resource().get('name')}'"
                                     " is not eligible to provide resources"
                                     " for PKS clusters.")
            pks_account_info = pks_cache.get_pks_account_info(
                org_name, pvdc_info.vc)
            nsxt_info = pks_cache.get_nsxt_info(pvdc_info.vc)

            pks_compute_profile_name = \
                _construct_pks_compute_profile_name(ovdc_id)
            ctr_prov_context = construct_pks_context(
                pks_account_info=pks_account_info,
                pvdc_info=pvdc_info,
                nsxt_info=nsxt_info,
                pks_compute_profile_name=pks_compute_profile_name,
                pks_plans=pks_plans,
                pks_cluster_domain=pks_cluster_domain,
                credentials_required=True)
        return ctr_prov_context
    finally:
        if client:
            client.logout()
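
construct_ctr_prov_ctx_from_pks_cache acquires a sys-admin client and releases it in a finally block, so the session is logged out even when an error is raised part-way through. A sketch of the same acquire/use/logout pattern written as a context manager, with a stub client factory standing in for get_sys_admin_client:

from contextlib import contextmanager

@contextmanager
def managed_client(make_client):
    client = None
    try:
        client = make_client()
        yield client
    finally:
        if client:
            client.logout()  # always release the session

class _StubClient:
    def logout(self):
        print("logged out")

with managed_client(_StubClient) as client:
    pass  # use the client; logout happens even if this block raises
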
Example #23
 def get_cluster_config(self, cluster_name, headers):
     result = {}
     self._connect_tenant(headers)
     clusters = load_from_metadata(self.client_tenant, name=cluster_name)
     if len(clusters) != 1:
         raise CseServerError('Cluster \'%s\' not found' % cluster_name)
     vapp = VApp(self.client_tenant, href=clusters[0]['vapp_href'])
     template = self.get_template(name=clusters[0]['template'])
     result['body'] = get_cluster_config(self.config, vapp,
                                         template['admin_password'])
     result['status_code'] = OK
     return result
    def delete_nodes(self):
        result = {'body': {}}
        self.cluster_name = self.req_spec.get(RequestKey.CLUSTER_NAME)
        LOGGER.debug(f"About to delete nodes from cluster with name: "
                     f"{self.req_spec.get(RequestKey.CLUSTER_NAME)}")

        if len(self.req_spec.get(RequestKey.NODE_NAMES_LIST)) < 1:
            raise CseServerError(f"Invalid list of nodes: {self.req_spec.get(RequestKey.NODE_NAMES_LIST)}.") # noqa: E501
        for node in self.req_spec.get(RequestKey.NODE_NAMES_LIST):
            if node.startswith(NodeType.MASTER):
                raise CseServerError(f"Can't delete a master node: '{node}'.")
        self._connect_tenant()
        self._connect_sys_admin()
        clusters = load_from_metadata(
            self.tenant_client, name=self.cluster_name,
            org_name=self.req_spec.get(RequestKey.ORG_NAME),
            vdc_name=self.req_spec.get(RequestKey.OVDC_NAME))
        if len(clusters) <= 0:
            raise CseServerError(f"Cluster '{self.cluster_name}' not found.")

        if len(clusters) > 1:
            raise CseDuplicateClusterError(f"Multiple clusters of name "
                                           f"'{self.cluster_name}' detected.")
        self.cluster = clusters[0]
        self.op = OP_DELETE_NODES
        self.cluster_id = self.cluster['cluster_id']
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Deleting "
                    f"{len(self.req_spec.get(RequestKey.NODE_NAMES_LIST))} "
                    f"node(s) from cluster "
                    f"{self.cluster_name}({self.cluster_id})")
        self.daemon = True
        self.start()
        result = {
            'cluster_name': self.cluster_name,
            'task_href': self.task_resource.get('href')
        }
        return result
    def create_nodes(self):
        self.cluster_name = self.req_spec.get(RequestKey.CLUSTER_NAME)
        LOGGER.debug(f"About to add "
                     f"{self.req_spec.get(RequestKey.NUM_WORKERS)} nodes to "
                     f"cluster {self.cluster_name} on VDC "
                     f"{self.req_spec.get(RequestKey.OVDC_NAME)}")
        if self.req_spec.get(RequestKey.NUM_WORKERS) < 1:
            raise CseServerError(f"Invalid node count: {self.req_spec.get(RequestKey.NUM_WORKERS)}.") # noqa: E501
        if self.req_spec.get(RequestKey.NETWORK_NAME) is None:
            raise CseServerError(f'Network name is missing from the request.')

        self._connect_tenant()
        self._connect_sys_admin()
        clusters = load_from_metadata(
            self.tenant_client, name=self.cluster_name,
            org_name=self.req_spec.get(RequestKey.ORG_NAME),
            vdc_name=self.req_spec.get(RequestKey.OVDC_NAME))

        if len(clusters) > 1:
            raise CseDuplicateClusterError(f"Multiple clusters of name "
                                           f"'{self.cluster_name}' detected.")
        if len(clusters) == 0:
            raise ClusterNotFoundError(
                f"Cluster '{self.cluster_name}' not found.")

        self.cluster = clusters[0]
        self.op = OP_CREATE_NODES
        self.cluster_id = self.cluster['cluster_id']
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Adding {self.req_spec.get(RequestKey.NUM_WORKERS)} "
                    f"node(s) to cluster "
                    f"{self.cluster_name}({self.cluster_id})")
        self.daemon = True
        self.start()
        result = {}
        result['cluster_name'] = self.cluster_name
        result['task_href'] = self.task_resource.get('href')
        return result
    def delete_nodes(self, data):
        """Start the delete nodes operation.

        Validates data for the 'delete nodes' operation. Deleting nodes is an
        asynchronous task, so the returned `result['task_href']` can be polled
        to get updates on task progress.

        Required data: cluster_name, node_names_list
        Optional data and default values: org_name=None, ovdc_name=None
        """
        required = [
            RequestKey.CLUSTER_NAME,
            RequestKey.NODE_NAMES_LIST
        ]
        utils.ensure_keys_in_dict(required, data, dict_name='data')
        defaults = {
            RequestKey.ORG_NAME: None,
            RequestKey.OVDC_NAME: None
        }
        validated_data = {**defaults, **data}
        cluster_name = validated_data[RequestKey.CLUSTER_NAME]
        node_names_list = validated_data[RequestKey.NODE_NAMES_LIST]
        # check that there are nodes to delete
        if len(node_names_list) == 0:
            LOGGER.debug("No nodes specified to delete")
            return {'body': {}}
        # check that master node is not in specified nodes
        for node in node_names_list:
            if node.startswith(NodeType.MASTER):
                raise CseServerError(f"Can't delete a master node: '{node}'.")

        cluster = get_cluster(self.tenant_client, cluster_name,
                              org_name=validated_data[RequestKey.ORG_NAME],
                              ovdc_name=validated_data[RequestKey.OVDC_NAME])
        cluster_id = cluster['cluster_id']
        # must _update_task here or else self.task_resource is None
        # do not logout of sys admin, or else in pyvcloud's session.request()
        # call, session becomes None
        self._update_task(
            TaskStatus.RUNNING,
            message=f"Deleting {len(node_names_list)} node(s)"
                    f" from cluster {cluster_name}({cluster_id})")
        self._delete_nodes_async(
            cluster_name=cluster_name,
            cluster_vapp_href=cluster['vapp_href'],
            node_names_list=validated_data[RequestKey.NODE_NAMES_LIST])

        return {
            'cluster_name': cluster_name,
            'task_href': self.task_resource.get('href')
        }
Example #27
    def invoke(self, op):
        """Handle ovdc related operations.

        :param CseOperation op: Operation to be performed on the ovdc.

        :return result: result of the operation.

        :rtype: dict
        """
        result = {}

        if op == CseOperation.OVDC_UPDATE:
            ovdc_id = self.req_spec.get(RequestKey.OVDC_ID)
            org_name = self.req_spec.get(RequestKey.ORG_NAME)
            pks_plans = self.req_spec.get(RequestKey.PKS_PLAN_NAME)
            pks_cluster_domain = self.req_spec.get(
                RequestKey.PKS_CLUSTER_DOMAIN)  # noqa: E501
            container_provider = self.req_spec.get(RequestKey.K8S_PROVIDER)

            ctr_prov_ctx = construct_ctr_prov_ctx_from_pks_cache(
                ovdc_id=ovdc_id,
                org_name=org_name,
                pks_plans=pks_plans,
                pks_cluster_domain=pks_cluster_domain,
                container_provider=container_provider)

            if container_provider == K8sProviders.PKS:
                if is_pks_enabled():
                    create_pks_compute_profile(ctr_prov_ctx,
                                               self.tenant_auth_token,
                                               self.req_spec)
                else:
                    raise CseServerError(
                        'CSE is not configured to work with PKS.')

            task = OvdcManager().set_ovdc_container_provider_metadata(
                ovdc_id=ovdc_id,
                container_prov_data=ctr_prov_ctx,
                container_provider=container_provider)

            result = {'task_href': task.get('href')}
        elif op == CseOperation.OVDC_INFO:
            ovdc_id = self.req_spec.get(RequestKey.OVDC_ID)
            result = OvdcManager().get_ovdc_container_provider_metadata(
                ovdc_id=ovdc_id)
        elif op == CseOperation.OVDC_LIST:
            list_pks_plans = str_to_bool(
                self.req_spec.get(RequestKey.LIST_PKS_PLANS))  # noqa: E501
            result = self._list_ovdcs(list_pks_plans=list_pks_plans)

        return result
def _get_broker_from_k8s_metadata(k8s_metadata, op_ctx: ctx.OperationContext):
    """Get broker from ovdc k8s metadata.

    If PKS is not enabled, raise CseServerError
    If PKS is enabled
        if no ovdc metadata exists or k8s provider is None, raise server error
        else return the broker according to ovdc k8s provider
    """
    _raise_error_if_pks_not_enabled()
    if not k8s_metadata or k8s_metadata.get(
            K8S_PROVIDER_KEY) != K8sProvider.PKS:  # noqa: E501
        raise CseServerError("Org VDC is not enabled for Kubernetes "
                             "cluster deployment")
    return PksBroker(k8s_metadata, op_ctx)
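
_get_broker_from_k8s_metadata picks the broker implementation from the ovdc's k8s-provider metadata and fails fast when the VDC is not enabled for Kubernetes. An illustrative dispatch sketch (the provider strings and broker classes below are stand-ins, not CSE's):

class _NativeBroker:
    pass

class _PksBroker:
    pass

def pick_broker(k8s_metadata):
    provider = (k8s_metadata or {}).get('k8s_provider')
    if provider == 'native':
        return _NativeBroker()
    if provider == 'ent-pks':
        return _PksBroker()
    raise ValueError("Org VDC is not enabled for Kubernetes cluster deployment")

assert isinstance(pick_broker({'k8s_provider': 'ent-pks'}), _PksBroker)
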
 def _create_cluster(self, **cluster_spec):
     cluster_name = cluster_spec['cluster_name']
     cluster = self._find_cluster_in_org(cluster_name)[0]
     if not cluster:
         ctr_prov_ctx = self._get_ctr_prov_ctx_from_ovdc_metadata()
         if ctr_prov_ctx.get(K8S_PROVIDER_KEY) == K8sProviders.PKS:
             cluster_spec['pks_plan'] = ctr_prov_ctx[PKS_PLANS_KEY][0]
             cluster_spec['pks_ext_host'] = f"{cluster_name}." \
                 f"{ctr_prov_ctx[PKS_CLUSTER_DOMAIN_KEY]}"
         broker = self._get_broker_based_on_ctr_prov_ctx(ctr_prov_ctx)
         return broker.create_cluster(**cluster_spec)
     else:
         raise CseServerError(f'Cluster with name: {cluster_name} '
                              f'already found')
    def _get_ovdc_params(self):
        ovdc_id = self.req_spec.get('ovdc_id')
        org_name = self.req_spec.get('org_name')
        pks_plans = self.req_spec['pks_plans']
        pks_cluster_domain = self.req_spec['pks_cluster_domain']
        ovdc = self.ovdc_cache.get_ovdc(ovdc_id=ovdc_id)
        pvdc_id = self.ovdc_cache.get_pvdc_id(ovdc)

        pks_context = None
        if self.req_spec[K8S_PROVIDER_KEY] == K8sProviders.PKS:
            if not self.pks_cache:
                raise CseServerError('PKS config file does not exist')
            pvdc_info = self.pks_cache.get_pvdc_info(pvdc_id)
            if not pvdc_info:
                LOGGER.debug(f"pvdc '{pvdc_id}' is not backed "
                             f"by PKS-managed-vSphere resources")
                raise CseServerError(f"'{ovdc.resource.get('name')}' is not "
                                     f"eligible to provide resources for "
                                     f"PKS clusters. Refer debug logs for more"
                                     f" details.")
            pks_account_info = self.pks_cache.get_pks_account_info(
                org_name, pvdc_info.vc)
            nsxt_info = self.pks_cache.get_nsxt_info(pvdc_info.vc)

            pks_compute_profile_name = \
                self.ovdc_cache.get_compute_profile_name(
                    ovdc_id, ovdc.resource.get('name'))
            pks_context = OvdcCache.construct_pks_context(
                pks_account_info=pks_account_info,
                pvdc_info=pvdc_info,
                nsxt_info=nsxt_info,
                pks_compute_profile_name=pks_compute_profile_name,
                pks_plans=pks_plans,
                pks_cluster_domain=pks_cluster_domain,
                credentials_required=True)

        return pks_context, ovdc