Example #1
0
def GetvAppObj(parameters, client, virtual_machine):
    """Locate and return the VApp object containing *virtual_machine*.

    Scans every vApp in the VDC named by ``parameters['VCD_VDC']``.
    On any failure, prints an error and terminates the process.
    """
    try:
        # Organization of the currently authenticated session.
        org = Org(client, resource=client.get_org())

        # VDC selected via the runtime parameters.
        vdc = VDC(client, resource=org.get_vdc(parameters['VCD_VDC']))

        # Inspect each vApp's VMs until the requested name is found.
        for entry in vdc.list_resources(EntityType.VAPP):
            candidate = VApp(client,
                             resource=vdc.get_vapp(entry.get('name')))
            if any(vm.get('name') == virtual_machine
                   for vm in candidate.get_all_vms()):
                return candidate

    except Exception as e:
        print("ERROR: Failed To Get vApp Object For Virtual Machine: '" +
              virtual_machine + "'")
        print(e)
        exit(1)
Example #2
0
    def count_vcloud(self, client):
        """
        Obtain counts via vCloud API. Multiple dependent requests are needed
        (org -> vdc -> vapp -> vm), so every counter is accumulated during a
        single traversal instead of issuing separate query passes.
        :param client:
        :return:
        """
        org = Org(client, resource=client.get_org())

        counters = {
            'num_availability_zone': 0,
            'num_orchestration_stack': 0,
            'num_vm': 0
        }

        for info in org.list_vdcs():
            counters['num_availability_zone'] += 1
            vdc = VDC(client, resource=org.get_vdc(info['name']))
            for entry in vdc.list_resources():
                try:
                    resource = vdc.get_vapp(entry.get('name'))
                except Exception:
                    # not a vapp (probably vapp template or something)
                    continue

                counters['num_orchestration_stack'] += 1
                vapp = VApp(client, resource=resource)
                counters['num_vm'] += len(vapp.get_all_vms())

        return counters
    def get_cluster_info(self, name):
        """Get the info of the cluster.

        :param cluster_name: (str): Name of the cluster

        :return: (dict): Info of the cluster.
        """
        result = {'body': [], 'status_code': OK}

        self._connect_tenant()
        clusters = load_from_metadata(self.tenant_client, name=name)
        if not clusters:
            raise CseServerError('Cluster \'%s\' not found.' % name)
        cluster = clusters[0]
        vapp = VApp(self.tenant_client, href=cluster['vapp_href'])
        for vm in vapp.get_all_vms():
            vm_name = vm.get('name')
            node_info = {'name': vm_name, 'ipAddress': ''}
            try:
                node_info['ipAddress'] = vapp.get_primary_ip(vm_name)
            except Exception:
                # Best effort: the node is still listed without an address.
                LOGGER.debug('cannot get ip address for node %s' %
                             vm_name)
            # Node role is encoded as a name prefix.
            if vm_name.startswith(TYPE_MASTER):
                cluster.get('master_nodes').append(node_info)
            elif vm_name.startswith(TYPE_NODE):
                cluster.get('nodes').append(node_info)
            elif vm_name.startswith(TYPE_NFS):
                cluster.get('nfs_nodes').append(node_info)
        result['body'] = cluster
        return result
Example #4
0
 def get_cluster_info(self, name, headers, body):
     """Return cluster metadata plus per-node details as an HTTP-style dict.

     Any failure is logged and reported through an INTERNAL_SERVER_ERROR
     result instead of propagating.
     """
     result = {}
     try:
         result['body'] = []
         result['status_code'] = OK
         self._connect_tenant(headers)
         clusters = load_from_metadata(self.client_tenant, name=name)
         if not clusters:
             raise Exception('Cluster \'%s\' not found.' % name)
         cluster = clusters[0]
         vapp = VApp(self.client_tenant, href=cluster['vapp_href'])
         for vm in vapp.get_all_vms():
             vm_name = vm.get('name')
             node_info = {
                 'name': vm_name,
                 'numberOfCpus': vm.VmSpecSection.NumCpus.text,
                 'memoryMB':
                 vm.VmSpecSection.MemoryResourceMb.Configured.text,
                 'status': VCLOUD_STATUS_MAP.get(int(vm.get('status'))),
                 'ipAddress': vapp.get_primary_ip(vm_name)
             }
             # Node role is encoded as a name prefix.
             if vm_name.startswith(TYPE_MASTER):
                 node_info['node_type'] = 'master'
                 cluster.get('master_nodes').append(node_info)
             elif vm_name.startswith(TYPE_NODE):
                 node_info['node_type'] = 'node'
                 cluster.get('nodes').append(node_info)
         result['body'] = cluster
     except Exception as e:
         LOGGER.error(traceback.format_exc())
         result['body'] = []
         result['status_code'] = INTERNAL_SERVER_ERROR
         result['message'] = str(e)
     return result
Example #5
0
    def get_cluster_info(self, cluster_name):
        """Get the info of the cluster.

        :param cluster_name: (str): Name of the cluster

        :return: (dict): Info of the cluster.
        """
        self._connect_tenant()
        clusters = load_from_metadata(self.tenant_client, name=cluster_name)
        if not clusters:
            raise CseServerError(f"Cluster '{cluster_name}' not found.")
        cluster = clusters[0]
        vapp = VApp(self.tenant_client, href=cluster['vapp_href'])
        for vm in vapp.get_all_vms():
            vm_name = vm.get('name')
            node_info = {'name': vm_name, 'ipAddress': ''}
            try:
                node_info['ipAddress'] = vapp.get_primary_ip(vm_name)
            except Exception:
                # Best effort: the node is still listed without an address.
                LOGGER.debug(f"Unable to get ip address of node "
                             f"{vm_name}")
            # Node role is encoded as a name prefix.
            if vm_name.startswith(TYPE_MASTER):
                cluster.get('master_nodes').append(node_info)
            elif vm_name.startswith(TYPE_NODE):
                cluster.get('nodes').append(node_info)
            elif vm_name.startswith(TYPE_NFS):
                cluster.get('nfs_nodes').append(node_info)
        return cluster
Example #6
0
def get_all_host(host, org, user, password, vdc):
    """Build an Ansible-style inventory of powered-on VMs in a VDC.

    Connects to vCloud Director at *host* with the given credentials, walks
    every vApp in *vdc* and groups its powered-on VMs by guest OS.

    :param host: (str): vCloud Director endpoint.
    :param org: (str): Organization to log in to.
    :param user: (str): User name.
    :param password: (str): Password.
    :param vdc: (str): Name of the VDC to scan.

    :return: (dict): Inventory with 'windows', 'linux' and 'others' host
        groups plus a '_meta'/'hostvars' mapping. The structure is now always
        present (previously an empty dict was returned when no VM was
        powered on, which broke callers indexing the groups).
    """
    # Guest OS descriptions classified as Linux.
    linux_os_list = ['CentOS 7 (64-bit)', 'Ubuntu Linux (64-bit)', 'CentOS 8 (64-bit)']

    # Guest OS descriptions classified as Windows.
    win_os_list = ['Microsoft Windows Server 2016 or later (64-bit)', 'Microsoft Windows Server 2019 (64-bit)']

    client = Client(host,
                verify_ssl_certs=True,  # SSL
                log_requests=False,
                log_headers=False,
                log_bodies=False)
    client.set_highest_supported_version()
    client.set_credentials(BasicLoginCredentials(user, org, password))

    org_resource = client.get_org()
    org = Org(client, resource=org_resource)

    vdc_resource = org.get_vdc(vdc)
    vdc = VDC(client, resource=vdc_resource)

    win_list = []
    linux_list = []
    other_os_list = []
    hostvars = {}
    for vapp in vdc.list_resources():
        # Only real vApps; skip templates and other resource types.
        if vapp["type"] != "application/vnd.vmware.vcloud.vApp+xml":
            continue
        current_vapp = VApp(client, href=vdc.get_vapp_href(vapp["name"]))
        for vm_resource in current_vapp.get_all_vms():
            vm_name = vm_resource.get('name')
            vm = VM(client, resource=vm_resource)
            guest_os = vm.get_operating_system_section()
            # BUG FIX: initialise the IP on every iteration; previously a
            # failed lookup reused the previous VM's address or raised
            # UnboundLocalError on the very first VM.
            vm_ip = None
            try:
                vm_ip = current_vapp.get_primary_ip(vm_name)
            except Exception:
                pass  # best effort: some VMs have no primary IP
            if vm.is_powered_on():
                if guest_os.Description in win_os_list:
                    win_list.append(vm_name)
                elif guest_os.Description in linux_os_list:
                    linux_list.append(vm_name)
                else:
                    other_os_list.append(vm_name)
                hostvars[vm_name] = {'ansible_host': vm_ip}

    # Assemble the inventory once, after all vApps have been scanned,
    # instead of rebuilding it inside the innermost loop.
    host_list = {
        'windows': {'hosts': win_list},
        'linux': {'hosts': linux_list},
        'others': {'hosts': other_os_list},
        '_meta': {'hostvars': hostvars},
    }
    return host_list
    def get_node_info(self, cluster_name, node_name):
        """Get the info of a given node in the cluster.

        :param cluster_name: (str): Name of the cluster
        :param node_name: (str): Name of the node

        :return: (dict): Info of the node.
        """
        self._connect_tenant()
        clusters = load_from_metadata(
            self.tenant_client,
            name=cluster_name,
            org_name=self.req_spec.get(RequestKey.ORG_NAME),
            vdc_name=self.req_spec.get(RequestKey.OVDC_NAME))
        if len(clusters) > 1:
            raise CseDuplicateClusterError(f"Multiple clusters of name"
                                           f" '{cluster_name}' detected.")
        if len(clusters) == 0:
            raise ClusterNotFoundError(f"Cluster '{cluster_name}' not found.")

        vapp = VApp(self.tenant_client, href=clusters[0]['vapp_href'])
        node_info = None
        for vm in vapp.get_all_vms():
            vm_name = vm.get('name')
            if node_name != vm_name:
                continue
            node_info = {
                'name': vm_name,
                'numberOfCpus': '',
                'memoryMB': '',
                'status': VCLOUD_STATUS_MAP.get(int(vm.get('status'))),
                'ipAddress': ''
            }
            # CPU/memory details exist only when the VM carries a spec
            # section.
            if hasattr(vm, 'VmSpecSection'):
                node_info['numberOfCpus'] = vm.VmSpecSection.NumCpus.text
                node_info['memoryMB'] = \
                    vm.VmSpecSection.MemoryResourceMb.Configured.text
            try:
                node_info['ipAddress'] = vapp.get_primary_ip(vm_name)
            except Exception:
                LOGGER.debug(f"Unable to get ip address of node "
                             f"{vm_name}")
            # Node role is encoded as a name prefix.
            if vm_name.startswith(NodeType.MASTER):
                node_info['node_type'] = 'master'
            elif vm_name.startswith(NodeType.WORKER):
                node_info['node_type'] = 'worker'
            elif vm_name.startswith(NodeType.NFS):
                node_info['node_type'] = 'nfs'
                node_info['exports'] = self._get_nfs_exports(
                    node_info['ipAddress'], vapp, vm)
        if node_info is None:
            raise NodeNotFoundError(f"Node '{node_name}' not found in "
                                    f"cluster '{cluster_name}'")
        return node_info
    def get_node_info(self, data):
        """Get node metadata as dictionary.

        Required data: cluster_name, node_name
        Optional data and default values: org_name=None, ovdc_name=None
        """
        utils.ensure_keys_in_dict(
            [RequestKey.CLUSTER_NAME, RequestKey.NODE_NAME],
            data, dict_name='data')
        # Defaults first, then caller-supplied values override them.
        validated_data = {
            RequestKey.ORG_NAME: None,
            RequestKey.OVDC_NAME: None,
            **data
        }
        cluster_name = validated_data[RequestKey.CLUSTER_NAME]
        node_name = validated_data[RequestKey.NODE_NAME]

        cluster = get_cluster(self.tenant_client, cluster_name,
                              org_name=validated_data[RequestKey.ORG_NAME],
                              ovdc_name=validated_data[RequestKey.OVDC_NAME])

        vapp = VApp(self.tenant_client, href=cluster['vapp_href'])
        node_info = None
        for vm in vapp.get_all_vms():
            vm_name = vm.get('name')
            if vm_name != node_name:
                continue

            node_info = {
                'name': vm_name,
                'numberOfCpus': '',
                'memoryMB': '',
                'status': VCLOUD_STATUS_MAP.get(int(vm.get('status'))),
                'ipAddress': ''
            }
            # CPU/memory details exist only when the VM carries a spec
            # section.
            if hasattr(vm, 'VmSpecSection'):
                node_info['numberOfCpus'] = vm.VmSpecSection.NumCpus.text
                node_info['memoryMB'] = vm.VmSpecSection.MemoryResourceMb.Configured.text  # noqa: E501
            try:
                node_info['ipAddress'] = vapp.get_primary_ip(vm_name)
            except Exception:
                LOGGER.debug(f"Unable to get ip address of node {vm_name}")
            # Node role is encoded as a name prefix.
            if vm_name.startswith(NodeType.MASTER):
                node_info['node_type'] = 'master'
            elif vm_name.startswith(NodeType.WORKER):
                node_info['node_type'] = 'worker'
            elif vm_name.startswith(NodeType.NFS):
                node_info['node_type'] = 'nfs'
                node_info['exports'] = self._get_nfs_exports(node_info['ipAddress'], vapp, vm_name)  # noqa: E501
        if node_info is None:
            raise NodeNotFoundError(f"Node '{node_name}' not found in "
                                    f"cluster '{cluster_name}'")
        return node_info
    def get_node_info(self, cluster_name, node_name, headers):
        """Get the info of a given node in the cluster.

        :param cluster_name: (str): Name of the cluster
        :param node_name: (str): Name of the node
        :param headers: (str): Request headers

        :return: (dict): Info of the node.
        """
        result = {'body': [], 'status_code': OK}

        self._connect_tenant(headers)
        clusters = load_from_metadata(self.client_tenant, name=cluster_name)
        if not clusters:
            raise CseServerError('Cluster \'%s\' not found.' % cluster_name)
        vapp = VApp(self.client_tenant, href=clusters[0]['vapp_href'])
        node_info = None
        for vm in vapp.get_all_vms():
            vm_name = vm.get('name')
            if node_name != vm_name:
                continue
            node_info = {
                'name': vm_name,
                'numberOfCpus': '',
                'memoryMB': '',
                'status': VCLOUD_STATUS_MAP.get(int(vm.get('status'))),
                'ipAddress': ''
            }
            # CPU/memory details exist only when the VM carries a spec
            # section.
            if hasattr(vm, 'VmSpecSection'):
                node_info['numberOfCpus'] = vm.VmSpecSection.NumCpus.text
                node_info['memoryMB'] = \
                    vm.VmSpecSection.MemoryResourceMb.Configured.text
            try:
                node_info['ipAddress'] = vapp.get_primary_ip(vm_name)
            except Exception:
                LOGGER.debug('cannot get ip address '
                             'for node %s' % vm_name)
            # Node role is encoded as a name prefix.
            if vm_name.startswith(TYPE_MASTER):
                node_info['node_type'] = 'master'
            elif vm_name.startswith(TYPE_NODE):
                node_info['node_type'] = 'node'
            elif vm_name.startswith(TYPE_NFS):
                node_info['node_type'] = 'nfsd'
                node_info['exports'] = self._get_nfs_exports(
                    node_info['ipAddress'], vapp, vm)
        if node_info is None:
            raise CseServerError('Node \'%s\' not found in cluster \'%s\'' %
                                 (node_name, cluster_name))
        result['body'] = node_info
        return result
 def list_vms(self):
     """List name, status, deployment state and primary IP of every VM in
     the vApp named by the 'vapp_name' module parameter.

     :return: (dict): 'msg' holds one detail dict per VM; 'ip_address' is
         None when the primary IP cannot be determined.
     """
     vapp_name = self.params.get('vapp_name')
     vapp_resource = self.vdc.get_vapp(vapp_name)
     vapp = VApp(self.client, name=vapp_name, resource=vapp_resource)
     response = dict()
     response['msg'] = []
     for vm in vapp.get_all_vms():
         vm_details = dict()
         vm_details['name'] = vm.get('name')
         vm_details['status'] = VM_STATUSES[vm.get('status')]
         vm_details['deployed'] = vm.get('deployed') == 'true'
         try:
             vm_details['ip_address'] = vapp.get_primary_ip(vm.get('name'))
         except Exception:
             # BUG FIX: was a bare `except:`, which also swallowed
             # SystemExit/KeyboardInterrupt.
             vm_details['ip_address'] = None
         response['msg'].append(vm_details)
     return response
 def list_vms(self):
     """List name, status, deployment state and primary IP of every VM in
     the vApp named by the 'vapp_name' module parameter.

     :return: (dict): 'msg' holds one detail dict per VM; 'ip_address' is
         None when the primary IP cannot be determined.
     """
     vapp_name = self.params.get('vapp_name')
     vapp_resource = self.vdc.get_vapp(vapp_name)
     vapp = VApp(self.client, name=vapp_name, resource=vapp_resource)
     response = dict()
     response['msg'] = []
     for vm in vapp.get_all_vms():
         vm_details = dict()
         vm_details['name'] = vm.get('name')
         vm_details['status'] = VM_STATUSES[vm.get('status')]
         vm_details['deployed'] = vm.get('deployed') == 'true'
         try:
             vm_details['ip_address'] = vapp.get_primary_ip(vm.get('name'))
         except Exception:
             # BUG FIX: was a bare `except:`, which also swallowed
             # SystemExit/KeyboardInterrupt.
             vm_details['ip_address'] = None
         response['msg'].append(vm_details)
     return response
Example #12
0
    def list_vms(self):
        """List the names of all VMs in a catalog vApp template.

        Reads 'catalog_name' and 'item_name' from the module parameters.
        """
        catalog_name = self.params.get('catalog_name')
        item_name = self.params.get('item_name')

        catalog_item = self.org.get_catalog_item(catalog_name, item_name)
        template_resource = self.client.get_resource(
            catalog_item.Entity.get('href'))
        template = VApp(self.client,
                        name=item_name,
                        resource=template_resource)

        return {
            'changed': False,
            'msg': [vm.get('name') for vm in template.get_all_vms()],
        }
    def get_cluster_info(self, data):
        """Get cluster metadata as well as node data.

        Common broker function that validates data for the 'cluster info'
        operation and returns cluster/node metadata as dictionary.

        Required data: cluster_name
        Optional data and default values: org_name=None, ovdc_name=None
        """
        utils.ensure_keys_in_dict([RequestKey.CLUSTER_NAME], data,
                                  dict_name='data')
        # Defaults first, then caller-supplied values override them.
        validated_data = {
            RequestKey.ORG_NAME: None,
            RequestKey.OVDC_NAME: None,
            **data
        }
        cluster_name = validated_data[RequestKey.CLUSTER_NAME]
        cluster = get_cluster(self.tenant_client, cluster_name,
                              org_name=validated_data[RequestKey.ORG_NAME],
                              ovdc_name=validated_data[RequestKey.OVDC_NAME])

        cluster[K8S_PROVIDER_KEY] = K8sProvider.NATIVE
        vapp = VApp(self.tenant_client, href=cluster['vapp_href'])
        for vm in vapp.get_all_vms():
            vm_name = vm.get('name')
            node_info = {'name': vm_name, 'ipAddress': ''}
            try:
                node_info['ipAddress'] = vapp.get_primary_ip(vm_name)
            except Exception:
                # Best effort: the node is still listed without an address.
                LOGGER.debug(f"Unable to get ip address of node "
                             f"{vm_name}")
            # Node role is encoded as a name prefix.
            if vm_name.startswith(NodeType.MASTER):
                cluster.get('master_nodes').append(node_info)
            elif vm_name.startswith(NodeType.WORKER):
                cluster.get('nodes').append(node_info)
            elif vm_name.startswith(NodeType.NFS):
                cluster.get('nfs_nodes').append(node_info)

        return cluster
Example #14
0
    def list_vms(self):
        """List the names of all VMs in the vApp template identified by the
        'catalog_name' and 'item_name' module parameters.

        :return: (dict): 'msg' holds the VM names; 'changed' is always False
            because this is a read-only operation.
        """
        params = self.params
        catalog_name = params.get('catalog_name')
        item_name = params.get('item_name')

        catalog_item = self.org.get_catalog_item(catalog_name, item_name)
        vapp_template_resource = self.client.get_resource(
            catalog_item.Entity.get('href'))
        vapp_template = VApp(self.client, name=item_name,
                             resource=vapp_template_resource)

        response = dict()
        # FIX: 'changed' was previously assigned twice; set it once.
        response['changed'] = False
        response['msg'] = [vm.get('name')
                           for vm in vapp_template.get_all_vms()]
        return response
    def list_vms(self):
        """List the names of all VMs in the vApp template identified by the
        'catalog_name' and 'item_name' module parameters.

        :return: (dict): 'msg' holds the VM names; 'changed' is always False
            because this is a read-only operation.
        """
        params = self.params
        catalog_name = params.get('catalog_name')
        item_name = params.get('item_name')

        catalog_item = self.org.get_catalog_item(catalog_name, item_name)
        vapp_template_resource = self.client.get_resource(
            catalog_item.Entity.get('href'))
        vapp_template = VApp(self.client, name=item_name,
                             resource=vapp_template_resource)

        response = dict()
        # FIX: 'changed' was previously assigned twice; set it once.
        response['changed'] = False
        response['msg'] = [vm.get('name')
                           for vm in vapp_template.get_all_vms()]
        return response
    def get_cluster_info(self, cluster_name):
        """Get the info of the cluster.

        :param cluster_name: (str): Name of the cluster

        :return: (dict): Info of the cluster.
        """
        self._connect_tenant()
        clusters = load_from_metadata(
            self.tenant_client,
            name=cluster_name,
            org_name=self.req_spec.get(RequestKey.ORG_NAME),
            vdc_name=self.req_spec.get(RequestKey.OVDC_NAME))
        if len(clusters) > 1:
            raise CseDuplicateClusterError(f"Multiple clusters of name"
                                           f" '{cluster_name}' detected.")
        if len(clusters) == 0:
            raise ClusterNotFoundError(f"Cluster '{cluster_name}' not found.")

        cluster = clusters[0]
        cluster[K8S_PROVIDER_KEY] = K8sProviders.NATIVE
        vapp = VApp(self.tenant_client, href=cluster['vapp_href'])
        for vm in vapp.get_all_vms():
            vm_name = vm.get('name')
            node_info = {'name': vm_name, 'ipAddress': ''}
            try:
                node_info['ipAddress'] = vapp.get_primary_ip(vm_name)
            except Exception:
                # Best effort: the node is still listed without an address.
                LOGGER.debug(f"Unable to get ip address of node "
                             f"{vm_name}")
            # Node role is encoded as a name prefix.
            if vm_name.startswith(NodeType.MASTER):
                cluster.get('master_nodes').append(node_info)
            elif vm_name.startswith(NodeType.WORKER):
                cluster.get('nodes').append(node_info)
            elif vm_name.startswith(NodeType.NFS):
                cluster.get('nfs_nodes').append(node_info)
        return cluster
Example #17
0
    def get_cluster_info(self, name, headers, body):
        """Get the info of the cluster.

        :param cluster_name: (str): Name of the cluster
        :param headers: (str): Request headers

        :return: (dict): Info of the cluster.
        """
        result = {}
        try:
            result['body'] = []
            result['status_code'] = OK
            self._connect_tenant(headers)
            clusters = load_from_metadata(self.client_tenant, name=name)
            if not clusters:
                raise Exception('Cluster \'%s\' not found.' % name)
            cluster = clusters[0]
            vapp = VApp(self.client_tenant, href=cluster['vapp_href'])
            for vm in vapp.get_all_vms():
                vm_name = vm.get('name')
                node_info = {'name': vm_name, 'ipAddress': ''}
                try:
                    node_info['ipAddress'] = vapp.get_primary_ip(vm_name)
                except Exception:
                    LOGGER.debug('cannot get ip address for node %s' %
                                 vm_name)
                # Node role is encoded as a name prefix.
                if vm_name.startswith(TYPE_MASTER):
                    cluster.get('master_nodes').append(node_info)
                elif vm_name.startswith(TYPE_NODE):
                    cluster.get('nodes').append(node_info)
                elif vm_name.startswith(TYPE_NFS):
                    cluster.get('nfs_nodes').append(node_info)
            result['body'] = cluster
        except Exception as e:
            LOGGER.error(traceback.format_exc())
            result['body'] = []
            result['status_code'] = INTERNAL_SERVER_ERROR
            result['message'] = str(e)
        return result
Example #18
0
def add_nodes(qty, template, node_type, config, client, org, vdc, vapp, body):
    """Clone *qty* node VMs from the configured catalog template into *vapp*.

    Builds one clone spec per node (random unique name, optional guest
    customization script, optional storage profile), adds the VMs in a single
    task, optionally reconfigures CPU/memory before power-on, resets the root
    password and, for NFS nodes, enables the NFS server.

    :return: (dict): {'task': last waited-on task, 'specs': clone specs},
        or None when qty < 1.
    """
    if qty < 1:
        return None
    specs = []
    catalog_item = org.get_catalog_item(config['broker']['catalog'],
                                        template['catalog_item'])
    source_vapp = VApp(client, href=catalog_item.Entity.get('href'))
    source_vm = source_vapp.get_all_vms()[0].get('name')
    storage_profile = None
    if 'storage_profile' in body and body['storage_profile'] is not None:
        storage_profile = vdc.get_storage_profile(body['storage_profile'])
    # Guest customization skeleton; the body only runs post-customization.
    cust_script_init = \
"""#!/usr/bin/env bash
if [ x$1=x"postcustomization" ];
then
""" # NOQA
    cust_script_common = ''
    cust_script_end = \
"""
fi
"""  # NOQA
    if 'ssh_key' in body and body['ssh_key'] is not None:
        cust_script_common += \
"""
mkdir -p /root/.ssh
echo '{ssh_key}' >> /root/.ssh/authorized_keys
chmod -R go-rwx /root/.ssh
""".format(ssh_key=body['ssh_key'])  # NOQA

    # BUG FIX: was `cust_script_common is ''` -- identity comparison against
    # a str literal is implementation-defined and raises SyntaxWarning on
    # CPython >= 3.8; compare by value instead.
    if cust_script_common == '':
        cust_script = None
    else:
        cust_script = cust_script_init + cust_script_common + cust_script_end
    for n in range(qty):
        name = None
        while True:
            # Keep generating names until one is not already taken in vapp.
            name = '%s-%s' % (node_type, ''.join(
                random.choices(string.ascii_lowercase + string.digits, k=4)))
            try:
                vapp.get_vm(name)
            except Exception:
                break
        spec = {
            'source_vm_name': source_vm,
            'vapp': source_vapp.resource,
            'target_vm_name': name,
            'hostname': name,
            'network': body['network'],
            'ip_allocation_mode': 'pool'
        }
        if cust_script is not None:
            spec['cust_script'] = cust_script
        if storage_profile is not None:
            spec['storage_profile'] = storage_profile
        specs.append(spec)
    # Defer power-on when CPU/memory must be reconfigured first.
    if ('cpu' in body and body['cpu'] is not None) or \
       ('memory' in body and body['memory'] is not None):
        reconfigure_hw = True
    else:
        reconfigure_hw = False
    task = vapp.add_vms(specs, power_on=not reconfigure_hw)
    # TODO(get details of the exception like not enough resources avail)
    client.get_task_monitor().wait_for_status(task)
    if reconfigure_hw:
        vapp.reload()
        for spec in specs:
            vm_resource = vapp.get_vm(spec['target_vm_name'])
            if 'cpu' in body and body['cpu'] is not None:
                vm = VM(client, resource=vm_resource)
                task = vm.modify_cpu(body['cpu'])
                client.get_task_monitor().wait_for_status(task)
            if 'memory' in body and body['memory'] is not None:
                vm = VM(client, resource=vm_resource)
                task = vm.modify_memory(body['memory'])
                client.get_task_monitor().wait_for_status(task)
            vm = VM(client, resource=vm_resource)
            task = vm.power_on()
            client.get_task_monitor().wait_for_status(task)
    password = source_vapp.get_admin_password(source_vm)
    vapp.reload()
    for spec in specs:
        vm_resource = vapp.get_vm(spec['target_vm_name'])
        # Reset the root password to the template's admin password.
        command = '/bin/echo "root:{password}" | chpasswd'.format(
            password=template['admin_password'])
        nodes = [vm_resource]
        execute_script_in_nodes(config,
                                vapp,
                                password,
                                command,
                                nodes,
                                check_tools=True,
                                wait=False)
        if node_type == TYPE_NFS:
            LOGGER.debug('Enabling NFS server on %s' % spec['target_vm_name'])
            script = get_data_file('nfsd-%s.sh' % template['name'])
            execute_script_in_nodes(config, vapp, template['admin_password'],
                                    script, nodes)
    return {'task': task, 'specs': specs}
Example #19
0
# Authenticate the (previously constructed) client with credentials from the
# loaded config, then resolve org -> vdc -> vapp for the configured names.
client.set_highest_supported_version()
client.set_credentials(
    BasicLoginCredentials(config['user'], config['org'], config['password']))

print("Fetching Org...")
org = Org(client, resource=client.get_org())

print("Fetching VDC...")
vdc = VDC(client, resource=org.get_vdc(config['vdc']))

print("Fetching vApp...")
vapp_resource = vdc.get_vapp(config['vapp'])
vapp = VApp(client, resource=vapp_resource)

print("Validating VMs...")
vms = vapp.get_all_vms()

# VM names, index-aligned with `vms`, used below to look resources up.
names = map(lambda vm: vm.get('name'), vms)
names = list(names)

# Attach each configured service's VM resource; a service naming a VM that
# does not exist raises ValueError from list.index.
services = config['services']
for service in services:
    name = service['vm']
    index = names.index(name)
    service['resource'] = vms[index]
def health_check_tcp(service):
    s = socket.socket()
    try:
        s.connect((service['ip'], service['port']))
def add_nodes(client,
              num_nodes,
              node_type,
              org,
              vdc,
              vapp,
              catalog_name,
              template,
              network_name,
              num_cpu=None,
              memory_in_mb=None,
              storage_profile=None,
              ssh_key=None):
    """Clone ``num_nodes`` VMs from a catalog template into ``vapp``.

    Each VM is named ``<node_type>-<4 random chars>``, attached to
    ``network_name`` with pool IP allocation, optionally resized, powered
    on, and (for NFS nodes) customized by running the template's NFS
    server script.

    :param client: tenant client used for vApp/VM operations.
    :param int num_nodes: number of VMs to add; returns None if < 1.
    :param str node_type: prefix for generated VM names; ``NodeType.NFS``
        triggers NFS script execution.
    :param org: pyvcloud Org of the tenant (only its name is used here).
    :param vdc: pyvcloud VDC; resolves the storage profile name.
    :param vapp: target pyvcloud VApp.
    :param str catalog_name: catalog holding the source template.
    :param dict template: template metadata keyed by LocalTemplateKey.
    :param str network_name: vApp network for the new VMs.
    :param num_cpu: vCPU override; falls back to the template's value.
    :param memory_in_mb: memory override; falls back to the template's.
    :param storage_profile: storage profile *name* to place the VMs on.
    :param ssh_key: public key appended to root's authorized_keys via a
        post-customization script.

    :return: dict with the last vCD ``task`` and the VM ``specs`` added,
        or None when ``num_nodes`` < 1.
    :raises NodeCreationError: on any failure; carries the target VM names.
    """
    specs = []
    try:
        if num_nodes < 1:
            return None

        # DEV NOTE: With api v33.0 and onwards, get_catalog operation will fail
        # for non admin users of an an org which is not hosting the catalog,
        # even if the catalog is explicitly shared with the org in question.
        # This happens because for api v 33.0 and onwards, the Org XML no
        # longer returns the href to catalogs accessible to the org, and typed
        # queries hide the catalog link from non admin users.
        # As a workaround, we will use a sys admin client to get the href and
        # pass it forward. Do note that the catalog itself can still be
        # accessed by these non admin users, just that they can't find by the
        # href on their own.

        sys_admin_client = None
        try:
            sys_admin_client = vcd_utils.get_sys_admin_client()
            org_name = org.get_name()
            org_resource = sys_admin_client.get_org_by_name(org_name)
            org_sa = Org(sys_admin_client, resource=org_resource)
            catalog_item = org_sa.get_catalog_item(
                catalog_name, template[LocalTemplateKey.CATALOG_ITEM_NAME])
            catalog_item_href = catalog_item.Entity.get('href')
        finally:
            # Always release the sys admin session, even on lookup failure.
            if sys_admin_client:
                sys_admin_client.logout()

        source_vapp = VApp(client, href=catalog_item_href)
        source_vm = source_vapp.get_all_vms()[0].get('name')
        if storage_profile is not None:
            storage_profile = vdc.get_storage_profile(storage_profile)

        # Guest customization script runs only in the "postcustomization"
        # phase; it installs the caller-supplied ssh public key for root.
        cust_script = None
        if ssh_key is not None:
            cust_script = \
                "#!/usr/bin/env bash\n" \
                "if [ x$1=x\"postcustomization\" ];\n" \
                "then\n" \
                "mkdir -p /root/.ssh\n" \
                f"echo '{ssh_key}' >> /root/.ssh/authorized_keys\n" \
                "chmod -R go-rwx /root/.ssh\n" \
                "fi"

        for n in range(num_nodes):
            name = None
            while True:
                # Random 4-char suffix; get_vm raising means the name is
                # not yet taken in this vApp, so it is safe to use.
                name = f"{node_type}-{''.join(random.choices(string.ascii_lowercase + string.digits, k=4))}"  # noqa: E501
                try:
                    vapp.get_vm(name)
                except Exception:
                    break
            spec = {
                'source_vm_name': source_vm,
                'vapp': source_vapp.resource,
                'target_vm_name': name,
                'hostname': name,
                'password_auto': True,
                'network': network_name,
                'ip_allocation_mode': 'pool'
            }
            if cust_script is not None:
                spec['cust_script'] = cust_script
            if storage_profile is not None:
                spec['storage_profile'] = storage_profile
            specs.append(spec)

        # Add powered off so cpu/memory can be adjusted before first boot.
        task = vapp.add_vms(specs, power_on=False)
        client.get_task_monitor().wait_for_status(task)
        vapp.reload()

        if not num_cpu:
            num_cpu = template[LocalTemplateKey.CPU]
        if not memory_in_mb:
            memory_in_mb = template[LocalTemplateKey.MEMORY]
        for spec in specs:
            vm_name = spec['target_vm_name']
            vm_resource = vapp.get_vm(vm_name)
            vm = VM(client, resource=vm_resource)

            task = vm.modify_cpu(num_cpu)
            client.get_task_monitor().wait_for_status(task)

            task = vm.modify_memory(memory_in_mb)
            client.get_task_monitor().wait_for_status(task)

            task = vm.power_on()
            client.get_task_monitor().wait_for_status(task)
            vapp.reload()

            if node_type == NodeType.NFS:
                LOGGER.debug(f"Enabling NFS server on {vm_name}")
                script_filepath = get_local_script_filepath(
                    template[LocalTemplateKey.NAME],
                    template[LocalTemplateKey.REVISION], ScriptFile.NFSD)
                script = utils.read_data_file(script_filepath, logger=LOGGER)
                exec_results = execute_script_in_nodes(vapp=vapp,
                                                       node_names=[vm_name],
                                                       script=script)
                errors = _get_script_execution_errors(exec_results)
                if errors:
                    raise ScriptExecutionError(
                        f"VM customization script execution failed on node "
                        f"{vm_name}:{errors}")
    except Exception as e:
        # TODO: get details of the exception to determine cause of failure,
        # e.g. not enough resources available.
        node_list = [entry.get('target_vm_name') for entry in specs]
        # Chain the original exception so its traceback is not lost.
        raise NodeCreationError(node_list, str(e)) from e

    vapp.reload()
    return {'task': task, 'specs': specs}
Exemple #21
0
    source_vapp_resource = client.get_resource(catalog_item.Entity.get('href'))
    create_master_vm(vms, source_vapp_resource, vm_cfg)
    # wait for the master to start and get the first ip
    time.sleep(15)
    for vm_slave_cfg in vm_cfg['slaves']:
        source_slave_vapp_resource = client.get_resource(
            catalog_item.Entity.get('href'))
        create_slave_vm(vms, vm_cfg['name'], source_slave_vapp_resource,
                        vm_slave_cfg)

# Kick off creation of any VMs still missing from the vApp, then poll until
# every VM has been created and powered on.
print("Creating (if needed) ...")
result = vapp.add_vms(vms)
handle_task(client, result)

print("Statuses ...")
# Busy-poll until the vApp reports the full set of configured VMs.
# NOTE(review): no sleep in this loop -- every iteration re-fetches the
# vApp; consider adding a short delay to avoid hammering the API.
while (vapp.get_all_vms() is None
       or len(vapp.get_all_vms()) < len(cfg.vapp['vms'])):
    print("  VMs ... {0}\r".format("waiting full start ...".ljust(20)), end='')
    vapp.reload()

for vm in vapp.get_all_vms():
    vm_obj = VM(client, resource=vm)
    while not vm_obj.is_powered_on():
        print("  VM '{0}' ... {1}\r".format(vm.get('name'),
                                            "DOWN (waiting)".ljust(20)),
              end='')
        vm_obj.reload()
        time.sleep(5)
    # Bug fix: pad the status column itself ("UP") to 20 chars, matching the
    # "DOWN (waiting)" line above; previously .ljust(20) was applied to the
    # whole formatted line, after the carriage return, so the status column
    # was never actually cleared.
    print("  VM '{0}' ... {1}\r".format(vm.get('name'), "UP".ljust(20)),
          end='')
    while (vm_obj.list_nics() is None or (len(vm_obj.list_nics()) == 0)
def convert_cluster(ctx, config_file_name, skip_config_decryption,
                    cluster_name, admin_password, org_name, vdc_name,
                    skip_wait_for_gc):
    """Upgrade legacy CSE cluster metadata and guest admin passwords.

    For each matching cluster vApp this command:
      1. replaces the old 'cse.template' metadata entry with
         cse.template.name / cse.template.revision values derived from
         hard-coded CSE release history;
      2. derives and stores os / docker / kubernetes / cni metadata from
         the (possibly newly written) template name;
      3. if any VM's admin password is unreadable, undeploys the vApp,
         re-enables guest customization on every VM using
         ``admin_password`` (or an auto-generated one) and redeploys.
    Unless ``skip_wait_for_gc`` is set, blocks until guest customization
    finishes on all touched VMs.
    """
    if skip_config_decryption:
        decryption_password = None
    else:
        decryption_password = os.getenv('CSE_CONFIG_PASSWORD') or prompt_text(
            PASSWORD_FOR_CONFIG_DECRYPTION_MSG, color='green', hide_input=True)

    try:
        check_python_version()
    except Exception as err:
        click.secho(str(err), fg='red')
        sys.exit(1)

    client = None
    try:
        console_message_printer = ConsoleMessagePrinter()
        config = get_validated_config(
            config_file_name,
            skip_config_decryption=skip_config_decryption,
            decryption_password=decryption_password,
            msg_update_callback=console_message_printer)

        # Wire logging is opt-in via the service config.
        log_filename = None
        log_wire = str_to_bool(config['service'].get('log_wire'))
        if log_wire:
            log_filename = 'cluster_convert_wire.log'

        client = Client(config['vcd']['host'],
                        api_version=config['vcd']['api_version'],
                        verify_ssl_certs=config['vcd']['verify'],
                        log_file=log_filename,
                        log_requests=log_wire,
                        log_headers=log_wire,
                        log_bodies=log_wire)
        credentials = BasicLoginCredentials(config['vcd']['username'],
                                            SYSTEM_ORG_NAME,
                                            config['vcd']['password'])
        client.set_credentials(credentials)
        msg = f"Connected to vCD as system administrator: " \
              f"{config['vcd']['host']}:{config['vcd']['port']}"
        console_message_printer.general(msg)

        cluster_records = get_all_clusters(client=client,
                                           cluster_name=cluster_name,
                                           org_name=org_name,
                                           ovdc_name=vdc_name)

        if len(cluster_records) == 0:
            console_message_printer.info("No clusters were found.")
            return

        vms = []
        for cluster in cluster_records:
            console_message_printer.info(
                f"Processing cluster '{cluster['name']}'.")
            vapp_href = cluster['vapp_href']
            vapp = VApp(client, href=vapp_href)

            # this step removes the old 'cse.template' metadata and adds
            # cse.template.name and cse.template.revision metadata
            # using hard-coded values taken from github history
            console_message_printer.info("Processing metadata of cluster.")
            metadata_dict = metadata_to_dict(vapp.get_metadata())
            old_template_name = metadata_dict.get(
                ClusterMetadataKey.BACKWARD_COMPATIBILE_TEMPLATE_NAME
            )  # noqa: E501
            new_template_name = None
            cse_version = metadata_dict.get(ClusterMetadataKey.CSE_VERSION)
            if old_template_name:
                console_message_printer.info(
                    "Determining k8s version on cluster.")
                if 'photon' in old_template_name:
                    new_template_name = 'photon-v2'
                    # Bug fix: single-element "tuples" below were previously
                    # written as ('1.0.0') -- a bare string -- turning the
                    # membership test into a substring check; the trailing
                    # comma makes them real tuples.
                    if cse_version in ('1.0.0',):
                        new_template_name += '_k8s-1.8_weave-2.0.5'
                    elif cse_version in ('1.1.0', '1.2.0', '1.2.1', '1.2.2',
                                         '1.2.3', '1.2.4'):  # noqa: E501
                        new_template_name += '_k8s-1.9_weave-2.3.0'
                    elif cse_version in (
                            '1.2.5',
                            '1.2.6',
                            '1.2.7',
                    ):  # noqa: E501
                        new_template_name += '_k8s-1.10_weave-2.3.0'
                    elif cse_version in ('2.0.0',):
                        new_template_name += '_k8s-1.12_weave-2.3.0'
                elif 'ubuntu' in old_template_name:
                    new_template_name = 'ubuntu-16.04'
                    if cse_version in ('1.0.0',):
                        new_template_name += '_k8s-1.9_weave-2.1.3'
                    elif cse_version in ('1.1.0', '1.2.0', '1.2.1', '1.2.2',
                                         '1.2.3', '1.2.4', '1.2.5', '1.2.6',
                                         '1.2.7'):  # noqa: E501
                        new_template_name += '_k8s-1.10_weave-2.3.0'
                    elif cse_version in ('2.0.0',):
                        new_template_name += '_k8s-1.13_weave-2.3.0'

            if new_template_name:
                console_message_printer.info("Updating metadata of cluster.")
                task = vapp.remove_metadata(
                    ClusterMetadataKey.BACKWARD_COMPATIBILE_TEMPLATE_NAME
                )  # noqa: E501
                client.get_task_monitor().wait_for_success(task)
                new_metadata_to_add = {
                    ClusterMetadataKey.TEMPLATE_NAME: new_template_name,
                    ClusterMetadataKey.TEMPLATE_REVISION: 0
                }
                task = vapp.set_multiple_metadata(new_metadata_to_add)
                client.get_task_monitor().wait_for_success(task)

            # this step uses hard-coded data from the newly updated
            # cse.template.name and cse.template.revision metadata fields as
            # well as github history to add [cse.os, cse.docker.version,
            # cse.kubernetes, cse.kubernetes.version, cse.cni, cse.cni.version]
            # to the clusters
            vapp.reload()
            metadata_dict = metadata_to_dict(vapp.get_metadata())
            template_name = metadata_dict.get(ClusterMetadataKey.TEMPLATE_NAME)
            template_revision = str(
                metadata_dict.get(ClusterMetadataKey.TEMPLATE_REVISION,
                                  '0'))  # noqa: E501

            if template_name:
                k8s_version, docker_version = get_k8s_and_docker_versions(
                    template_name,
                    template_revision=template_revision,
                    cse_version=cse_version)  # noqa: E501
                # Template names look like '<os>_k8s-<ver>_<cni>-<ver>'.
                tokens = template_name.split('_')
                new_metadata = {
                    ClusterMetadataKey.OS: tokens[0],
                    ClusterMetadataKey.DOCKER_VERSION: docker_version,
                    ClusterMetadataKey.KUBERNETES: 'upstream',
                    ClusterMetadataKey.KUBERNETES_VERSION: k8s_version,
                    ClusterMetadataKey.CNI: tokens[2].split('-')[0],
                    ClusterMetadataKey.CNI_VERSION: tokens[2].split('-')[1],
                }
                task = vapp.set_multiple_metadata(new_metadata)
                client.get_task_monitor().wait_for_success(task)

            console_message_printer.general(
                "Finished processing metadata of cluster.")

            # Only reset passwords when at least one VM's admin password
            # cannot be read back (legacy clusters).
            reset_admin_pw = False
            vm_resources = vapp.get_all_vms()
            for vm_resource in vm_resources:
                try:
                    vapp.get_admin_password(vm_resource.get('name'))
                except EntityNotFoundException:
                    reset_admin_pw = True
                    break

            if reset_admin_pw:
                try:
                    console_message_printer.info(
                        f"Undeploying the vApp '{cluster['name']}'")
                    task = vapp.undeploy()
                    client.get_task_monitor().wait_for_success(task)
                    console_message_printer.general(
                        "Successfully undeployed the vApp.")
                except Exception as err:
                    # Best effort: vApp may already be undeployed.
                    console_message_printer.error(str(err))

                for vm_resource in vm_resources:
                    console_message_printer.info(
                        f"Processing vm '{vm_resource.get('name')}'.")
                    vm = VM(client, href=vm_resource.get('href'))
                    vms.append(vm)

                    console_message_printer.info("Updating vm admin password")
                    task = vm.update_guest_customization_section(
                        enabled=True,
                        admin_password_enabled=True,
                        admin_password_auto=not admin_password,
                        admin_password=admin_password,
                    )
                    client.get_task_monitor().wait_for_success(task)
                    console_message_printer.general("Successfully updated vm")

                    console_message_printer.info("Deploying vm.")
                    task = vm.power_on_and_force_recustomization()
                    client.get_task_monitor().wait_for_success(task)
                    console_message_printer.general("Successfully deployed vm")

                console_message_printer.info("Deploying cluster")
                task = vapp.deploy(power_on=True)
                client.get_task_monitor().wait_for_success(task)
                console_message_printer.general(
                    "Successfully deployed cluster")  # noqa: E501

            console_message_printer.general(
                f"Successfully processed cluster '{cluster['name']}'")

        if skip_wait_for_gc:
            return

        # Poll until guest customization completes on every touched VM.
        while True:
            to_remove = []
            for vm in vms:
                status = vm.get_guest_customization_status()
                if status != 'GC_PENDING':
                    to_remove.append(vm)
            for vm in to_remove:
                vms.remove(vm)
            console_message_printer.info(
                f"Waiting on guest customization to finish on {len(vms)} vms.")
            if not len(vms) == 0:
                time.sleep(5)
            else:
                break
    except cryptography.fernet.InvalidToken:
        click.secho(CONFIG_DECRYPTION_ERROR_MSG, fg='red')
    except Exception as err:
        click.secho(str(err), fg='red')
    finally:
        if client:
            client.logout()
def add_nodes(client,
              num_nodes,
              node_type,
              org,
              vdc,
              vapp,
              catalog_name,
              template,
              network_name,
              num_cpu=None,
              memory_in_mb=None,
              storage_profile=None,
              ssh_key_filepath=None):
    """Clone ``num_nodes`` VMs from a catalog template into ``vapp``.

    Each VM is named ``<node_type>-<4 random chars>``, attached to
    ``network_name`` with pool IP allocation, optionally resized, powered
    on, and (for NFS nodes) customized by running the template's NFS
    server script.

    :param client: tenant client used for vApp/VM operations.
    :param int num_nodes: number of VMs to add; returns None if < 1.
    :param str node_type: prefix for generated VM names; ``NodeType.NFS``
        triggers NFS script execution.
    :param org: pyvcloud Org holding the catalog.
    :param vdc: pyvcloud VDC; resolves the storage profile name.
    :param vapp: target pyvcloud VApp.
    :param str catalog_name: catalog holding the source template.
    :param dict template: template metadata keyed by LocalTemplateKey.
    :param str network_name: vApp network for the new VMs.
    :param num_cpu: vCPU override; falls back to the template's value.
    :param memory_in_mb: memory override; falls back to the template's.
    :param storage_profile: storage profile *name* to place the VMs on.
    :param ssh_key_filepath: value echoed into root's authorized_keys via
        a post-customization script. NOTE(review): despite the name, the
        value itself -- not the file's contents -- is written to
        authorized_keys, so callers must pass the public key text;
        confirm against callers.

    :return: dict with the last vCD ``task`` and the VM ``specs`` added,
        or None when ``num_nodes`` < 1.
    :raises NodeCreationError: on any failure; carries the target VM names.
    """
    specs = []
    try:
        if num_nodes < 1:
            return None
        catalog_item = org.get_catalog_item(
            catalog_name, template[LocalTemplateKey.CATALOG_ITEM_NAME])
        source_vapp = VApp(client, href=catalog_item.Entity.get('href'))
        source_vm = source_vapp.get_all_vms()[0].get('name')
        if storage_profile is not None:
            storage_profile = vdc.get_storage_profile(storage_profile)

        # Guest customization script runs only in the "postcustomization"
        # phase; it installs the caller-supplied key material for root.
        cust_script = None
        if ssh_key_filepath is not None:
            cust_script = \
                "#!/usr/bin/env bash\n" \
                "if [ x$1=x\"postcustomization\" ];\n" \
                "then\n" \
                "mkdir -p /root/.ssh\n" \
                f"echo '{ssh_key_filepath}' >> /root/.ssh/authorized_keys\n" \
                "chmod -R go-rwx /root/.ssh\n" \
                "fi"

        for n in range(num_nodes):
            name = None
            while True:
                # Random 4-char suffix; get_vm raising means the name is
                # not yet taken in this vApp, so it is safe to use.
                name = f"{node_type}-{''.join(random.choices(string.ascii_lowercase + string.digits, k=4))}"  # noqa: E501
                try:
                    vapp.get_vm(name)
                except Exception:
                    break
            spec = {
                'source_vm_name': source_vm,
                'vapp': source_vapp.resource,
                'target_vm_name': name,
                'hostname': name,
                'password_auto': True,
                'network': network_name,
                'ip_allocation_mode': 'pool'
            }
            if cust_script is not None:
                spec['cust_script'] = cust_script
            if storage_profile is not None:
                spec['storage_profile'] = storage_profile
            specs.append(spec)

        # Add powered off so cpu/memory can be adjusted before first boot.
        task = vapp.add_vms(specs, power_on=False)
        client.get_task_monitor().wait_for_status(task)
        vapp.reload()

        if not num_cpu:
            num_cpu = template[LocalTemplateKey.CPU]
        if not memory_in_mb:
            memory_in_mb = template[LocalTemplateKey.MEMORY]
        for spec in specs:
            vm_name = spec['target_vm_name']
            vm_resource = vapp.get_vm(vm_name)
            vm = VM(client, resource=vm_resource)

            task = vm.modify_cpu(num_cpu)
            client.get_task_monitor().wait_for_status(task)

            task = vm.modify_memory(memory_in_mb)
            client.get_task_monitor().wait_for_status(task)

            task = vm.power_on()
            client.get_task_monitor().wait_for_status(task)
            vapp.reload()

            if node_type == NodeType.NFS:
                LOGGER.debug(f"Enabling NFS server on {vm_name}")
                script_filepath = get_local_script_filepath(
                    template[LocalTemplateKey.NAME],
                    template[LocalTemplateKey.REVISION], ScriptFile.NFSD)
                script = utils.read_data_file(script_filepath, logger=LOGGER)
                exec_results = execute_script_in_nodes(vapp=vapp,
                                                       node_names=[vm_name],
                                                       script=script)
                errors = _get_script_execution_errors(exec_results)
                if errors:
                    raise ScriptExecutionError(
                        f"VM customization script execution failed on node "
                        f"{vm_name}:{errors}")
    except Exception as e:
        # TODO: get details of the exception to determine cause of failure,
        # e.g. not enough resources available.
        node_list = [entry.get('target_vm_name') for entry in specs]
        # Chain the original exception so its traceback is not lost.
        raise NodeCreationError(node_list, str(e)) from e

    vapp.reload()
    return {'task': task, 'specs': specs}
def add_nodes(qty,
              template,
              node_type,
              config,
              client,
              org,
              vdc,
              vapp,
              body,
              wait=True):
    """Add ``qty`` node VMs cloned from a catalog template to ``vapp``.

    VMs are named ``<node_type>-<4 random chars>``, attached to the
    network named in ``body``, and given a guest-customization script
    that resets the root password (and installs an ssh key when one is
    supplied). When ``body`` requests a cpu/memory override, the VMs are
    added powered off, reconfigured, then powered on.

    :param int qty: number of VMs to add; returns None if < 1.
    :param dict template: template metadata ('catalog_item',
        'admin_password').
    :param str node_type: prefix for generated VM names.
    :param dict config: broker configuration; config['broker']['catalog']
        names the catalog holding the template.
    :param client: pyvcloud client used for task monitoring.
    :param org: pyvcloud Org holding the catalog.
    :param vdc: pyvcloud VDC; resolves storage profile names.
    :param vapp: target pyvcloud VApp.
    :param dict body: request body ('network', and optionally
        'storage_profile', 'ssh_key', 'cpu_count', 'memory').
    :param bool wait: block on each vCD task when True.

    :return: dict with the last vCD ``task`` and the VM ``specs`` added,
        or None when ``qty`` < 1.
    :raises Exception: when a monitored task does not end in SUCCESS.
    """
    if qty < 1:
        return None

    def _wait_for_task(task, check_success=False):
        # All task waits in this function share the same monitor settings;
        # factored out of four previously copy-pasted blocks. With
        # check_success, a non-SUCCESS terminal state raises with whatever
        # failure detail vCD attached to the task.
        task = client.get_task_monitor().wait_for_status(
            task=task,
            timeout=600,
            poll_frequency=5,
            fail_on_status=None,
            expected_target_statuses=[
                TaskStatus.SUCCESS, TaskStatus.ABORTED, TaskStatus.ERROR,
                TaskStatus.CANCELED
            ],
            callback=None)
        if check_success and \
                task.get('status').lower() != TaskStatus.SUCCESS.value:
            task_resource = client.get_resource(task.get('href'))
            if hasattr(task_resource, 'taskDetails'):
                raise Exception(task_resource.get('taskDetails'))
            elif hasattr(task_resource, 'Details'):
                raise Exception(task_resource.Details.text)
            else:
                raise Exception('Couldn\'t add node(s).')
        return task

    specs = []
    catalog_item = org.get_catalog_item(config['broker']['catalog'],
                                        template['catalog_item'])
    source_vapp = VApp(client, href=catalog_item.Entity.get('href'))
    source_vm = source_vapp.get_all_vms()[0].get('name')
    storage_profile = None
    if 'storage_profile' in body and body['storage_profile'] is not None:
        storage_profile = vdc.get_storage_profile(body['storage_profile'])
    # Guest customization script runs only in the "postcustomization" phase.
    cust_script_init = \
"""#!/usr/bin/env bash
if [ x$1=x"postcustomization" ];
then
""" # NOQA
    cust_script_common = \
"""
echo "root:{password}" | chpasswd
""".format(password=template['admin_password']) # NOQA
    if 'ssh_key' in body:
        cust_script_common += \
"""
mkdir -p /root/.ssh
echo '{ssh_key}' >> /root/.ssh/authorized_keys
chmod -R go-rwx /root/.ssh
""".format(ssh_key=body['ssh_key'])  # NOQA
    cust_script_end = \
"""
fi
"""  # NOQA
    cust_script = cust_script_init
    cust_script += cust_script_common
    cust_script += cust_script_end
    for n in range(qty):
        name = None
        while True:
            # Random 4-char suffix; get_vm raising means the name is free.
            name = '%s-%s' % (node_type, ''.join(
                random.choices(string.ascii_lowercase + string.digits, k=4)))
            try:
                vapp.get_vm(name)
            except Exception:
                break
        spec = {
            'source_vm_name': source_vm,
            'vapp': source_vapp.resource,
            'target_vm_name': name,
            'hostname': name,
            'network': body['network'],
            'ip_allocation_mode': 'pool',
            'cust_script': cust_script
        }
        if storage_profile is not None:
            spec['storage_profile'] = storage_profile
        specs.append(spec)
    if ('cpu_count' in body and body['cpu_count'] is not None) or (
            'memory' in body and body['memory'] is not None):
        reconfigure_hw = True
    else:
        reconfigure_hw = False

    # Power on immediately only when no hardware reconfiguration is needed.
    task = vapp.add_vms(specs, power_on=not reconfigure_hw)
    if wait:
        task = _wait_for_task(task, check_success=True)
    if wait and reconfigure_hw:
        vapp.reload()
        for spec in specs:
            vm_resource = vapp.get_vm(spec['target_vm_name'])
            if 'cpu_count' in body and body['cpu_count'] is not None:
                vm = VM(client, resource=vm_resource)
                task = _wait_for_task(vm.modify_cpu(body['cpu_count']))
            if 'memory' in body and body['memory'] is not None:
                vm = VM(client, resource=vm_resource)
                task = _wait_for_task(vm.modify_memory(body['memory']))
            vm = VM(client, resource=vm_resource)
            # `wait` is necessarily True inside this branch, so the original
            # inner `if wait:` guard was redundant and has been dropped.
            task = _wait_for_task(vm.power_on(), check_success=True)
    return {'task': task, 'specs': specs}
Exemple #25
0
def convert_cluster(ctx, config_file_name, cluster_name, password, org_name,
                    vdc_name, skip_wait_for_gc):
    """Upgrade legacy CSE cluster metadata and reset VM admin passwords.

    For each matching cluster vApp: replace the old 'cse.template'
    metadata entry with cse.template.name / cse.template.revision values
    derived from hard-coded CSE release history, undeploy the vApp,
    re-enable guest customization on every VM using ``password`` (or an
    auto-generated one), and redeploy. Unless ``skip_wait_for_gc`` is
    set, blocks until guest customization finishes on all VMs.
    """
    try:
        check_python_version()
    except Exception as err:
        click.secho(str(err), fg='red')
        sys.exit(1)

    client = None
    try:
        console_message_printer = ConsoleMessagePrinter()
        config = get_validated_config(
            config_file_name, msg_update_callback=console_message_printer)

        # Wire logging is opt-in via the service config.
        log_filename = None
        log_wire = str_to_bool(config['service'].get('log_wire'))
        if log_wire:
            log_filename = 'cluster_convert_wire.log'

        client = Client(config['vcd']['host'],
                        api_version=config['vcd']['api_version'],
                        verify_ssl_certs=config['vcd']['verify'],
                        log_file=log_filename,
                        log_requests=log_wire,
                        log_headers=log_wire,
                        log_bodies=log_wire)
        credentials = BasicLoginCredentials(config['vcd']['username'],
                                            SYSTEM_ORG_NAME,
                                            config['vcd']['password'])
        client.set_credentials(credentials)
        msg = f"Connected to vCD as system administrator: " \
              f"{config['vcd']['host']}:{config['vcd']['port']}"
        console_message_printer.general(msg)

        cluster_records = get_all_clusters(client=client,
                                           cluster_name=cluster_name,
                                           org_name=org_name,
                                           ovdc_name=vdc_name)

        if len(cluster_records) == 0:
            console_message_printer.info("No clusters were found.")
            return

        vms = []
        for cluster in cluster_records:
            console_message_printer.info(
                f"Processing cluster '{cluster['name']}'.")
            vapp_href = cluster['vapp_href']
            vapp = VApp(client, href=vapp_href)

            # Map the legacy 'cse.template' metadata to the new
            # cse.template.name/revision keys using hard-coded values
            # taken from CSE release history.
            console_message_printer.info("Processing metadata of cluster.")
            metadata = metadata_to_dict(vapp.get_metadata())
            old_template_name = None
            new_template_name = None
            if ClusterMetadataKey.BACKWARD_COMPATIBILE_TEMPLATE_NAME in metadata: # noqa: E501
                old_template_name = metadata.pop(ClusterMetadataKey.BACKWARD_COMPATIBILE_TEMPLATE_NAME) # noqa: E501
            # NOTE(review): `version` may be None when CSE_VERSION metadata is
            # absent; the substring checks below would then raise TypeError --
            # confirm that clusters with the legacy key always carry it.
            version = metadata.get(ClusterMetadataKey.CSE_VERSION)
            if old_template_name:
                console_message_printer.info(
                    "Determining k8s version on cluster.")
                if 'photon' in old_template_name:
                    new_template_name = 'photon-v2'
                    if '1.0.0' in version:
                        new_template_name += '_k8s-1.8_weave-2.0.5'
                    elif any(ver in version for ver in ('1.1.0', '1.2.0', '1.2.1', '1.2.2', '1.2.3', '1.2.4',)): # noqa: E501
                        new_template_name += '_k8s-1.9_weave-2.3.0'
                    elif any(ver in version for ver in ('1.2.5', '1.2.6', '1.2.7',)): # noqa: E501
                        new_template_name += '_k8s-1.10_weave-2.3.0'
                    elif '2.0.0' in version:
                        new_template_name += '_k8s-1.12_weave-2.3.0'
                elif 'ubuntu' in old_template_name:
                    new_template_name = 'ubuntu-16.04'
                    if '1.0.0' in version:
                        new_template_name += '_k8s-1.9_weave-2.1.3'
                    elif any(ver in version for ver in ('1.1.0', '1.2.0', '1.2.1', '1.2.2', '1.2.3', '1.2.4', '1.2.5', '1.2.6', '1.2.7')): # noqa: E501
                        new_template_name += '_k8s-1.10_weave-2.3.0'
                    elif '2.0.0' in version:
                        new_template_name += '_k8s-1.13_weave-2.3.0'

        if new_template_name := None if False else new_template_name:  # noqa: E501
            pass
    except Exception as err:
        click.secho(str(err), fg='red')
    finally:
        if client:
            client.logout()
Exemple #26
0
def add_nodes(qty, template, node_type, config, client, org, vdc, vapp,
              req_spec):
    """Add `qty` nodes of `node_type` to the cluster vApp.

    Clones VMs from the template's catalog item into `vapp`, optionally
    resizes cpu/memory (adding the vms powered off, resizing, then powering
    on), resets the root password to the template's admin password and, for
    NFS nodes, runs the NFS-server setup script.

    :param int qty: number of nodes to add; returns None when < 1.
    :param dict template: cluster template metadata (catalog item name,
        admin password, name/revision for script lookup).
    :param str node_type: NodeType value used as the vm name prefix.
    :param dict config: CSE server config (provides broker catalog name).
    :param client: pyvcloud Client.
    :param org: pyvcloud Org holding the template catalog.
    :param vdc: pyvcloud VDC (storage profile lookup).
    :param vapp: pyvcloud VApp of the cluster being modified.
    :param req_spec: request payload keyed by RequestKey.

    :return: dict with the final task and the list of vm specs, or None
        when qty < 1.
    :raises NodeCreationError: wraps any failure, carrying the names of the
        nodes that were being created.
    """
    try:
        if qty < 1:
            return None
        specs = []
        # The source VM is the (single) VM inside the template vApp stored
        # in the broker catalog.
        catalog_item = org.get_catalog_item(config['broker']['catalog'],
                                            template['catalog_item_name'])
        source_vapp = VApp(client, href=catalog_item.Entity.get('href'))
        source_vm = source_vapp.get_all_vms()[0].get('name')
        storage_profile = req_spec.get(RequestKey.STORAGE_PROFILE_NAME)
        if storage_profile is not None:
            storage_profile = vdc.get_storage_profile(storage_profile)

        cust_script_common = ''

        # Guest customization invokes this script in both phases; only act
        # during the post-customization phase. (Fixed: the previous
        # `[ x$1=x"postcustomization" ]` was one non-empty string, so the
        # guard was always true.)
        cust_script_init = \
"""
#!/usr/bin/env bash
if [ "x$1" = x"postcustomization" ];
then
""" # noqa: E128

        cust_script_end = \
"""
fi
"""  # noqa: E128

        # NOTE(review): despite the name, this value is written into
        # authorized_keys, so it appears to hold the public key text rather
        # than a path — confirm against the request builder.
        ssh_key_filepath = req_spec.get(RequestKey.SSH_KEY_FILEPATH)
        if ssh_key_filepath is not None:
            cust_script_common += \
f"""
mkdir -p /root/.ssh
echo '{ssh_key_filepath}' >> /root/.ssh/authorized_keys
chmod -R go-rwx /root/.ssh
""" # noqa

        if cust_script_common == '':
            cust_script = None
        else:
            cust_script = cust_script_init + cust_script_common + \
                cust_script_end

        for _ in range(qty):
            # Generate random vm names until one is free; get_vm raises
            # when the name is not present in the vApp.
            while True:
                name = f"{node_type}-{''.join(random.choices(string.ascii_lowercase + string.digits, k=4))}"  # noqa: E501
                try:
                    vapp.get_vm(name)
                except Exception:
                    break
            spec = {
                'source_vm_name': source_vm,
                'vapp': source_vapp.resource,
                'target_vm_name': name,
                'hostname': name,
                'password_auto': True,
                'network': req_spec.get(RequestKey.NETWORK_NAME),
                'ip_allocation_mode': 'pool'
            }
            if cust_script is not None:
                spec['cust_script'] = cust_script
            if storage_profile is not None:
                spec['storage_profile'] = storage_profile
            specs.append(spec)

        num_cpu = req_spec.get(RequestKey.NUM_CPU)
        mb_memory = req_spec.get(RequestKey.MB_MEMORY)
        configure_hw = bool(num_cpu or mb_memory)
        # When hw must be resized, add the vms powered off; they are
        # powered on individually after resizing below.
        task = vapp.add_vms(specs, power_on=not configure_hw)
        # TODO(get details of the exception like not enough resources avail)
        client.get_task_monitor().wait_for_status(task)
        vapp.reload()
        if configure_hw:
            for spec in specs:
                vm = VM(client,
                        resource=vapp.get_vm(spec['target_vm_name']))
                if num_cpu:
                    task = vm.modify_cpu(num_cpu)
                    client.get_task_monitor().wait_for_status(task)
                if mb_memory:
                    task = vm.modify_memory(mb_memory)
                    client.get_task_monitor().wait_for_status(task)
                task = vm.power_on()
                client.get_task_monitor().wait_for_status(task)
            vapp.reload()

        for spec in specs:
            vm_name = spec['target_vm_name']
            vm_resource = vapp.get_vm(vm_name)
            # Each vm auto-generates its own admin password
            # (password_auto=True), so fetch it per node. (Fixed: the
            # previous code fetched it once, using the loop variable leaked
            # from the preceding loop, i.e. the LAST node's password for
            # every node.)
            password = vapp.get_admin_password(vm_name)
            command = \
                f"/bin/echo \"root:{template['admin_password']}\" | chpasswd"
            nodes = [vm_resource]
            execute_script_in_nodes(config,
                                    vapp,
                                    password,
                                    command,
                                    nodes,
                                    check_tools=True,
                                    wait=False)
            if node_type == NodeType.NFS:
                LOGGER.debug(
                    f"enabling NFS server on {vm_name}")
                script_filepath = get_local_script_filepath(
                    template['name'], template['revision'], ScriptFile.NFSD)
                script = read_data_file(script_filepath, logger=LOGGER)
                exec_results = execute_script_in_nodes(
                    config, vapp, template['admin_password'], script, nodes)
                errors = get_script_execution_errors(exec_results)
                if errors:
                    raise ScriptExecutionError(
                        f"Script execution failed on node "
                        f"{vm_name}:{errors}")
    except Exception as e:
        node_list = [entry.get('target_vm_name') for entry in specs]
        raise NodeCreationError(node_list, str(e))
    return {'task': task, 'specs': specs}