def get_cluster_config(self, data):
        """Get the cluster's kube config contents.

        Common broker function that validates data for 'cluster config'
        operation and returns the cluster's kube config file contents
        as a string.

        Required data: cluster_name
        Optional data and default values: org_name=None, ovdc_name=None
        """
        required = [
            RequestKey.CLUSTER_NAME
        ]
        utils.ensure_keys_in_dict(required, data, dict_name='data')
        defaults = {
            RequestKey.ORG_NAME: None,
            RequestKey.OVDC_NAME: None
        }
        validated_data = {**defaults, **data}

        cluster_name = validated_data[RequestKey.CLUSTER_NAME]
        cluster = get_cluster(self.tenant_client, cluster_name,
                              org_name=validated_data[RequestKey.ORG_NAME],
                              ovdc_name=validated_data[RequestKey.OVDC_NAME])
        vapp = VApp(self.tenant_client, href=cluster['vapp_href'])
        node_names = get_node_names(vapp, NodeType.MASTER)

        all_results = []
        try:
            for node_name in node_names:
                LOGGER.debug(f"getting file from node {node_name}")
                password = vapp.get_admin_password(node_name)
                vs = vs_utils.get_vsphere(self.sys_admin_client, vapp,
                                          vm_name=node_name, logger=LOGGER)
                vs.connect()
                moid = vapp.get_vm_moid(node_name)
                vm = vs.get_vm_by_moid(moid)
                filename = '/root/.kube/config'
                result = vs.download_file_from_guest(vm, 'root',
                                                     password,
                                                     filename)
                all_results.append(result)
        finally:
            self.logout_sys_admin_client()

        if len(all_results) == 0 or all_results[0].status_code != requests.codes.ok: # noqa: E501
            raise ClusterOperationError("Couldn't get cluster configuration")
        return all_results[0].content.decode()
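# --- Usage sketch (illustrative, not from the original module) ---
# `broker` stands in for an instance of the class that owns
# get_cluster_config; the RequestKey entries mirror the validation above.
def example_get_cluster_config(broker):
    data = {
        RequestKey.CLUSTER_NAME: 'mycluster',  # required
        RequestKey.ORG_NAME: 'myorg',          # optional, defaults to None
        RequestKey.OVDC_NAME: 'myovdc',        # optional, defaults to None
    }
    # Returns the master node's /root/.kube/config contents as a string.
    return broker.get_cluster_config(data)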
def add_nodes(qty, template, node_type, config, client, org, vdc, vapp, body):
    if qty < 1:
        return None
    specs = []
    catalog_item = org.get_catalog_item(config['broker']['catalog'],
                                        template['catalog_item'])
    source_vapp = VApp(client, href=catalog_item.Entity.get('href'))
    source_vm = source_vapp.get_all_vms()[0].get('name')
    storage_profile = None
    if 'storage_profile' in body and body['storage_profile'] is not None:
        storage_profile = vdc.get_storage_profile(body['storage_profile'])
    cust_script_init = \
"""#!/usr/bin/env bash
if [ "x$1" = "xpostcustomization" ];
then
""" # NOQA
    cust_script_common = ''
    cust_script_end = \
"""
fi
"""  # NOQA
    if 'ssh_key' in body and body['ssh_key'] is not None:
        cust_script_common += \
"""
mkdir -p /root/.ssh
echo '{ssh_key}' >> /root/.ssh/authorized_keys
chmod -R go-rwx /root/.ssh
""".format(ssh_key=body['ssh_key'])  # NOQA

    if cust_script_common == '':
        cust_script = None
    else:
        cust_script = cust_script_init + cust_script_common + cust_script_end
    for n in range(qty):
        name = None
        # Generate random 4-char suffixes until a name is free in the vApp;
        # vapp.get_vm() raising means no VM with that name exists yet.
        while True:
            name = '%s-%s' % (node_type, ''.join(
                random.choices(string.ascii_lowercase + string.digits, k=4)))
            try:
                vapp.get_vm(name)
            except Exception:
                break
        spec = {
            'source_vm_name': source_vm,
            'vapp': source_vapp.resource,
            'target_vm_name': name,
            'hostname': name,
            'network': body['network'],
            'ip_allocation_mode': 'pool'
        }
        if cust_script is not None:
            spec['cust_script'] = cust_script
        if storage_profile is not None:
            spec['storage_profile'] = storage_profile
        specs.append(spec)
    if ('cpu' in body and body['cpu'] is not None) or \
       ('memory' in body and body['memory'] is not None):
        reconfigure_hw = True
    else:
        reconfigure_hw = False
    task = vapp.add_vms(specs, power_on=not reconfigure_hw)
    # TODO: surface details of the exception (e.g. not enough resources
    # available).
    client.get_task_monitor().wait_for_status(task)
    if reconfigure_hw:
        vapp.reload()
        for spec in specs:
            vm_resource = vapp.get_vm(spec['target_vm_name'])
            if 'cpu' in body and body['cpu'] is not None:
                vm = VM(client, resource=vm_resource)
                task = vm.modify_cpu(body['cpu'])
                client.get_task_monitor().wait_for_status(task)
            if 'memory' in body and body['memory'] is not None:
                vm = VM(client, resource=vm_resource)
                task = vm.modify_memory(body['memory'])
                client.get_task_monitor().wait_for_status(task)
            vm = VM(client, resource=vm_resource)
            task = vm.power_on()
            client.get_task_monitor().wait_for_status(task)
    # Current guest password (from the source template), used to authenticate;
    # the chpasswd script below resets it to template['admin_password'].
    password = source_vapp.get_admin_password(source_vm)
    vapp.reload()
    for spec in specs:
        vm_resource = vapp.get_vm(spec['target_vm_name'])
        command = '/bin/echo "root:{password}" | chpasswd'.format(
            password=template['admin_password'])
        nodes = [vm_resource]
        execute_script_in_nodes(config,
                                vapp,
                                password,
                                command,
                                nodes,
                                check_tools=True,
                                wait=False)
        if node_type == TYPE_NFS:
            LOGGER.debug('Enabling NFS server on %s' % spec['target_vm_name'])
            script = get_data_file('nfsd-%s.sh' % template['name'])
            execute_script_in_nodes(config, vapp, template['admin_password'],
                                    script, nodes)
    return {'task': task, 'specs': specs}
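# --- Usage sketch (illustrative assumptions throughout) ---
# The `body` keys below are exactly the ones add_nodes reads; the values,
# the 'node' node_type, and the surrounding client/org/vdc/vapp objects
# are placeholders.
def example_add_nodes(config, client, org, vdc, vapp, template):
    body = {
        'network': 'mynetwork',   # org vDC network for the new VMs
        'storage_profile': None,  # optional; None keeps the vDC default
        'ssh_key': None,          # optional; appended to authorized_keys
        'cpu': None,              # optional; non-None forces HW reconfig
        'memory': None,           # optional; non-None forces HW reconfig
    }
    return add_nodes(qty=2, template=template, node_type='node',
                     config=config, client=client, org=org, vdc=vdc,
                     vapp=vapp, body=body)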
def create_template(ctx, config, client, org, vdc_resource, catalog,
                    no_capture, template):
    ctx.obj = {}
    ctx.obj['client'] = client
    try:
        source_ova_item = org.get_catalog_item(config['broker']['catalog'],
                                               template['source_ova_name'])
    except Exception:
        source_ova_item = upload_source_ova(config, client, org, template)
    click.secho('Found source OVA \'%s\': %s' %
                (template['source_ova_name'],
                 bool_to_msg(source_ova_item is not None)))
    if source_ova_item is None:
        return None
    item_id = source_ova_item.get('id')
    flag = False
    while True:
        q = client.get_typed_query(
            'adminCatalogItem',
            query_result_format=QueryResultFormat.ID_RECORDS,
            qfilter='id==%s' % item_id)
        records = list(q.execute())
        if records[0].get('status') == 'RESOLVED':
            if flag:
                click.secho('done', fg='blue')
            break
        else:
            if flag:
                click.secho('.', nl=False, fg='green')
            else:
                click.secho('Waiting for upload to complete...',
                            nl=False,
                            fg='green')
                flag = True
            time.sleep(5)
    vdc = VDC(client, resource=vdc_resource)
    try:
        vapp_resource = vdc.get_vapp(template['temp_vapp'])
    except Exception:
        vapp_resource = None
    if vapp_resource is None:
        click.secho('Creating vApp template \'%s\'' % template['temp_vapp'],
                    fg='green')

        init_script = get_data_file('init-%s.sh' % template['name'])
        vapp_resource = vdc.instantiate_vapp(
            template['temp_vapp'],
            catalog.get('name'),
            template['source_ova_name'],
            network=config['broker']['network'],
            fence_mode='bridged',
            ip_allocation_mode=config['broker']['ip_allocation_mode'],
            deploy=True,
            power_on=True,
            memory=template['mem'],
            cpu=template['cpu'],
            password=None,
            cust_script=init_script,
            accept_all_eulas=True,
            vm_name=template['temp_vapp'],
            hostname=template['temp_vapp'],
            storage_profile=config['broker']['storage_profile'])
        stdout(vapp_resource.Tasks.Task[0], ctx)
        vapp = VApp(client, resource=vapp_resource)
        vapp.reload()
        vs = get_vsphere(config, vapp, template['temp_vapp'])
        vs.connect()
        moid = vapp.get_vm_moid(template['temp_vapp'])
        vm = vs.get_vm_by_moid(moid)
        vs.wait_until_tools_ready(vm,
                                  sleep=5,
                                  callback=wait_for_tools_ready_callback)
        click.secho('Customizing vApp template \'%s\'' % template['temp_vapp'],
                    fg='green')
        vapp.reload()
        password_auto = vapp.get_admin_password(template['temp_vapp'])
        cust_script = get_data_file('cust-%s.sh' % template['name'])

        result = vs.execute_script_in_guest(
            vm,
            'root',
            password_auto,
            cust_script,
            target_file=None,
            wait_for_completion=True,
            wait_time=10,
            get_output=True,
            delete_script=True,
            callback=wait_for_guest_execution_callback)
        click.secho('Result: %s' % result, fg='green')
        result_stdout = result[1].content.decode()
        result_stderr = result[2].content.decode()
        click.secho('stderr:')
        if len(result_stderr) > 0:
            click.secho(result_stderr, err=True)
        click.secho('stdout:')
        if len(result_stdout) > 0:
            click.secho(result_stdout, err=False)
        if result[0] != 0:
            raise Exception('Failed customizing VM')

    if not no_capture:
        capture_as_template(ctx, config, vapp_resource, org, catalog, template)
        if template['cleanup']:
            click.secho('Deleting vApp template \'%s\' ' %
                        template['temp_vapp'],
                        fg='green')
            vdc.reload()
            task = vdc.delete_vapp(template['temp_vapp'], force=True)
            stdout(task, ctx)
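# --- Shape of the `template` dict read by create_template ---
# Every key listed is referenced in the function body; the values are
# illustrative placeholders, not defaults shipped with CSE.
EXAMPLE_TEMPLATE = {
    'name': 'photon-v2',          # selects the init-/cust-<name>.sh files
    'source_ova_name': 'photon-custom.ova',  # catalog item holding the OVA
    'temp_vapp': 'photon2-temp',  # name of the temporary vApp and its VM
    'mem': 2048,                  # memory (MB) for the temporary VM
    'cpu': 2,                     # vCPU count for the temporary VM
    'cleanup': True,              # delete the temporary vApp after capture
}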
def convert_cluster(ctx, config_file_name, skip_config_decryption,
                    cluster_name, admin_password, org_name, vdc_name,
                    skip_wait_for_gc):
    if skip_config_decryption:
        decryption_password = None
    else:
        decryption_password = os.getenv('CSE_CONFIG_PASSWORD') or prompt_text(
            PASSWORD_FOR_CONFIG_DECRYPTION_MSG, color='green', hide_input=True)

    try:
        check_python_version()
    except Exception as err:
        click.secho(str(err), fg='red')
        sys.exit(1)

    client = None
    try:
        console_message_printer = ConsoleMessagePrinter()
        config = get_validated_config(
            config_file_name,
            skip_config_decryption=skip_config_decryption,
            decryption_password=decryption_password,
            msg_update_callback=console_message_printer)

        log_filename = None
        log_wire = str_to_bool(config['service'].get('log_wire'))
        if log_wire:
            log_filename = 'cluster_convert_wire.log'

        client = Client(config['vcd']['host'],
                        api_version=config['vcd']['api_version'],
                        verify_ssl_certs=config['vcd']['verify'],
                        log_file=log_filename,
                        log_requests=log_wire,
                        log_headers=log_wire,
                        log_bodies=log_wire)
        credentials = BasicLoginCredentials(config['vcd']['username'],
                                            SYSTEM_ORG_NAME,
                                            config['vcd']['password'])
        client.set_credentials(credentials)
        msg = f"Connected to vCD as system administrator: " \
              f"{config['vcd']['host']}:{config['vcd']['port']}"
        console_message_printer.general(msg)

        cluster_records = get_all_clusters(client=client,
                                           cluster_name=cluster_name,
                                           org_name=org_name,
                                           ovdc_name=vdc_name)

        if len(cluster_records) == 0:
            console_message_printer.info(f"No clusters were found.")
            return

        vms = []
        for cluster in cluster_records:
            console_message_printer.info(
                f"Processing cluster '{cluster['name']}'.")
            vapp_href = cluster['vapp_href']
            vapp = VApp(client, href=vapp_href)

            # This step removes the old 'cse.template' metadata and adds
            # cse.template.name and cse.template.revision metadata, using
            # hard-coded values taken from GitHub history.
            console_message_printer.info("Processing metadata of cluster.")
            metadata_dict = metadata_to_dict(vapp.get_metadata())
            old_template_name = metadata_dict.get(
                ClusterMetadataKey.BACKWARD_COMPATIBILE_TEMPLATE_NAME)
            new_template_name = None
            cse_version = metadata_dict.get(ClusterMetadataKey.CSE_VERSION)
            if old_template_name:
                console_message_printer.info(
                    "Determining k8s version on cluster.")
                if 'photon' in old_template_name:
                    new_template_name = 'photon-v2'
                    if cse_version in ('1.0.0',):
                        new_template_name += '_k8s-1.8_weave-2.0.5'
                    elif cse_version in ('1.1.0', '1.2.0', '1.2.1', '1.2.2',
                                         '1.2.3', '1.2.4'):
                        new_template_name += '_k8s-1.9_weave-2.3.0'
                    elif cse_version in ('1.2.5', '1.2.6', '1.2.7'):
                        new_template_name += '_k8s-1.10_weave-2.3.0'
                    elif cse_version in ('2.0.0',):
                        new_template_name += '_k8s-1.12_weave-2.3.0'
                elif 'ubuntu' in old_template_name:
                    new_template_name = 'ubuntu-16.04'
                    if cse_version in ('1.0.0',):
                        new_template_name += '_k8s-1.9_weave-2.1.3'
                    elif cse_version in ('1.1.0', '1.2.0', '1.2.1', '1.2.2',
                                         '1.2.3', '1.2.4', '1.2.5', '1.2.6',
                                         '1.2.7'):
                        new_template_name += '_k8s-1.10_weave-2.3.0'
                    elif cse_version in ('2.0.0',):
                        new_template_name += '_k8s-1.13_weave-2.3.0'

            if new_template_name:
                console_message_printer.info("Updating metadata of cluster.")
                task = vapp.remove_metadata(
                    ClusterMetadataKey.BACKWARD_COMPATIBILE_TEMPLATE_NAME)
                client.get_task_monitor().wait_for_success(task)
                new_metadata_to_add = {
                    ClusterMetadataKey.TEMPLATE_NAME: new_template_name,
                    ClusterMetadataKey.TEMPLATE_REVISION: 0
                }
                task = vapp.set_multiple_metadata(new_metadata_to_add)
                client.get_task_monitor().wait_for_success(task)

            # This step uses hard-coded data from the newly updated
            # cse.template.name and cse.template.revision metadata fields, as
            # well as GitHub history, to add [cse.os, cse.docker.version,
            # cse.kubernetes, cse.kubernetes.version, cse.cni, cse.cni.version]
            # to the clusters.
            vapp.reload()
            metadata_dict = metadata_to_dict(vapp.get_metadata())
            template_name = metadata_dict.get(ClusterMetadataKey.TEMPLATE_NAME)
            template_revision = str(
                metadata_dict.get(ClusterMetadataKey.TEMPLATE_REVISION, '0'))

            if template_name:
                k8s_version, docker_version = get_k8s_and_docker_versions(
                    template_name,
                    template_revision=template_revision,
                    cse_version=cse_version)
                tokens = template_name.split('_')
                new_metadata = {
                    ClusterMetadataKey.OS: tokens[0],
                    ClusterMetadataKey.DOCKER_VERSION: docker_version,
                    ClusterMetadataKey.KUBERNETES: 'upstream',
                    ClusterMetadataKey.KUBERNETES_VERSION: k8s_version,
                    ClusterMetadataKey.CNI: tokens[2].split('-')[0],
                    ClusterMetadataKey.CNI_VERSION: tokens[2].split('-')[1],
                }
                task = vapp.set_multiple_metadata(new_metadata)
                client.get_task_monitor().wait_for_success(task)

            console_message_printer.general(
                "Finished processing metadata of cluster.")

            # If the admin password of any VM cannot be read, redeploy the
            # vApp with guest customization so a known password is set.
            reset_admin_pw = False
            vm_resources = vapp.get_all_vms()
            for vm_resource in vm_resources:
                try:
                    vapp.get_admin_password(vm_resource.get('name'))
                except EntityNotFoundException:
                    reset_admin_pw = True
                    break

            if reset_admin_pw:
                try:
                    console_message_printer.info(
                        f"Undeploying the vApp '{cluster['name']}'")
                    task = vapp.undeploy()
                    client.get_task_monitor().wait_for_success(task)
                    console_message_printer.general(
                        "Successfully undeployed the vApp.")
                except Exception as err:
                    console_message_printer.error(str(err))

                for vm_resource in vm_resources:
                    console_message_printer.info(
                        f"Processing vm '{vm_resource.get('name')}'.")
                    vm = VM(client, href=vm_resource.get('href'))
                    vms.append(vm)

                    console_message_printer.info("Updating vm admin password")
                    task = vm.update_guest_customization_section(
                        enabled=True,
                        admin_password_enabled=True,
                        admin_password_auto=not admin_password,
                        admin_password=admin_password,
                    )
                    client.get_task_monitor().wait_for_success(task)
                    console_message_printer.general("Successfully updated vm")

                    console_message_printer.info("Deploying vm.")
                    task = vm.power_on_and_force_recustomization()
                    client.get_task_monitor().wait_for_success(task)
                    console_message_printer.general("Successfully deployed vm")

                console_message_printer.info("Deploying cluster")
                task = vapp.deploy(power_on=True)
                client.get_task_monitor().wait_for_success(task)
                console_message_printer.general(
                    "Successfully deployed cluster")

            console_message_printer.general(
                f"Successfully processed cluster '{cluster['name']}'")

        if skip_wait_for_gc:
            return

        while True:
            to_remove = []
            for vm in vms:
                status = vm.get_guest_customization_status()
                if status != 'GC_PENDING':
                    to_remove.append(vm)
            for vm in to_remove:
                vms.remove(vm)
            console_message_printer.info(
                f"Waiting on guest customization to finish on {len(vms)} vms.")
            if not vms:
                break
            time.sleep(5)
    except cryptography.fernet.InvalidToken:
        click.secho(CONFIG_DECRYPTION_ERROR_MSG, fg='red')
    except Exception as err:
        click.secho(str(err), fg='red')
    finally:
        if client:
            client.logout()
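# --- Usage sketch (argument values are placeholders) ---
# convert_cluster reads CSE_CONFIG_PASSWORD from the environment (or prompts)
# unless skip_config_decryption is True.
def example_convert_cluster(ctx):
    convert_cluster(ctx,
                    config_file_name='config.yaml',
                    skip_config_decryption=False,
                    cluster_name='mycluster',
                    admin_password=None,  # None -> auto-generated password
                    org_name=None,        # None is assumed to mean no filter
                    vdc_name=None,
                    skip_wait_for_gc=False)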