    def deploy(self):
        vapp_name = self.params.get('vapp_name')
        response = dict()
        response['changed'] = False

        try:
            vapp_resource = self.vdc.get_vapp(vapp_name)
            vapp = VApp(self.client, name=vapp_name, resource=vapp_resource)
            deploy_vapp_task = vapp.deploy()
            self.execute_task(deploy_vapp_task)
            response['msg'] = 'Vapp {} has been deployed.'.format(vapp_name)
            response['changed'] = True
        except OperationNotSupportedException:
            response['warnings'] = 'Vapp {} is already deployed.'.format(vapp_name)

        return response
    def deploy(self):
        vapp_name = self.params.get('vapp_name')
        response = dict()
        response['changed'] = False

        try:
            vapp_resource = self.vdc.get_vapp(vapp_name)
            vapp = VApp(self.client, name=vapp_name, resource=vapp_resource)
            deploy_vapp_task = vapp.deploy()
            self.execute_task(deploy_vapp_task)
            response['msg'] = 'Vapp {} has been deployed.'.format(vapp_name)
            response['changed'] = True
        except OperationNotSupportedException:
            response['msg'] = 'Vapp {} is already deployed.'.format(vapp_name)

        return response
    def deploy(self):
        params = self.module.params
        client = self.module.client
        vdc_name = params.get('vdc')
        vapp_name = params.get('name')
        response = dict()

        vdc = self.get_vdc_object(vdc_name)
        vapp_resource = vdc.get_vapp(vapp_name)
        vapp = VApp(client, name=vapp_name, resource=vapp_resource)
        deploy_vapp_task = vapp.deploy()
        self.execute_task(deploy_vapp_task)
        response['msg'] = 'Vapp {} has been deployed.'.format(vapp_name)
        response['changed'] = True

        return response
Example #4
    def deploy(self):
        vapp_name = self.params.get('vapp_name')
        response = dict()
        response['changed'] = False

        vapp = self.get_vapp()
        if not vapp.is_deployed():
            vapp_resource = self.vdc.get_vapp(vapp_name)
            vapp = VApp(self.client, name=vapp_name, resource=vapp_resource)
            deploy_vapp_task = vapp.deploy()
            self.execute_task(deploy_vapp_task)
            response['msg'] = 'Vapp {} has been deployed.'.format(vapp_name)
            response['changed'] = True
        else:
            response['warnings'] = 'Vapp {} is already deployed.'.format(vapp_name)

        return response
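
Note: every deploy() variant above hands the pyvcloud task to an execute_task helper whose body is not shown. A minimal sketch of what such a helper could look like (the body below is an assumption; only the call sites appear in these examples):

def execute_task(client, task):
    # Block until vCD reports the task as successful; raises on failure.
    return client.get_task_monitor().wait_for_success(task=task)

In the module methods above the client would be reached through self, e.g. self.client.get_task_monitor().wait_for_success(task=deploy_vapp_task).
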
    def test_1002_deploy_vapp(self):
        logged_in_org = self.client.get_org()
        org = Org(self.client, resource=logged_in_org)
        v = org.get_vdc(self.config['vcd']['vdc'])
        vdc = VDC(self.client, href=v.get('href'))
        assert self.config['vcd']['vdc'] == vdc.get_resource().get('name')
        vapp_resource = vdc.get_vapp(self.config['vcd']['vapp'])
        assert vapp_resource.get('name') == self.config['vcd']['vapp']
        vapp = VApp(self.client, resource=vapp_resource)
        result = vapp.deploy()
        task = self.client.get_task_monitor().wait_for_status(
            task=result,
            timeout=60,
            poll_frequency=2,
            fail_on_statuses=None,
            expected_target_statuses=[
                TaskStatus.SUCCESS, TaskStatus.ABORTED, TaskStatus.ERROR,
                TaskStatus.CANCELED
            ],
            callback=None)
        assert task.get('status') == TaskStatus.SUCCESS.value
    def test_0050_attach_disk_to_vm_in_vapp(self):
        """Test the  method vapp.attach_disk_to_vm().

        Invoke the method for the second independent disk, and attach it to the
        first vm in the vApp created during setup. The vApp must be in deployed
        state before we try to attach the disk to it.

        This test passes if the disk attachment task succeeds.
        """
        vdc = Environment.get_test_vdc(TestDisk._client)
        vapp = VApp(TestDisk._client, href=TestDisk._test_vapp_href)
        vm_name = TestDisk._test_vapp_first_vm_name
        disk = vdc.get_disk(disk_id=TestDisk._idisk2_id)

        # vApp needs to be deployed for attach to succeed.
        if vapp.is_suspended():
            task = vapp.deploy()
            TestDisk._client.get_task_monitor().wait_for_success(task=task)

        task = vapp.attach_disk_to_vm(disk_href=disk.get('href'),
                                      vm_name=vm_name)
        TestDisk._client.get_task_monitor().wait_for_success(task=task)
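
The docstring and inline comment above call out that the vApp must be deployed before a disk attach can succeed. A hedged helper capturing that precondition, built only from calls that appear in these examples (the helper name is illustrative, not part of the test suite):

def ensure_vapp_deployed(client, vapp):
    # Deploy the vApp if it is currently suspended, then wait for the task.
    vapp.reload()
    if vapp.is_suspended():
        task = vapp.deploy()
        client.get_task_monitor().wait_for_success(task=task)
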
Example #8
def deploy(ctx, name, vm_names, power_on, force_customization):
    try:
        client = ctx.obj['client']
        vdc_href = ctx.obj['profiles'].get('vdc_href')
        vdc = VDC(client, href=vdc_href)
        vapp_resource = vdc.get_vapp(name)
        vapp = VApp(client, resource=vapp_resource)
        if power_on is not None:
            power_on = False
        if force_customization is not None:
            force_customization = True
        if len(vm_names) == 0:
            task = vapp.deploy(power_on=power_on)
            stdout(task, ctx)
        else:
            for vm_name in vm_names:
                vm = VM(client, href=vapp.get_vm(vm_name).get('href'))
                vm.reload()
                task = vm.deploy(power_on=power_on,
                                 force_customization=force_customization)
                stdout(task, ctx)
    except Exception as e:
        stderr(e, ctx)
Example #9
def deploy(ctx, name, vm_names, power_on, force_customization):
    try:
        restore_session(ctx, vdc_required=True)
        client = ctx.obj['client']
        vdc_href = ctx.obj['profiles'].get('vdc_href')
        vdc = VDC(client, href=vdc_href)
        vapp_resource = vdc.get_vapp(name)
        vapp = VApp(client, resource=vapp_resource)
        if power_on is not None:
            power_on = False
        if force_customization is not None:
            force_customization = True
        if len(vm_names) == 0:
            task = vapp.deploy(power_on=power_on)
            stdout(task, ctx)
        else:
            for vm_name in vm_names:
                vm = VM(client, href=vapp.get_vm(vm_name).get('href'))
                vm.reload()
                task = vm.deploy(
                    power_on=power_on, force_customization=force_customization)
                stdout(task, ctx)
    except Exception as e:
        stderr(e, ctx)
def convert_cluster(ctx, config_file_name, skip_config_decryption,
                    cluster_name, admin_password, org_name, vdc_name,
                    skip_wait_for_gc):
    if skip_config_decryption:
        decryption_password = None
    else:
        decryption_password = os.getenv('CSE_CONFIG_PASSWORD') or prompt_text(
            PASSWORD_FOR_CONFIG_DECRYPTION_MSG, color='green', hide_input=True)

    try:
        check_python_version()
    except Exception as err:
        click.secho(str(err), fg='red')
        sys.exit(1)

    client = None
    try:
        console_message_printer = ConsoleMessagePrinter()
        config = get_validated_config(
            config_file_name,
            skip_config_decryption=skip_config_decryption,
            decryption_password=decryption_password,
            msg_update_callback=console_message_printer)

        log_filename = None
        log_wire = str_to_bool(config['service'].get('log_wire'))
        if log_wire:
            log_filename = 'cluster_convert_wire.log'

        client = Client(config['vcd']['host'],
                        api_version=config['vcd']['api_version'],
                        verify_ssl_certs=config['vcd']['verify'],
                        log_file=log_filename,
                        log_requests=log_wire,
                        log_headers=log_wire,
                        log_bodies=log_wire)
        credentials = BasicLoginCredentials(config['vcd']['username'],
                                            SYSTEM_ORG_NAME,
                                            config['vcd']['password'])
        client.set_credentials(credentials)
        msg = f"Connected to vCD as system administrator: " \
              f"{config['vcd']['host']}:{config['vcd']['port']}"
        console_message_printer.general(msg)

        cluster_records = get_all_clusters(client=client,
                                           cluster_name=cluster_name,
                                           org_name=org_name,
                                           ovdc_name=vdc_name)

        if len(cluster_records) == 0:
            console_message_printer.info(f"No clusters were found.")
            return

        vms = []
        for cluster in cluster_records:
            console_message_printer.info(
                f"Processing cluster '{cluster['name']}'.")
            vapp_href = cluster['vapp_href']
            vapp = VApp(client, href=vapp_href)

            # this step removes the old 'cse.template' metadata and adds
            # cse.template.name and cse.template.revision metadata
            # using hard-coded values taken from github history
            console_message_printer.info("Processing metadata of cluster.")
            metadata_dict = metadata_to_dict(vapp.get_metadata())
            old_template_name = metadata_dict.get(
                ClusterMetadataKey.BACKWARD_COMPATIBILE_TEMPLATE_NAME
            )  # noqa: E501
            new_template_name = None
            cse_version = metadata_dict.get(ClusterMetadataKey.CSE_VERSION)
            if old_template_name:
                console_message_printer.info(
                    "Determining k8s version on cluster.")
                if 'photon' in old_template_name:
                    new_template_name = 'photon-v2'
                    if cse_version in ('1.0.0',):
                        new_template_name += '_k8s-1.8_weave-2.0.5'
                    elif cse_version in ('1.1.0', '1.2.0', '1.2.1', '1.2.2',
                                         '1.2.3', '1.2.4'):  # noqa: E501
                        new_template_name += '_k8s-1.9_weave-2.3.0'
                    elif cse_version in ('1.2.5', '1.2.6', '1.2.7'):
                        new_template_name += '_k8s-1.10_weave-2.3.0'
                    elif cse_version in ('2.0.0',):
                        new_template_name += '_k8s-1.12_weave-2.3.0'
                elif 'ubuntu' in old_template_name:
                    new_template_name = 'ubuntu-16.04'
                    if cse_version in ('1.0.0',):
                        new_template_name += '_k8s-1.9_weave-2.1.3'
                    elif cse_version in ('1.1.0', '1.2.0', '1.2.1', '1.2.2',
                                         '1.2.3', '1.2.4', '1.2.5', '1.2.6',
                                         '1.2.7'):  # noqa: E501
                        new_template_name += '_k8s-1.10_weave-2.3.0'
                    elif cse_version in ('2.0.0',):
                        new_template_name += '_k8s-1.13_weave-2.3.0'

            if new_template_name:
                console_message_printer.info("Updating metadata of cluster.")
                task = vapp.remove_metadata(
                    ClusterMetadataKey.BACKWARD_COMPATIBILE_TEMPLATE_NAME
                )  # noqa: E501
                client.get_task_monitor().wait_for_success(task)
                new_metadata_to_add = {
                    ClusterMetadataKey.TEMPLATE_NAME: new_template_name,
                    ClusterMetadataKey.TEMPLATE_REVISION: 0
                }
                task = vapp.set_multiple_metadata(new_metadata_to_add)
                client.get_task_monitor().wait_for_success(task)

            # this step uses hard-coded data from the newly updated
            # cse.template.name and cse.template.revision metadata fields as
            # well as github history to add [cse.os, cse.docker.version,
            # cse.kubernetes, cse.kubernetes.version, cse.cni, cse.cni.version]
            # to the clusters
            vapp.reload()
            metadata_dict = metadata_to_dict(vapp.get_metadata())
            template_name = metadata_dict.get(ClusterMetadataKey.TEMPLATE_NAME)
            template_revision = str(
                metadata_dict.get(ClusterMetadataKey.TEMPLATE_REVISION,
                                  '0'))  # noqa: E501

            if template_name:
                k8s_version, docker_version = get_k8s_and_docker_versions(
                    template_name,
                    template_revision=template_revision,
                    cse_version=cse_version)  # noqa: E501
                tokens = template_name.split('_')
                new_metadata = {
                    ClusterMetadataKey.OS: tokens[0],
                    ClusterMetadataKey.DOCKER_VERSION: docker_version,
                    ClusterMetadataKey.KUBERNETES: 'upstream',
                    ClusterMetadataKey.KUBERNETES_VERSION: k8s_version,
                    ClusterMetadataKey.CNI: tokens[2].split('-')[0],
                    ClusterMetadataKey.CNI_VERSION: tokens[2].split('-')[1],
                }
                task = vapp.set_multiple_metadata(new_metadata)
                client.get_task_monitor().wait_for_success(task)

            console_message_printer.general(
                "Finished processing metadata of cluster.")

            reset_admin_pw = False
            vm_resources = vapp.get_all_vms()
            for vm_resource in vm_resources:
                try:
                    vapp.get_admin_password(vm_resource.get('name'))
                except EntityNotFoundException:
                    reset_admin_pw = True
                    break

            if reset_admin_pw:
                try:
                    console_message_printer.info(
                        f"Undeploying the vApp '{cluster['name']}'")
                    task = vapp.undeploy()
                    client.get_task_monitor().wait_for_success(task)
                    console_message_printer.general(
                        "Successfully undeployed the vApp.")
                except Exception as err:
                    console_message_printer.error(str(err))

                for vm_resource in vm_resources:
                    console_message_printer.info(
                        f"Processing vm '{vm_resource.get('name')}'.")
                    vm = VM(client, href=vm_resource.get('href'))
                    vms.append(vm)

                    console_message_printer.info("Updating vm admin password")
                    task = vm.update_guest_customization_section(
                        enabled=True,
                        admin_password_enabled=True,
                        admin_password_auto=not admin_password,
                        admin_password=admin_password,
                    )
                    client.get_task_monitor().wait_for_success(task)
                    console_message_printer.general("Successfully updated vm")

                    console_message_printer.info("Deploying vm.")
                    task = vm.power_on_and_force_recustomization()
                    client.get_task_monitor().wait_for_success(task)
                    console_message_printer.general("Successfully deployed vm")

                console_message_printer.info("Deploying cluster")
                task = vapp.deploy(power_on=True)
                client.get_task_monitor().wait_for_success(task)
                console_message_printer.general(
                    "Successfully deployed cluster")  # noqa: E501

            console_message_printer.general(
                f"Successfully processed cluster '{cluster['name']}'")

        if skip_wait_for_gc:
            return

        while True:
            to_remove = []
            for vm in vms:
                status = vm.get_guest_customization_status()
                if status != 'GC_PENDING':
                    to_remove.append(vm)
            for vm in to_remove:
                vms.remove(vm)
            console_message_printer.info(
                f"Waiting on guest customization to finish on {len(vms)} vms.")
            if not vms:
                break
            time.sleep(5)
    except cryptography.fernet.InvalidToken:
        click.secho(CONFIG_DECRYPTION_ERROR_MSG, fg='red')
    except Exception as err:
        click.secho(str(err), fg='red')
    finally:
        if client:
            client.logout()
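
The comment blocks inside convert_cluster describe the metadata conversion in prose: drop the legacy 'cse.template' key, write cse.template.name / cse.template.revision, and later derive the remaining cse.* keys. A compact, hedged sketch of that remove-then-set pattern on its own, using only vApp calls that appear above (the helper name is illustrative):

def replace_cluster_metadata(client, vapp, old_key, new_entries):
    # Remove a legacy metadata key, write the replacement entries, and wait
    # for each vCD task before continuing.
    task = vapp.remove_metadata(old_key)
    client.get_task_monitor().wait_for_success(task)
    task = vapp.set_multiple_metadata(new_entries)
    client.get_task_monitor().wait_for_success(task)
    vapp.reload()
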
Example #11
def convert_cluster(ctx, config_file_name, cluster_name, password, org_name,
                    vdc_name, skip_wait_for_gc):
    try:
        check_python_version()
    except Exception as err:
        click.secho(str(err), fg='red')
        sys.exit(1)

    client = None
    try:
        console_message_printer = ConsoleMessagePrinter()
        config = get_validated_config(
            config_file_name, msg_update_callback=console_message_printer)

        log_filename = None
        log_wire = str_to_bool(config['service'].get('log_wire'))
        if log_wire:
            log_filename = 'cluster_convert_wire.log'

        client = Client(config['vcd']['host'],
                        api_version=config['vcd']['api_version'],
                        verify_ssl_certs=config['vcd']['verify'],
                        log_file=log_filename,
                        log_requests=log_wire,
                        log_headers=log_wire,
                        log_bodies=log_wire)
        credentials = BasicLoginCredentials(config['vcd']['username'],
                                            SYSTEM_ORG_NAME,
                                            config['vcd']['password'])
        client.set_credentials(credentials)
        msg = f"Connected to vCD as system administrator: " \
              f"{config['vcd']['host']}:{config['vcd']['port']}"
        console_message_printer.general(msg)

        cluster_records = get_all_clusters(client=client,
                                           cluster_name=cluster_name,
                                           org_name=org_name,
                                           ovdc_name=vdc_name)

        if len(cluster_records) == 0:
            console_message_printer.info(f"No clusters were found.")
            return

        vms = []
        for cluster in cluster_records:
            console_message_printer.info(
                f"Processing cluster '{cluster['name']}'.")
            vapp_href = cluster['vapp_href']
            vapp = VApp(client, href=vapp_href)

            console_message_printer.info("Processing metadata of cluster.")
            metadata = metadata_to_dict(vapp.get_metadata())
            old_template_name = None
            new_template_name = None
            if ClusterMetadataKey.BACKWARD_COMPATIBILE_TEMPLATE_NAME in metadata: # noqa: E501
                old_template_name = metadata.pop(ClusterMetadataKey.BACKWARD_COMPATIBILE_TEMPLATE_NAME) # noqa: E501
            version = metadata.get(ClusterMetadataKey.CSE_VERSION)
            if old_template_name:
                console_message_printer.info(
                    "Determining k8s version on cluster.")
                if 'photon' in old_template_name:
                    new_template_name = 'photon-v2'
                    if '1.0.0' in version:
                        new_template_name += '_k8s-1.8_weave-2.0.5'
                    elif any(ver in version for ver in ('1.1.0', '1.2.0', '1.2.1', '1.2.2', '1.2.3', '1.2.4',)): # noqa: E501
                        new_template_name += '_k8s-1.9_weave-2.3.0'
                    elif any(ver in version for ver in ('1.2.5', '1.2.6', '1.2.7',)): # noqa: E501
                        new_template_name += '_k8s-1.10_weave-2.3.0'
                    elif '2.0.0' in version:
                        new_template_name += '_k8s-1.12_weave-2.3.0'
                elif 'ubuntu' in old_template_name:
                    new_template_name = 'ubuntu-16.04'
                    if '1.0.0' in version:
                        new_template_name += '_k8s-1.9_weave-2.1.3'
                    elif any(ver in version for ver in ('1.1.0', '1.2.0', '1.2.1', '1.2.2', '1.2.3', '1.2.4', '1.2.5', '1.2.6', '1.2.7')): # noqa: E501
                        new_template_name += '_k8s-1.10_weave-2.3.0'
                    elif '2.0.0' in version:
                        new_template_name += '_k8s-1.13_weave-2.3.0'

            if new_template_name:
                console_message_printer.info("Updating metadata of cluster.")
                task = vapp.remove_metadata(ClusterMetadataKey.BACKWARD_COMPATIBILE_TEMPLATE_NAME) # noqa: E501
                client.get_task_monitor().wait_for_success(task)
                new_metadata_to_add = {
                    ClusterMetadataKey.TEMPLATE_NAME: new_template_name,
                    ClusterMetadataKey.TEMPLATE_REVISION: 0
                }
                task = vapp.set_multiple_metadata(new_metadata_to_add)
                client.get_task_monitor().wait_for_success(task)
            console_message_printer.general(
                "Finished processing metadata of cluster.")

            try:
                console_message_printer.info(
                    f"Undeploying the vApp '{cluster['name']}'")
                task = vapp.undeploy()
                client.get_task_monitor().wait_for_success(task)
                console_message_printer.general(
                    "Successfully undeployed the vApp.")
            except Exception as err:
                console_message_printer.error(str(err))

            vm_resources = vapp.get_all_vms()
            for vm_resource in vm_resources:
                console_message_printer.info(
                    f"Processing vm '{vm_resource.get('name')}'.")
                vm = VM(client, href=vm_resource.get('href'))
                vms.append(vm)

                console_message_printer.info("Updating vm admin password.")
                task = vm.update_guest_customization_section(
                    enabled=True,
                    admin_password_enabled=True,
                    admin_password_auto=not password,
                    admin_password=password,
                )
                client.get_task_monitor().wait_for_success(task)
                console_message_printer.general("Successfully updated vm .")

                console_message_printer.info("Deploying vm.")
                task = vm.power_on_and_force_recustomization()
                client.get_task_monitor().wait_for_success(task)
                console_message_printer.general("Successfully deployed vm.")

            console_message_printer.info("Deploying cluster")
            task = vapp.deploy(power_on=True)
            client.get_task_monitor().wait_for_success(task)
            console_message_printer.general("Successfully deployed cluster.")
            console_message_printer.general(
                f"Successfully processed cluster '{cluster['name']}'.")

        if skip_wait_for_gc:
            return

        while True:
            # Iterate over a copy: removing items from a list while iterating
            # over it skips elements.
            for vm in list(vms):
                status = vm.get_guest_customization_status()
                if status != 'GC_PENDING':
                    vms.remove(vm)
            console_message_printer.info(
                f"Waiting on guest customization to finish on {len(vms)} vms.")
            if not vms:
                break
            time.sleep(5)

    except Exception as err:
        click.secho(str(err), fg='red')
    finally:
        if client:
            client.logout()
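
Both convert_cluster variants end with a polling loop that waits for guest customization to finish on the reconfigured VMs. A standalone, hedged sketch of that loop factored into a helper; the timeout parameter is an illustrative addition, not present in the original code:

import time

def wait_for_guest_customization(vms, poll_interval=5, timeout=None):
    # Poll until no VM reports the 'GC_PENDING' guest customization status.
    start = time.time()
    pending = list(vms)
    while pending:
        pending = [vm for vm in pending
                   if vm.get_guest_customization_status() == 'GC_PENDING']
        if not pending:
            break
        if timeout is not None and time.time() - start > timeout:
            raise TimeoutError('Guest customization did not finish in time.')
        time.sleep(poll_interval)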