Example no. 1
    def get_vm(self):
        vapp_vm_resource = self.vapp.get_vm(self.params.get('vm_name'))
        return VM(self.client, resource=vapp_vm_resource)
Example no. 2
    def get_vm(self):
        vapp = VApp(self.client,
                    resource=self.vdc.get_vapp(self.params.get('target_vapp')))
        vapp_vm_resource = vapp.get_vm(self.params.get('target_vm_name'))

        return VM(self.client, resource=vapp_vm_resource)
Example no. 3
    def test_0380_get_metadata(self):
        # retrieve metadata
        vm = VM(TestVM._sys_admin_client, href=TestVM._test_vapp_first_vm_href)
        entries = metadata_to_dict(vm.get_metadata())
        self.assertTrue(len(entries) > 0)
Example no. 4
    def test_0420_list_mks_ticket(self):
        vm = VM(TestVM._sys_admin_client,
                href=TestVM._test_vapp_vmtools_vm_href)
        ticket_dict = vm.list_mks_ticket()
        self.assertTrue(len(ticket_dict) > 0)
Example no. 5
    def test_0310_get_post_gc_status(self):
        vm = VM(TestVM._sys_admin_client,
                href=TestVM._test_vapp_vmtools_vm_href)
        status_dict = vm.list_check_post_gc_status()
        self.assertTrue(len(status_dict) > 0)
Example no. 6
    def test_0340_list_boot_options(self):
        vm = VM(TestVM._sys_admin_client,
                href=TestVM._test_vapp_vmtools_vm_href)
        options_dict = vm.list_boot_options()
        self.assertTrue(len(options_dict) > 0)
Example no. 7
    def _get_vm_resource(self, vm):
        try:
            return VM(self.client, resource=self.vapp_resource.get_vm(vm))
        except Exception as e:
            raise AnsibleError(f"Failed to get vm resource, MSG: {e}")
Example no. 8
    def test_0270_list_os_info(self):
        vm = VM(TestVM._sys_admin_client, href=TestVM._test_vapp_first_vm_href)
        os_info_dict = vm.list_os_section()
        self.assertTrue(len(os_info_dict) > 0)
Example no. 9
    def remove_compute_policy_from_vdc_sync(self,
                                            vdc,
                                            compute_policy_href,
                                            force=False,
                                            is_placement_policy=False,
                                            task_resource=None):
        """Remove compute policy from vdc.

        This method makes use of an umbrella task which can be used for
        tracking progress. If the umbrella task is not specified, it is
        created.

        :param pyvcloud.vcd.vdc.VDC vdc: VDC object
        :param str compute_policy_href: href of the compute policy to remove
        :param bool force: Force remove compute policy from vms in the VDC
            as well
        :param bool is_placement_policy: True if the compute policy being
            removed is a placement policy
        :param lxml.objectify.Element task_resource: Task resource for
            the umbrella task
        """
        user_name = self._session.get('user')

        task = Task(self._sysadmin_client)
        task_href = None
        is_umbrella_task = task_resource is not None
        # Create a task if not umbrella task
        if not is_umbrella_task:
            # TODO: the org fetched below is the 'System' org; the task
            # created should instead be associated with the org that owns
            # the vdc object.
            org = vcd_utils.get_org(self._sysadmin_client)
            org.reload()
            user_href = org.get_user(user_name).get('href')
            org_href = org.href
            task_resource = task.update(
                status=vcd_client.TaskStatus.RUNNING.value,
                namespace='vcloud.cse',
                operation=
                f"Removing compute policy (href: {compute_policy_href})"  # noqa: E501
                f" from org VDC (vdc id: {vdc.name})",
                operation_name='Remove org VDC compute policy',
                details='',
                progress=None,
                owner_href=vdc.href,
                owner_name=vdc.name,
                owner_type=vcd_client.EntityType.VDC.value,
                user_href=user_href,
                user_name=user_name,
                org_href=org.href)
        else:
            user_href = task_resource.User.get('href')
            org_href = task_resource.Organization.get('href')

        task_href = task_resource.get('href')

        try:
            # remove the compute policy from VMs if force is True
            if force:
                compute_policy_id = retrieve_compute_policy_id_from_href(
                    compute_policy_href)  # noqa: E501
                vdc_id = vcd_utils.extract_id(vdc.get_resource().get('id'))
                vapps = vcd_utils.get_all_vapps_in_ovdc(
                    client=self._sysadmin_client, ovdc_id=vdc_id)
                target_vms = []
                system_default_href = None
                operation_msg = None
                for cp_dict in self.list_compute_policies_on_vdc(vdc_id):
                    if cp_dict['name'] == _SYSTEM_DEFAULT_COMPUTE_POLICY:
                        system_default_href = cp_dict['href']
                        break
                if is_placement_policy:
                    for vapp in vapps:
                        target_vms += \
                            [vm for vm in vapp.get_all_vms()
                                if self._get_vm_placement_policy_id(vm) == compute_policy_id] # noqa: E501
                    vm_names = [vm.get('name') for vm in target_vms]
                    operation_msg = f"Removing placement policy from " \
                                    f"{len(vm_names)} VMs. " \
                                    f"Affected VMs: {vm_names}"
                else:
                    for vapp in vapps:
                        target_vms += \
                            [vm for vm in vapp.get_all_vms()
                                if self._get_vm_sizing_policy_id(vm) == compute_policy_id] # noqa: E501
                    vm_names = [vm.get('name') for vm in target_vms]
                    operation_msg = "Setting sizing policy to " \
                                    f"'{_SYSTEM_DEFAULT_COMPUTE_POLICY}' on " \
                                    f"{len(vm_names)} VMs. " \
                                    f"Affected VMs: {vm_names}"

                task.update(status=vcd_client.TaskStatus.RUNNING.value,
                            namespace='vcloud.cse',
                            operation=operation_msg,
                            operation_name='Remove org VDC compute policy',
                            details='',
                            progress=None,
                            owner_href=vdc.href,
                            owner_name=vdc.name,
                            owner_type=vcd_client.EntityType.VDC.value,
                            user_href=user_href,
                            user_name=user_name,
                            task_href=task_href,
                            org_href=org_href)

                task_monitor = self._sysadmin_client.get_task_monitor()
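                # For each affected VM: a placement policy may first require
                # assigning the system default sizing policy (when the VM has
                # no sizing policy) before it can be removed; a sizing policy
                # is simply reset to the system default.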
                for vm_resource in target_vms:
                    vm = VM(self._sysadmin_client,
                            href=vm_resource.get('href'))
                    _task = None
                    operation_msg = None
                    if is_placement_policy:
                        if hasattr(vm_resource, 'ComputePolicy') and \
                                not hasattr(vm_resource.ComputePolicy, 'VmSizingPolicy'):  # noqa: E501
                            # Updating sizing policy for the VM
                            _task = vm.update_compute_policy(
                                compute_policy_href=system_default_href)
                            operation_msg = \
                                "Setting compute policy to " \
                                f"'{_SYSTEM_DEFAULT_COMPUTE_POLICY}' "\
                                f"on VM '{vm_resource.get('name')}'"
                            task.update(
                                status=vcd_client.TaskStatus.RUNNING.value,
                                namespace='vcloud.cse',
                                operation=operation_msg,
                                operation_name=
                                f'Setting sizing policy to {_SYSTEM_DEFAULT_COMPUTE_POLICY}',  # noqa: E501
                                details='',
                                progress=None,
                                owner_href=vdc.href,
                                owner_name=vdc.name,
                                owner_type=vcd_client.EntityType.VDC.value,
                                user_href=user_href,
                                user_name=user_name,
                                task_href=task_href,
                                org_href=org_href)
                            task_monitor.wait_for_success(_task)
                        _task = vm.remove_placement_policy()
                        operation_msg = "Removing placement policy on VM " \
                                        f"'{vm_resource.get('name')}'"
                        task.update(
                            status=vcd_client.TaskStatus.RUNNING.value,
                            namespace='vcloud.cse',
                            operation=operation_msg,
                            operation_name='Remove org VDC compute policy',
                            details='',
                            progress=None,
                            owner_href=vdc.href,
                            owner_name=vdc.name,
                            owner_type=vcd_client.EntityType.VDC.value,
                            user_href=user_href,
                            user_name=user_name,
                            task_href=task_href,
                            org_href=org_href)
                        task_monitor.wait_for_success(_task)
                    else:
                        _task = vm.update_compute_policy(
                            compute_policy_href=system_default_href)
                        operation_msg = "Setting sizing policy to " \
                                        f"'{_SYSTEM_DEFAULT_COMPUTE_POLICY}' "\
                                        f"on VM '{vm_resource.get('name')}'"
                        task.update(
                            status=vcd_client.TaskStatus.RUNNING.value,
                            namespace='vcloud.cse',
                            operation=operation_msg,
                            operation_name='Remove org VDC compute policy',
                            details='',
                            progress=None,
                            owner_href=vdc.href,
                            owner_name=vdc.name,
                            owner_type=vcd_client.EntityType.VDC.value,
                            user_href=user_href,
                            user_name=user_name,
                            task_href=task_href,
                            org_href=org_href)
                        task_monitor.wait_for_success(_task)

            final_status = vcd_client.TaskStatus.RUNNING.value \
                if is_umbrella_task else vcd_client.TaskStatus.SUCCESS.value
            task.update(status=final_status,
                        namespace='vcloud.cse',
                        operation=f"Removing compute policy (href:"
                        f"{compute_policy_href}) from org VDC '{vdc.name}'",
                        operation_name='Remove org VDC compute policy',
                        details='',
                        progress=None,
                        owner_href=vdc.href,
                        owner_name=vdc.name,
                        owner_type=vcd_client.EntityType.VDC.value,
                        user_href=user_href,
                        user_name=user_name,
                        task_href=task_href,
                        org_href=org_href)

            vdc.remove_compute_policy(compute_policy_href)
        except Exception as err:
            logger.SERVER_LOGGER.error(err, exc_info=True)
            # Set task to error if not an umbrella task
            if not is_umbrella_task:
                msg = 'Failed to remove compute policy: ' \
                      f'{compute_policy_href} from the OVDC: {vdc.name}'
                task.update(status=vcd_client.TaskStatus.ERROR.value,
                            namespace='vcloud.cse',
                            operation=msg,
                            operation_name='Remove org VDC compute policy',
                            details='',
                            progress=None,
                            owner_href=vdc.href,
                            owner_name=vdc.name,
                            owner_type=vcd_client.EntityType.VDC.value,
                            user_href=user_href,
                            user_name=self._session.get('user'),
                            task_href=task_href,
                            org_href=org_href,
                            error_message=f"{err}",
                            stack_trace='')
            raise err
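A minimal usage sketch for the method above, assuming `cpm` is an instance of
the class defining it, `vdc` is a pyvcloud VDC object, and `policy_href` and
`umbrella_task` are a compute policy href and a pre-created task resource; all
of these names are hypothetical:

# Pass an existing umbrella task so every sub-operation reports progress
# against it; omit task_resource and the method creates its own task.
cpm.remove_compute_policy_from_vdc_sync(
    vdc=vdc,
    compute_policy_href=policy_href,
    force=True,  # also remove/replace the policy on VMs in the VDC
    is_placement_policy=False,
    task_resource=umbrella_task)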
Example no. 10
def add_nodes(qty, template, node_type, config, client, org, vdc, vapp, body):
    specs = []
    try:
        if qty < 1:
            return None
        catalog_item = org.get_catalog_item(config['broker']['catalog'],
                                            template['catalog_item'])
        source_vapp = VApp(client, href=catalog_item.Entity.get('href'))
        source_vm = source_vapp.get_all_vms()[0].get('name')
        storage_profile = None
        if 'storage_profile' in body and body['storage_profile'] is not None:
            storage_profile = vdc.get_storage_profile(body['storage_profile'])
        cust_script_init = \
    """#!/usr/bin/env bash
    if [ x$1=x"postcustomization" ];
    then
    """ # NOQA
        cust_script_common = ''
        cust_script_end = \
    """
    fi
    """  # NOQA
        if 'ssh_key' in body and body['ssh_key'] is not None:
            cust_script_common += \
    """
    mkdir -p /root/.ssh
    echo '{ssh_key}' >> /root/.ssh/authorized_keys
    chmod -R go-rwx /root/.ssh
    """.format(ssh_key=body['ssh_key'])  # NOQA

        if cust_script_common == '':
            cust_script = None
        else:
            cust_script = cust_script_init + cust_script_common + \
                cust_script_end
        for n in range(qty):
            name = None
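            # Probe for an unused name: vapp.get_vm() raising here means the
            # candidate name is free in the vApp.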
            while True:
                name = '%s-%s' % (node_type, ''.join(
                    random.choices(string.ascii_lowercase + string.digits,
                                   k=4)))
                try:
                    vapp.get_vm(name)
                except Exception:
                    break
            spec = {
                'source_vm_name': source_vm,
                'vapp': source_vapp.resource,
                'target_vm_name': name,
                'hostname': name,
                'network': body['network'],
                'ip_allocation_mode': 'pool'
            }
            if cust_script is not None:
                spec['cust_script'] = cust_script
            if storage_profile is not None:
                spec['storage_profile'] = storage_profile
            specs.append(spec)
        if ('cpu' in body and body['cpu'] is not None) or \
           ('memory' in body and body['memory'] is not None):
            reconfigure_hw = True
        else:
            reconfigure_hw = False
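        # Power on immediately only when no CPU/memory change was requested;
        # otherwise the VMs are resized below before their first power-on.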
        task = vapp.add_vms(specs, power_on=not reconfigure_hw)
        # TODO: get details of the exception, e.g. not enough resources
        # available.
        client.get_task_monitor().wait_for_status(task)
        if reconfigure_hw:
            vapp.reload()
            for spec in specs:
                vm_resource = vapp.get_vm(spec['target_vm_name'])
                if 'cpu' in body and body['cpu'] is not None:
                    vm = VM(client, resource=vm_resource)
                    task = vm.modify_cpu(body['cpu'])
                    client.get_task_monitor().wait_for_status(task)
                if 'memory' in body and body['memory'] is not None:
                    vm = VM(client, resource=vm_resource)
                    task = vm.modify_memory(body['memory'])
                    client.get_task_monitor().wait_for_status(task)
                vm = VM(client, resource=vm_resource)
                task = vm.power_on()
                client.get_task_monitor().wait_for_status(task)
        password = source_vapp.get_admin_password(source_vm)
        vapp.reload()
        for spec in specs:
            vm_resource = vapp.get_vm(spec['target_vm_name'])
            command = '/bin/echo "root:{password}" | chpasswd'.format(
                password=template['admin_password'])
            nodes = [vm_resource]
            execute_script_in_nodes(config,
                                    vapp,
                                    password,
                                    command,
                                    nodes,
                                    check_tools=True,
                                    wait=False)
            if node_type == TYPE_NFS:
                LOGGER.debug('Enabling NFS server on %s' %
                             spec['target_vm_name'])
                script = get_data_file('nfsd-%s.sh' % template['name'])
                exec_results = execute_script_in_nodes(
                    config, vapp, template['admin_password'], script, nodes)
                errors = get_script_execution_errors(exec_results)
                if errors:
                    raise ScriptExecutionError(
                        f"Script execution failed on node "
                        f"{spec['target_vm_name']}:{errors}")
    except Exception as e:
        node_list = [entry.get('target_vm_name') for entry in specs]
        raise NodeCreationError(node_list, str(e))
    return {'task': task, 'specs': specs}
Example no. 11
def add_nodes(client,
              num_nodes,
              node_type,
              org,
              vdc,
              vapp,
              catalog_name,
              template,
              network_name,
              num_cpu=None,
              memory_in_mb=None,
              storage_profile=None,
              ssh_key_filepath=None):
    specs = []
    try:
        if num_nodes < 1:
            return None
        catalog_item = org.get_catalog_item(
            catalog_name, template[LocalTemplateKey.CATALOG_ITEM_NAME])
        source_vapp = VApp(client, href=catalog_item.Entity.get('href'))
        source_vm = source_vapp.get_all_vms()[0].get('name')
        if storage_profile is not None:
            storage_profile = vdc.get_storage_profile(storage_profile)

        cust_script = None
        if ssh_key_filepath is not None:
            cust_script = \
                "#!/usr/bin/env bash\n" \
                "if [ x$1=x\"postcustomization\" ];\n" \
                "then\n" \
                "mkdir -p /root/.ssh\n" \
                f"echo '{ssh_key_filepath}' >> /root/.ssh/authorized_keys\n" \
                "chmod -R go-rwx /root/.ssh\n" \
                "fi"

        for n in range(num_nodes):
            name = None
            while True:
                name = f"{node_type}-{''.join(random.choices(string.ascii_lowercase + string.digits, k=4))}"  # noqa: E501
                try:
                    vapp.get_vm(name)
                except Exception:
                    break
            spec = {
                'source_vm_name': source_vm,
                'vapp': source_vapp.resource,
                'target_vm_name': name,
                'hostname': name,
                'password_auto': True,
                'network': network_name,
                'ip_allocation_mode': 'pool'
            }
            if cust_script is not None:
                spec['cust_script'] = cust_script
            if storage_profile is not None:
                spec['storage_profile'] = storage_profile
            specs.append(spec)

        task = vapp.add_vms(specs, power_on=False)
        client.get_task_monitor().wait_for_status(task)
        vapp.reload()

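        # The VMs were added powered off (power_on=False above) so that CPU
        # and memory can be set before first boot; fall back to the
        # template's values when no explicit sizing was given.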
        if not num_cpu:
            num_cpu = template[LocalTemplateKey.CPU]
        if not memory_in_mb:
            memory_in_mb = template[LocalTemplateKey.MEMORY]
        for spec in specs:
            vm_name = spec['target_vm_name']
            vm_resource = vapp.get_vm(vm_name)
            vm = VM(client, resource=vm_resource)

            task = vm.modify_cpu(num_cpu)
            client.get_task_monitor().wait_for_status(task)

            task = vm.modify_memory(memory_in_mb)
            client.get_task_monitor().wait_for_status(task)

            task = vm.power_on()
            client.get_task_monitor().wait_for_status(task)
            vapp.reload()

            if node_type == NodeType.NFS:
                LOGGER.debug(f"Enabling NFS server on {vm_name}")
                script_filepath = get_local_script_filepath(
                    template[LocalTemplateKey.NAME],
                    template[LocalTemplateKey.REVISION], ScriptFile.NFSD)
                script = utils.read_data_file(script_filepath, logger=LOGGER)
                exec_results = execute_script_in_nodes(vapp=vapp,
                                                       node_names=[vm_name],
                                                       script=script)
                errors = _get_script_execution_errors(exec_results)
                if errors:
                    raise ScriptExecutionError(
                        f"Script execution failed on node {vm_name}:{errors}")
    except Exception as e:
        # TODO: get details of the exception to determine cause of failure,
        # e.g. not enough resources available.
        node_list = [entry.get('target_vm_name') for entry in specs]
        raise NodeCreationError(node_list, str(e))

    vapp.reload()
    return {'task': task, 'specs': specs}
Example no. 12
def convert_cluster(ctx, config_file_name, cluster_name, password, org_name,
                    vdc_name, skip_wait_for_gc):
    try:
        check_python_version()
    except Exception as err:
        click.secho(str(err), fg='red')
        sys.exit(1)

    client = None
    try:
        console_message_printer = ConsoleMessagePrinter()
        config = get_validated_config(
            config_file_name, msg_update_callback=console_message_printer)

        log_filename = None
        log_wire = str_to_bool(config['service'].get('log_wire'))
        if log_wire:
            log_filename = 'cluster_convert_wire.log'

        client = Client(config['vcd']['host'],
                        api_version=config['vcd']['api_version'],
                        verify_ssl_certs=config['vcd']['verify'],
                        log_file=log_filename,
                        log_requests=log_wire,
                        log_headers=log_wire,
                        log_bodies=log_wire)
        credentials = BasicLoginCredentials(config['vcd']['username'],
                                            SYSTEM_ORG_NAME,
                                            config['vcd']['password'])
        client.set_credentials(credentials)
        msg = f"Connected to vCD as system administrator: " \
              f"{config['vcd']['host']}:{config['vcd']['port']}"
        console_message_printer.general(msg)

        cluster_records = get_all_clusters(client=client,
                                           cluster_name=cluster_name,
                                           org_name=org_name,
                                           ovdc_name=vdc_name)

        if len(cluster_records) == 0:
            console_message_printer.info(f"No clusters were found.")
            return

        vms = []
        for cluster in cluster_records:
            console_message_printer.info(
                f"Processing cluster '{cluster['name']}'.")
            vapp_href = cluster['vapp_href']
            vapp = VApp(client, href=vapp_href)

            console_message_printer.info("Processing metadata of cluster.")
            metadata = metadata_to_dict(vapp.get_metadata())
            old_template_name = None
            new_template_name = None
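            # Map the legacy template name plus the CSE version recorded in
            # the metadata onto the newer '<os>_k8s-<ver>_weave-<ver>'
            # template naming scheme.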
            if ClusterMetadataKey.BACKWARD_COMPATIBILE_TEMPLATE_NAME in metadata: # noqa: E501
                old_template_name = metadata.pop(ClusterMetadataKey.BACKWARD_COMPATIBILE_TEMPLATE_NAME) # noqa: E501
            version = metadata.get(ClusterMetadataKey.CSE_VERSION)
            if old_template_name:
                console_message_printer.info(
                    "Determining k8s version on cluster.")
                if 'photon' in old_template_name:
                    new_template_name = 'photon-v2'
                    if '1.0.0' in version:
                        new_template_name += '_k8s-1.8_weave-2.0.5'
                    elif any(ver in version for ver in ('1.1.0', '1.2.0', '1.2.1', '1.2.2', '1.2.3', '1.2.4',)): # noqa: E501
                        new_template_name += '_k8s-1.9_weave-2.3.0'
                    elif any(ver in version for ver in ('1.2.5', '1.2.6', '1.2.7',)): # noqa: E501
                        new_template_name += '_k8s-1.10_weave-2.3.0'
                    elif '2.0.0' in version:
                        new_template_name += '_k8s-1.12_weave-2.3.0'
                elif 'ubuntu' in old_template_name:
                    new_template_name = 'ubuntu-16.04'
                    if '1.0.0' in version:
                        new_template_name += '_k8s-1.9_weave-2.1.3'
                    elif any(ver in version for ver in ('1.1.0', '1.2.0', '1.2.1', '1.2.2', '1.2.3', '1.2.4', '1.2.5', '1.2.6', '1.2.7')): # noqa: E501
                        new_template_name += '_k8s-1.10_weave-2.3.0'
                    elif '2.0.0' in version:
                        new_template_name += '_k8s-1.13_weave-2.3.0'

            if new_template_name:
                console_message_printer.info("Updating metadata of cluster.")
                task = vapp.remove_metadata(ClusterMetadataKey.BACKWARD_COMPATIBILE_TEMPLATE_NAME) # noqa: E501
                client.get_task_monitor().wait_for_success(task)
                new_metadata_to_add = {
                    ClusterMetadataKey.TEMPLATE_NAME: new_template_name,
                    ClusterMetadataKey.TEMPLATE_REVISION: 0
                }
                task = vapp.set_multiple_metadata(new_metadata_to_add)
                client.get_task_monitor().wait_for_success(task)
            console_message_printer.general(
                "Finished processing metadata of cluster.")

            try:
                console_message_printer.info(
                    f"Undeploying the vApp '{cluster['name']}'")
                task = vapp.undeploy()
                client.get_task_monitor().wait_for_success(task)
                console_message_printer.general(
                    "Successfully undeployed the vApp.")
            except Exception as err:
                console_message_printer.error(str(err))

            vm_resources = vapp.get_all_vms()
            for vm_resource in vm_resources:
                console_message_printer.info(
                    f"Processing vm '{vm_resource.get('name')}'.")
                vm = VM(client, href=vm_resource.get('href'))
                vms.append(vm)

                console_message_printer.info("Updating vm admin password.")
                task = vm.update_guest_customization_section(
                    enabled=True,
                    admin_password_enabled=True,
                    admin_password_auto=not password,
                    admin_password=password,
                )
                client.get_task_monitor().wait_for_success(task)
                console_message_printer.general("Successfully updated vm .")

                console_message_printer.info("Deploying vm.")
                task = vm.power_on_and_force_recustomization()
                client.get_task_monitor().wait_for_success(task)
                console_message_printer.general("Successfully deployed vm.")

            console_message_printer.info("Deploying cluster")
            task = vapp.deploy(power_on=True)
            client.get_task_monitor().wait_for_success(task)
            console_message_printer.general("Successfully deployed cluster.")
            console_message_printer.general(
                f"Successfully processed cluster '{cluster['name']}'.")

        if skip_wait_for_gc:
            return

        while True:
            # Rebuild the list instead of removing entries while iterating,
            # which would skip elements.
            vms = [vm for vm in vms
                   if vm.get_guest_customization_status() == 'GC_PENDING']
            console_message_printer.info(
                f"Waiting on guest customization to finish on {len(vms)} vms.")
            if len(vms) == 0:
                break
            time.sleep(5)

    except Exception as err:
        click.secho(str(err), fg='red')
    finally:
        if client:
            client.logout()
Example no. 13
def _get_vm(ctx, vapp_name, vm_name):
    client = ctx.obj['client']
    vapp = _get_vapp(ctx, vapp_name)
    vm_resource = vapp.get_vm(vm_name)
    return VM(client, href=vm_resource.get('href'))
Example no. 14
    def test_0240_list_all_current_metrics(self):
        vm = VM(TestVM._sys_admin_client, href=TestVM._test_vapp_first_vm_href)
        metrics = vm.list_all_current_metrics()
        self.assertTrue(len(metrics) > 0)
Example no. 15
    def test_0060_vm_power_operations(self):
        """Test the methods related to power operations in vm.py.

        This test passes if all the power operations are successful.
        """
        logger = Environment.get_default_logger()
        vm_name = TestVM._test_vapp_first_vm_name
        vm = VM(client=TestVM._client, href=TestVM._test_vapp_first_vm_href)
        # make sure the vm is powered on before running tests
        logger.debug('Making sure vm ' + vm_name + ' is powered on.')
        if vm.is_suspended():
            task = vm.deploy()
            TestVM._client.get_task_monitor().wait_for_success(task=task)
            vm.reload()
        if not vm.is_powered_on():
            task = vm.power_on()
            TestVM._client.get_task_monitor().wait_for_success(task=task)
            vm.reload()
        logger.debug('Un-deploying vm ' + vm_name)
        task = vm.undeploy()
        result = TestVM._client.get_task_monitor().wait_for_success(task)
        self.assertEqual(result.get('status'), TaskStatus.SUCCESS.value)
        logger.debug('Deploying vm ' + vm_name)
        vm.reload()
        task = vm.deploy(power_on=False)
        result = TestVM._client.get_task_monitor().wait_for_success(task)
        self.assertEqual(result.get('status'), TaskStatus.SUCCESS.value)
        logger.debug('Powering on vm ' + vm_name)
        vm.reload()
        task = vm.power_on()
        result = TestVM._client.get_task_monitor().wait_for_success(task)
        self.assertEqual(result.get('status'), TaskStatus.SUCCESS.value)
        logger.debug('Resetting (power) vm ' + vm_name)
        vm.reload()
        task = vm.power_reset()
        result = TestVM._client.get_task_monitor().wait_for_success(task)
        self.assertEqual(result.get('status'), TaskStatus.SUCCESS.value)
        logger.debug('Powering off vm ' + vm_name)
        vm.reload()
        task = vm.power_off()
        result = TestVM._client.get_task_monitor().wait_for_success(task)
        self.assertEqual(result.get('status'), TaskStatus.SUCCESS.value)
        logger.debug('Powering back on vm ' + vm_name)
        vm.reload()
        task = vm.power_on()
        result = TestVM._client.get_task_monitor().wait_for_success(task)
        self.assertEqual(result.get('status'), TaskStatus.SUCCESS.value)
        # Note: discarding the suspended state sometimes shows inconsistent
        # behavior and puts the VM in a partially suspended state. This
        # scenario is commented out to avoid that failure.
        # logger.debug('Suspending vm ' + vm_name)
        # vm.reload()
        # task = vm.suspend()
        # result = TestVM._client.get_task_monitor().wait_for_success(task)
        # self.assertEqual(result.get('status'), TaskStatus.SUCCESS.value)
        # logger.debug('Discarding suspended state of vm ' + vm_name)
        # vm.reload()
        # if vm.is_suspended():
        #     task = vm.discard_suspended_state()
        #     result = \
        #         TestVM._client.get_task_monitor().wait_for_success(task)
        #     self.assertEqual(result.get('status'), TaskStatus.SUCCESS.value)
        # logger.debug('Powering back on vm ' + vm_name)
        vm.reload()
        if not vm.is_powered_on():
            task = vm.power_on()
            result = TestVM._client.get_task_monitor().wait_for_success(task)
            self.assertEqual(result.get('status'), TaskStatus.SUCCESS.value)
Example no. 16
    def test_0250_list_subset_current_metrics(self):
        vm = VM(TestVM._sys_admin_client, href=TestVM._test_vapp_first_vm_href)
        metrics = vm.list_current_metrics_subset(metric_pattern='*.average')
        self.assertTrue(len(metrics) > 0)
Example no. 17
    def test_0085_vm_nic_update(self):
        vm = VM(TestVM._client, href=TestVM._test_vapp_first_vm_href)
        task = vm.update_nic(network_name=TestVM._vapp_network_name,
                             is_connected=False)
        result = TestVM._client.get_task_monitor().wait_for_success(task=task)
        self.assertEqual(result.get('status'), TaskStatus.SUCCESS.value)
Example no. 18
    def test_0290_list_gc_info(self):
        vm = VM(TestVM._sys_admin_client,
                href=TestVM._test_vapp_vmtools_vm_href)
        gc_dict = vm.list_gc_section()
        self.assertTrue(len(gc_dict) > 0)
Example no. 19
    def test_0100_upgrade_virtual_hardware(self):
        vm = VM(TestVM._client, href=TestVM._test_vapp_first_vm_href)
        task = vm.upgrade_virtual_hardware()
        result = TestVM._client.get_task_monitor().wait_for_success(task=task)
        self.assertEqual(result.get('status'), TaskStatus.SUCCESS.value)
Example no. 20
    def test_0320_list_vm_capabilities(self):
        vm = VM(TestVM._sys_admin_client,
                href=TestVM._test_vapp_vmtools_vm_href)
        capabilities_dict = vm.list_vm_capabilities()
        self.assertTrue(len(capabilities_dict) > 0)
Example no. 21
    def test_0130_general_setting_detail(self):
        vm = VM(TestVM._client, href=TestVM._test_vapp_first_vm_href)
        result = vm.general_setting_detail()
        self.assertNotEqual(len(result), 0)
Example no. 22
    def test_0360_list_runtime_info(self):
        vm = VM(TestVM._sys_admin_client,
                href=TestVM._test_vapp_vmtools_vm_href)
        runtime_dict = vm.list_run_time_info()
        self.assertTrue(len(runtime_dict) > 0)
Example no. 23
    def test_0140_list_storage_profile(self):
        vm = VM(TestVM._client, href=TestVM._test_vapp_first_vm_href)
        result = vm.list_storage_profile()
        self.assertNotEqual(len(result), 0)
Example no. 24
    def test_0400_remove_metadata(self):
        # remove metadata entry
        vm = VM(TestVM._sys_admin_client, href=TestVM._test_vapp_first_vm_href)
        task = vm.remove_metadata(key=TestVM._metadata_key)
        result = TestVM._client.get_task_monitor().wait_for_success(task)
        self.assertEqual(result.get('status'), TaskStatus.SUCCESS.value)
Example no. 25
    def test_0220_list_virtual_hardware_section(self):
        vm = VM(TestVM._client, href=TestVM._test_vapp_first_vm_href)
        hw_list = vm.list_virtual_hardware_section(is_disk=True,
                                                   is_media=True,
                                                   is_networkCards=True)
        self.assertTrue(len(hw_list) > 0)
Example no. 26
    def test_0430_list_product_sections(self):
        vm = VM(TestVM._sys_admin_client,
                href=TestVM._test_vapp_vmtools_vm_href)
        sections = vm.list_product_sections()
        self.assertTrue(len(sections) > 0)
Example no. 27
    def test_0230_get_compliance_result(self):
        vm = VM(TestVM._sys_admin_client, href=TestVM._test_vapp_first_vm_href)
        result = vm.get_compliance_result()
        self.assertEqual(result.ComplianceStatus, 'COMPLIANT')
Example no. 28
# NOTE: the top of this snippet was truncated; the imports and the beginning
# of the Client(...) call are reconstructed here from the code that follows.
# `host`, `user`, `org` and `password` are assumed to be defined earlier.
from pyvcloud.vcd.client import BasicLoginCredentials, Client
from pyvcloud.vcd.org import Org
from pyvcloud.vcd.vapp import VApp
from pyvcloud.vcd.vdc import VDC
from pyvcloud.vcd.vm import VM

client = Client(host,
                log_file='pyvcloud.log',
                log_requests=True,
                log_headers=True,
                log_bodies=True)
client.set_credentials(BasicLoginCredentials(user, org, password))
task_monitor = client.get_task_monitor()

print("Fetching Org...")
org_resource = client.get_org()
org = Org(client, resource=org_resource)

print("Fetching VDC...")
vdc_resource = org.get_vdc(vdc)
vdc = VDC(client, resource=vdc_resource)

print("Fetching vApp...")
vapp_resource = vdc.get_vapp(vapp)
vapp = VApp(client, resource=vapp_resource)

print("Fetching VM...")
vm_resource = vapp.get_vm(vm)
vm = VM(client, resource=vm_resource)

print("Creating Snapshot...")
snapshot_task = vm.snapshot_create(memory=False, quiesce=False)
print("Waiting for Snapshot to finish...")
task_monitor.wait_for_success(snapshot_task)
# Log out.
print("Logging out")
client.logout()
Example no. 29
    def get_vm(self, vm_name):
        vapp_vm_resource = self.vapp.get_vm(vm_name)

        return VM(self.module.client, resource=vapp_vm_resource)