class ListVM(object):
    """
    Demonstrates getting list of VMs present in vCenter
    Sample Prerequisites:
    vCenter/ESX
    """

    def __init__(self):
        # Populated by setup() once a server connection is established.
        self.service_manager = None

    def setup(self):
        """Parse the standard sample CLI arguments and connect."""
        arg_parser = sample_cli.build_arg_parser()
        cli_args = sample_util.process_cli_args(arg_parser.parse_args())

        self.service_manager = ServiceManager(cli_args.server,
                                              cli_args.username,
                                              cli_args.password,
                                              cli_args.skipverification)
        self.service_manager.connect()
        # Make sure the session is closed when the interpreter exits.
        atexit.register(self.service_manager.disconnect)

    def run(self):
        """
        List VMs present in server
        """
        separator = "----------------------------"
        vm_service = VM(self.service_manager.stub_config)
        vms = vm_service.list()
        print(separator)
        print("List Of VMs")
        print(separator)
        pprint(vms)
        print(separator)
class Sample(object):
    """
    TODO: Sample description and prerequisites.
    e.g. Demonstrates getting list of VMs present in vCenter

    Sample Prerequisites:
        - vCenter
    """
    def __init__(self):
        # All of these are filled in by setup().
        self.service_manager = None
        self.vm_name = None
        self.cleardata = None

    def setup(self):
        """Parse CLI options (standard + custom) and connect to the server."""
        # Standard inputs: server, username, password, cleanup and
        # skipverification.
        arg_parser = sample_cli.build_arg_parser()

        # Custom input: name of the VM to look up.
        arg_parser.add_argument('-n',
                                '--vm_name',
                                action='store',
                                default='Sample_Default_VM_for_Simple_Testbed',
                                help='Name of the testing vm')

        cli_args = sample_util.process_cli_args(arg_parser.parse_args())
        self.vm_name = cli_args.vm_name
        self.cleardata = cli_args.cleardata

        # Connect to both Vim and vAPI services
        self.service_manager = ServiceManager(cli_args.server,
                                              cli_args.username,
                                              cli_args.password,
                                              cli_args.skipverification)
        self.service_manager.connect()
        atexit.register(self.service_manager.disconnect)

    def run(self):
        """List VMs matching the configured name, then print server time."""
        # REST API: filter the VM list down to the configured name.
        rest_vm_service = VM(self.service_manager.stub_config)
        name_filter = VM.FilterSpec(names={self.vm_name})
        print(rest_vm_service.list(name_filter))

        # Vim API service (pyVmomi): fetch the server's current time.
        print(self.service_manager.si.CurrentTime())

    def cleanup(self):
        if self.cleardata:
            # TODO add cleanup code
            pass
# Example #3
class DeleteVM(object):
    """
    Demonstrates Deleting User specified Virtual Machine (VM)

    Sample Prerequisites:
    vCenter/ESX
    """

    def __init__(self):
        self.service_manager = None  # set in setup()
        self.vm_name = None          # set in setup()

    def setup(self):
        """Parse CLI arguments (including the VM name) and connect."""
        parser = sample_cli.build_arg_parser()
        parser.add_argument('-n', '--vm_name',
                            action='store',
                            default='Sample_Default_VM_for_Simple_Testbed',
                            help='Name of the testing vm')
        args = sample_util.process_cli_args(parser.parse_args())
        self.vm_name = args.vm_name

        self.service_manager = ServiceManager(args.server,
                                              args.username,
                                              args.password,
                                              args.skipverification)
        self.service_manager.connect()
        # Disconnect automatically when the interpreter exits.
        atexit.register(self.service_manager.disconnect)

    def run(self):
        """
        Delete User specified VM from Server

        Raises:
            Exception: if no VM with the requested name exists.
        """
        vm_svc = VM(self.service_manager.stub_config)
        power_svc = Power(self.service_manager.stub_config)
        vm = get_vm(self.service_manager.stub_config, self.vm_name)
        if not vm:
            # Bug fix: the original referenced an undefined local `vm_name`,
            # which raised NameError instead of the intended message.
            raise Exception('Sample requires an existing vm with name ({}).'
                            'Please create the vm first.'.format(self.vm_name))
        # Bug fix: the original message had unbalanced quote/parenthesis
        # characters in the format string.
        print("Deleting VM -- '{}' ({})".format(self.vm_name, vm))
        state = power_svc.get(vm)
        # The VM must be powered off before it can be deleted.
        if state == Power.Info(state=Power.State.POWERED_ON):
            power_svc.stop(vm)
        elif state == Power.Info(state=Power.State.SUSPENDED):
            # A suspended VM is first resumed so it can then be stopped.
            power_svc.start(vm)
            power_svc.stop(vm)
        vm_svc.delete(vm)
        print("Deleted VM -- '{}' ({})".format(self.vm_name, vm))
class DeployOvfTemplate:
    """
    Demonstrates the workflow to deploy an OVF library item to a resource pool.

    Note: the sample needs an existing library item with an OVF template
    and an existing cluster with resources for deploying the VM.
    """
    def __init__(self):
        self.servicemanager = None      # ServiceManager, set in setup()
        self.client = None              # ClsApiClient, set in setup()
        self.helper = None              # ClsApiHelper, set in setup()
        self.resource_pool_name = None  # target resource pool name (CLI)
        self.lib_item_name = None       # OVF library item name (CLI)
        self.vm_obj = None              # pyVmomi VM object after deployment
        self.vm_name = None             # VM name (CLI or generated)
        self.vm_count = None            # requested VM count (CLI)
        # Bug fix: vm_id was previously only assigned inside
        # deploy_ovf_template(), so the attribute did not exist before a
        # successful deployment; initialize it so it is always present.
        self.vm_id = None

    def setup(self):
        """Parse CLI arguments, connect to vCenter and build API helpers."""
        parser = sample_cli.build_arg_parser()
        parser.add_argument('-n',
                            '--vm_name',
                            action='store',
                            help='Name of the testing vm')
        parser.add_argument('-resourcepoolname',
                            '--resourcepoolname',
                            help='The name of the resource pool to be used.')
        parser.add_argument('-libitemname',
                            '--libitemname',
                            help='The name of the library item to deploy.'
                            'The library item should contain an OVF package.')
        parser.add_argument(
            '-vm_count',
            '--vm_count',
            help='Number of VMs to be created. By default is 1',
            type=int,
            default=1)
        args = sample_util.process_cli_args(parser.parse_args())
        self.lib_item_name = args.libitemname
        self.resource_pool_name = args.resourcepoolname
        self.vm_name = args.vm_name
        # NOTE(review): vm_count is parsed but never used by execute();
        # only a single VM is deployed -- confirm intended behavior.
        self.vm_count = args.vm_count

        self.servicemanager = ServiceManager(args.server, args.username,
                                             args.password,
                                             args.skipverification)
        self.servicemanager.connect()
        atexit.register(self.servicemanager.disconnect)

        self.client = ClsApiClient(self.servicemanager)
        self.helper = ClsApiHelper(self.client, args.skipverification)

        # Default VM name
        if not self.vm_name:
            self.vm_name = 'vm-' + str(generate_random_uuid())

    def execute(self):
        """Locate the resource pool and OVF item, then deploy the template."""
        # Find the resource pool's moid
        resource_pool_obj = get_obj(self.servicemanager.content,
                                    [vim.ResourcePool],
                                    self.resource_pool_name)
        assert resource_pool_obj is not None
        print("Resource Pool Moref: {0}".format(resource_pool_obj))

        deployment_target = LibraryItem.DeploymentTarget(
            resource_pool_id=resource_pool_obj._GetMoId())
        lib_item_id = self.helper.get_item_id_by_name(self.lib_item_name)
        assert lib_item_id
        # filter() validates the item against the target and returns an OVF
        # summary (name, annotation, ...).
        ovf_summary = self.client.ovf_lib_item_service.filter(
            ovf_library_item_id=lib_item_id, target=deployment_target)
        print('Found an OVF template :{0} to deploy.'.format(ovf_summary.name))

        # Deploy the ovf template
        self.deploy_ovf_template(lib_item_id, ovf_summary, deployment_target)

    def deploy_ovf_template(self, lib_item_id, ovf_summary, deployment_target):
        """Deploy the OVF library item and power on the resulting VM.

        :param lib_item_id: ID of the content-library item holding the OVF.
        :param ovf_summary: summary returned by the OVF item filter() call.
        :param deployment_target: LibraryItem.DeploymentTarget to deploy into.
        """
        # Build the deployment spec; None values fall back to server defaults.
        deployment_spec = LibraryItem.ResourcePoolDeploymentSpec(
            name=self.vm_name,
            annotation=ovf_summary.annotation,
            accept_all_eula=True,
            network_mappings=None,
            storage_mappings=None,
            storage_provisioning=None,
            storage_profile_id=None,
            locale=None,
            flags=None,
            additional_parameters=None,
            default_datastore_id=None)

        # Deploy the ovf template; the client token makes the request
        # idempotent if it is retried.
        result = self.client.ovf_lib_item_service.deploy(
            lib_item_id,
            deployment_target,
            deployment_spec,
            client_token=generate_random_uuid())

        # The type and ID of the target deployment is available in the
        # deployment result.
        if result.succeeded:
            print(
                'Deployment successful. Result resource: {0}, ID: {1}'.format(
                    result.resource_id.type, result.resource_id.id))
            self.vm_id = result.resource_id.id
            error = result.error
            if error is not None:
                # A successful deployment may still carry warnings.
                for warning in error.warnings:
                    print('OVF warning: {}'.format(warning.message))

            # Power on the VM and wait for the power on operation to complete
            self.vm_obj = get_obj_by_moId(self.servicemanager.content,
                                          [vim.VirtualMachine], self.vm_id)
            assert self.vm_obj is not None
            poweron_vm(self.servicemanager.content, self.vm_obj)
        else:
            print('Deployment failed.')
            for error in result.error.errors:
                print('OVF error: {}'.format(error.message))

    def cleanup(self):
        """Power off and delete the deployed VM, if one exists."""
        if self.vm_obj is not None:
            # Power off the VM and wait for the power off operation to complete
            poweroff_vm(self.servicemanager.content, self.vm_obj)
            # Delete the VM
            delete_object(self.servicemanager.content, self.vm_obj)
# Example #5
from pyVmomi import vim
from samples.vsphere.common.service_manager import ServiceManager
from samples.vsphere.contentlibrary.lib.cls_api_client import ClsApiClient
from samples.vsphere.common.vim.helpers.vim_utils import get_obj
from com.vmware.content.library_client import Item
from com.vmware.vcenter.ovf_client import LibraryItem

# Deployment parameters -- fill these in before running this snippet.
vm_name = ''        # not referenced below; kept for completeness
rp_name = ''        # name of the target resource pool
template_name = ''  # name of the OVF template library item
esx_host = ''       # name of the ESX host to deploy onto
api_host = ''       # vCenter server address
api_user = ''       # vCenter username
api_pass = ''       # vCenter password

# Connect to the vAPI services (the final True is skipverification).
service_manager = ServiceManager(api_host, api_user, api_pass, True)
service_manager.connect()
client = ClsApiClient(service_manager)
# Find the library item ID of the named OVF template; [0] takes the
# first match (raises IndexError if no item is found).
find_spec = Item.FindSpec(name=template_name)
ovf_template_id = client.library_item_service.find(find_spec)[0]
# Resolve the host and resource pool managed objects for the target.
target = get_obj(service_manager.content, [vim.HostSystem], esx_host)
rp = get_obj(service_manager.content, [vim.ResourcePool], rp_name)
deployment_target = LibraryItem.DeploymentTarget(
    host_id=target._GetMoId(), resource_pool_id=rp._GetMoId())
# Deploy the template as '<template_name>_deployed', accepting all EULAs.
deployment_spec = LibraryItem.ResourcePoolDeploymentSpec(name=template_name +
                                                         '_deployed',
                                                         accept_all_eula=True)
client.ovf_lib_item_service.deploy(ovf_template_id, deployment_target,
                                   deployment_spec)
# Example #6
class CreateExhaustiveVM(object):
    """
    Demonstrates how to create an exhaustive VM with the below configuration:
    3 disks, 2 nics, 2 vcpu, 2 GB memory, boot=BIOS, 1 cdrom, 1 serial port,
    1 parallel port, 1 floppy, boot_device=[CDROM, DISK, ETHERNET]

    Sample Prerequisites:
        - datacenter
        - vm folder
        - resource pool
        - datastore
        - standard switch network
        - distributed switch network
        - An iso file on the datastore mentioned above
    """

    def __init__(self, stub_config=None, placement_spec=None,
                 standard_network=None, distributed_network=None):
        # All keyword arguments are optional; anything left as None is
        # resolved from the testbed config / helper functions later.
        self.context = None
        self.service_manager = None
        self.stub_config = stub_config
        self.placement_spec = placement_spec
        self.standard_network = standard_network
        self.distributed_network = distributed_network
        self.vm_name = testbed.config['VM_NAME_EXHAUSTIVE']
        self.cleardata = None

    def setup(self):
        """Parse CLI arguments and connect to the vCenter/ESX server."""
        parser = sample_cli.build_arg_parser()
        parser.add_argument('-n', '--vm_name',
                            action='store',
                            help='Name of the testing vm')
        args = sample_util.process_cli_args(parser.parse_args())
        if args.vm_name:
            self.vm_name = args.vm_name
        self.cleardata = args.cleardata

        self.service_manager = ServiceManager(args.server,
                                              args.username,
                                              args.password,
                                              args.skipverification)
        self.service_manager.connect()
        # Disconnect automatically when the interpreter exits.
        atexit.register(self.service_manager.disconnect)

        self.stub_config = self.service_manager.stub_config

    def run(self):
        """Create the exhaustive VM and return its identifier."""
        # Get a placement spec
        datacenter_name = testbed.config['VM_DATACENTER_NAME']
        vm_folder_name = testbed.config['VM_FOLDER2_NAME']
        datastore_name = testbed.config['VM_DATASTORE_NAME']
        std_portgroup_name = testbed.config['STDPORTGROUP_NAME']
        dv_portgroup_name = testbed.config['VDPORTGROUP1_NAME']

        if not self.placement_spec:
            self.placement_spec = vm_placement_helper.get_placement_spec_for_resource_pool(
                self.stub_config,
                datacenter_name,
                vm_folder_name,
                datastore_name)

        # Get a standard network backing
        if not self.standard_network:
            self.standard_network = network_helper.get_standard_network_backing(
                self.stub_config,
                std_portgroup_name,
                datacenter_name)

        # Get a distributed network backing
        if not self.distributed_network:
            self.distributed_network = network_helper.get_distributed_network_backing(
                self.stub_config,
                dv_portgroup_name,
                datacenter_name)

        """
        Create an exhaustive VM.

        Using the provided PlacementSpec, create a VM with a selected Guest OS
        and provided name.

        Create a VM with the following configuration:
        * Hardware Version = VMX_11 (for 6.0)
        * CPU (count = 2, coresPerSocket = 2, hotAddEnabled = false,
        hotRemoveEnabled = false)
        * Memory (size_mib = 2 GB, hotAddEnabled = false)
        * 3 Disks and specify each of the HBAs and the unit numbers
          * (capacity=40 GB, name=<some value>, spaceEfficient=true)
        * Specify 2 ethernet adapters, one using a Standard Portgroup backing and
        the
          other using a DISTRIBUTED_PORTGROUP networking backing.
          * nic1: Specify Ethernet (macType=MANUAL, macAddress=<some value>)
          * nic2: Specify Ethernet (macType=GENERATED)
        * 1 CDROM (type=ISO_FILE, file="os.iso", startConnected=true)
        * 1 Serial Port (type=NETWORK_SERVER, file="tcp://localhost/16000",
        startConnected=true)
        * 1 Parallel Port (type=HOST_DEVICE, startConnected=false)
        * 1 Floppy Drive (type=CLIENT_DEVICE)
        * Boot, type=BIOS
        * BootDevice order: CDROM, DISK, ETHERNET

        Use guest and system provided defaults for remaining configuration settings.
        """
        guest_os = testbed.config['VM_GUESTOS']
        iso_datastore_path = testbed.config['ISO_DATASTORE_PATH']
        serial_port_network_location = \
            testbed.config['SERIAL_PORT_NETWORK_SERVER_LOCATION']

        GiB = 1024 * 1024 * 1024  # bytes per GiB (used for disk capacities)
        GiBMemory = 1024  # MiB per GiB (Memory.UpdateSpec.size_mib is in MiB)

        vm_create_spec = VM.CreateSpec(
            guest_os=guest_os,
            name=self.vm_name,
            placement=self.placement_spec,
            hardware_version=Hardware.Version.VMX_11,
            # NOTE(review): the summary above says coresPerSocket = 2, but the
            # value used here is 1 -- confirm which is intended.
            cpu=Cpu.UpdateSpec(count=2,
                               cores_per_socket=1,
                               hot_add_enabled=False,
                               hot_remove_enabled=False),
            memory=Memory.UpdateSpec(size_mib=2 * GiBMemory,
                                     hot_add_enabled=False),
            disks=[
                # Boot disk pinned to scsi0:0; the two data disks let the
                # server pick the HBA and unit number.
                Disk.CreateSpec(type=Disk.HostBusAdapterType.SCSI,
                                scsi=ScsiAddressSpec(bus=0, unit=0),
                                new_vmdk=Disk.VmdkCreateSpec(name='boot',
                                                             capacity=40 * GiB)),
                Disk.CreateSpec(new_vmdk=Disk.VmdkCreateSpec(name='data1',
                                                             capacity=10 * GiB)),
                Disk.CreateSpec(new_vmdk=Disk.VmdkCreateSpec(name='data2',
                                                             capacity=10 * GiB))
            ],
            nics=[
                # nic1: manual MAC on the standard portgroup backing.
                Ethernet.CreateSpec(
                    start_connected=True,
                    mac_type=Ethernet.MacAddressType.MANUAL,
                    mac_address='11:23:58:13:21:34',
                    backing=Ethernet.BackingSpec(
                        type=Ethernet.BackingType.STANDARD_PORTGROUP,
                        network=self.standard_network)),
                # nic2: generated MAC on the distributed portgroup backing.
                Ethernet.CreateSpec(
                    start_connected=True,
                    mac_type=Ethernet.MacAddressType.GENERATED,
                    backing=Ethernet.BackingSpec(
                        type=Ethernet.BackingType.DISTRIBUTED_PORTGROUP,
                        network=self.distributed_network)),
            ],
            cdroms=[
                Cdrom.CreateSpec(
                    start_connected=True,
                    backing=Cdrom.BackingSpec(type=Cdrom.BackingType.ISO_FILE,
                                              iso_file=iso_datastore_path)
                )
            ],
            serial_ports=[
                # NOTE(review): the summary above says startConnected=true for
                # the serial port, but start_connected is False here -- confirm.
                Serial.CreateSpec(
                    start_connected=False,
                    backing=Serial.BackingSpec(
                        type=Serial.BackingType.NETWORK_SERVER,
                        network_location=serial_port_network_location)
                )
            ],
            parallel_ports=[
                Parallel.CreateSpec(
                    start_connected=False,
                    backing=Parallel.BackingSpec(
                        type=Parallel.BackingType.HOST_DEVICE)
                )
            ],
            floppies=[
                Floppy.CreateSpec(
                    backing=Floppy.BackingSpec(
                        type=Floppy.BackingType.CLIENT_DEVICE)
                )
            ],
            boot=Boot.CreateSpec(type=Boot.Type.BIOS,
                                 delay=0,
                                 enter_setup_mode=False
                                 ),
            # TODO Should DISK be put before CDROM and ETHERNET?  Does the BIOS
            # automatically try the next device if the DISK is empty?
            boot_devices=[
                BootDevice.EntryCreateSpec(BootDevice.Type.CDROM),
                BootDevice.EntryCreateSpec(BootDevice.Type.DISK),
                BootDevice.EntryCreateSpec(BootDevice.Type.ETHERNET)
            ]
        )
        print(
            '# Example: create_exhaustive_vm: Creating a VM using spec\n-----')
        print(pp(vm_create_spec))
        print('-----')

        vm_svc = VM(self.stub_config)
        vm = vm_svc.create(vm_create_spec)

        print("create_exhaustive_vm: Created VM '{}' ({})".format(self.vm_name,
                                                                  vm))

        vm_info = vm_svc.get(vm)
        print('vm.get({}) -> {}'.format(vm, pp(vm_info)))

        return vm

    def cleanup(self):
        """Power off (if necessary) and delete the sample VM, if it exists."""
        vm = get_vm(self.stub_config, self.vm_name)
        if vm:
            power_svc = Power(self.stub_config)
            vm_svc = VM(self.stub_config)
            state = power_svc.get(vm)
            # The VM must be powered off before deletion.
            if state == Power.Info(state=Power.State.POWERED_ON):
                power_svc.stop(vm)
            elif state == Power.Info(state=Power.State.SUSPENDED):
                # A suspended VM is first resumed so it can then be stopped.
                power_svc.start(vm)
                power_svc.stop(vm)
            print("Deleting VM '{}' ({})".format(self.vm_name, vm))
            vm_svc.delete(vm)
# Example #7
class CreateBasicVM(object):
    """
    Demonstrates how to create a basic VM with following configuration:
    2 disks, 1 nic

    Sample Prerequisites:
        - datacenter
        - vm folder
        - datastore
        - standard switch network
    """
    def __init__(self, stub_config=None, placement_spec=None):
        # stub_config/placement_spec may be injected by a caller; otherwise
        # they are resolved in setup()/run().
        self.context = None
        self.service_manager = None
        self.stub_config = stub_config
        self.placement_spec = placement_spec
        self.vm_name = testbed.config['VM_NAME_BASIC']
        self.cleardata = None

    def setup(self):
        """Parse CLI arguments and connect to the vCenter/ESX server."""
        parser = sample_cli.build_arg_parser()
        parser.add_argument('-n',
                            '--vm_name',
                            action='store',
                            help='Name of the testing vm')
        args = sample_util.process_cli_args(parser.parse_args())
        if args.vm_name:
            self.vm_name = args.vm_name
        self.cleardata = args.cleardata

        self.service_manager = ServiceManager(args.server, args.username,
                                              args.password,
                                              args.skipverification)
        self.service_manager.connect()
        # Disconnect automatically when the interpreter exits.
        atexit.register(self.service_manager.disconnect)

        self.stub_config = self.service_manager.stub_config

    def run(self):
        """Create the basic VM and return its identifier."""
        # Get a placement spec
        datacenter_name = testbed.config['VM_DATACENTER_NAME']
        vm_folder_name = testbed.config['VM_FOLDER2_NAME']
        datastore_name = testbed.config['VM_DATASTORE_NAME']
        std_portgroup_name = testbed.config['STDPORTGROUP_NAME']

        if not self.placement_spec:
            self.placement_spec = vm_placement_helper.get_placement_spec_for_resource_pool(
                self.stub_config, datacenter_name, vm_folder_name,
                datastore_name)

        # Get a standard network backing
        standard_network = network_helper.get_standard_network_backing(
            self.stub_config, std_portgroup_name, datacenter_name)
        """
        Create a basic VM.

        Using the provided PlacementSpec, create a VM with a selected Guest OS
        and provided name.

        Create a VM with the following configuration:
        * Create 2 disks and specify one of them on scsi0:0 since it's the boot disk
        * Specify 1 ethernet adapter using a Standard Portgroup backing
        * Setup for PXE install by selecting network as first boot device

        Use guest and system provided defaults for most configuration settings.
        """
        guest_os = testbed.config['VM_GUESTOS']

        # Boot disk pinned to scsi0:0; no capacity given, so the vmdk is
        # created with server-side defaults (presumably -- confirm).
        boot_disk = Disk.CreateSpec(type=Disk.HostBusAdapterType.SCSI,
                                    scsi=ScsiAddressSpec(bus=0, unit=0),
                                    new_vmdk=Disk.VmdkCreateSpec())
        data_disk = Disk.CreateSpec(new_vmdk=Disk.VmdkCreateSpec())

        nic = Ethernet.CreateSpec(
            start_connected=True,
            backing=Ethernet.BackingSpec(
                type=Ethernet.BackingType.STANDARD_PORTGROUP,
                network=standard_network))

        # Network first, then disk: allows a PXE install on first boot.
        boot_device_order = [
            BootDevice.EntryCreateSpec(BootDevice.Type.ETHERNET),
            BootDevice.EntryCreateSpec(BootDevice.Type.DISK)
        ]

        vm_create_spec = VM.CreateSpec(name=self.vm_name,
                                       guest_os=guest_os,
                                       placement=self.placement_spec,
                                       disks=[boot_disk, data_disk],
                                       nics=[nic],
                                       boot_devices=boot_device_order)
        print('\n# Example: create_basic_vm: Creating a VM using spec\n-----')
        print(pp(vm_create_spec))
        print('-----')

        vm_svc = VM(self.stub_config)
        vm = vm_svc.create(vm_create_spec)

        print("create_basic_vm: Created VM '{}' ({})".format(self.vm_name, vm))

        vm_info = vm_svc.get(vm)
        print('vm.get({}) -> {}'.format(vm, pp(vm_info)))

        return vm

    def cleanup(self):
        """Power off (if necessary) and delete the sample VM, if it exists."""
        vm = get_vm(self.stub_config, self.vm_name)
        if vm:
            power_svc = Power(self.stub_config)
            vm_svc = VM(self.stub_config)
            state = power_svc.get(vm)
            # The VM must be powered off before deletion.
            if state == Power.Info(state=Power.State.POWERED_ON):
                power_svc.stop(vm)
            elif state == Power.Info(state=Power.State.SUSPENDED):
                # A suspended VM is first resumed so it can then be stopped.
                power_svc.start(vm)
                power_svc.stop(vm)
            print("Deleting VM '{}' ({})".format(self.vm_name, vm))
            vm_svc.delete(vm)
class ImportHistorySample(object):
    """
    Sample demonstrating how one can change the state of the Defer History Data
    Import using its vAPI. To use this feature you need to have an appliance
    upgrade or migrated to the 6.7 or later version, using the option for
    transferring historical data after upgrade.
    """
    def __init__(self):
        # Populated by setup() once a connection is established.
        self.service_manager = None

    def setup(self):
        """Connect to the server using the standard sample CLI arguments."""
        # Standard inputs: server, username, password, cleanup and
        # skipverification.
        arg_parser = sample_cli.build_arg_parser()
        cli_args = sample_util.process_cli_args(arg_parser.parse_args())

        self.service_manager = ServiceManager(cli_args.server,
                                              cli_args.username,
                                              cli_args.password,
                                              cli_args.skipverification)
        self.service_manager.connect()
        atexit.register(self.service_manager.disconnect)

    def run(self):
        """
        Runs the sample's operations
        """

        try:
            # Using REST API service
            import_history = ImportHistory(self.service_manager.stub_config)

            current_status = get_defer_history_import_status(import_history)
            # Map each actionable state to: (log message, operation to apply,
            # status expected afterwards, operation that reverts the change).
            transitions = {
                Status.RUNNING: ('Pausing Defer History Data Import.',
                                 import_history.pause,
                                 Status.PAUSED,
                                 import_history.resume),
                Status.PAUSED: ('Resuming Defer History Data Import.',
                                import_history.resume,
                                Status.RUNNING,
                                import_history.pause),
            }
            if current_status not in transitions:
                print('Sample can only work if the status of Defer History '
                      'Data Import is paused or running, current status '
                      'is: {0}'.format(current_status))
                return
            message, operation, expected_status, revert_operation = \
                transitions[current_status]
            print(message)
            operation()

            after_ops_status = get_defer_history_import_status(import_history)
            if after_ops_status != expected_status:
                print('Executed operation did not bring the process in '
                      'desired state. Current status is "{0}". '
                      'Aborting'.format(after_ops_status))
                return
            print('Operation finished successfully.')

            # revert to the original status
            print('Reverting to original state.')
            revert_operation()
            get_defer_history_import_status(import_history)
        except AlreadyInDesiredState:
            print('The Defer History Data Import is already in the '
                  'desired state.')
        except Error as error:
            for err in error.messages:
                print('Error: {0}'.format(get_message_as_text(err)))

    def cleanup(self):
        # Nothing to clean up
        pass
class CreateDefaultVM(object):
    """
    Demonstrates how to create a VM with system provided defaults

    Sample Prerequisites:
        - datacenter
        - vm folder
        - datastore
    """
    def __init__(self, stub_config=None, placement_spec=None):
        # Optional injection points for callers that already hold a stub
        # config / placement spec; otherwise resolved in setup()/run().
        self.context = None
        self.service_manager = None
        self.stub_config = stub_config
        self.placement_spec = placement_spec
        self.vm_name = testbed.config['VM_NAME_DEFAULT']
        self.cleardata = None

    def setup(self):
        """Parse CLI arguments and connect to the vCenter/ESX server."""
        arg_parser = sample_cli.build_arg_parser()
        arg_parser.add_argument('-n',
                                '--vm_name',
                                action='store',
                                help='Name of the testing vm')
        cli_args = sample_util.process_cli_args(arg_parser.parse_args())
        if cli_args.vm_name:
            self.vm_name = cli_args.vm_name
        self.cleardata = cli_args.cleardata

        self.service_manager = ServiceManager(cli_args.server,
                                              cli_args.username,
                                              cli_args.password,
                                              cli_args.skipverification)
        self.service_manager.connect()
        atexit.register(self.service_manager.disconnect)

        self.stub_config = self.service_manager.stub_config

    def run(self):
        """Create a VM using only guest/system defaults and return its ID."""
        # Resolve placement from the testbed config unless one was injected.
        dc_name = testbed.config['VM_DATACENTER_NAME']
        folder_name = testbed.config['VM_FOLDER2_NAME']
        ds_name = testbed.config['VM_DATASTORE_NAME']

        if not self.placement_spec:
            self.placement_spec = \
                vm_placement_helper.get_placement_spec_for_resource_pool(
                    self.stub_config, dc_name, folder_name, ds_name)

        # Minimal spec: name, guest OS and placement only; everything else
        # falls back to guest and system provided defaults.
        create_spec = VM.CreateSpec(name=self.vm_name,
                                    guest_os=testbed.config['VM_GUESTOS'],
                                    placement=self.placement_spec)
        print(
            '\n# Example: create_default_vm: Creating a VM using spec\n-----')
        print(pp(create_spec))
        print('-----')

        vm_service = VM(self.stub_config)
        vm = vm_service.create(create_spec)
        print("create_default_vm: Created VM '{}' ({})".format(
            self.vm_name, vm))

        vm_info = vm_service.get(vm)
        print('vm.get({}) -> {}'.format(vm, pp(vm_info)))
        return vm

    def cleanup(self):
        """Power off (if necessary) and delete the sample VM, if it exists."""
        vm = get_vm(self.stub_config, self.vm_name)
        if not vm:
            return
        power_service = Power(self.stub_config)
        vm_service = VM(self.stub_config)
        current_state = power_service.get(vm)
        if current_state == Power.Info(state=Power.State.POWERED_ON):
            power_service.stop(vm)
        elif current_state == Power.Info(state=Power.State.SUSPENDED):
            # A suspended VM is first resumed so it can then be stopped.
            power_service.start(vm)
            power_service.stop(vm)
        print("Deleting VM '{}' ({})".format(self.vm_name, vm))
        vm_service.delete(vm)
class ImportHistorySampleCli(object):
    """
    Sample demonstrating how the API for the upgrade's Defer History Data
    Import feature can be used. To use this feature you need to have an
    appliance upgrade or migrated to the 6.7 or later version, using the
    option for transferring historical data after upgrade.
    """
    def __init__(self):
        # Populated by setup().
        self.service_manager = None
        self.operation = None

    def setup(self):
        """Parse CLI arguments (standard + operation choice) and connect."""
        # Standard inputs: server, username, password, cleanup and
        # skipverification, plus the custom -o/--operation choice.
        arg_parser = sample_cli.build_arg_parser()
        arg_parser.add_argument(
            '-o',
            '--operation',
            action='store',
            default='status',
            choices=['status', 'start', 'pause', 'resume', 'cancel'],
            help='Operation to execute')

        cli_args = sample_util.process_cli_args(arg_parser.parse_args())
        self.operation = cli_args.operation

        self.service_manager = ServiceManager(cli_args.server,
                                              cli_args.username,
                                              cli_args.password,
                                              cli_args.skipverification)
        self.service_manager.connect()
        atexit.register(self.service_manager.disconnect)

    def run(self):
        """
        Runs the requested operation
        """

        # Using REST API service
        import_history = ImportHistory(self.service_manager.stub_config)

        # 'status' is a read-only query; handle it up front.
        if self.operation == 'status':
            get_defer_history_import_status(import_history)
            return

        try:
            handlers = {
                'start': import_history.start,
                'pause': import_history.pause,
                'resume': import_history.resume,
                'cancel': import_history.cancel
            }
            print('Executing operation "{0}"'.format(self.operation))
            handler = handlers.get(self.operation)
            if handler is None:
                print('Unknown operation {0}'.format(self.operation))
            else:
                handler()
                print('Executing operation "{0}" was successful'.format(
                    self.operation))
        except AlreadyInDesiredState:
            print('The Defer History Data Import is already in the '
                  'desired state.')
        except Error as error:
            print('Request "{0}" returned error.'.format(self.operation))
            for err in error.messages:
                print('Error: {0}'.format(get_message_as_text(err)))

    def cleanup(self):
        # Nothing to clean up
        pass