Example #1
    def run(self, vms, distributed_switch=None, mac_address=None, network_name=None, port_key=None,
            stay_connected=False, network_type='Flexible', wake_on_lan=False):
        network_type = network_type.lower() if network_type else None
        si = self.si
        si_content = si.RetrieveContent()

        vm_objs = [vim.VirtualMachine(moid, stub=si._stub) for moid in vms]
        # accessing the name property verifies that each VM exists.
        [vm_obj.name for vm_obj in vm_objs]

        distributed_switch_obj = None
        if distributed_switch:
            distributed_switch_obj = vim.DistributedVirtualSwitch(distributed_switch, stub=si._stub)
            # accessing the name property verifies that the distributed switch exists.
            distributed_switch_obj.name

        network_obj = None
        if network_name:
            network_obj = inventory.get_network(si_content, name=network_name)
            # accessing the name property verifies that the network exists.
            network_obj.name

        result = []
        for vm in vm_objs:
            vm_reconfig_spec = NewNetworkAdapter.get_vm_reconfig_spec(distributed_switch_obj,
                mac_address, network_obj, port_key, stay_connected, network_type, wake_on_lan)
            add_network_adapter_task = vm.ReconfigVM_Task(spec=vm_reconfig_spec)
            successfully_added_network = self._wait_for_task(add_network_adapter_task)
            result.append({
                "vm_moid": vm._GetMoId(),
                "success": successfully_added_network
            })

        return result
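
The snippets on this page all take an authenticated pyVmomi ServiceInstance (si, or its SOAP stub soap_stub) as given. A minimal sketch of that setup and of the MoID-to-object pattern used above, assuming a reachable vCenter, placeholder credentials, and a placeholder MoID:

import ssl

from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

# Lab-only: skip certificate verification; use a verified context in production.
ssl_context = ssl._create_unverified_context()
si = SmartConnect(host='vcenter.example.com', user='user', pwd='secret',
                  sslContext=ssl_context)
try:
    # Managed objects can be built straight from a MoID plus the stub,
    # exactly as Example #1 does with vim.VirtualMachine(moid, stub=si._stub).
    dvs = vim.DistributedVirtualSwitch('dvs-42', stub=si._stub)  # placeholder MoID
    print(dvs.name)  # raises if the MoID does not exist on this vCenter
finally:
    Disconnect(si)
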
Example #2
def add_host_to_vdswitch(context, vdswitch_name, host_name, pnic_names=None):
    """Add host to Distributed Switch"""
    host = context.testbed.entities['HOST_IDS'][host_name]
    host_mo = vim.HostSystem(host, context.soap_stub)

    vdswitch = context.testbed.entities['DISTRIBUTED_SWITCH_IDS'][
        vdswitch_name]
    vdswitch_mo = vim.DistributedVirtualSwitch(vdswitch, context.soap_stub)

    pnic_specs = []
    if pnic_names:
        for pnic in pnic_names:
            pnic_specs.append(vim.dvs.HostMember.PnicSpec(pnicDevice=pnic))

    dvs_member_config = vim.dvs.HostMember.ConfigSpec(
        operation="add",
        host=host_mo,
        backing=vim.dvs.HostMember.PnicBacking(pnicSpec=pnic_specs))

    dvs_config = vim.DistributedVirtualSwitch.ConfigSpec(
        configVersion=vdswitch_mo.config.configVersion,
        host=[dvs_member_config])

    task = vdswitch_mo.Reconfigure(dvs_config)
    pyVim.task.WaitForTask(task)

    print("Added Host '{}' ({}) to Distributed Switch '{}' ({})".format(
        host_name, host, vdswitch_name, vdswitch))
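
A hedged sketch of the symmetric operation, removing a host from the switch: the same vim.dvs.HostMember.ConfigSpec is used with operation="remove", applied through the same Reconfigure/WaitForTask pattern as Example #2. The function name and arguments below are illustrative assumptions, not part of the sample above.

import pyVim.task
from pyVmomi import vim


def remove_host_from_vdswitch(vdswitch_mo, host_mo):
    """Remove a host member from a Distributed Switch (illustrative sketch)."""
    member_spec = vim.dvs.HostMember.ConfigSpec(operation="remove", host=host_mo)
    dvs_spec = vim.DistributedVirtualSwitch.ConfigSpec(
        configVersion=vdswitch_mo.config.configVersion,
        host=[member_spec])
    pyVim.task.WaitForTask(vdswitch_mo.Reconfigure(dvs_spec))
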
Example #3
def create_vdportgroup(context, vdswitch_name, vdportgroup_name):
    """Create Distributed Switch portgroup"""
    vdportgroup_type = "earlyBinding"

    vdswitch = context.testbed.entities['DISTRIBUTED_SWITCH_IDS'][
        vdswitch_name]
    vdswitch_mo = vim.DistributedVirtualSwitch(vdswitch, context.soap_stub)

    vdportgroup_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec(
        name=vdportgroup_name, type=vdportgroup_type)
    vdportgroup_specs = [vdportgroup_spec]

    task = vdswitch_mo.AddPortgroups(vdportgroup_specs)
    pyVim.task.WaitForTask(task)

    # The AddPortgroups operation doesn't return any information about the
    # created portgroup, so look it up.
    vdportgroup = None
    for vdportgroup_mo in vdswitch_mo.portgroup:
        if vdportgroup_mo.name == vdportgroup_name:
            vdportgroup = vdportgroup_mo._moId
            print(
                "Created Distributed Portgroup '{}' ({}) on Distributed Switch '{}' ({})"
                .format(vdportgroup_name, vdportgroup, vdswitch_name,
                        vdswitch))
    return vdportgroup
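
Example #3 has to look the new portgroup up by name because AddPortgroups returns nothing about it; the same container-view lookup works when only the name of the Distributed Switch itself is known. A minimal sketch, assuming an already connected ServiceInstance si; the helper name is an assumption:

from pyVmomi import vim


def find_vdswitch_by_name(si, name):
    """Return the first Distributed Switch with the given name, or None (sketch)."""
    content = si.RetrieveContent()
    view = content.viewManager.CreateContainerView(
        content.rootFolder, [vim.DistributedVirtualSwitch], True)
    try:
        for dvs in view.view:
            if dvs.name == name:
                return dvs
    finally:
        view.Destroy()
    return None
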
Example #4
    def test_invalid_find_by_name(self):
        entity_type = vim.dvs.DistributedVirtualPortgroup
        with contextlib.nested(
                patch.object(vim.DistributedVirtualSwitch,
                             '__init__',
                             return_value=None),
                patch.object(util, 'find_by')) as (mock_dvs, mock_find_by):
            name = 'dvs1'
            util.find_by.return_value = [vim.DistributedVirtualSwitch()]
            output = util.find_by_name(PrepFolder(), name, entity_type)
            self.assertIsNone(output)
            self.assertTrue(mock_dvs.called)
            self.assertTrue(mock_find_by.called)
Example #5
    def test_create_dvs_skeleton(self):
        with patch.object(vim.DistributedVirtualSwitch,
                          '__init__',
                          return_value=None) as (mock_constructor):
            self.dvs_conf = vim.DistributedVirtualSwitch()
            self.assertTrue(mock_constructor.called)

        with patch.object(util.VMwareUtils,
                          'wait_for_task',
                          return_value=self.dvs_conf) as mock_wait_for_task:
            self.dvs_config.create_dvs_skeleton(fake_inputs.session,
                                                NetworkFolder, 'dvs_name',
                                                'pnic_device', 1500)
            self.assertTrue(mock_wait_for_task.called)
Example #6
    def test_create_dvSwitch(self):
        with patch.object(vim.DistributedVirtualSwitch,
                          '__init__',
                          return_value=None) as (mock_constructor):
            self.dvs_conf = vim.DistributedVirtualSwitch()
            self.assertTrue(mock_constructor.called)

        with contextlib.nested(
                patch.object(DVSAdapter, '_create_host_config_spec'),
                patch.object(util.VMwareUtils,
                             'wait_for_task',
                             return_value=self.dvs_conf)) as (
                                 mock_create_host_config_spec,
                                 mock_wait_for_task):
            self.dvs_config.create_dvSwitch(fake_inputs.session, NetworkFolder,
                                            'hosts', 'dvs_name', 'pnic_device',
                                            1500)
            self.assertTrue(mock_create_host_config_spec.called)
            self.assertTrue(mock_wait_for_task.called)
Example #7
    def run(self,
            ids=None,
            names=None,
            datastores=None,
            datastore_clusters=None,
            resource_pools=None,
            vapps=None,
            hosts=None,
            folders=None,
            clusters=None,
            datacenters=None,
            virtual_switches=None,
            no_recursion=False,
            vsphere=None):
        # TODO: food for thought. PowerCLI accepts additional parameters
        # that are not present here for the following reasons:
        # <server> - we may need to bring it in if we decide to support
        #            connections to more than one vCenter.
        # <tag>    - Tags in vCenter are not the same as the tags you see in
        #            the Web Client, because those tags are stored only in the
        #            Inventory Service. PowerCLI can somehow access them, but
        #            there is no way to do so from the vSphere SDK.

        self.establish_connection(vsphere)

        props = ['config.guestFullName', 'name', 'runtime.powerState']
        moid_to_vm = {}

        # getting vms by their ids
        vms_from_vmids = []
        if ids:
            vms_from_vmids = [
                vim.VirtualMachine(moid, stub=self.si._stub) for moid in ids
            ]
            GetVMs.__add_vm_properties_to_map_from_vm_array(
                moid_to_vm, vms_from_vmids)

        # getting vms by their names
        vms_from_names = []
        if names:
            container = self.si_content.viewManager.CreateContainerView(
                self.si_content.rootFolder, [vim.VirtualMachine], True)
            for vm in container.view:
                if vm.name in names:
                    vms_from_names.append(vm)
            GetVMs.__add_vm_properties_to_map_from_vm_array(
                moid_to_vm, vms_from_names)

        # getting vms from datastore objects
        vms_from_datastores = []
        if datastores:
            vim_datastores = [
                vim.Datastore(moid, stub=self.si._stub) for moid in datastores
            ]
            for ds in vim_datastores:
                vms_from_datastores.extend(ds.vm)
            GetVMs.__add_vm_properties_to_map_from_vm_array(
                moid_to_vm, vms_from_datastores)

        # getting vms from datastore cluster objects
        vms_from_datastore_clusters = []
        if datastore_clusters:
            vim_datastore_clusters = [
                vim.StoragePod(moid, stub=self.si._stub)
                for moid in datastore_clusters
            ]
            for ds_cl in vim_datastore_clusters:
                for ds in ds_cl.childEntity:
                    vms_from_datastore_clusters.extend(ds.vm)
            GetVMs.__add_vm_properties_to_map_from_vm_array(
                moid_to_vm, vms_from_datastore_clusters)

        # getting vms from virtual switch objects
        vms_from_virtual_switches = []
        if virtual_switches:
            vim_virtual_switches = [
                vim.DistributedVirtualSwitch(moid, stub=self.si._stub)
                for moid in virtual_switches
            ]
            for vswitch in vim_virtual_switches:
                for pg in vswitch.portgroup:
                    vms_from_virtual_switches.extend(pg.vm)
            GetVMs.__add_vm_properties_to_map_from_vm_array(
                moid_to_vm, vms_from_virtual_switches)

        # getting vms from containers (location param)
        vms_from_containers = []
        containers = []

        if resource_pools:
            containers += [
                vim.ResourcePool(moid, stub=self.si._stub)
                for moid in resource_pools
            ]

        if vapps:
            containers += [
                vim.VirtualApp(moid, stub=self.si._stub) for moid in vapps
            ]

        if hosts:
            containers += [
                vim.HostSystem(moid, stub=self.si._stub) for moid in hosts
            ]

        if folders:
            containers += [
                vim.Folder(moid, stub=self.si._stub) for moid in folders
            ]

        if clusters:
            containers += [
                vim.ComputeResource(moid, stub=self.si._stub)
                for moid in clusters
            ]

        if datacenters:
            containers += [
                vim.Datacenter(moid, stub=self.si._stub)
                for moid in datacenters
            ]

        for cont in containers:
            objView = self.si_content.viewManager.CreateContainerView(
                cont, [vim.VirtualMachine], not no_recursion)
            tSpec = vim.PropertyCollector.TraversalSpec(
                name='tSpecName',
                path='view',
                skip=False,
                type=vim.view.ContainerView)
            pSpec = vim.PropertyCollector.PropertySpec(all=False,
                                                       pathSet=props,
                                                       type=vim.VirtualMachine)
            oSpec = vim.PropertyCollector.ObjectSpec(obj=objView,
                                                     selectSet=[tSpec],
                                                     skip=False)
            pfSpec = vim.PropertyCollector.FilterSpec(
                objectSet=[oSpec],
                propSet=[pSpec],
                reportMissingObjectsInResults=False)
            retOptions = vim.PropertyCollector.RetrieveOptions()
            retProps = self.si_content.propertyCollector.RetrievePropertiesEx(
                specSet=[pfSpec], options=retOptions)
            vms_from_containers += retProps.objects
            while retProps.token:
                retProps = self.si_content.propertyCollector.\
                    ContinueRetrievePropertiesEx(
                        token=retProps.token)
                vms_from_containers += retProps.objects
            objView.Destroy()

        for vm in vms_from_containers:
            if vm.obj._GetMoId() not in moid_to_vm:
                moid_to_vm[vm.obj._GetMoId()] = {
                    "moid": vm.obj._GetMoId(),
                    "name": vm.propSet[1].val,
                    "os": vm.propSet[0].val,
                    "runtime.powerState": vm.propSet[2].val
                }

        return moid_to_vm.values()