def add_raw_disk(vm, si, device_name, disk_mode, compatibility_mode):
    spec = vim.vm.ConfigSpec()
    # Find all disks on the VM and set unit_number to the next available slot
    unit_number = 0
    controller = None
    for dev in vm.config.hardware.device:
        if hasattr(dev.backing, 'fileName'):
            unit_number = int(dev.unitNumber) + 1
            # unit_number 7 is reserved for the SCSI controller
            if unit_number == 7:
                unit_number += 1
            if unit_number >= 16:
                print("we don't support this many disks")
                return
        if isinstance(dev, vim.vm.device.VirtualSCSIController):
            controller = dev
    if controller is None:
        print("No SCSI controller found on the VM")
        return
    disk_spec = vim.vm.device.VirtualDeviceSpec()
    disk_spec.fileOperation = "create"
    disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    disk_spec.device = vim.vm.device.VirtualDisk()
    rdm_info = vim.vm.device.VirtualDisk.RawDiskMappingVer1BackingInfo()
    disk_spec.device.backing = rdm_info
    disk_spec.device.backing.compatibilityMode = compatibility_mode
    disk_spec.device.backing.diskMode = disk_mode
    # The device_name will look something like
    #     /vmfs/devices/disks/naa.41412340757396001d7710df0fdd22a9
    disk_spec.device.backing.deviceName = device_name
    disk_spec.device.unitNumber = unit_number
    disk_spec.device.controllerKey = controller.key
    spec.deviceChange = [disk_spec]
    WaitForTasks([vm.ReconfigVM_Task(spec=spec)], si=si)
    print "Raw disk added to %s" % (vm.config.name)
Example #2
    def stop_vms(self, vms, force=True):
        """
        Stop VMs

        Args:
            vms (list): VM (vm) objects
            force (bool): True for VM ungraceful power off, False for
                graceful VM shutdown

        """
        if force:
            logger.info(f"Powering off VMs: {[vm.name for vm in vms]}")
            tasks = [vm.PowerOff() for vm in vms]
            WaitForTasks(tasks, si=self._si)

        else:
            logger.info(
                f"Gracefully shutting down VMs: {[vm.name for vm in vms]}")

            # Can't use WaitForTasks as it requires VMware Tools installed
            # on the guests to check for Shutdown task completion
            _ = [vm.ShutdownGuest() for vm in vms]

            def get_vms_power_status(vms):
                return [self.get_vm_power_status(vm) for vm in vms]

            for statuses in TimeoutSampler(600, 5, get_vms_power_status, vms):
                logger.info(
                    f"Waiting for VMs {[vm.name for vm in vms]} to power off. "
                    f"Current VMs statuses: {statuses}")
                if all(status == VM_POWERED_OFF for status in statuses):
                    logger.info("All VMs reached poweredOff off status")
                    break
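TimeoutSampler comes from the surrounding test framework and is not shown in this listing; a minimal sketch of the assumed contract (a generator that re-runs the probe function every sleep seconds and raises once timeout seconds have elapsed) might look like:

import time

class TimeoutSampler:
    """Assumed behavior: yield func(*args) every `sleep` seconds until
    `timeout` seconds have passed, then raise TimeoutError."""

    def __init__(self, timeout, sleep, func, *args):
        self.timeout = timeout
        self.sleep = sleep
        self.func = func
        self.args = args

    def __iter__(self):
        deadline = time.time() + self.timeout
        while time.time() < deadline:
            yield self.func(*self.args)
            time.sleep(self.sleep)
        raise TimeoutError("%s did not succeed within %s seconds"
                           % (self.func.__name__, self.timeout))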
Example #3
def revertSnapshot(vmlist, snap_name):

    tasks = []
    sys.stdout.write("Revert snapshot %s:\n" % snap_name)
    for vm in vmlist:
        sys.stdout.write("-- VM %s\n" % vm.name)
        snap = vm.snapshotByName(snap_name)
        # 'async' is a reserved word in Python 3, so pass it via **kwargs
        tasks.append(snap.revert(**{"async": True}))

    WaitForTasks(tasks, onProgressUpdate=taskProgress)
    sys.stdout.write("Snapshot revert completed successfully.\n")
Example #4
    def handleTask(self, tasks=None):
        if tasks is None:
            return False
        from pyVim.task import WaitForTasks
        try:
            WaitForTasks(tasks=tasks, si=self.client)
            return True
        except Exception as e:
            traceback.print_exc()
            print(str(e))
            return False
Example #5
    def destroy_vms(self, vms):
        """
        Destroys the VMs

        Args:
             vms (list): VM instance list

        """
        self.poweroff_vms(vms)
        logger.info(f"Destroying VM's: {[vm.name for vm in vms]}")
        tasks = [vm.Destroy_Task() for vm in vms]
        WaitForTasks(tasks, si=self._si)
Example #6
def main():
   """
   Simple command-line program for creating virtual machines on a
   system managed by hostd.
   """

   options = GetOptions()

   curSi = SmartConnect(host=options.host,
                        user=options.user,
                        pwd=options.password)

   # Create vms
   envBrowser = GetEnv()
   cfgOption = envBrowser.QueryConfigOption(None, None)
   cfgTarget = envBrowser.QueryConfigTarget(None)

   vmList = []
   tasks = []
   clock = StopWatch()
   for i in range(int(options.num_iterations)):
      vm1 = vm.CreateQuickDummy(options.vm_name + "_" + str(i),
                                options.num_scsi_disks, options.num_ide_disks,
                                datastoreName=options.datastore_name,
                                cfgOption=cfgOption, cfgTarget=cfgTarget)
      vmList.append(vm1)
      if options.opaquenetwork_id:
         config = Vim.Vm.ConfigSpec()
         config = vmconfig.AddOpaqueNetwork(config, cfgOption, opaqueNetworkId=options.opaquenetwork_id, \
                                            opaqueNetworkType=options.opaquenetwork_type)
         task = vm1.Reconfigure(config)
         tasks.append(task)
   WaitForTasks(tasks)
   clock.finish("Reconfigure VMs done")

   # Delete the vm as cleanup
   if not options.dont_delete:
      clock = StopWatch()
      WaitForTasks([curVm.Destroy() for curVm in vmList])
      clock.finish("Destroy VMs done")
Example #7
def deleteSnapshot(vmlist, snap_name, snap_list, cascade):

    msg = "Deleting snapshot %s:" % snap_name
    if cascade:
        msg = msg + " and all dependent snapshots."

    tasks = []
    sys.stdout.write(msg + '\n')

    for idx, snap in enumerate(snap_list):
        sys.stdout.write("-- VM %s\n" % vmlist[idx].name)
        # 'async' is a reserved word in Python 3, so pass it via **kwargs
        tasks.append(snap.remove(removeChildren=cascade, **{"async": True}))

    WaitForTasks(tasks, onProgressUpdate=taskProgress)
    sys.stdout.write("Snapshots deleted successfully.\n")
Example #8
    def restart_vms(self, vms, force=False):
        """
        Restart VMs by VM Reset or Guest reboot

        Args:
            vms (list): VM (vm) objects
            force (bool): True for Hard reboot (VM Reset),
                False for Soft reboot (Guest Reboot)

        """
        logger.info(f"Rebooting VMs: {[vm.name for vm in vms]}")
        if force:
            tasks = [vm.ResetVM_Task() for vm in vms]
            WaitForTasks(tasks, si=self._si)
        else:
            [vm.RebootGuest() for vm in vms]
Example #9
    def poweron_vms(self, vms):
        """
        Powers on the VM and wait for operation to complete

        Args:
            vms (list): VM instance list

        """
        to_poweron_vms = []
        for vm in vms:
            status = self.get_vm_power_status(vm)
            logger.info(f"power state of {vm.name}: {status}")
            if status == "poweredOff":
                to_poweron_vms.append(vm)
        logger.info(f"Powering on VMs: {[vm.name for vm in to_poweron_vms]}")
        tasks = [vm.PowerOn() for vm in to_poweron_vms]
        WaitForTasks(tasks, si=self._si)
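get_vm_power_status is referenced but not shown; a plausible one-liner, assuming it simply returns vSphere's reported power state string ('poweredOn', 'poweredOff', or 'suspended'):

    def get_vm_power_status(self, vm):
        """Return the VM's current power state as reported by vSphere."""
        return vm.summary.runtime.powerState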
Example #10
    def start_vms(self, vms, wait=True):
        """
        Start VMs

        Args:
            vms (list): VM (vm) objects
            wait (bool): Wait for VMs to start

        """
        logger.info(f"Powering on VMs: {[vm.name for vm in vms]}")
        tasks = [vm.PowerOn() for vm in vms]
        WaitForTasks(tasks, si=self._si)

        if wait:
            for ips in TimeoutSampler(240, 3, self.get_vms_ips, vms):
                logger.info(
                    f"Waiting for VMs {[vm.name for vm in vms]} to power on "
                    f"based on network connectivity. Current VMs IPs: {ips}")
                if not (None in ips or "<unset>" in ips):
                    break
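get_vms_ips is likewise not part of this listing; a plausible sketch, assuming it reads each guest's Tools-reported primary IP (vm.guest.ipAddress stays None until the guest agent reports in):

    def get_vms_ips(self, vms):
        """Return the Tools-reported primary IP of each VM (None if unset)."""
        return [vm.guest.ipAddress for vm in vms]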
Example #11
def wait(tasks):
    tasks = list(tasks)
    if tasks:
        log.info("waiting on %d tasks..." % len(tasks))
        WaitForTasks(tasks, si=wait._service_instance)
    log.info("done")
Example #12
            try:
                # 'async' is a reserved word in Python 3, so pass it via **kwargs
                tasks.append(vm.power(mode=mode, **{"async": True}))
            except Exception as e:
                sys.stderr.write(
                    "Unable to perform %s on VM %s. Check that VMware Tools are installed"
                    % (oper, vm.name))
        else:
            sys.stdout.write("-- VM %s %s\n" % (vm.name, not_oper))

        # When powering on, pause to give the guests time to boot
        if mode == 'on':
            time.sleep(80)

    # With a timeout, the task may already have finished
    try:
        WaitForTasks(tasks, onProgressUpdate=taskProgress)
    except Exception as e:
        # sys.stderr.write(str(e))
        pass

    sys.stdout.write("Operation completed.\n")


def createSnapshot(vmlist, snap_name, descrTpl=None, status='leave'):

    if descrTpl is None:
        descrTpl = "Snapshot of %s\n" % datetime.now().strftime(
            "%Y-%m-%d %H:%M")

    for vm in vmlist:
        sys.stdout.write("-- VM %s\n" % vm.name)
Example #13
def main():
    module = AnsibleModule(
        argument_spec=dict(
            ovftool_path=dict(required=True, type='str'),
            vcenter_host=dict(required=True, type='str'),
            vcenter_user=dict(required=True, type='str'),
            vcenter_password=dict(required=True, type='str', no_log=True),
            ssl_verify=dict(required=False, type='bool', default=False),
            state=dict(required=False, type='str', default='present'),
            con_datacenter=dict(required=False, type='str'),
            con_cluster=dict(required=False, type='str'),
            con_datastore=dict(required=False, type='str'),
            con_mgmt_network=dict(required=True, type='str'),
            con_disk_mode=dict(required=False, type='str', default='thin'),
            con_ova_path=dict(required=True, type='str'),
            con_vm_name=dict(required=True, type='str'),
            con_power_on=dict(required=False, type='bool', default=True),
            con_vcenter_folder=dict(required=False, type='str'),
            con_mgmt_ip=dict(required=False, type='str'),
            con_mgmt_mask=dict(required=False, type='str'),
            con_default_gw=dict(required=False, type='str'),
            con_sysadmin_public_key=dict(required=False, type='str'),
            con_number_of_cpus=dict(required=False, type='int'),
            con_cpu_reserved=dict(required=False, type='int'),
            con_memory=dict(required=False, type='int'),
            con_memory_reserved=dict(required=False, type='int'),
            con_disk_size=dict(required=False, type='int'),
            con_ovf_properties=dict(required=False, type='dict'),
            # Max time to wait for controller up state
            con_wait_time=dict(required=False, type='int', default=3600),
            # Retry after every round_wait time to check for controller state.
            round_wait=dict(required=False, type='int', default=10),
        ),
        supports_check_mode=True,
    )
    try:
        si = SmartConnectNoSSL(host=module.params['vcenter_host'],
                               user=module.params['vcenter_user'],
                               pwd=module.params['vcenter_password'])
        atexit.register(Disconnect, si)
    except vim.fault.InvalidLogin:
        return module.fail_json(
            msg='exception while connecting to vCenter, login failure, '
                'check username and password')
    except requests.exceptions.ConnectionError:
        return module.fail_json(
            msg='exception while connecting to vCenter, check hostname, '
                'FQDN or IP')
    check_mode = module.check_mode
    if module.params['state'] == 'absent':
        vm = get_vm_by_name(si, module.params['con_vm_name'])

        if vm is None:
            return module.exit_json(msg='A VM with the name %s was not found' % (
                module.params['con_vm_name']))

        if check_mode:
            return module.exit_json(msg='A VM with the name %s was found' % (
                module.params['con_vm_name']), changed=True)

        if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
            task = vm.PowerOffVM_Task()
            wait_for_tasks(si, [task])

        task = vm.Destroy_Task()
        wait_for_tasks(si, [task])

        return module.exit_json(msg='A VM with the name %s deleted successfully'
                                    % (module.params['con_vm_name']))

    if module.params.get('con_datacenter', None):
        dc = get_dc(si, module.params['con_datacenter'])
    else:
        dc = si.content.rootFolder.childEntity[0]

    if module.params.get('con_cluster', None):
        cl = get_cluster(si, dc, module.params['con_cluster'])
    else:
        cl = get_first_cluster(si, dc)

    if module.params.get('con_datastore', None):
        ds = get_ds(cl, module.params['con_datastore'])
    else:
        ds = get_largest_free_ds(cl)

    if is_vm_exist(si, cl, module.params['con_vm_name']):
        vm = get_vm_by_name(si, module.params['con_vm_name'])
        vm_path = compile_folder_path_for_object(vm)
        folder = get_folder_by_path(si, dc, module.params['con_vcenter_folder'])
        folder_path = compile_folder_path_for_object(folder)
        changed = False
        if vm_path != folder_path:
            # migrate vm to new folder
            if not check_mode:
                folder.MoveInto([vm])
            changed = True
        if (not module.params['con_power_on']) and \
                vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
            if not check_mode:
                task = vm.PowerOffVM_Task()
                wait_for_tasks(si, [task])
            changed = True
        if module.params['con_power_on'] and vm.runtime.powerState == \
                vim.VirtualMachinePowerState.poweredOff:
            if not check_mode:
                task = vm.PowerOnVM_Task()
                wait_for_tasks(si, [task])
            changed = True

        if module.params.get('con_datastore', None):
            ds_names = []
            for datastore in vm.datastore:
                ds_names.append(datastore.name)
            if ds.name not in ds_names:
                module.fail_json(msg='VM datastore cannot be modified')

        if module.params.get('con_mgmt_ip', None):
            ip_addresses = get_vm_ips(vm)
            if (ip_addresses and
                    module.params['con_mgmt_ip'] not in ip_addresses):
                module.fail_json(msg='VM static IP address cannot be modified')

        if is_reconfigure_vm(module):
            if not check_mode:
                vmSummary = vm.summary.config
                cspec = vim.vm.ConfigSpec()

                disk = None
                if is_resize_disk(module):
                    for device in vm.config.hardware.device:
                        if isinstance(device, vim.vm.device.VirtualDisk):
                            disk = device
                            break

                if vmSummary.numCpu != module.params['con_number_of_cpus'] or \
                        vmSummary.memorySizeMB != module.params['con_memory'] or \
                        vmSummary.memoryReservation != module.params['con_memory_reserved'] or \
                        vmSummary.cpuReservation != module.params['con_cpu_reserved'] or \
                        (disk is not None and disk.capacityInKB != module.params['con_disk_size'] * 1024 * 1024):
                    if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
                        task = vm.PowerOffVM_Task()
                        wait_for_tasks(si, [task])
                    if is_update_cpu(module):
                        if vmSummary.numCpu != module.params['con_number_of_cpus']:
                            cspec.numCPUs = module.params['con_number_of_cpus']
                            changed = True
                    if is_update_memory(module):
                        if vmSummary.memorySizeMB != module.params['con_memory']:
                            cspec.memoryMB = module.params['con_memory']
                            changed = True
                    if is_reserve_memory(module):
                        if vmSummary.memoryReservation != module.params['con_memory_reserved']:
                            cspec.memoryAllocation = vim.ResourceAllocationInfo(
                                reservation=module.params['con_memory_reserved'])
                            changed = True
                    if is_reserve_cpu(module):
                        if vmSummary.cpuReservation != module.params['con_cpu_reserved']:
                            cspec.cpuAllocation = vim.ResourceAllocationInfo(
                                reservation=module.params['con_cpu_reserved'])
                            changed = True
                    if is_resize_disk(module):
                        if disk.capacityInKB != module.params['con_disk_size'] * 1024 * 1024:
                            disk.capacityInKB = module.params['con_disk_size'] * 1024 * 1024
                            devSpec = vim.vm.device.VirtualDeviceSpec(
                                device=disk, operation="edit")
                            cspec.deviceChange.append(devSpec)
                            changed = True
                    WaitForTasks([vm.Reconfigure(cspec)], si=si)

                    if module.params['con_power_on']:
                        task = vm.PowerOnVM_Task()
                        WaitForTasks([task], si=si)

        if changed and not check_mode:
            module.exit_json(msg='A VM with the name %s updated successfully' %
                                 (module.params['con_vm_name']), changed=True)
        if changed and check_mode:
            module.exit_json(changed=True)
        else:
            module.exit_json(
                msg='A VM with the name %s is already present' % (
                    module.params['con_vm_name']))

    if module.params['con_ova_path'].startswith('http'):
        if requests.head(module.params['con_ova_path']).status_code != 200:
            module.fail_json(msg='Controller OVA not found or not readable from the specified URL path')
    else:
        if (not os.path.isfile(module.params['con_ova_path']) or
                not os.access(module.params['con_ova_path'], os.R_OK)):
            module.fail_json(msg='Controller OVA not found or not readable')

    ovftool_exec = '%s/ovftool' % module.params['ovftool_path']
    ova_file = module.params['con_ova_path']
    quoted_vcenter_user = quote(module.params['vcenter_user'])
    quoted_vcenter_pass = quote(module.params['vcenter_password'])
    vi_string = 'vi://%s:%s@%s' % (
        quoted_vcenter_user, quoted_vcenter_pass,
        module.params['vcenter_host'])
    vi_string += '/%s%s/%s' % (dc.name, compile_folder_path_for_object(cl),
                               cl.name)
    command_tokens = [ovftool_exec]

    if module.params['con_power_on'] and not is_reconfigure_vm(module):
        command_tokens.append('--powerOn')
    if not module.params['ssl_verify']:
        command_tokens.append('--noSSLVerify')
    if check_mode:
        command_tokens.append('--verifyOnly')
    command_tokens.extend([
        '--acceptAllEulas',
        '--skipManifestCheck',
        '--allowExtraConfig',
        '--diskMode=%s' % module.params['con_disk_mode'],
        '--datastore=%s' % ds.name,
        '--name=%s' % module.params['con_vm_name']
    ])

    if ('ovf_network_name' in module.params.keys() and
            module.params['ovf_network_name'] is not None and
            len(module.params['ovf_network_name']) > 0):
        try:
            d = json.loads(
                module.params['ovf_network_name'].replace("'", "\""))
            for key, network_item in d.items():
                command_tokens.append('--net:%s=%s' % (key, network_item))
        except ValueError:
            command_tokens.append('--net:%s=%s' % (
                module.params['ovf_network_name'],
                module.params['con_mgmt_network']))
    else:
        command_tokens.append(
            '--network=%s' % module.params['con_mgmt_network'])

    if module.params.get('con_mgmt_ip', None):
        command_tokens.append('--prop:%s=%s' % (
            'avi.mgmt-ip.CONTROLLER', module.params['con_mgmt_ip']))

    if module.params.get('con_mgmt_mask', None):
        command_tokens.append('--prop:%s=%s' % (
            'avi.mgmt-mask.CONTROLLER', module.params['con_mgmt_mask']))

    if module.params.get('con_default_gw', None):
        command_tokens.append('--prop:%s=%s' % (
            'avi.default-gw.CONTROLLER', module.params['con_default_gw']))

    if module.params.get('con_sysadmin_public_key', None):
        command_tokens.append('--prop:%s=%s' % (
            'avi.sysadmin-public-key.CONTROLLER',
            get_sysadmin_key(module.params['con_sysadmin_public_key'])))

    if module.params.get('con_ovf_properties', None):
        for key in module.params['con_ovf_properties'].keys():
            command_tokens.append(
                '--prop:%s=%s' % (
                    key, module.params['con_ovf_properties'][key]))

    if ('con_vcenter_folder' in module.params and
            module.params['con_vcenter_folder'] is not None):
        command_tokens.append(
            '--vmFolder=%s' % module.params['con_vcenter_folder'])

    command_tokens.extend([ova_file, vi_string])
    ova_tool_result = module.run_command(command_tokens)

    if ova_tool_result[0] != 0:
        return module.fail_json(
            msg='Failed to deploy OVA, error message from ovftool is: %s '
                'for command %s' % (ova_tool_result[1], command_tokens))


    vm = None
    if is_reconfigure_vm(module):
        vm = get_vm_by_name(si, module.params['con_vm_name'])
        cspec = vim.vm.ConfigSpec()
        if is_update_cpu(module):
            cspec.numCPUs = module.params['con_number_of_cpus']
        if is_update_memory(module):
            cspec.memoryMB = module.params['con_memory']
        if is_reserve_memory(module):
            cspec.memoryAllocation = vim.ResourceAllocationInfo(
                reservation=module.params['con_memory_reserved'])
        if is_reserve_cpu(module):
            cspec.cpuAllocation = vim.ResourceAllocationInfo(
                reservation=module.params['con_cpu_reserved'])
        if is_resize_disk(module):
            disk = None
            for device in vm.config.hardware.device:
                if isinstance(device, vim.vm.device.VirtualDisk):
                    disk = device
                    break
            if disk is not None:
                disk.capacityInKB = module.params['con_disk_size'] * 1024 * 1024
                devSpec = vim.vm.device.VirtualDeviceSpec(
                    device=disk, operation="edit")
                cspec.deviceChange.append(devSpec)
        WaitForTasks([vm.Reconfigure(cspec)], si=si)

        task = vm.PowerOnVM_Task()
        WaitForTasks([task], si=si)

    if not vm:
        vm = get_vm_by_name(si, module.params['con_vm_name'])

    if not module.params['con_mgmt_ip']:
        interval = 15
        timeout = 300
        controller_ip = None
        while timeout > 0:
            controller_ip = get_vm_ip_by_network(vm, module.params['con_mgmt_network'])
            if controller_ip:
                controller_ip = controller_ip[0]
                break
            time.sleep(interval)
            timeout -= interval
    else:
        controller_ip = module.params['con_mgmt_ip']

    # Wait for the controller to come up within the given con_wait_time
    if controller_ip:
        controller_up = controller_wait(controller_ip, module.params['round_wait'],
                                        module.params['con_wait_time'])
        if not controller_up:
            return module.fail_json(
                msg='Something is wrong with the controller: it did not reach the up state.')
    return module.exit_json(changed=True, ova_tool_result=ova_tool_result)
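controller_wait is not included in this listing; a hypothetical sketch of the polling loop it implies, probing the controller over HTTPS every round_wait seconds until it answers or wait_time expires (the URL and the 200-status success criterion are assumptions):

import time
import requests

def controller_wait(controller_ip, round_wait=10, wait_time=3600):
    """Poll the controller until it responds over HTTPS or the wait expires."""
    deadline = time.time() + wait_time
    while time.time() < deadline:
        try:
            # Assumed readiness probe: any successful HTTPS response counts.
            r = requests.get('https://%s/' % controller_ip,
                             verify=False, timeout=10)
            if r.status_code == 200:
                return True
        except requests.exceptions.RequestException:
            pass
        time.sleep(round_wait)
    return False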
Example #14
def cleanup_items(host, username, password, interval, iterations, dry_run,
                  power_off, unregister, delete, service_instance, content, dc,
                  view_ref):
    # openstack connection
    conn = connection.Connection(
        auth_url=os.getenv('OS_AUTH_URL'),
        project_name=os.getenv('OS_PROJECT_NAME'),
        project_domain_name=os.getenv('OS_PROJECT_DOMAIN_NAME'),
        username=os.getenv('OS_USERNAME'),
        user_domain_name=os.getenv('OS_USER_DOMAIN_NAME'),
        password=os.getenv('OS_PASSWORD'))

    known = dict()

    # get all servers, volumes, snapshots and images from openstack to compare the resources we find on the vcenter against
    for server in conn.compute.servers(details=False, all_tenants=1):
        known[server.id] = server

    for volume in conn.block_store.volumes(details=False, all_tenants=1):
        known[volume.id] = volume

    for snapshot in conn.block_store.snapshots(details=False, all_tenants=1):
        known[snapshot.id] = snapshot

    for image in conn.image.images(details=False, all_tenants=1):
        known[image.id] = image

    # the properties we want to collect - some of them are not yet used, but will be
    # used at a later development stage of this script to validate the volume attachments with cinder and nova
    vm_properties = [
        "config.hardware.device", "config.name", "config.uuid",
        "config.instanceUuid"
    ]

    # collect the properties for all vms
    data = collect_properties(service_instance, view_ref, vim.VirtualMachine,
                              vm_properties, True)

    # create a dict of volumes mounted to vms to compare the volumes we plan to delete against
    # to find possible ghost volumes
    vcenter_mounted = dict()
    # iterate over the list of vms
    for k in data:
        # get the config.hardware.device property out of the data dict and iterate over its elements
        # this check is required, as in one building block we otherwise got a
        # KeyError - looks like a vm without that property
        if k.get('config.hardware.device'):
            for j in k.get('config.hardware.device'):
                # we are only interested in disks - TODO: maybe the range needs to be adjusted
                if 2001 <= j.key <= 2010:
                    vcenter_mounted[j.backing.uuid] = k['config.instanceUuid']

    # do the check from the other end: see for which vms or volumes in the vcenter we do not have any openstack info
    missing = dict()
    # iterate through all datastores in the vcenter
    for ds in dc.datastore:
        # only consider eph and vvol datastores
        if ds.name.lower().startswith('eph') or ds.name.lower().startswith(
                'vvol'):
            log.info("- datacenter / datastore: %s / %s", dc.name, ds.name)

            # get all files and folders recursively from the datastore
            task = ds.browser.SearchDatastoreSubFolders_Task(
                datastorePath="[%s] /" % ds.name,
                searchSpec=vim.HostDatastoreBrowserSearchSpec(
                    matchPattern="*"))
            # matchPattern = ["*.vmx", "*.vmdk", "*.vmx.renamed_by_vcenter_nanny", "*,vmdk.renamed_by_vcenter_nanny"]))

            try:
                # wait for the async task to finish and then find vms and vmdks with openstack uuids in the name and
                # compare those uuids to all the uuids we know from openstack
                WaitForTask(task, si=service_instance)
                for uuid, location in _uuids(task):
                    if uuid not in known:
                        # multiple locations are possible for one uuid, thus we need to put the locations into a list
                        if uuid in missing:
                            missing[uuid].append(location)
                        else:
                            missing[uuid] = [location]
            except (vim.fault.InaccessibleDatastore,
                    vim.fault.FileNotFound) as e:
                log.warn(
                    "- something went wrong trying to access this datastore: %s",
                    e.msg)

    init_seen_dict(vms_seen)
    init_seen_dict(files_seen)

    # needed to mark folder paths and full paths we already dealt with
    vmxmarked = {}
    vmdkmarked = {}
    vvolmarked = {}
    # tasks batched for the periodic WaitForTasks calls further down
    tasks = []

    # iterate over all entities we have on the vcenter which have no relation to openstack anymore
    for item, locationlist in six.iteritems(missing):
        # none of the uuids we do not know anything about on openstack side should be mounted anywhere in vcenter
        # so we should neither see it as vmx (shadow vm) or datastore file
        if vcenter_mounted.get(item):
            log.warn(
                "- PLEASE CHECK MANUALLY: possibly mounted ghost volume - %s mounted on %s",
                item, vcenter_mounted[item])
        else:
            for location in locationlist:
                # foldername on datastore
                path = "{folderpath}".format(**location)
                # filename on datastore
                filename = "{filepath}".format(**location)
                fullpath = path + filename
                # in the case of a vmx file we check if the vcenter still knows about it
                if location["filepath"].lower().endswith(".vmx"):
                    vmx_path = "{folderpath}{filepath}".format(**location)
                    vm = content.searchIndex.FindByDatastorePath(path=vmx_path,
                                                                 datacenter=dc)
                    # there is a vm for that file path we check what to do with it
                    if vm:
                        power_state = vm.runtime.powerState
                        # is the vm located on vvol storage - needed later to check if its a volume shadow vm
                        if vm.config.files.vmPathName.lower().startswith(
                                '[vvol'):
                            is_vvol = True
                        else:
                            is_vvol = False
                        # check if the vm has a nic configured (device key
                        # 4000 is the first virtual nic)
                        has_no_nic = not any(
                            j.key == 4000 for j in vm.config.hardware.device)
                        # we store the openstack project id in the annotations of the vm
                        annotation = vm.config.annotation or ''
                        items = dict([
                            line.split(':', 1)
                            for line in annotation.splitlines()
                        ])
                        # we search for either vms with a project_id in the annotation (i.e. real vms) or
                        # for powered off vms with 128mb, one cpu and no nic which are stored on vvol (i.e. shadow vm for a volume)
                        if 'projectid' in items or (
                                vm.config.hardware.memoryMB == 128
                                and vm.config.hardware.numCPU == 1
                                and power_state == 'poweredOff' and is_vvol
                                and has_no_nic):
                            # if still powered on the planned action is to suspend it
                            if power_state == 'poweredOn':
                                # mark that path as already dealt with, so that we ignore it when we see it again
                                # with vmdks later maybe
                                vmxmarked[path] = True
                                now_or_later(vm.config.instanceUuid,
                                             vms_to_be_suspended, vms_seen,
                                             "suspend of vm", iterations,
                                             dry_run, power_off, unregister,
                                             delete, vm, dc, content, filename)
                            # if already suspended the planned action is to power off the vm
                            elif power_state == 'suspended':
                                vmxmarked[path] = True
                                now_or_later(vm.config.instanceUuid,
                                             vms_to_be_poweredoff, vms_seen,
                                             "power off of vm", iterations,
                                             dry_run, power_off, unregister,
                                             delete, vm, dc, content, filename)
                            # if already powered off the planned action is to unregister the vm
                            else:
                                vmxmarked[path] = True
                                now_or_later(vm.config.instanceUuid,
                                             vms_to_be_unregistered, vms_seen,
                                             "unregister of vm", iterations,
                                             dry_run, power_off, unregister,
                                             delete, vm, dc, content, filename)
                        # this should not happen
                        elif (vm.config.hardware.memoryMB == 128
                              and vm.config.hardware.numCPU == 1
                              and power_state == 'poweredOff' and not is_vvol
                              and has_no_nic):
                            log.warn(
                                "- PLEASE CHECK MANUALLY: possible orphan shadow vm on eph storage - %s",
                                path)
                        # this neither
                        else:
                            log.warn(
                                "- PLEASE CHECK MANUALLY: this vm seems to be neither a former openstack vm nor an orphan shadow vm - %s",
                                path)

                    # there is no vm anymore for the file path - planned action is to delete the file
                    elif not vmxmarked.get(path, False):
                        vmxmarked[path] = True
                        if path.lower().startswith("[eph"):
                            if path.endswith(".renamed_by_vcenter_nanny/"):
                                # if already renamed finally delete
                                now_or_later(str(path), files_to_be_deleted,
                                             files_seen, "delete of ds path",
                                             iterations, dry_run, power_off,
                                             unregister, delete, vm, dc,
                                             content, filename)
                            else:
                                # first rename the file before deleting them later
                                now_or_later(str(path), files_to_be_renamed,
                                             files_seen, "rename of ds path",
                                             iterations, dry_run, power_off,
                                             unregister, delete, vm, dc,
                                             content, filename)
                        else:
                            # vvol storage
                            # for vvols we have to mark based on the full path, as we work on them file by file
                            # and not on a directory base
                            vvolmarked[fullpath] = True
                            if fullpath.endswith(".renamed_by_vcenter_nanny/"):
                                now_or_later(str(fullpath),
                                             files_to_be_deleted, files_seen,
                                             "delete of ds path", iterations,
                                             dry_run, power_off, unregister,
                                             delete, vm, dc, content, filename)
                            else:
                                now_or_later(str(fullpath),
                                             files_to_be_renamed, files_seen,
                                             "rename of ds path", iterations,
                                             dry_run, power_off, unregister,
                                             delete, vm, dc, content, filename)

                    if len(tasks) % 8 == 0:
                        WaitForTasks(tasks[-8:], si=service_instance)

                # in case of a vmdk or vmx.renamed_by_vcenter_nanny
                # eph storage case - we work on directories
                elif path.lower().startswith("[eph") and not vmxmarked.get(
                        path, False) and not vmdkmarked.get(path, False):
                    # mark to not redo it for other vmdks as we are working on the dir at once
                    vmdkmarked[path] = True
                    if path.endswith(".renamed_by_vcenter_nanny/"):
                        now_or_later(str(path), files_to_be_deleted,
                                     files_seen, "delete of ds path",
                                     iterations, dry_run, power_off,
                                     unregister, delete, None, dc, content,
                                     filename)
                    else:
                        now_or_later(str(path), files_to_be_renamed,
                                     files_seen, "rename of ds path",
                                     iterations, dry_run, power_off,
                                     unregister, delete, None, dc, content,
                                     filename)
                # vvol storage case - we work file by file as we can't rename or delete the vvol folders
                elif path.lower().startswith("[vvol") and not vvolmarked.get(
                        fullpath, False):
                    # vvol storage
                    if fullpath.endswith(".renamed_by_vcenter_nanny"):
                        now_or_later(str(fullpath), files_to_be_deleted,
                                     files_seen, "delete of ds path",
                                     iterations, dry_run, power_off,
                                     unregister, delete, None, dc, content,
                                     filename)
                    else:
                        now_or_later(str(fullpath), files_to_be_renamed,
                                     files_seen, "rename of ds path",
                                     iterations, dry_run, power_off,
                                     unregister, delete, None, dc, content,
                                     filename)

                if len(tasks) % 8 == 0:
                    WaitForTasks(tasks[-8:], si=service_instance)

    # reset the dict of vms or files we plan to do something with for all machines we did not see or which disappeared
    reset_to_be_dict(vms_to_be_suspended, vms_seen)
    reset_to_be_dict(vms_to_be_poweredoff, vms_seen)
    reset_to_be_dict(vms_to_be_unregistered, vms_seen)
    reset_to_be_dict(files_to_be_deleted, files_seen)
    reset_to_be_dict(files_to_be_renamed, files_seen)

    # wait the interval time
    time.sleep(60 * int(interval))
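The _uuids helper used above is not part of this listing; a plausible sketch, assuming it scans the datastore-browser search results for OpenStack-style UUIDs in file names and yields (uuid, location) pairs matching the folderpath/filepath keys used later:

import re

UUID_RE = re.compile(
    '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')

def _uuids(task):
    """Yield (uuid, location) for every uuid-named file the search found."""
    for searchresult in task.info.result:
        for f in searchresult.file:
            match = UUID_RE.search(f.path)
            if match:
                yield match.group(0), {'folderpath': searchresult.folderPath,
                                       'filepath': f.path}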
Example #15
        entity = entity_stack.pop()
        if entity.name == name:
            vms.append(entity)
            del entity_stack[0:len(entity_stack)]
        elif hasattr(entity, 'childEntity'):
            entity_stack.extend(entity.childEntity)
        elif isinstance(entity, vim.Datacenter):
            entity_stack.append(entity.vmFolder)


def get_snapshots_by_name_recursively(snapshots, snapname):
    snap_obj = []
    for snapshot in snapshots:
        if snapshot.name == snapname:
            snap_obj.append(snapshot)
        else:
            snap_obj = snap_obj + get_snapshots_by_name_recursively(
                snapshot.childSnapshotList, snapname)
    return snap_obj


for vm in vms:
    if vm.name in vm_names:
        snap_obj = get_snapshots_by_name_recursively(
            vm.snapshot.rootSnapshotList, snapshot_name)
        print("Reverting snapshot:", snap_obj[0].name, "for vm:", vm.name)
        task = [snap_obj[0].snapshot.RevertToSnapshot_Task()]
        WaitForTasks(task, si=connection)

Disconnect(connection)