Example #1
def add_disk(vm, disk_size):
    """
    :param vm: Virtual Machine Object
    :param disk_size: disk size, in GB
    """
    from pyVmomi import vim
    from pyVim import task
    spec = vim.vm.ConfigSpec()
    # get all disks on the VM; set unit_number to the next available slot
    controller = None
    unit_number = 0
    for dev in vm.config.hardware.device:
        if hasattr(dev.backing, 'fileName'):
            unit_number = int(dev.unitNumber) + 1
            # unit_number 7 reserved for scsi controller
            if unit_number == 7:
                unit_number += 1
            if unit_number >= 16:
                print "we don't support this many disks"
                return
        if isinstance(dev, vim.vm.device.VirtualSCSIController):
            controller = dev
    if controller is None:
        print("No SCSI controller found on the VM; cannot add a disk")
        return
    # add disk here
    dev_changes = []
    new_disk_kb = int(disk_size) * 1024 * 1024
    disk_spec = vim.vm.device.VirtualDeviceSpec()
    disk_spec.fileOperation = "create"
    disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    disk_spec.device = vim.vm.device.VirtualDisk()
    disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
    disk_spec.device.backing.thinProvisioned = True
    disk_spec.device.backing.diskMode = 'persistent'
    disk_spec.device.unitNumber = unit_number
    disk_spec.device.capacityInKB = new_disk_kb
    disk_spec.device.controllerKey = controller.key
    dev_changes.append(disk_spec)
    spec.deviceChange = dev_changes
    TASK = vm.ReconfigVM_Task(spec=spec)
    task.WaitForTask(TASK)
    print "%sGB disk added to %s" % (disk_size, vm.config.name)
Example #2
 def check_relocate(self, vm_mor, relocate_spec, test_type=None):
     """
     Relocation placement check.
     vm_mor: vim.VirtualMachine
     relocate_spec: vim.vm.RelocateSpec
     test_type: The set of tests to run. If this argument is not set, all
                tests will be run. possible values: hostTests,
                resourcePoolTests, datastoreTests, sourceTests, networkTests
     """
     check_result = CheckResult()
     try:
         task_mor = self.vmProvisioningChecker.CheckRelocate(
             vm_mor, relocate_spec, test_type)
         task_state = task.WaitForTask(task_mor)
         if task_state == 'success':
             for result in task_mor.info.result:
                 if result.warning:
                     check_result.status = 'warning'
                     check_result.msg = result.warning[0].msg
                 elif result.error:
                     check_result.status = 'error'
                     check_result.msg = result.error[0].msg
                 else:
                     check_result.status = 'success'
                     check_result.msg = "兼容性检查成功"
         else:
             check_result.status = 'error'
             check_result.msg = "兼容性检查失败"
     except vim.fault.InvalidState as ex:
         check_result.status = 'error'
         check_result.msg = "InvalidState: %s" % str(ex)
     except vmodl.fault.NotSupported as ex:
         check_result.status = 'error'
         check_result.msg = "NotSupported: %s" % str(ex)
     except vmodl.fault.InvalidArgument as ex:
         check_result.status = 'error'
         check_result.msg = "InvalidArgument: %s" % str(ex)
     return check_result
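
CheckResult is not defined in this snippet; a minimal stand-in matching the attributes used above, plus a sketch of building the relocate_spec argument (the target_* objects are assumed to be looked up elsewhere in the caller's inventory):

from pyVmomi import vim

class CheckResult(object):
    """Minimal result holder with the attributes used above."""
    def __init__(self, status='unknown', msg=''):
        self.status = status
        self.msg = msg

def build_relocate_spec(target_host, target_datastore, target_pool):
    # target_host / target_datastore / target_pool are vim.HostSystem,
    # vim.Datastore and vim.ResourcePool managed objects, respectively.
    spec = vim.vm.RelocateSpec()
    spec.host = target_host
    spec.datastore = target_datastore
    spec.pool = target_pool
    return spec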
Example #3
    def run(self):
        vmsProcessed = 0
        startIndex = self.startIndex
        while (vmsProcessed < self.numVms):
            vmsToProcess = self.batchSize
            if (vmsProcessed + vmsToProcess > self.numVms):
                vmsToProcess = self.numVms - vmsProcessed

            try:
                startTime = int(time.time() * 1000)

                t1 = self.dc.PowerOnVm(self.vms[startIndex:startIndex +
                                                vmsToProcess])
                task.WaitForTask(t1)
                result = t1.info.result
                tasks = []
                for t in result.attempted:
                    tasks.append(t.task)
                if tasks:
                    task.WaitForTasks(tasks, raiseOnError=False)

                endTime = int(time.time() * 1000)
                print("PowerOn for vms(%d-%d) took %d milliseconds" %
                      (startIndex, startIndex + vmsToProcess - 1,
                       endTime - startTime))
                self.result.append(
                    (startIndex, vmsToProcess, endTime - startTime))

            except Exception as e:
                print(
                    "PowerOn failed for vms (%d-%d)test due to exception: %s" %
                    (startIndex, startIndex + vmsToProcess - 1, e))
                self.result.append((startIndex, vmsToProcess, -1))

            vmsProcessed += vmsToProcess
            startIndex += vmsToProcess
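
The slicing arithmetic above clips the final batch to the remaining VM count; a standalone sketch of the same loop with arbitrary numbers:

num_vms, batch_size, start = 10, 4, 0
while start < num_vms:
    count = min(batch_size, num_vms - start)
    print("would power on vms[%d:%d]" % (start, start + count))
    start += count
# -> vms[0:4], vms[4:8], vms[8:10]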
Example #4
def Test2(si):  # verify unsetting masks
    success = 1
    keyValue = {"cpuid.vendor": "Max:2", "cpuid.family": "Max:5"}

    evcTask = host.ApplyEvcMode(EVCMode(None, []), False)
    task.WaitForTask(evcTask)
    if useVc:
        time.sleep(15)  # any way to wait for host sync?

    hostCaps = [x for x in host.config.featureCapability
                if x.key in keyValue]
    maskedCaps = [x for x in host.config.maskedFeatureCapability
                  if x.key in keyValue]

    for maskCap in maskedCaps:
        for hostCap in hostCaps:
            if hostCap.key == maskCap.key and hostCap.value != maskCap.value:
                success = 0
                Log("Host Capability %s is still masked (Value: %s, Expected %s)"
                    % (hostCap.key, maskCap.value, hostCap.value))

    if not success:
        raise Exception("Test2 failed")
    Log("Test2 PASSED")
Example #5
def CreateVM(vmFolder, config, resPool, host):
    createTask = vmFolder.CreateVm(config, resPool, host)
    task.WaitForTask(createTask)
    return createTask.info.result
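
A sketch of a minimal config argument for CreateVM; the guest type, sizing, and datastore name are placeholder assumptions:

from pyVmomi import vim

def make_minimal_config(name, datastore_name):
    # The smallest ConfigSpec CreateVm accepts: a VM name plus a vmPathName
    # telling vCenter which datastore receives the VM's files.
    return vim.vm.ConfigSpec(
        name=name,
        memoryMB=128,
        numCPUs=1,
        guestId='otherGuest',
        files=vim.vm.FileInfo(vmPathName='[%s]' % datastore_name))

# vm = CreateVM(dc.vmFolder, make_minimal_config('dummy-vm', 'datastore1'),
#               resPool, host)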
Example #6
def Test13(si):  # if intel, do a real evc mode test
    success = 1
    isIntel = 0
    for cap in host.config.featureCapability:
        if cap.key == "cpuid.Intel" and cap.value == "1":
            isIntel = 1
            break

    if isIntel == 0:
        Log("Test13 Host is not Intel, skipping test")
        return

    featureMap = {
        'cpuid.STEPPING': 'Val:0xa',
        'cpuid.Intel': 'Val:1',
        'cpuid.MODEL': 'Val:0x17',
        'cpuid.XSAVE': 'Val:1',
        'cpuid.LM': 'Val:1',
        'cpuid.NUM_EXT_LEVELS': 'Val:0x80000008',
        'cpuid.MWAIT': 'Val:1',
        'cpuid.FAMILY': 'Val:6',
        'cpuid.SSSE3': 'Val:1',
        'cpuid.SSE3': 'Val:1',
        'cpuid.NX': 'Val:1',
        'cpuid.SSE41': 'Val:1',
        'cpuid.SS': 'Val:1',
        'cpuid.DS': 'Val:1',
        'cpuid.LAHF64': 'Val:1',
        'cpuid.CMPXCHG16B': 'Val:1',
        'cpuid.NUMLEVELS': 'Val:0xd',
        'cpuid.VMX': 'Val:1',
    }
    mockups = []
    for k, v in featureMap.items():
        mockups.append(vim.host.FeatureMask(key=k, featureName=k, value=v))
    task.WaitForTask(host.ApplyMockupFeatures(mockups))

    # now testEvcMode

    evcReqMap = {
        'cpuid.Intel': 'Bool:Min:1',
        'cpuid.XSAVE': 'Bool:Min:1',
        'cpuid.LM': 'Bool:Min:1',
        'cpuid.MWAIT': 'Bool:Min:1',
        'cpuid.SSSE3': 'Bool:Min:1',
        'cpuid.SSE3': 'Bool:Min:1',
        'cpuid.NX': 'Bool:Min:1',
        'cpuid.SSE41': 'Bool:Min:1',
        'cpuid.SS': 'Bool:Min:1',
        'cpuid.DS': 'Bool:Min:1',
        'cpuid.LAHF64': 'Bool:Min:1',
        'cpuid.CMPXCHG16B': 'Bool:Min:1',
    }
    evcMaskMap = {
        'cpuid.STEPPING': 'Val:1',
        'cpuid.Intel': 'Val:1',
        'cpuid.MODEL': 'Val:0xf',
        'cpuid.XSAVE': 'Val:1',
        'cpuid.LM': 'Val:1',
        'cpuid.NUM_EXT_LEVELS': 'Val:0x80000008',
        'cpuid.MWAIT': 'Val:1',
        'cpuid.FAMILY': 'Val:6',
        'cpuid.SSSE3': 'Val:1',
        'cpuid.SSE3': 'Val:1',
        'cpuid.NX': 'Val:1',
        'cpuid.SSE41': 'Val:1',
        'cpuid.SS': 'Val:1',
        'cpuid.DS': 'Val:1',
        'cpuid.LAHF64': 'Val:1',
        'cpuid.CMPXCHG16B': 'Val:1',
        'cpuid.NUMLEVELS': 'Val:0xa',
    }
    evcReqs = []
    evcMasks = []

    for k, v in evcReqMap.items():
        evcReqs.append(vim.vm.FeatureRequirement(key=k, featureName=k,
                                                 value=v))
    for k, v in evcMaskMap.items():
        evcMasks.append(vim.host.FeatureMask(key=k, featureName=k, value=v))

    evcTask = host.TestEvcMode(EVCMode(evcReqs, evcMasks))
    task.WaitForTask(evcTask)
    Log("%s" % evcTask.info.result)

    task.WaitForTask(host.ApplyMockupFeatures([]))

    if not success:
        raise Exception("Test13 failed")
    Log("Test13 PASSED")
Example #7
def Test15(si, datastore, sim=False):
    success = 1

    vmName = "vmFeature-Test15"
    envBrowser = cluster.environmentBrowser
    resPool = cluster.resourcePool

    vmspec = CreateQuickDummySpec(vmName,
                                  1,
                                  vmxVersion='vmx-09',
                                  guest="rhel5_64Guest",
                                  datastoreName=datastore,
                                  envBrowser=envBrowser,
                                  ctlrType="lsilogic")
    vm = CreateVM(dc.vmFolder, vmspec, resPool, host)

    opts = []
    opts.append(
        vim.Option.OptionValue(key="featureCompat.enable", value="TRUE"))
    opts.append(
        vim.Option.OptionValue(key="answer.msg.cpuid.noVHVQuestion",
                               value="No"))
    spec = vim.Vm.ConfigSpec(extraConfig=opts)
    spec.nestedHVEnabled = True
    task.WaitForTask(vm.Reconfigure(spec), si=si)

    offlineReqs = vm.runtime.offlineFeatureRequirement
    if offlineReqs is None or len(offlineReqs) < 2:
        success = 0
        Log("%s" % offlineReqs)
        Log("Requirements not populated for 64-bit guest and nestedHV")

    featureHide = ['cpuid.VMX', 'cpuid.SVM']
    # disable hv.capable
    mask = []
    for feature in featureHide:
        mask.append(
            vim.host.FeatureMask(featureName=feature,
                                 key=feature,
                                 value='Max:0'))

    task.WaitForTask(ApplyMockupFeatures(host, mask))

    try:
        task.WaitForTask(vm.PowerOn())
    except Exception as e:
        Log("Hit exception as expected: %s" % e)
    else:
        success = 0
        Log("Power on succeeded even though hv is false")

    spec = vim.Vm.ConfigSpec()
    spec.nestedHVEnabled = False
    task.WaitForTask(vm.Reconfigure(spec), si=si)

    offlineReqs = vm.runtime.offlineFeatureRequirement
    if offlineReqs is None or len(offlineReqs) >= 2:
        success = 0
        Log("%s" % offlineReqs)
        Log("More requirements than expected")

    if vm.runtime.powerState != "poweredOff":
        try:
            task.WaitForTask(vm.PowerOff())
        except Exception:
            pass

    task.WaitForTask(vm.Destroy())

    task.WaitForTask(host.ApplyMockupFeatures([]))

    if not success:
        raise Exception("Test15 failed")
    Log("Test15 PASSED")
Example #8
def powerOff_vm(vm):
    from pyVim import task
    Task = vm.PowerOffVM_Task()
    task.WaitForTask(Task)
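
PowerOffVM_Task is a hard power-off; a hedged sketch of a gentler variant that asks VMware Tools to shut the guest down first (the polling and fallback are assumptions, not part of the original helper):

def shutdown_vm(vm, timeout=60):
    import time
    from pyVim import task
    vm.ShutdownGuest()  # returns immediately; there is no task to wait on
    for _ in range(timeout):
        if vm.runtime.powerState == 'poweredOff':
            return
        time.sleep(1)
    # Guest did not stop in time (or Tools is absent): fall back to hard off.
    task.WaitForTask(vm.PowerOffVM_Task())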
Example #9
def Test8(si, datastore, sim=False):
    success = 1

    vmName = "vmFeature-Test8"
    envBrowser = cluster.environmentBrowser
    resPool = cluster.resourcePool
    vmspec = CreateQuickDummySpec(vmName,
                                  1,
                                  vmxVersion='vmx-09',
                                  datastoreName=datastore,
                                  envBrowser=envBrowser)
    vm = CreateVM(dc.vmFolder, vmspec, resPool, host)

    opts = []
    opts.append(
        vim.Option.OptionValue(key="featureCompat.enable", value="TRUE"))
    opts.append(
        vim.Option.OptionValue(key="answer.msg.checkpoint.resume.error",
                               value="Preserve"))
    opts.append(
        vim.Option.OptionValue(key="answer.msg.checkpoint.resume.softError",
                               value="Preserve"))
    spec = vim.Vm.ConfigSpec(extraConfig=opts)
    vmTask = vm.Reconfigure(spec)
    task.WaitForTask(vmTask, si=si)

    reqs = vm.runtime.featureRequirement
    if reqs is not None and len(reqs) > 0:
        Log("VM requirements populated unnecessarily")
        Log("%s" % reqs)
        success = 0

    filterobj = RetrieveFilterForRuntime(si, vm)
    updates = si.content.propertyCollector.WaitForUpdatesEx(
        None, vmodl.Query.PropertyCollector.WaitOptions(maxWaitSeconds=20))
    version = updates.version

    task.WaitForTask(vm.PowerOn())

    updates = si.content.propertyCollector.WaitForUpdatesEx(
        version, vmodl.Query.PropertyCollector.WaitOptions(maxWaitSeconds=20))

    if updates is None:
        Log("No updates for featureRequirements/featureMasks after poweron")
        success = 0
    else:
        version = updates.version

    reqs = vm.runtime.featureRequirement

    if reqs is None or len(reqs) == 0:
        Log("VM requirements not populated")
        success = 0

    keyValue = GetFeatureReqs(reqs, 1, 1)
    for k, v in keyValue.items():
        keyValue[k]['operation'] = "Max"
        keyValue[k]['value'] = int(v['value']) - 1

    mask = []
    for k, v in keyValue.items():
        mask.append(
            vim.host.FeatureMask(featureName=k,
                                 key=k,
                                 value="%s:%d" % (v['operation'], v['value'])))
    Log("%s" % mask)
    evcTask = host.TestEvcMode(EVCMode(None, mask))
    try:
        task.WaitForTask(evcTask)
    except (vim.fault.EVCAdmissionFailedCPUFeaturesForMode,
            vim.fault.EVCAdmissionFailedVmActive) as e:
        result = e
    else:
        result = evcTask.info.result
        if 'faults' not in result.__dict__.keys():
            result = None

    if result is None or len(result.faults) == 0:
        Log("TestEvcMode did not populate req for '%s'" %
            ''.join(keyValue.keys()))
        success = 0

    task.WaitForTask(ApplyEvcMode(host, EVCMode(None, mask), True))

    if len(vm.configIssue) == 0:
        success = 0
        Log("No ConfigIssues raised for VM %s" % vm.name)

    task.WaitForTask(vm.Suspend())

    try:
        task.WaitForTask(vm.PowerOn())
    except Exception as e:
        if vm.runtime.question is not None:
            Log("Questions for VM: %s" % vm.runtime.question)
            success = 0
        Log("Exception %s raised as expected" % e)
    else:
        time.sleep(5)
        if len(vm.configIssue) > 0:
            Log("ConfigIssue raised, but expected PowerOn failure")
            if not sim:
                success = 0
        else:
            Log("No config issue, PowerOn was allowed by vmx")
            success = 0
            Log("Previous reqs were %s" % reqs)
            Log("Current reqs are %s" % vm.runtime.featureRequirement)

    time.sleep(5)  # XXX: shouldn't be necessary
    task.WaitForTask(vm.PowerOff())

    updates = si.content.propertyCollector.WaitForUpdatesEx(
        version, vmodl.Query.PropertyCollector.WaitOptions(maxWaitSeconds=20))

    if updates is not None:
        version = updates.version

    if len(vm.configIssue) > 0:
        success = 0
        Log("VM ConfigIssues remaining after power off")
        if len(vm.runtime.featureRequirement) > 0:
            Log("%s" % vm.runtime.featureRequirement)
        Log("%s" % vm.configIssue)

    filterobj.Destroy()
    try:
        task.WaitForTask(vm.PowerOff())
    except Exception:
        pass
    #   task.WaitForTask(vm.Destroy())

    task.WaitForTask(host.ApplyEvcMode(EVCMode(None, None), True))

    if not success:
        raise Exception("Test8 failed")
    Log("Test8 PASSED")
Example #10
    def run(self, snapshot, recursive=False):
        dataset = snapshot.get('dataset') or snapshot.get('id').split('@')[0]
        id = snapshot.get('id') or '{0}@{1}'.format(dataset,
                                                    snapshot.get('name'))
        vm_snapname = self.environment.get('vmware_snapshot_name')
        failed_snapshots = self.environment.get('vmware_failed_snapshots', [])

        if not vm_snapname:
            return

        logger.info('VM snapshot name is: {0}'.format(vm_snapname))

        for mapping in self.datastore.query_stream('vmware.datasets'):
            if recursive:
                if not re.search('^{0}(/|$)'.format(mapping['dataset']), dataset) and \
                   not re.search('^{0}(/|$)'.format(dataset), mapping['dataset']):
                    continue
            else:
                if mapping['dataset'] != dataset:
                    continue

            peer = self.dispatcher.call_sync('peer.query',
                                             [('id', '=', mapping['peer'])],
                                             {'single': True})
            if not peer:
                failed_snapshots.append({
                    'when': 'connect',
                    'host': '<mapping {0}>'.format(mapping['name']),
                    'datastore': mapping['datastore'],
                    'error': 'Cannot find peer entry for mapping {0}'.format(
                        mapping['name'])
                })
                continue

            if any(
                    i.get('host') == q.get(peer, 'credentials.address')
                    for i in failed_snapshots):
                continue

            try:
                ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                ssl_context.verify_mode = ssl.CERT_NONE
                si = connect.SmartConnect(
                    host=q.get(peer, 'credentials.address'),
                    user=q.get(peer, 'credentials.username'),
                    pwd=unpassword(q.get(peer, 'credentials.password')),
                    sslContext=ssl_context)
                content = si.RetrieveContent()
                vm_view = content.viewManager.CreateContainerView(
                    content.rootFolder, [vim.VirtualMachine], True)
            except BaseException as err:
                logger.warning(
                    'Connecting to VMware instance at {0} failed: {1}'.format(
                        q.get(peer, 'credentials.address'), str(err)))

                failed_snapshots.append({
                    'when': 'connect',
                    'host': q.get(peer, 'credentials.address'),
                    'datastore': mapping['datastore'],
                    'error': getattr(err, 'msg', str(err))
                })

                continue

            for vm in vm_view.view:
                if not any(i.info.name == mapping['datastore']
                           for i in vm.datastore):
                    continue

                if not vm.snapshot:
                    continue

                snapshot = find_snapshot(vm.snapshot.rootSnapshotList,
                                         vm_snapname)
                if not snapshot:
                    continue

                logger.info(
                    'Removing snapshot of VM {0} (datastore {1})'.format(
                        vm.summary.config.name, mapping['datastore']))

                try:
                    task.WaitForTask(snapshot.RemoveSnapshot_Task(True))
                except vmodl.MethodFault as err:
                    logger.warning(
                        'Deleting snapshot of {0} failed: {1}'.format(
                            vm.summary.config.name, err.msg))
                    failed_snapshots.append({
                        'when': 'delete',
                        'vm': vm.summary.config.name,
                        'datastore': mapping['datastore'],
                        'error': err.msg
                    })

            connect.Disconnect(si)

        if failed_snapshots:
            descr = Template(ALERT_TEMPLATE).render(
                id=id, failed_snapshots=failed_snapshots)
            self.dispatcher.call_sync(
                'alert.emit', {
                    'clazz': 'VMwareSnapshotFailed',
                    'target': dataset,
                    'title': 'Failed to create or remove snapshot of one or '
                             'more VMware virtual machines',
                    'description': descr
                })
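
find_snapshot is referenced but not defined in this snippet; a minimal recursive helper consistent with the call sites (an assumption about the original implementation):

def find_snapshot(snapshot_list, name):
    # Walk the vim.vm.SnapshotTree list depth-first and return the snapshot
    # managed object whose display name matches, or None if absent.
    for item in snapshot_list:
        if item.name == name:
            return item.snapshot
        found = find_snapshot(item.childSnapshotList, name)
        if found:
            return found
    return None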
Example #11
   def TestDisconnectedHost(self):
      self.banner(self.TestDisconnectedHost)
      VerboseLog(logTrivia, self._host)

      scId = self._sc
      spec = Vim.Host.DatastoreSystem.VvolDatastoreSpec()
      spec.SetScId(scId)
      spec.SetName("vvol-test-ds:%s" % random.randint(1,1000))

      ret = True
      try:
         self.StopVpxa()

         try:
            VerboseLog(logInfo, "{Testing simple create")
            ds = self.CreateDs(spec)
         except vmodl.fault.HostNotConnected:
            pass
         except:
            raise
         finally:
            VerboseLog(logInfo, "}")

         try:
            VerboseLog(logInfo, "{Testing bulk create")
            create_task = self._vasaMgr.CreateVVolDatastore(spec, self._hosts)
            task.WaitForTask(create_task)
            VerboseLog(logVerbose, create_task.info.result)
            for result in create_task.info.result:
               if result.result == 'fail':
                  hostid = self._host.__class__.__name__ + ":" + self._host._moId
                  if result.hostKey == hostid:
                     if not isinstance(result.fault, vmodl.fault.HostNotConnected):
                        VerboseLog(logInfo, "failed for host " + result.hostKey)
                        raise Exception("unexpected exception")
                  else:
                     raise Exception("unexpected failure")
         finally:
            VerboseLog(logInfo, "}")

         self.StartVpxa()

         ds = self.CreateDs(spec)

         self.StopVpxa()

         try:
            VerboseLog(logInfo, "{Testing bulk remove")
            delete_task = self._vasaMgr.RemoveVVolDatastore(ds, self._hosts)
            task.WaitForTask(delete_task)
            VerboseLog(logVerbose, delete_task.info.result)
            for result in delete_task.info.result:
               if result.result == 'fail':
                  hostid = self._host.__class__.__name__ + ":" + self._host._moId
                  if result.hostKey == hostid:
                     if not isinstance(result.fault, vmodl.fault.HostNotConnected):
                        VerboseLog(logInfo, "failed for host " + result.hostKey)
                        raise Exception("unexpected exception")
                  else:
                     raise Exception("unexpected failure")
         finally:
            VerboseLog(logInfo, "}")

         self.StartVpxa()
         self.removeDs(ds)
      except:
         VerboseLog(logInfo, traceback.format_exc())
         ret = False
      VerboseLog(logInfo, "passed" if ret else "failed")
Example #12
def remove_host(host_obj):
    from pyVim import task
    disconnect_host(host_obj)
    TASK = host_obj.Destroy_Task()
    task.WaitForTask(TASK)
Example #13
def exit_maintenance_mode(host, timeout=60):
    from pyVim import task
    TASK = host.ExitMaintenanceMode(timeout=timeout)
    task.WaitForTask(TASK)
    host_inf = TASK.info.result
    return host_inf
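
A sketch of the matching enter-side helper, mirroring the pattern above (this counterpart is an assumption, not part of the original set; optional arguments such as evacuatePoweredOffVms are left at their defaults):

def enter_maintenance_mode(host, timeout=60):
    from pyVim import task
    TASK = host.EnterMaintenanceMode(timeout=timeout)
    task.WaitForTask(TASK)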
Example #14
def disconnect_host(host_obj):
    from pyVim import task
    Task = host_obj.DisconnectHost_Task()
    task.WaitForTask(Task)
Example #15
def remove_cluster(cluster):
    from pyVim import task
    TASK = cluster.Destroy_Task()
    task.WaitForTask(TASK)
Example #16
 vm_view = content.viewManager.CreateContainerView(
     content.rootFolder, [vim.VirtualMachine], True)
 for vm in vm_view.view:
     if vm.summary.runtime.powerState != 'poweredOn':
         continue
     if doesVMDependOnDataStore(vm, vmsnapobj.datastore):
         try:
             if canSnapshotVM(vm):
                 if doesVMSnapshotByNameExists(vm,
                                               vmsnapname) is False:
                     # have we already created a snapshot of the VM for this volume
                     # iteration? can happen if the VM uses two datasets (a and b)
                     # where both datasets are mapped to the same ZFS volume in FreeNAS.
                     VimTask.WaitForTask(
                         vm.CreateSnapshot_Task(
                             name=vmsnapname,
                             description=vmsnapdescription,
                             memory=False,
                             quiesce=False,
                         ))
                 else:
                     log.debug(
                         "Not creating snapshot %s for VM %s because it "
                         "already exists", vmsnapname, vm)
             else:
                 # TODO:
                 # we can try to shutdown the VM, if the user provided us an ok to do
                 # so (might need a new list property in obj to know which VMs are
                 # fine to shutdown and a UI to specify such exceptions)
                 # otherwise can skip VM snap and then make a crash-consistent zfs
                 # snapshot for this VM
                 log.log(
Example #17
    def snapshot_begin(self, dataset, recursive):
        self.middleware.call_sync('network.general.will_perform_activity',
                                  'vmware')

        # If there's a VMWare Plugin object for this filesystem
        # snapshot the VMs before taking the ZFS snapshot.
        # Once we've taken the ZFS snapshot we're going to log back in
        # to VMWare and destroy all the VMWare snapshots we created.
        # We do this because having VMWare snapshots in existence impacts
        # the performance of your VMs.
        qs = self._dataset_get_vms(dataset, recursive)

        # Generate a unique snapshot name that won't collide with anything that exists on the VMWare side.
        vmsnapname = str(uuid.uuid4())

        # Generate a helpful description that is visible on the VMWare side.  Since we
        # are going to be creating VMWare snaps, if one gets left dangling this will
        # help determine where it came from.
        vmsnapdescription = f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')} TrueNAS Created Snapshot"

        # We keep track of snapshots per VMWare "task" because we are going to iterate
        # over all the VMWare tasks for a given ZFS filesystem, do all the VMWare snapshotting
        # then take the ZFS snapshot, then iterate again over all the VMWare "tasks" and undo
        # all the snaps we created in the first place.
        vmsnapobjs = []
        for vmsnapobj in qs:
            # Data structures that will be used to keep track of VMs that are snapped,
            # as well as VMs we tried to snap and failed, and VMs we realized we couldn't
            # snapshot.
            snapvms = []
            snapvmfails = []
            snapvmskips = []

            try:
                ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                ssl_context.verify_mode = ssl.CERT_NONE
                si = connect.SmartConnect(host=vmsnapobj["hostname"],
                                          user=vmsnapobj["username"],
                                          pwd=vmsnapobj["password"],
                                          sslContext=ssl_context)
                content = si.RetrieveContent()
            except Exception as e:
                self.logger.warn("VMware login to %s failed",
                                 vmsnapobj["hostname"],
                                 exc_info=True)
                self._alert_vmware_login_failed(vmsnapobj, e)
                continue

            # There's no point to even consider VMs that are paused or powered off.
            vm_view = content.viewManager.CreateContainerView(
                content.rootFolder, [vim.VirtualMachine], True)
            for vm in vm_view.view:
                if vm.summary.runtime.powerState != "poweredOn":
                    continue

                if self._doesVMDependOnDataStore(vm, vmsnapobj["datastore"]):
                    try:
                        if self._canSnapshotVM(vm):
                            if not self._findVMSnapshotByName(vm, vmsnapname):
                                # have we already created a snapshot of the VM for this volume
                                # iteration? can happen if the VM uses two datasets (a and b)
                                # where both datasets are mapped to the same ZFS volume in TrueNAS.
                                VimTask.WaitForTask(
                                    vm.CreateSnapshot_Task(
                                        name=vmsnapname,
                                        description=vmsnapdescription,
                                        memory=False,
                                        quiesce=True,
                                    ))
                            else:
                                self.logger.debug(
                                    "Not creating snapshot %s for VM %s because it "
                                    "already exists", vmsnapname, vm)
                        else:
                            # TODO:
                            # we can try to shutdown the VM, if the user provided us an ok to do
                            # so (might need a new list property in obj to know which VMs are
                            # fine to shutdown and a UI to specify such exceptions)
                            # otherwise can skip VM snap and then make a crash-consistent zfs
                            # snapshot for this VM
                            self.logger.info(
                                "Can't snapshot VM %s that depends on "
                                "datastore %s and filesystem %s. "
                                "Possibly using PT devices. Skipping.",
                                vm.name, vmsnapobj["datastore"], dataset)
                            snapvmskips.append(vm.config.uuid)
                    except Exception as e:
                        self.logger.warning("Snapshot of VM %s failed",
                                            vm.name,
                                            exc_info=True)
                        self.middleware.call_sync(
                            "alert.oneshot_create",
                            "VMWareSnapshotCreateFailed", {
                                "hostname": vmsnapobj["hostname"],
                                "vm": vm.name,
                                "snapshot": vmsnapname,
                                "error": self._vmware_exception_message(e),
                            })
                        snapvmfails.append([vm.config.uuid, vm.name])

                    snapvms.append(vm.config.uuid)

            connect.Disconnect(si)

            vmsnapobjs.append({
                "vmsnapobj": vmsnapobj,
                "snapvms": snapvms,
                "snapvmfails": snapvmfails,
                "snapvmskips": snapvmskips,
            })

        # At this point we've completed snapshotting VMs.

        if not vmsnapobjs:
            return None

        return {
            "vmsnapname": vmsnapname,
            "vmsnapobjs": vmsnapobjs,
            "vmsynced": vmsnapobjs and all(
                len(vmsnapobj["snapvms"]) > 0
                and len(vmsnapobj["snapvmfails"]) == 0
                for vmsnapobj in vmsnapobjs)
        }
Example #18
File: vcFT.py Project: free-Zen/pvc
def main():
    supportedArgs = [
        (["H:", "hosts="], "", "List of hosts (comma separated)", "hosts"),
        (["D:", "dcName="], "Datacenter", "datacenter name", "dcName"),
        (["d:", "dsName="], "storage1", "shared datastore name", "dsName"),
        (["dsMount="], "", "server:path of datastore to mount", "dsMount"),
        (["k:", "keep="], "0", "Keep configs", "keep"),
        (["l:",
          "leaveRunning="], False, "Leave FT VMs running", "leaveRunning"),
        (["L:", "leaveRegistered="], False,
         "Leave FT VMs configured but not powered on", "leaveRegistered"),
        (["e:", "useExistingVm="], False, "Use existing VM", "useExistingVm"),
        (["r:", "checkRRState="], "True", "Validate Record/Replay states",
         "checkRRState"), (["V:", "vc="], "", "VC Server name", "vc"),
        (["u:", "user="******"root", "User name", "user"),
        (["p:", "pwd="], "vmware", "Password", "pwd"),
        (["v:", "vmname="], "vmFT", "Name of the virtual machine", "vmname")
    ]

    supportedToggles = [(["usage",
                          "help"], False, "Show usage information", "usage")]

    args = arguments.Arguments(sys.argv, supportedArgs, supportedToggles)
    if args.GetKeyValue("usage"):
        args.Usage()
        sys.exit(0)

    # Process command line
    primaryName = args.GetKeyValue("vmname")
    keep = int(args.GetKeyValue("keep"))
    vc = args.GetKeyValue("vc")
    leaveRegistered = bool(args.GetKeyValue("leaveRegistered"))
    leaveRunning = bool(args.GetKeyValue("leaveRunning"))
    useExistingVm = bool(args.GetKeyValue("useExistingVm"))
    global checkRRState
    checkRRState = eval(args.GetKeyValue("checkRRState"))
    dsName = args.GetKeyValue("dsName")
    dsMount = args.GetKeyValue("dsMount")
    dcName = args.GetKeyValue("dcName")
    hostList = args.GetKeyValue("hosts")
    hosts = re.split(r'\s*,\s*', hostList)
    hostSystems = []

    # Connect to VC server
    global si
    Log("Connecting to %s" % vc)
    si = connect.Connect(host=vc,
                         user=args.GetKeyValue("user"),
                         pwd=args.GetKeyValue("pwd"),
                         version=newestVersions.get('vim'))
    if si is None:
        raise Exception("Failed to connect to VC")
    connect.SetSi(si)
    Log("Connected to VC Server")

    dc = CreateOrUseDC(si, dcName)
    cluster = CreateOrUseCluster(dc, "HA")

    for host in hosts:
        hostSystem = GetHostByName(si, host)
        if hostSystem is not None and hostSystem.runtime.connectionState != "connected":
            task.WaitForTask(hostSystem.Destroy())
            hostSystem = None
        if hostSystem is None:
            Log("Failed to find %s" % host)
            hostSystem = AddHostToCluster(si, cluster, host)
        hostSystems.append(hostSystem)
        if len(FindNicType(si, hostSystem, ftLoggingNicType)) == 0:
            SelectVnic(si, hostSystem, "vmk0", ftLoggingNicType)
        if len(FindNicType(si, hostSystem, vmotionNicType)) == 0:
            SelectVnic(si, hostSystem, "vmk0", vmotionNicType)
        ds = FindDS(hostSystem, dsName)
        if ds is None and dsMount:
            MountNas(hostSystem, dsName, dsMount, accessMode="readWrite")

    secondaryName = primaryName
    orphanedSecondaryName = primaryName + "_orphaned"

    global primaryVm
    global secondaryVm

    primaryVm = None
    si = None
    try:
        CleanupVm(primaryName)

        # Create new VM
        primaryVm = None
        if useExistingVm:
            primaryVm = folder.Find(primaryName)
            if primaryVm is None:
                raise Exception("No primary VM with name " + primaryName +
                                " found!")
            Log("Using primary VM " + primaryName)
        else:
            Log("Creating primary VM " + primaryName)
            # Short delay to avoid colliding with a cleanup.
            time.sleep(5)
            primaryVm = vm.CreateQuickDummy(primaryName,
                                            guest="winXPProGuest",
                                            cdrom=0,
                                            numScsiDisks=2,
                                            scrubDisks=False,
                                            datastoreName=dsName,
                                            vmxVersion="vmx-09",
                                            dc=dcName)
            spec = vim.vm.ConfigSpec(numCPUs=2)
            task.WaitForTask(primaryVm.Reconfigure(spec))

        # Get details about primary VM
        primaryUuid = primaryVm.GetConfig().GetInstanceUuid()
        primaryCfgPath = primaryVm.GetConfig().GetFiles().GetVmPathName()
        primaryDir = primaryCfgPath[:primaryCfgPath.rfind("/")]
        Log("Using VM : " + primaryVm.GetName() + " with instanceUuid " +
            primaryUuid)
        CheckFTState(primaryVm, FTState.notConfigured)

        # Create secondary VM
        Log("Creating secondary VM " + secondaryName)
        HandleAction("on")

        if secondaryVm is None:
            raise Exception("Secondary VM creation failed")

        ##  Configure some additional config variables needed for FT
        ##  This should eventually be done automatically at FT Vmotion time
        Log("Setting up extra config settings for the primary VM...")
        extraCfgs = primaryVm.GetConfig().GetExtraConfig()
        #      AddExtraConfig(extraCfgs, "replay.allowBTOnly", "TRUE")
        #      AddExtraConfig(extraCfgs, "replay.allowFT", "TRUE")
        cSpec = vim.Vm.ConfigSpec()
        cSpec.SetExtraConfig(extraCfgs)
        task.WaitForTask(primaryVm.Reconfigure(cSpec))

        Log("FT configured successfully.")

        # Test snapshot
        #SnapshotTests(primaryVm)

        Log("PowerOn")
        task.WaitForTask(primaryVm.PowerOn())

        WaitForRunning()
        time.sleep(5)

        # We are now in a good known state, start random testing
        while True:
            action = random.choice(ACTION_LIST)
            revAction = REV_ACTION[action]
            HandleActions(action, revAction)

    except Exception as e:
        Log("Caught exception : " + str(e))
        traceback.print_exc()
        global status
        status = "FAIL"
    finally:
        connect.Disconnect(si)
Example #19
    def run(self, snapshot, recursive=False):
        # Find the matching datastore mappings
        dataset = snapshot.get('dataset') or snapshot.get('id').split('@')[0]
        vm_snapname = 'FreeNAS-{0}'.format(str(uuid.uuid4()))
        vm_snapdescr = '{0} (Created by FreeNAS)'.format(datetime.utcnow())
        failed_snapshots = []

        # Save the snapshot name in the parent task environment so the delete counterpart can find it
        self.dispatcher.task_setenv(self.environment['parent'],
                                    'vmware_snapshot_name', vm_snapname)

        for mapping in self.datastore.query_stream('vmware.datasets'):
            if recursive:
                if not re.search('^{0}(/|$)'.format(mapping['dataset']), dataset) and \
                   not re.search('^{0}(/|$)'.format(dataset), mapping['dataset']):
                    continue
            else:
                if mapping['dataset'] != dataset:
                    continue

            peer = self.dispatcher.call_sync('peer.query',
                                             [('id', '=', mapping['peer'])],
                                             {'single': True})
            if not peer:
                failed_snapshots.append({
                    'when': 'connect',
                    'host': '<mapping {0}>'.format(mapping['name']),
                    'datastore': mapping['datastore'],
                    'error': 'Cannot find peer entry for mapping {0}'.format(
                        mapping['name'])
                })
                continue

            try:
                ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                ssl_context.verify_mode = ssl.CERT_NONE
                si = connect.SmartConnect(
                    host=q.get(peer, 'credentials.address'),
                    user=q.get(peer, 'credentials.username'),
                    pwd=unpassword(q.get(peer, 'credentials.password')),
                    sslContext=ssl_context)
                content = si.RetrieveContent()
                vm_view = content.viewManager.CreateContainerView(
                    content.rootFolder, [vim.VirtualMachine], True)
            except BaseException as err:
                logger.warning(
                    'Connecting to VMware instance at {0} failed: {1}'.format(
                        q.get(peer, 'credentials.address'), str(err)))

                failed_snapshots.append({
                    'when': 'connect',
                    'host': q.get(peer, 'credentials.address'),
                    'datastore': mapping['datastore'],
                    'error': getattr(err, 'msg', str(err))
                })

                continue

            for vm in vm_view.view:
                if mapping['vm_filter_op'] == 'INCLUDE' and \
                   vm.summary.config.name not in mapping['vm_filter_entries']:
                    continue

                if mapping['vm_filter_op'] == 'EXCLUDE' and \
                   vm.summary.config.name in mapping['vm_filter_entries']:
                    continue

                if not any(i.info.name == mapping['datastore']
                           for i in vm.datastore):
                    continue

                if vm.snapshot and find_snapshot(vm.snapshot.rootSnapshotList,
                                                 vm_snapname):
                    continue

                logger.info(
                    'Creating snapshot of VM {0} (datastore {1})'.format(
                        vm.summary.config.name, mapping['datastore']))

                try:
                    task.WaitForTask(
                        vm.CreateSnapshot_Task(name=vm_snapname,
                                               description=vm_snapdescr,
                                               memory=False,
                                               quiesce=False))
                except vmodl.MethodFault as err:
                    logger.warning(
                        'Creating snapshot of {0} failed: {1}'.format(
                            vm.summary.config.name, err.msg))
                    failed_snapshots.append({
                        'when': 'create',
                        'vm': vm.summary.config.name,
                        'datastore': mapping['datastore'],
                        'error': err.msg
                    })

            connect.Disconnect(si)

        self.dispatcher.task_setenv(self.environment['parent'],
                                    'vmware_failed_snapshots',
                                    failed_snapshots)
Example #20
def test(si, delta, backingType, vmxVersion, ds):
    suffix = ''.join(
        random.choice(string.ascii_letters + string.digits) for i in range(8))

    vm1Name = '-'.join(['LinkedParent', suffix])
    print('Creating %s VM on %s' % (vm1Name, ds))
    task.WaitForTasks(
        [vm1.Destroy() for vm1 in folder.GetVmAll() if vm1.name == vm1Name])
    vm1 = vm.CreateQuickDummy(vm1Name,
                              numScsiDisks=1,
                              datastoreName=ds,
                              diskSizeInMB=1,
                              vmxVersion=vmxVersion,
                              backingType=backingType)
    vm1DirName = vm1.config.files.snapshotDirectory

    print('Creating Snapshot S1 for %s' % vm1Name)
    vm.CreateSnapshot(vm1, 'S1', '', False, False)
    s1 = vm1.snapshot.currentSnapshot

    disks = vmconfig.CheckDevice(s1.config, vim.vm.Device.VirtualDisk)
    if len(disks) != 1:
        raise Exception('Failed to find parent disk from snapshot')

    parent = disks[0].backing

    vm2Name = '-'.join(['LinkedChild', suffix])
    print('Creating %s VM on %s' % (vm2Name, ds))
    task.WaitForTasks(
        [vm2.Destroy() for vm2 in folder.GetVmAll() if vm2.name == vm2Name])
    vm2 = vm.CreateQuickDummy(vm2Name, datastoreName=ds, vmxVersion=vmxVersion)
    vm2DirName = vm2.config.files.snapshotDirectory

    configSpec = vim.vm.ConfigSpec()
    configSpec = vmconfig.AddScsiCtlr(configSpec)
    configSpec = vmconfig.AddScsiDisk(configSpec,
                                      datastorename=ds,
                                      capacity=1024,
                                      backingType=backingType)
    child = configSpec.deviceChange[1].device.backing
    child.parent = parent
    child.deltaDiskFormat = delta

    # this edit is expected to fail
    configSpec = vmconfig.AddFloppy(
        configSpec,
        type="image",
        backingName=
        "[] /these/are/not/the/floppy/images/you/are/looking/for.flp")
    floppy = configSpec.deviceChange[2].device
    floppy.backing = None

    print('Reconfigure %s (1) adding a disk backed by snapshot of %s and (2) '
          'adding floppy backed by non-existent image. Expecting a failure' %
          (vm2Name, vm1Name))
    try:
        vm.Reconfigure(vm2, configSpec)
    except Exception as e:
        pass
    else:
        raise Exception(
            'Expected an exception during %s reconfigure, but it succeeded'
            % vm2Name)

    print('Destroying %s' % vm2Name)
    vm.Destroy(vm2)
    print('Destroying %s' % vm1Name)
    vm.Destroy(vm1)

    hostSystem = host.GetHostSystem(si)
    datastoreBrowser = hostSystem.GetDatastoreBrowser()

    try:
        task.WaitForTask(datastoreBrowser.Search(vm1DirName))
    except vim.fault.FileNotFound:
        pass
    else:
        raise Exception(
            "Expected '%s' to be gone, but it is still present" %
            vm1DirName)

    try:
        task.WaitForTask(datastoreBrowser.Search(vm2DirName))
    except vim.fault.FileNotFound:
        pass
    else:
        raise Exception(
            "Expected '%s' to be gone, but it is still present" %
            vm2DirName)
Example #21
def destroy_vm(vm):
    from pyVim import task
    if vm.runtime.powerState == 'poweredOn':
        powerOff_vm(vm)
    TASK = vm.Destroy_Task()
    task.WaitForTask(TASK)
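
Destroy_Task deletes the VM's files from disk; a hedged sketch of the gentler inventory-only removal (an addition for contrast, not from the original set):

def unregister_vm(vm):
    # Remove the VM from inventory but keep its files on the datastore.
    # UnregisterVM is synchronous; there is no task to wait for.
    if vm.runtime.powerState == 'poweredOn':
        powerOff_vm(vm)  # helper from Example #8 above
    vm.UnregisterVM()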
Example #22
File: vcFT.py Project: free-Zen/pvc
def HandleAction(action):
    global si
    global primaryVm
    global secondaryVm

    if action == "enable":
        Log("*** EnableSecondary ***")
        task.WaitForTask(primaryVm.EnableSecondary(secondaryVm))
        WaitForPowerState(primaryVm, si, 'poweredOn', nsec=120)
        WaitForPowerState(secondaryVm, si, 'poweredOn', nsec=120)
        WaitForFTState(secondaryVm, FTState.running)
        WaitForFTState(primaryVm, FTState.running)
        WaitForDasProtection(primaryVm, True)
    elif action == "disable":
        Log("*** DisableSecondary ***")
        task.WaitForTask(primaryVm.DisableSecondary(secondaryVm))
        WaitForPowerState(primaryVm, si, 'poweredOn', nsec=120)
        WaitForPowerState(secondaryVm, si, 'poweredOff', nsec=120)
        WaitForFTState(secondaryVm, FTState.disabled)
        WaitForFTState(primaryVm, FTState.disabled)
    elif action == "on":
        Log("*** TurnOn ***")
        vmTask = primaryVm.CreateSecondary()
        task.WaitForTask(vmTask)
        secondaryVm = vmTask.info.result.vm
        if vmTask.info.result.powerOnAttempted:
            WaitForPowerState(primaryVm, si, 'poweredOn', nsec=120)
            WaitForPowerState(secondaryVm, si, 'poweredOn', nsec=120)
            WaitForFTState(secondaryVm, FTState.running)
            WaitForFTState(primaryVm, FTState.running)
    elif action == "off":
        Log("*** TurnOff ***")
        task.WaitForTask(primaryVm.TurnOffFaultTolerance())
        WaitForFTState(primaryVm, FTState.notConfigured)
    elif action == "makePrimary" or action == "terminate":
        if action == "makePrimary":
            Log("*** MakePrimary ***")
            task.WaitForTask(primaryVm.MakePrimary(secondaryVm))
            WaitForRunning()
        else:
            Log("*** TerminateVM ***")
            task.WaitForTask(primaryVm.Terminate())
        Log("Waiting for secondary to go down")
        WaitForNotPowerState(secondaryVm, si, 'poweredOn', nsec=120)
        Log("Waiting for secondary to come up")
        WaitForPowerState(secondaryVm, si, 'poweredOn', nsec=900)

    elif action == "terminateSecondary":
        Log("*** TerminateSecondary ***")
        task.WaitForTask(primaryVm.TerminateFaultTolerantVM(secondaryVm))

        Log("Waiting for secondary to go down")
        WaitForNotPowerState(secondaryVm, si, 'poweredOn', nsec=120)
        Log("Waiting for secondary to come up")
        WaitForPowerState(secondaryVm, si, 'poweredOn', nsec=900)

        WaitForRunning()
    elif action == "migratePrimary":
        Log("*** Migrate Primary ***")
        task.WaitForTask(
            primaryVm.Migrate(
                priority=vim.VirtualMachine.MovePriority.highPriority))
    elif action == "migrateSecondary":
        Log("*** Migrate Secondary ***")
        task.WaitForTask(
            secondaryVm.Migrate(
                priority=vim.VirtualMachine.MovePriority.highPriority))
    else:
        Log("Unknown action: '%s'" % action)