Exemplo n.º 1
0
    def test_60_finalize(self, conn, pvim):
        """Finalize (power off + destroy) a VM and check it reports success."""
        creds = Authentication([{
            'id': 'vsp',
            'type': 'vSphere',
            'host': 'https://vspherehost',
            'username': '******',
            'password': '******'
        }])
        cloud = self.get_vsphere_cloud()

        # Fake the SmartConnect service instance and its content views.
        service_instance = MagicMock()
        conn.return_value = service_instance
        content = MagicMock()
        service_instance.RetrieveContent.return_value = content
        content.viewManager.CreateContainerView.side_effect = self.CreateContainerView
        pvim.VirtualMachine = vim.VirtualMachine

        pvim.TaskInfo.State.success = vim.TaskInfo.State.success
        pvim.Task = vim.Task
        # Property collector reports both the PowerOff and Destroy tasks
        # as finished successfully.
        collector = MagicMock()
        service_instance.content.propertyCollector = collector
        updates = MagicMock()
        collector.WaitForUpdates.return_value = updates
        filter_set = MagicMock()
        updates.filterSet = [filter_set]
        poweroff_update = MagicMock()
        destroy_update = MagicMock()
        filter_set.objectSet = [poweroff_update, destroy_update]
        state_change = MagicMock()
        poweroff_update.changeSet = [state_change]
        poweroff_update.obj = vim.Task("PowerOffVM")
        destroy_update.changeSet = [state_change]
        destroy_update.obj = vim.Task("DestroyVM")
        state_change.name = "info.state"
        state_change.val = vim.TaskInfo.State.success
        inf = MagicMock()
        vm = VirtualMachine(inf, "vm-template", cloud.cloud, "", "",
                            cloud, 1)

        success, _ = cloud.finalize(vm, True, creds)

        self.assertTrue(success, msg="ERROR: finalizing VM info.")
        self.assertNotIn("ERROR",
                         self.log.getvalue(),
                         msg="ERROR found in log: %s" % self.log.getvalue())
Exemplo n.º 2
0
    def configure_vsan(self):
        """
        Manage VSAN configuration

        Reconfigures the cluster's VSAN settings when the desired state
        differs from the current one (honoring Ansible check mode) and
        exits the module with ``changed``/``result``.
        """
        changed, result = False, None

        if self.check_vsan_config_diff():
            if not self.module.check_mode:
                vsan_spec = vim.vsan.ReconfigSpec(modify=True)
                vsan_spec.vsanClusterConfig = vim.vsan.cluster.ConfigInfo(
                    enabled=self.enable_vsan)
                vsan_spec.vsanClusterConfig.defaultConfig = \
                    vim.vsan.cluster.ConfigInfo.HostDefaultInfo(
                        autoClaimStorage=self.params.get(
                            'vsan_auto_claim_storage'))
                if self.advanced_options is not None:
                    vsan_spec.extendedConfig = self._build_extended_config()
                try:
                    task = self.vsanClusterConfigSystem.VsanClusterReconfig(
                        self.cluster, vsan_spec)
                    # The vSAN API returns a vSAN-side task; rebind it to the
                    # VC stub so wait_for_task can poll it.
                    changed, result = wait_for_task(
                        vim.Task(task._moId, self.si._stub))
                except vmodl.RuntimeFault as runtime_fault:
                    self.module.fail_json(msg=to_native(runtime_fault.msg))
                except vmodl.MethodFault as method_fault:
                    self.module.fail_json(msg=to_native(method_fault.msg))
                except TaskError as task_e:
                    self.module.fail_json(msg=to_native(task_e))
                except Exception as generic_exc:
                    self.module.fail_json(msg="Failed to update cluster"
                                          " due to generic exception %s" %
                                          to_native(generic_exc))
            else:
                changed = True

        self.module.exit_json(changed=changed, result=result)

    def _build_extended_config(self):
        """Translate ``self.advanced_options`` into a VsanExtendedConfig.

        Options left as ``None`` are not set, so vCenter keeps its current
        values for them.
        """
        extended = vim.vsan.VsanExtendedConfig()
        if self.advanced_options['automatic_rebalance'] is not None:
            # Proactive rebalance is nested inside its own spec object.
            extended.proactiveRebalanceInfo = vim.vsan.ProactiveRebalanceInfo(
                enabled=self.advanced_options['automatic_rebalance'])
        # Flat option-name -> spec-attribute mapping for the simple flags.
        simple_options = {
            'disable_site_read_locality': 'disableSiteReadLocality',
            'large_cluster_support': 'largeScaleClusterSupport',
            'object_repair_timer': 'objectRepairTimer',
            'thin_swap': 'enableCustomizedSwapObject',
        }
        for option, attr in simple_options.items():
            value = self.advanced_options[option]
            if value is not None:
                setattr(extended, attr, value)
        return extended
Exemplo n.º 3
0
    def CreateContainerView(self, rootFolder, type_list, flag):
        """Fake vSphere CreateContainerView returning canned inventory mocks."""
        view_mock = MagicMock()
        primary = MagicMock()
        view_mock.view = [primary]
        requested = type_list[0]
        if requested == vim.Datastore:
            primary.name = "datastore"
        elif requested == vim.Network:
            # Two networks, each tied to an IP pool by name.
            primary.name = "vsnet1"
            primary.summary.ipPoolName = "ippool1"
            secondary = MagicMock()
            secondary.name = "vsnet2"
            secondary.summary.ipPoolName = "ippool2"
            view_mock.view.append(secondary)
        elif requested == vim.VirtualMachine:
            primary.name = "vm-template"
            # Each lifecycle operation yields its own named fake task.
            primary.Clone.return_value = vim.Task("CreateVM")
            primary.Suspend.return_value = vim.Task("SuspendVM")
            primary.PowerOn.return_value = vim.Task("PowerOnVM")
            primary.PowerOff.return_value = vim.Task("PowerOffVM")
            primary.Destroy.return_value = vim.Task("DestroyVM")
            primary.summary.runtime.powerState = self.vm_state
            primary.runtime.powerState = self.vm_state
            first_nic = MagicMock()
            first_nic.ipAddress = "10.0.0.1"
            second_nic = MagicMock()
            second_nic.ipAddress = "8.8.8.8"
            primary.guest.net = [first_nic, second_nic]
            disk_dev = MagicMock()
            scsi_ctrl = vim.vm.device.VirtualSCSIController()
            primary.config.hardware.device = [disk_dev, scsi_ctrl]
            disk_dev.backing.fileName = ""
            disk_dev.unitNumber = 1
        else:
            raise Exception("Invalid type")

        return view_mock
Exemplo n.º 4
0
    # NOTE(review): debugging/demo fragment — drives a manually created
    # vCenter task through its lifecycle. Assumes vm_obj, content and pdb are
    # bound earlier in the (truncated) enclosing scope.
    print (vm_obj.name)

    # Create a manual task on the VM through the vCenter TaskManager.
    task = content.taskManager.CreateTask(vm_obj, "PrimaryIO Migrate Back",
            "administrator", True, None)

    # Locate the freshly created task among the manager's recent tasks by MoId.
    # NOTE(review): if no MoId matches, `tk` stays unbound and the code below
    # raises NameError.
    for t in content.taskManager.recentTask:
        print(">>>>> %s" %t)
        print(">>>>> %s" % task.task._moId)
        if t._moId == task.task._moId:
            tk = t
            break

    print("tk = %s" % tk)
    # NOTE(review): pdb.set_trace() left in — drops into the interactive
    # debugger; remove before any non-interactive use.
    pdb.set_trace()
    tk.SetState(vim.TaskInfo.State.running)

    # Best-effort: try to mark every recent task as running, skipping any
    # that reject SetTaskState.
    # NOTE(review): bare `except:` also swallows KeyboardInterrupt/SystemExit;
    # `except Exception:` would be safer.
    for tsk in content.taskManager.recentTask:
        try:
            tsk.SetTaskState(vim.TaskInfo.State.running)
        except:
            continue

    import time; time.sleep(2)
    # Re-materialize the task as a vim.Task bound to the VM's SOAP stub so
    # progress updates go through the VC connection.
    new_task = vim.Task(tk._moId, stub=vm_obj._stub)

    print("new_task = %s" % new_task)
    new_task.UpdateProgress(10)
    import time; time.sleep(2)
    tk.SetState(vim.TaskInfo.State.success)
Exemplo n.º 5
0
def ConvertVsanTaskToVcTask(vsanTask, vcStub):
    """Rebind a vSAN task's MoId as a vim.Task reachable through the VC stub."""
    return vim.Task(vsanTask._moId, vcStub)
Exemplo n.º 6
0
    def test_20_launch(self, save_data, conn, pvim):
        """Launch three VMs (with retries) and check every launch succeeds."""
        radl_data = """
            network net1 (outbound = 'yes' and outports = '8080,9000:9100')
            network net2 ()
            system test (
            cpu.arch='x86_64' and
            cpu.count>=1 and
            memory.size>=512m and
            net_interface.0.connection = 'net1' and
            net_interface.0.dns_name = 'test' and
            net_interface.1.connection = 'net2' and
            net_interface.1.ip = '10.0.0.2' and
            disk.0.os.name = 'linux' and
            disk.0.image.url = 'vsp://vspherehost/vm-template' and
            disk.0.os.credentials.username = '******' and
            disk.1.size=1GB and
            disk.1.device='hdb' and
            disk.1.mount_path='/mnt/path'
            )"""
        radl = radl_parse.parse_radl(radl_data)
        radl.check()

        creds = Authentication([{
            'id': 'vsp',
            'type': 'vSphere',
            'host': 'https://vspherehost',
            'username': '******',
            'password': '******'
        }])
        cloud = self.get_vsphere_cloud()

        # Fake the SmartConnect service instance with one datacenter/host.
        service_instance = MagicMock()
        conn.return_value = service_instance
        content = MagicMock()
        service_instance.RetrieveContent.return_value = content
        dc_mock = MagicMock()
        content.rootFolder.childEntity = [dc_mock]
        host_mock = MagicMock()
        dc_mock.hostFolder.childEntity = [host_mock]
        content.viewManager.CreateContainerView.side_effect = self.CreateContainerView

        pvim.Datastore = vim.Datastore
        pvim.Network = vim.Network
        pvim.VirtualMachine = vim.VirtualMachine
        pvim.Task = vim.Task
        pvim.TaskInfo.State.success = vim.TaskInfo.State.success
        # Two IP pools matching the networks served by CreateContainerView.
        pool_manager = MagicMock()
        pvim.IpPoolManager.return_value = pool_manager
        pool1 = MagicMock()
        pool2 = MagicMock()
        pool_manager.QueryIpPools.return_value = [pool1, pool2]
        pool1.name = "ippool1"
        pool1.ipv4Config.subnetAddress = "10.0.0.1"
        pool1.ipv4Config.netmask = "255.0.0.0"
        pool2.name = "ippool2"
        pool2.ipv4Config.subnetAddress = "8.8.8.8"
        pool2.ipv4Config.netmask = "255.255.255.0"

        # Property collector reports the clone ("CreateVM") task as done.
        collector = MagicMock()
        service_instance.content.propertyCollector = collector
        updates = MagicMock()
        collector.WaitForUpdates.return_value = updates
        filter_set = MagicMock()
        updates.filterSet = [filter_set]
        obj_update = MagicMock()
        filter_set.objectSet = [obj_update]
        state_change = MagicMock()
        obj_update.changeSet = [state_change]
        obj_update.obj = vim.Task("CreateVM")
        state_change.name = "info.state"
        state_change.val = vim.TaskInfo.State.success

        res = cloud.launch_with_retry(InfrastructureInfo(), radl, radl,
                                      3, creds, 2, 0)
        self.assertEqual(len(res), 3)
        for launched, _ in res:
            self.assertTrue(launched)
Exemplo n.º 7
0
def convertVsanTaskToVcTask(vsanTask, vcStub):
    """Build a VC-side vim.Task from a vSAN task's MoId and the VC stub."""
    return vim.Task(vsanTask._moId, vcStub)
Exemplo n.º 8
0
def vcRestore(vmObj, location, restoredVmName, vsanStub, vsandpStub,
              vsphereInstance):
    """Perform a vCenter-level restore of a vSAN data-protected VM via DPS APIs.

    @param vmObj VM object
    @param location Location (which snapshot to restore)
    @param restoredVmName Restored VM name
    @param vsanStub vSAN stub
    @param vsandpStub vSAN DP stub
    @param vsphereInstance vSphere instance
    @return Restored VM
    """
    # Look up the protected VM's storage policy ID through vSAN health and
    # reuse that same policy for the restored VM. (A policy ID could equally
    # be retrieved from the Storage Policy Based Management (SPBM) server.)
    objectSystem = vim.cluster.VsanObjectSystem(
        "vsan-cluster-object-system", vsanStub)
    cluster = vmObj.runtime.host.parent
    querySpec = vim.cluster.VsanObjectQuerySpec()
    querySpec.uuid = vmObj.config.vmStorageObjectId
    objInfo = objectSystem.QueryVsanObjectInformation(
        cluster=cluster, vsanObjectQuerySpecs=[querySpec])
    profileId = objInfo[0].spbmProfileUuid

    # Restore into the same cluster/datastore/folder/host/resource pool as the
    # original VM. `host` is optional when the cluster has DRS turned on.
    # `fullClone=False` produces a linked clone; flip it or call the vCenter
    # promoteDisks API on the clone for a full copy.
    restoreSpec = vim.vsandp.cluster.VsanDataProtectionRecoverySystem.RestoreSpec(
        cluster=cluster,
        datastore=vmObj.datastore[0],
        name=restoredVmName,
        powerOn=True,                       # power the restored VM on
        fullClone=False,
        folder=vmObj.parent,
        host=vmObj.runtime.host,
        resourcePool=vmObj.resourcePool,
        profileId=profileId,                # same policy as the original VM
        location=location                   # which snapshot to restore
    )
    recoverySystem = vim.vsandp.cluster.VsanDataProtectionRecoverySystem(
        'vsan-dp-recovery-system', vsandpStub)
    # Kick off the restore, then wait on a VC-side task bound to the
    # vSphere instance's stub.
    taskMoRef = recoverySystem.RestoreVm_Task(restoreSpec)
    vcTask = vim.Task(taskMoRef._moId, vsphereInstance._stub)
    logging.info("Restoring from local snapshot using task '%s'\n",
                 taskMoRef)
    WaitForTask(vcTask, raiseOnError=False)
    taskInfo = vcTask.info
    if taskInfo.error is not None:
        msg = "Restore failed with error '{0}'".format(taskInfo.error)
        sys.exit(msg)
    return taskInfo.result
Exemplo n.º 9
0
 def get_task(self, vmware_client, task_key):
     """Return a vim.Task for *task_key* bound to the client's SOAP stub.

     Passes the stub to the vim.Task constructor instead of assigning
     ``_stub`` after construction, matching how vim.Task objects are
     built elsewhere (``vim.Task(moId, stub)``).
     """
     return vim.Task(task_key, vmware_client._stub)