    def run(self, name, template_id, datacenter_id, resourcepool_id,
            datastore_id):
        # convert ids to stubs
        template = inventory.get_virtualmachine(self.si_content, template_id)
        datacenter = inventory.get_datacenter(self.si_content, datacenter_id)
        resourcepool = inventory.get_resource_pool(self.si_content,
                                                   resourcepool_id)
        datastore = inventory.get_datastore(self.si_content, datastore_id)
        # prep objects for consumption
        target_folder = datacenter.vmFolder

        # relocate spec
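        # (controls where the clone lands: datastore and resource pool)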
        relocatespec = vim.vm.RelocateSpec()
        relocatespec.datastore = datastore
        relocatespec.pool = resourcepool

        # clone spec
        clonespec = vim.vm.CloneSpec()
        clonespec.location = relocatespec
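        # Keep the clone powered off and make it a VM, not a template.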
        clonespec.powerOn = False
        clonespec.template = False

        task = template.CloneVM_Task(folder=target_folder,
                                     name=name,
                                     spec=clonespec)
        self._wait_for_task(task)
        if task.info.state != vim.TaskInfo.State.success:
            raise Exception(task.info.error.msg)

        return {'task_id': task._moId, 'vm_id': task.info.result._moId}
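Every example in this listing hands the task it starts to self._wait_for_task(), a helper defined elsewhere in the pack and not shown here. A minimal sketch of what such a poller could look like, assuming it blocks until the task settles and reports success as a boolean (the function name, signature and poll interval are illustrative assumptions, not the pack's actual helper):

import time

from pyVmomi import vim


def wait_for_task(task, poll_interval=1):
    # Hypothetical stand-in for the pack's _wait_for_task helper.
    # Block while the vSphere task is still queued or running.
    while task.info.state in (vim.TaskInfo.State.queued,
                              vim.TaskInfo.State.running):
        time.sleep(poll_interval)
    # Report success as a boolean so callers can branch on it, matching how
    # the examples below use the result (e.g. {'state': ...}).
    return task.info.state == vim.TaskInfo.State.success
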
Example #2
    def run(self, vm_id, vm_name, datastore_cluster,
            datastore, disk_size, provision_type):
        # ensure that minimal inputs are provided
        checkinputs.one_of_two_strings(vm_id, vm_name, "ID or Name")

        vm = inventory.get_virtualmachine(self.si_content, vm_id, vm_name)
        spec = vim.vm.ConfigSpec()
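        # Helpers on the base action class pick the unit number and controller
        # key that the new disk will be attached to.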
        hdd_unit_number = self.get_next_unit_number(vm)
        ctrl_key = self.get_controller_key(vm)

        # Prepare new Disk configuration
        disk_changes = []
        disk_spec = vim.vm.device.VirtualDeviceSpec()
        disk_spec.fileOperation = "create"
        disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        disk_spec.device = vim.vm.device.VirtualDisk()
        disk_spec.device.backing =\
            vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
        disk_spec.device.backing.diskMode = "persistent"

        if provision_type == 'thin':
            disk_spec.device.backing.thinProvisioned = True

        disk_spec.device.unitNumber = hdd_unit_number
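        # capacityInKB expects kibibytes, so the GB input is multiplied by
        # 1024 * 1024.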
        disk_spec.device.capacityInKB = int(disk_size) * 1024 * 1024
        disk_spec.device.controllerKey = ctrl_key

        # If a datastore cluster is given, place the disk through Storage DRS
        if datastore_cluster:
            ds_clust_obj = inventory.get_datastore_cluster(
                self.si_content, name=datastore_cluster)
            disk_changes.append(disk_spec)
            spec.deviceChange = disk_changes
            srm = self.si_content.storageResourceManager

            storage_placement_spec = self.get_storage_placement_spec(
                ds_clust_obj, vm, spec)
            datastores = srm.RecommendDatastores(
                storageSpec=storage_placement_spec)

            if not datastores.recommendations:
                sys.stderr.write(
                    'Skipping: no datastore recommendations returned\n')
                return {'state': False}

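            # Apply the top-ranked recommendation; Storage DRS creates and
            # places the disk as part of the returned task.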
            add_disk_task = srm.ApplyStorageDrsRecommendation_Task(
                datastores.recommendations[0].key)

        elif datastore:
            datastore_obj = inventory.get_datastore(self.si_content,
                                                    name=datastore)
            disk_spec.device.backing.datastore = datastore_obj
            disk_changes.append(disk_spec)
            spec.deviceChange = disk_changes
            add_disk_task = vm.ReconfigVM_Task(spec)
        else:
            disk_changes.append(disk_spec)
            spec.deviceChange = disk_changes
            add_disk_task = vm.ReconfigVM_Task(spec)

        successfully_added_disk = self._wait_for_task(add_disk_task)
        return {'state': successfully_added_disk}
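Only 'thin' is special-cased above; any other provision_type leaves the backing flags at their defaults (typically a lazy-zeroed thick disk). For reference, an eager-zeroed thick disk would set the eagerlyScrub flag on the same backing class; this is a hedged aside about the pyVmomi data object, not something these examples do:

from pyVmomi import vim

# Build the same backing type the examples use, but flag it eager-zeroed thick.
backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
backing.diskMode = "persistent"
backing.thinProvisioned = False
backing.eagerlyScrub = True  # eager-zeroed thick
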
Example #3
    def run(self, name, template_id, datacenter_id, resourcepool_id,
            datastore_id, vsphere=None):
        self.establish_connection(vsphere)
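        # Connect using the vSphere entry named in config.yaml; the rest of
        # the action reads the service content via self.si_content.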

        # convert ids to stubs
        template = inventory.get_virtualmachine(self.si_content, template_id)
        datacenter = inventory.get_datacenter(self.si_content, datacenter_id)
        resourcepool = inventory.get_resource_pool(self.si_content,
                                                   resourcepool_id)
        datastore = inventory.get_datastore(self.si_content, datastore_id)
        # prep objects for consumption
        target_folder = datacenter.vmFolder

        # relocate spec
        relocatespec = vim.vm.RelocateSpec()
        relocatespec.datastore = datastore
        relocatespec.pool = resourcepool

        # clone spec
        clonespec = vim.vm.CloneSpec()
        clonespec.location = relocatespec
        clonespec.powerOn = False
        clonespec.template = False

        task = template.CloneVM_Task(folder=target_folder, name=name,
                                     spec=clonespec)
        self._wait_for_task(task)
        if task.info.state != vim.TaskInfo.State.success:
            raise Exception(task.info.error.msg)

        return {'task_id': task._moId, 'vm_id': task.info.result._moId}
Example #4
    def run(self, vm_id, vm_name, datastore_cluster, datastore, disk_size,
            provision_type):
        # ensure that minimal inputs are provided
        checkinputs.one_of_two_strings(vm_id, vm_name, "ID or Name")

        vm = inventory.get_virtualmachine(self.si_content, vm_id, vm_name)
        spec = vim.vm.ConfigSpec()
        hdd_unit_number = self.get_next_unit_number(vm)
        ctrl_key = self.get_controller_key(vm)

        # Prepare new Disk configuration
        disk_changes = []
        disk_spec = vim.vm.device.VirtualDeviceSpec()
        disk_spec.fileOperation = "create"
        disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        disk_spec.device = vim.vm.device.VirtualDisk()
        disk_spec.device.backing =\
            vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
        disk_spec.device.backing.diskMode = "persistent"

        if provision_type == 'thin':
            disk_spec.device.backing.thinProvisioned = True

        disk_spec.device.unitNumber = hdd_unit_number
        disk_spec.device.capacityInKB = int(disk_size) * 1024 * 1024
        disk_spec.device.controllerKey = ctrl_key

        # If a datastore cluster is given, place the disk through Storage DRS
        if datastore_cluster:
            ds_clust_obj = inventory.get_datastore_cluster(
                self.si_content, name=datastore_cluster)
            disk_changes.append(disk_spec)
            spec.deviceChange = disk_changes
            srm = self.si_content.storageResourceManager

            storage_placement_spec = self.get_storage_placement_spec(
                ds_clust_obj, vm, spec)
            datastores = srm.RecommendDatastores(
                storageSpec=storage_placement_spec)

            if not datastores.recommendations:
                sys.stderr.write(
                    'Skipping: no datastore recommendations returned\n')
                return {'state': False}

            add_disk_task = srm.ApplyStorageDrsRecommendation_Task(
                datastores.recommendations[0].key)

        elif datastore:
            datastore_obj = inventory.get_datastore(self.si_content,
                                                    name=datastore)
            disk_spec.device.backing.datastore = datastore_obj
            disk_changes.append(disk_spec)
            spec.deviceChange = disk_changes
            add_disk_task = vm.ReconfigVM_Task(spec)
        else:
            disk_changes.append(disk_spec)
            spec.deviceChange = disk_changes
            add_disk_task = vm.ReconfigVM_Task(spec)

        successfully_added_disk = self._wait_for_task(add_disk_task)
        return {'state': successfully_added_disk}
Example #5
    def run(self,
            vm_id,
            vm_name,
            datastore_cluster,
            datastore,
            disk_size,
            provision_type,
            vsphere=None):
        """
        Add Hard Drive object to Virtual Machine

        Args:
        - vm_id: Moid of Virtual Machine to edit
        - vm_name: Name of Virtual Machine to edit
        - datastore_cluster: Datastore Cluster to store new hdd files
        - datastore: Datastore to put new files in
        - disk_size: Size of HDD in GB
        - provision_type: Type of provisioning to use for the HDD
        - vsphere: Pre-configured vsphere connection details (config.yaml)

        Returns:
        - dict: Success
        """
        # ensure that minimal inputs are provided
        checkinputs.one_of_two_strings(vm_id, vm_name, "ID or Name")

        self.establish_connection(vsphere)

        vm = inventory.get_virtualmachine(self.si_content, vm_id, vm_name)
        spec = vim.vm.ConfigSpec()
        hdd_unit_number = self.get_next_unit_number(vm)
        ctrl_key = self.get_controller_key(vm)

        # Prepare new Disk configuration
        disk_changes = []
        disk_spec = vim.vm.device.VirtualDeviceSpec()
        disk_spec.fileOperation = "create"
        disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        disk_spec.device = vim.vm.device.VirtualDisk()
        disk_spec.device.backing =\
            vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
        disk_spec.device.backing.diskMode = "persistent"

        if provision_type == 'thin':
            disk_spec.device.backing.thinProvisioned = True

        disk_spec.device.unitNumber = hdd_unit_number
        disk_spec.device.capacityInKB = int(disk_size) * 1024 * 1024
        disk_spec.device.controllerKey = ctrl_key

        # If a datastore cluster is given, place the disk through Storage DRS
        if datastore_cluster:
            ds_clust_obj = inventory.get_datastore_cluster(
                self.si_content, name=datastore_cluster)
            disk_changes.append(disk_spec)
            spec.deviceChange = disk_changes
            srm = self.si_content.storageResourceManager

            storage_placement_spec = self.get_storage_placement_spec(
                ds_clust_obj, vm, spec)
            datastores = srm.RecommendDatastores(
                storageSpec=storage_placement_spec)

            if not datastores.recommendations:
                sys.stderr.write(
                    'Skipping: no datastore recommendations returned\n')
                return {'state': False}

            add_disk_task = srm.ApplyStorageDrsRecommendation_Task(
                datastores.recommendations[0].key)

        elif datastore:
            datastore_obj = inventory.get_datastore(self.si_content,
                                                    name=datastore)
            disk_spec.device.backing.datastore = datastore_obj
            disk_changes.append(disk_spec)
            spec.deviceChange = disk_changes
            add_disk_task = vm.ReconfigVM_Task(spec)
        else:
            disk_changes.append(disk_spec)
            spec.deviceChange = disk_changes
            add_disk_task = vm.ReconfigVM_Task(spec)

        successfully_added_disk = self._wait_for_task(add_disk_task)
        return {'state': successfully_added_disk}
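Assuming _wait_for_task reports success as a boolean, as in the polling sketch after the first example, a successful run of this action would therefore return something like:

{'state': True}
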
Example #6
    def run(self,
            name,
            template_id,
            datacenter_id,
            resourcepool_id,
            datastore_id,
            vsphere=None,
            networks=[]):
        is_success = True
        self.establish_connection(vsphere)

        # convert ids to stubs
        template = inventory.get_virtualmachine(self.si_content, template_id)
        datacenter = inventory.get_datacenter(self.si_content, datacenter_id)
        resourcepool = inventory.get_resource_pool(self.si_content,
                                                   resourcepool_id)
        datastore = inventory.get_datastore(self.si_content, datastore_id)
        # prep objects for consumption
        target_folder = datacenter.vmFolder

        # relocate spec
        relocatespec = vim.vm.RelocateSpec()
        relocatespec.datastore = datastore
        relocatespec.pool = resourcepool

        # When customization parameters for the network adapters are specified,
        # build a guest customization for the virtual machine being deployed.
        # (By default nothing is customized.)
        custom_adapters = []
        for network in networks:
            # Each 'networks' entry is validated here because StackStorm does
            # not yet validate array-type action parameters [*1].
            #
            # [*1] https://github.com/StackStorm/st2/issues/3160
            if not self._validate_networks_param(network):
                break

            # Map a fixed IP address, netmask and gateway onto this adapter.
            adaptermap = vim.vm.customization.AdapterMapping()
            adaptermap.adapter = vim.vm.customization.IPSettings()
            adaptermap.adapter.ip = vim.vm.customization.FixedIp()
            adaptermap.adapter.ip.ipAddress = network.get('ipaddr', '0.0.0.0')
            adaptermap.adapter.subnetMask = network.get(
                'netmask', '255.255.255.0')
            adaptermap.adapter.gateway = network.get('gateway', '0.0.0.0')

            custom_adapters.append(adaptermap)

        # clone spec
        clonespec = vim.vm.CloneSpec()
        clonespec.location = relocatespec
        clonespec.powerOn = False
        clonespec.template = False

        # Customize network adapters only when the networks parameter is given.
        # Note: the LinuxPrep domain comes from the last processed networks entry.
        if custom_adapters:
            customspec = vim.vm.customization.Specification()

            customspec.identity = vim.vm.customization.LinuxPrep(
                domain=network.get('domain', 'localhost'),
                hostName=vim.vm.customization.FixedName(name=name))
            customspec.nicSettingMap = custom_adapters
            customspec.globalIPSettings = vim.vm.customization.GlobalIPSettings(
            )

            clonespec.customization = customspec

        task = template.CloneVM_Task(folder=target_folder,
                                     name=name,
                                     spec=clonespec)
        self._wait_for_task(task)
        if task.info.state != vim.TaskInfo.State.success:
            is_success = False
            self.logger.warning(task.info.error.msg)

        return (is_success, {
            'task_id': task._moId,
            'vm_id': task.info.result._moId
        })
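Each entry in the networks parameter is read with network.get(...), so input of the following shape would drive the adapter customization above; only the keys are taken from the code, the values are illustrative:

networks = [
    {
        'ipaddr': '192.168.10.21',   # becomes FixedIp.ipAddress
        'netmask': '255.255.255.0',  # becomes IPSettings.subnetMask
        'gateway': '192.168.10.1',   # becomes IPSettings.gateway
        'domain': 'example.local',   # domain handed to LinuxPrep
    },
]
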
Example #7
    def run(self, vm_id, vm_name, datastore_cluster, datastore, vsphere=None):
        """
        Migrate VM to specified datastore

        Args:
        - vm_id: Moid of Virtual Machine to edit
        - vm_name: Name of Virtual Machine to edit
        - datastore_cluster: Datastore cluster to migrate to (currently unused)
        - datastore: Datastore to migrate the VM's files to
        - vsphere: Pre-configured vsphere connection details (config.yaml)

        Returns:
        - dict: Success
        """
        # ensure that minimal inputs are provided
        checkinputs.one_of_two_strings(vm_id, vm_name, "ID or Name")

        self.establish_connection(vsphere)

        vm = inventory.get_virtualmachine(self.si_content, vm_id, vm_name)
        # Sum the capacity of the VM's existing virtual disks (devices whose
        # backing points at a file) so the target datastore's free space can
        # be sanity-checked before migrating.
        disk_size = 0
        for device in vm.config.hardware.device:
            if hasattr(device.backing, 'fileName'):
                disk_size += device.capacityInBytes

        if datastore:
            datastore_obj = inventory.get_datastore(self.si_content,
                                                    name=datastore)

            # Require at least twice the VM's current disk footprint to be
            # free on the target datastore before starting the migration.
            summary = datastore_obj.summary
            if disk_size * 2 >= summary.freeSpace:
                return (False, {
                    'state': False,
                    'msg': 'Datastore %s doesn\'t have enough free space.' %
                           summary.name
                })

            relocate_spec = vim.vm.RelocateSpec(datastore=datastore_obj)
            relocate_vm = vm.Relocate(relocate_spec)
        else:
            return {'state': 'Datastore not provided.'}

        successfully_relocated_vm = self._wait_for_task(relocate_vm)
        if not successfully_relocated_vm:
            return (False, {
                'state': successfully_relocated_vm,
                'msg': relocate_vm
            })
        else:
            return {'state': successfully_relocated_vm}
Example #8
    def run(self, vm_id, vm_name, datastore_cluster,
            datastore, disk_size, provision_type, vsphere=None):
        """
        Add Hard Drive object to Virtual Machine

        Args:
        - vm_id: Moid of Virtual Machine to edit
        - vm_name: Name of Virtual Machine to edit
        - datastore_cluster: Datastore Cluster to store new hdd files
        - datastore: Datastore to put new files in
        - disk_size: Size of HDD in GB
        - provision_type: Type of provisioning to use for the HDD
        - vsphere: Pre-configured vsphere connection details (config.yaml)

        Returns:
        - dict: Success
        """
        # ensure that minimal inputs are provided
        checkinputs.one_of_two_strings(vm_id, vm_name, "ID or Name")

        self.establish_connection(vsphere)

        vm = inventory.get_virtualmachine(self.si_content, vm_id, vm_name)
        spec = vim.vm.ConfigSpec()
        hdd_unit_number = self.get_next_unit_number(vm)
        ctrl_key = self.get_controller_key(vm)

        # Prepare new Disk configuration
        disk_changes = []
        disk_spec = vim.vm.device.VirtualDeviceSpec()
        disk_spec.fileOperation = "create"
        disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        disk_spec.device = vim.vm.device.VirtualDisk()
        disk_spec.device.backing =\
            vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
        disk_spec.device.backing.diskMode = "persistent"

        if provision_type == 'thin':
            disk_spec.device.backing.thinProvisioned = True

        disk_spec.device.unitNumber = hdd_unit_number
        disk_spec.device.capacityInKB = int(disk_size) * 1024 * 1024
        disk_spec.device.controllerKey = ctrl_key

        # If a datastore cluster is given, place the disk through Storage DRS
        if datastore_cluster:
            ds_clust_obj = inventory.get_datastore_cluster(
                self.si_content, name=datastore_cluster)
            disk_changes.append(disk_spec)
            spec.deviceChange = disk_changes
            srm = self.si_content.storageResourceManager

            storage_placement_spec = self.get_storage_placement_spec(
                ds_clust_obj, vm, spec)
            datastores = srm.RecommendDatastores(
                storageSpec=storage_placement_spec)

            if not datastores.recommendations:
                sys.stderr.write(
                    'Skipping: no datastore recommendations returned\n')
                return {'state': False}

            add_disk_task = srm.ApplyStorageDrsRecommendation_Task(
                datastores.recommendations[0].key)

        elif datastore:
            datastore_obj = inventory.get_datastore(self.si_content,
                                                    name=datastore)
            disk_spec.device.backing.datastore = datastore_obj
            disk_changes.append(disk_spec)
            spec.deviceChange = disk_changes
            add_disk_task = vm.ReconfigVM_Task(spec)
        else:
            disk_changes.append(disk_spec)
            spec.deviceChange = disk_changes
            add_disk_task = vm.ReconfigVM_Task(spec)

        successfully_added_disk = self._wait_for_task(add_disk_task)
        return {'state': successfully_added_disk}