Example 1
    def mount_wrapper(self, datastore, path, *names):
        """
        Wrapper method for mounting ISOs on multiple VMs.

        Args:
            datastore (str): Name of datastore where the ISO is located.
            path (str): Path inside datastore where the ISO is located.
            names (tuple): A tuple of VM names in vCenter.
        """
        for name in names:
            host = Query.get_obj(self.virtual_machines.view, name)

            print('Mounting [%s] %s on %s' % (datastore, path, name))
            cdrom_cfg = []
            key, controller = Query.get_key(host, 'CD/DVD')

            cdrom_cfg_opts = {}
            cdrom_cfg_opts.update({
                'datastore': datastore,
                'iso_path': path,
                'iso_name': name,
                'key': key,
                'controller': controller,
            })
            cdrom_cfg.append(self.cdrom_config(**cdrom_cfg_opts))

            config = {'deviceChange' : cdrom_cfg}
            self.logger.debug('%s %s', cdrom_cfg_opts, config)
            self.reconfig(host, **config)
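
A minimal usage sketch (names are hypothetical; vmcfg is assumed to be an
authenticated instance of the class above). Note that the wrapper passes each
VM's name as iso_name, so the ISO is expected to be named after the VM:

    vmcfg.mount_wrapper('iso-ds01', 'isos/boot', 'web01', 'web02')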
Example 2
 def nic_recfg(self):
     """ Reconfigure a VM network adapter """
     devices = []
     edit = True
     host = Query.get_obj(self.virtual_machines.view, self.opts.name)
     nic_cfg_opts = {}
     label = self.opts.nic_prefix + ' ' + str(self.opts.nic_id)
     key, controller = None, None
     try:
         key, controller = Query.get_key(host, label)
     except IOError:
         # no device with this label on the VM; the loop below won't match
         pass
     if self.opts.nic_id:
         for item in host.config.hardware.device:
             if label == item.deviceInfo.label:
                 if self.opts.network:
                     nic_cfg_opts.update({
                         'key': key,
                         'controller': controller,
                         'container': host.runtime.host.network,
                         'network': self.opts.network,
                         'mac_address': item.macAddress,
                         'unit': item.unitNumber,
                     })
                     if self.opts.driver == 'e1000':
                         nic_cfg_opts.update({'driver': 'VirtualE1000'})
                     devices.append(
                         self.nic_config(edit=edit, **nic_cfg_opts))
                     if devices:
                         self.logger.info('%s label: %s %s network: %s',
                                          host.name, self.opts.nic_prefix,
                                          self.opts.nic_id,
                                          self.opts.network)
                         self.reconfig(host, **{'deviceChange': devices})
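
nic_recfg() is driven entirely by the parsed options. A hypothetical sketch of
the attributes it reads (assuming vmcfg.opts is a mutable argparse namespace):

    vmcfg.opts.name = 'web01'
    vmcfg.opts.nic_prefix = 'Network adapter'  # assumed vSphere device label prefix
    vmcfg.opts.nic_id = 1                      # targets 'Network adapter 1'
    vmcfg.opts.network = 'vlan100'
    vmcfg.opts.driver = None                   # or 'e1000' to switch to VirtualE1000
    vmcfg.nic_recfg()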
Example 3
    def umount_wrapper(self, *names):
        """
        Wrapper method for unmounting ISOs on multiple VMs.

        Args:
            names (tuple): a tuple of VM names in vCenter.
        """
        for name in names:
            print('Unmounting ISO from %s' % (name))
            host = Query.get_obj(self.virtual_machines.view, name)

            key, controller = Query.get_key(host, 'CD/DVD')

            self.logger.info('ISO on %s', name)
            cdrom_cfg = []
            cdrom_cfg_opts = {}
            cdrom_cfg_opts.update({
                'umount': True,
                'key': key,
                'controller': controller,
            })
            cdrom_cfg.append(self.cdrom_config(**cdrom_cfg_opts))
            config = {'deviceChange': cdrom_cfg}
            self.logger.debug('%s %s', host, config)
            self.reconfig(host, **config)
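
Usage mirrors mount_wrapper (hypothetical VM names):

    vmcfg.umount_wrapper('web01', 'web02', 'db01')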
Example 4
 def folder_recfg(self):
     """ Move a VM to another folder """
     host = Query.get_obj(self.virtual_machines.view, self.opts.name)
     folder = Query.folders_lookup(self.datacenters.view,
                                   self.opts.datacenter, self.opts.folder)
     self.logger.info('%s folder: %s', host.name, self.opts.folder)
     self.mvfolder(host, folder)
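
A hypothetical invocation, again driven by the options namespace:

    vmcfg.opts.name = 'web01'
    vmcfg.opts.datacenter = 'dc01'
    vmcfg.opts.folder = 'linux/webservers'
    vmcfg.folder_recfg()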
Example 5
    def disk_recfg(self):
        """ Reconfigure a VM disk."""
        devices = []
        edit = True
        host = Query.get_obj(self.virtual_machines.view, self.opts.name)
        disk_cfg_opts = {}
        # KB
        tokbytes = 1024*1024
        label = self.opts.disk_prefix + ' ' + str(self.opts.disk_id)
        key, controller = None, None
        try:
            key, controller = Query.get_key(host, label)
        except IOError:
            # no device with this label on the VM; the loop below won't match
            pass
        if self.opts.disk_id:
            for item in host.config.hardware.device:
                if label == item.deviceInfo.label:
                    disk_new_size = self.opts.sizeGB * tokbytes
                    current_size = item.capacityInKB
                    current_size_gb = int(current_size / (1024*1024))
                    if disk_new_size == current_size:
                        raise ValueError(
                            'New size and existing size are equal'
                        )
                    if disk_new_size < current_size:
                        raise ValueError(
                            'New size {0} KB does not exceed current '
                            'size {1} KB'.format(disk_new_size, current_size)
                        )
                    disk_delta = disk_new_size - current_size
                    ds_capacity_kb = item.backing.datastore.summary.capacity / 1024
                    ds_free_kb = item.backing.datastore.summary.freeSpace / 1024
                    threshold_pct = 0.10
                    if (ds_free_kb - disk_delta) / ds_capacity_kb < threshold_pct:
                        raise ValueError(
                            '{0} {1} disk space low, aborting.'.format(
                                host.resourcePool.parent.name,
                                item.backing.datastore.name
                            )
                        )

                    disk_cfg_opts.update(
                        {
                            'size' : disk_new_size,
                            'key' : key,
                            'controller' : controller,
                            'unit' : item.unitNumber,
                            'filename' : item.backing.fileName
                        }
                    )
            if disk_cfg_opts:
                devices.append(self.disk_config(edit=edit, **disk_cfg_opts))
                self.logger.info(
                    '%s label: %s %s current_size: %s new_size: %s', host.name,
                    self.opts.disk_prefix, self.opts.disk_id, current_size_gb, self.opts.sizeGB
                )
                self.reconfig(host, **{'deviceChange': devices})
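
The size math above is easy to misread, so here is the free-space guard worked
through with illustrative numbers (Python 3 true division assumed): growing a
40 GB disk to 100 GB on a 2 TB datastore that has 200 GB free.

    tokbytes = 1024 * 1024                     # GB -> KB
    disk_new_size = 100 * tokbytes             # 104857600 KB
    current_size = 40 * tokbytes               # 41943040 KB
    disk_delta = disk_new_size - current_size  # 62914560 KB, i.e. 60 GB
    ds_capacity_kb = 2048 * tokbytes           # 2 TB datastore
    ds_free_kb = 200 * tokbytes                # 200 GB free
    # (200 GB - 60 GB) / 2048 GB is roughly 0.068 < 0.10, so the resize aborts
    print((ds_free_kb - disk_delta) / ds_capacity_kb)  # 0.068359375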
Example 6
    def nic_config(cls, edit=False, **kwargs):
        """
        Method returns configured object for network interface.

        kwargs:
            container (obj):  ContainerView object.
            network (str):    Name of network to add to VM.
            connected (bool): Indicates that the device is currently
                connected. Valid only while the virtual machine is running.
            start_connected (bool):
                Specifies whether or not to connect the device when the
                virtual machine starts.
            allow_guest_control (bool):
                Allows the guest to control whether the connectable device
                is connected.
            driver (str): Name of the network adapter driver class.
        Returns:
            nic (obj): A configured object for a network device. This should
                be appended to the ConfigSpec deviceChange attribute.
        """
        key = kwargs.get('key', None)
        controller = kwargs.get('controller', None)
        container = kwargs.get('container', None)
        mac_address = kwargs.get('mac_address', None)
        network = kwargs.get('network', None)
        connected = kwargs.get('connected', True)
        start_connected = kwargs.get('start_connected', True)
        allow_guest_control = kwargs.get('allow_guest_control', True)
        unit = kwargs.get('unit', None)
        address_type = kwargs.get('address_type', 'assigned')
        driver = kwargs.get('driver', 'VirtualVmxnet3')

        nic = vim.vm.device.VirtualDeviceSpec()
        nic.device = getattr(vim.vm.device, driver)()

        if edit:
            nic.operation = 'edit'
            nic.device.key = key
            nic.device.controllerKey = controller
            nic.device.macAddress = mac_address
            nic.device.unitNumber = unit
            nic.device.addressType = address_type
        else:
            nic.operation = 'add'

        nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
        nic.device.backing.network = Query.get_obj(container, network)
        nic.device.backing.deviceName = network

        nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
        nic.device.connectable.connected = connected
        nic.device.connectable.startConnected = start_connected
        nic.device.connectable.allowGuestControl = allow_guest_control

        return nic
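
A hypothetical 'add' call (the class name VMConfig and cluster_obj are
assumptions; when adding, only container and network are needed since the
remaining kwargs default):

    nic_spec = VMConfig.nic_config(container=cluster_obj.network, network='vlan100')
    config_spec = vim.vm.ConfigSpec(deviceChange=[nic_spec])
    # config_spec would then be passed to the VM's ReconfigVM_Task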
Example 7
    def power_wrapper(self, state, *names):
        """
        Wrapper method for changing the power state on multiple VMs.

        Args:
            state (str): choices: on, off, reset, reboot, shutdown
            names (tuple): A tuple of VM names in vCenter.
        """
        for name in names:
            host = Query.get_obj(self.virtual_machines.view, name)
            print('%s changing power state to %s' % (name, state))
            self.logger.debug('%s %s', host, state)
            self.power(host, state)
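
For example (hypothetical names):

    vmcfg.power_wrapper('off', 'web01', 'web02')
    vmcfg.power_wrapper('on', 'web01', 'web02')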
Example 8
    def disk_config(cls, edit=False, **kwargs):
        """
        Method returns configured VirtualDisk object

        Kwargs:
            container (obj): Cluster container object
            datastore (str): Name of datastore for the disk files location.
            size (int):      Disk size in kilobytes.
            key  (int):      Integer value of scsi device
            unit (int):      unitNumber of device.
            mode (str):      The disk persistence mode.
            thin (bool):     If True, then it enables thin provisioning

        Returns:
            disk (obj): A configured object for a VMDK disk. This should
                be appended to the ConfigSpec deviceChange attribute.
        """
        # capacityInKB is deprecated but also a required field. See pyVmomi bug #218

        container = kwargs.get('container', None)
        datastore = kwargs.get('datastore', None)
        size = kwargs.get('size', None)
        key = kwargs.get('key', None)
        unit = kwargs.get('unit', 0)
        mode = kwargs.get('mode', 'persistent')
        thin = kwargs.get('thin', True)
        controller = kwargs.get('controller', None)
        filename = kwargs.get('filename', None)

        disk = vim.vm.device.VirtualDeviceSpec()

        if edit:
            disk.operation = 'edit'

            disk.device = vim.vm.device.VirtualDisk()
            disk.device.capacityInKB = size
            disk.device.key = key
            # controllerKey is tied to SCSI Controller
            disk.device.controllerKey = controller
            disk.device.unitNumber = unit
            disk.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
            disk.device.backing.fileName = filename
            disk.device.backing.diskMode = mode

        else:
            disk.operation = 'add'
            disk.fileOperation = 'create'

            disk.device = vim.vm.device.VirtualDisk()
            disk.device.capacityInKB = size
            # controllerKey is tied to SCSI Controller
            disk.device.controllerKey = controller
            disk.device.unitNumber = unit
            disk.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
            disk.device.backing.fileName = '[' + datastore + ']'
            disk.device.backing.datastore = Query.get_obj(container, datastore)
            disk.device.backing.diskMode = mode
            disk.device.backing.thinProvisioned = thin
            disk.device.backing.eagerlyScrub = False

        return disk
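
A hypothetical 'add' call for a 20 GB thin-provisioned disk (the class name
VMConfig and cluster_obj are assumptions; note that size is in KB):

    disk_spec = VMConfig.disk_config(
        container=cluster_obj.datastore,
        datastore='vm-ds01',
        size=20 * 1024 * 1024,   # 20 GB expressed in KB
        controller=1000,         # key of a SCSI controller created beforehand
        unit=0,
    )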
Example 9
    def main(self):
        """
        This is the main method. It parses all the argparse options and runs
        the code blocks for whichever subcommands and flags are set.
        """

        try:
            call_count = 0  # safe default if we fail before the session is queried

            self.auth = Auth(self.opts.host)
            self.auth.login(self.opts.user, self.opts.passwd, self.opts.domain,
                            self.opts.passwd_file)

            self.opts.passwd = None
            self.logger.debug(self.opts)

            virtual_machines_container = Query.create_container(
                self.auth.session, self.auth.session.content.rootFolder,
                [vim.VirtualMachine], True)

            self.vmcfg = VMConfigHelper(self.auth, self.opts, argparser.dotrc)
            self.clustercfg = ClusterConfig(self.auth, self.opts,
                                            argparser.dotrc)

            call_count = self.auth.session.content.sessionManager.currentSession.callCount

            if self.opts.cmd == 'create':
                if self.opts.config:
                    for cfg in self.opts.config:
                        spec = self.vmcfg.dict_merge(argparser.dotrc,
                                                     yaml.load(cfg))
                        cfgcheck_update = CfgCheck.cfg_checker(
                            spec, self.auth, self.opts)
                        spec['vmconfig'].update(
                            self.vmcfg.dict_merge(spec['vmconfig'],
                                                  cfgcheck_update))
                        spec = self.vmcfg.pre_create_hooks(**spec)
                        spec = self.vmcfg.create_wrapper(**spec)
                        self.vmcfg.post_create_hooks(**spec)
                        filename = spec['vmconfig']['name'] + '.yaml'
                        server_cfg = {}
                        server_cfg['vmconfig'] = {}
                        server_cfg['vmconfig'].update(spec['vmconfig'])
                        if spec.get('mkbootiso', None):
                            server_cfg['mkbootiso'] = {}
                            server_cfg['mkbootiso'].update(spec['mkbootiso'])
                        print(yaml.dump(server_cfg, default_flow_style=False),
                              file=open(filename, 'w'))

            if self.opts.cmd == 'mount':
                self.vmcfg.mount_wrapper(self.opts.datastore, self.opts.path,
                                         *self.opts.name)

            if self.opts.cmd == 'power':
                self.vmcfg.power_wrapper(self.opts.power, *self.opts.name)

            if self.opts.cmd == 'umount':
                self.vmcfg.umount_wrapper(*self.opts.name)

            if self.opts.cmd == 'upload':
                self.vmcfg.upload_wrapper(self.opts.datastore, self.opts.dest,
                                          self.opts.verify_ssl, *self.opts.iso)

            if self.opts.cmd == 'add':
                hostname = Query.get_obj(virtual_machines_container.view,
                                         self.opts.name)

                # nics
                if self.opts.device == 'nic':
                    self.vmcfg.add_nic_recfg(hostname)

            if self.opts.cmd == 'reconfig':
                host = Query.get_obj(virtual_machines_container.view,
                                     self.opts.name)
                if self.opts.cfgs:
                    self.logger.info(
                        'reconfig: %s cfgs: %s', host.name,
                        ' '.join('%s=%s' % (k, v)
                                 for k, v in self.opts.cfgs.iteritems()))
                    self.vmcfg.reconfig(host, **self.opts.cfgs)
                if self.opts.folder:
                    self.vmcfg.folder_recfg()
                if self.opts.device == 'disk':
                    self.vmcfg.disk_recfg()
                if self.opts.device == 'nic':
                    self.vmcfg.nic_recfg()

            if self.opts.cmd == 'drs':
                if not self.opts.cluster:
                    self.opts.cluster = Prompts.clusters(self.auth.session)
                self.clustercfg.drs_rule()

            if self.opts.cmd == 'query':
                datacenters_container = Query.create_container(
                    self.auth.session, self.auth.session.content.rootFolder,
                    [vim.Datacenter], True)
                clusters_container = Query.create_container(
                    self.auth.session, self.auth.session.content.rootFolder,
                    [vim.ClusterComputeResource], True)

                if self.opts.anti_affinity_rules:
                    if self.opts.cluster:
                        anti_affinity_rules = Query.return_anti_affinity_rules(
                            clusters_container.view, self.opts.cluster)
                    else:
                        cluster = Prompts.clusters(self.auth.session)
                        anti_affinity_rules = Query.return_anti_affinity_rules(
                            clusters_container.view, cluster)
                    if not anti_affinity_rules:
                        print('No antiaffinity rules defined.')
                    else:
                        print('Antiaffinity rules:')

                        for key, val in sorted(
                                anti_affinity_rules.iteritems()):
                            print('{0}: {1}'.format(key,
                                                    ' '.join(sorted(val))))

                if self.opts.datastores:
                    if self.opts.cluster:
                        datastores = Query.return_datastores(
                            clusters_container.view, self.opts.cluster)
                    else:
                        cluster = Prompts.clusters(self.auth.session)
                        datastores = Query.return_datastores(
                            clusters_container.view, cluster)
                    for row in datastores:
                        print('{0:30}\t{1:10}\t{2:10}\t{3:6}\t{4:10}\t{5:6}'.
                              format(*row))

                if self.opts.folders:
                    if self.opts.datacenter:
                        folders = Query.list_vm_folders(
                            datacenters_container.view, self.opts.datacenter)
                        folders.sort()
                        for folder in folders:
                            print(folder)
                    else:
                        datacenter = Prompts.datacenters(self.auth.session)
                        folders = Query.list_vm_folders(
                            datacenters_container.view, datacenter)
                        folders.sort()
                        for folder in folders:
                            print(folder)
                if self.opts.clusters:
                    clusters = Query.list_obj_attrs(clusters_container, 'name')
                    clusters.sort()
                    for cluster in clusters:
                        print(cluster)
                if self.opts.networks:
                    if self.opts.cluster:
                        cluster = Query.get_obj(clusters_container.view,
                                                self.opts.cluster)
                        networks = Query.list_obj_attrs(cluster.network,
                                                        'name',
                                                        view=False)
                        networks.sort()
                        for net in networks:
                            print(net)
                    else:
                        cluster_name = Prompts.clusters(self.auth.session)
                        cluster = Query.get_obj(clusters_container.view,
                                                cluster_name)
                        networks = Query.list_obj_attrs(cluster.network,
                                                        'name',
                                                        view=False)
                        networks.sort()
                        for net in networks:
                            print(net)
                if self.opts.vms:
                    vms = Query.list_vm_info(datacenters_container.view,
                                             self.opts.datacenter)
                    for key, value in vms.iteritems():
                        print(key, value)
                if self.opts.vmconfig:
                    for name in self.opts.vmconfig:
                        virtmachine = Query.get_obj(
                            virtual_machines_container.view, name)
                        self.logger.debug(virtmachine.config)
                        if self.opts.createcfg:
                            print(
                                yaml.dump(Query.vm_config(
                                    virtual_machines_container.view, name,
                                    self.opts.createcfg),
                                          default_flow_style=False))
                        else:
                            print(
                                yaml.dump(Query.vm_config(
                                    virtual_machines_container.view, name),
                                          default_flow_style=False))
                if self.opts.vm_by_datastore:
                    if self.opts.cluster and self.opts.datastore:
                        vms = Query.vm_by_datastore(clusters_container.view,
                                                    self.opts.cluster,
                                                    self.opts.datastore)
                        for vm_name in vms:
                            print(vm_name)
                    else:
                        if not self.opts.cluster:
                            cluster = Prompts.clusters(self.auth.session)
                        if not self.opts.datastore:
                            datastore = Prompts.datastores(
                                self.auth.session, cluster)
                        print()

                        vms = Query.vm_by_datastore(clusters_container.view,
                                                    cluster, datastore)
                        for vm_name in vms:
                            print(vm_name)

                if self.opts.vm_guest_ids:
                    for guest_id in Query.list_guestids():
                        print(guest_id)

            self.auth.logout()
            self.logger.debug('Call count: {0}'.format(call_count))

        except ValueError as err:
            self.logger.error(err, exc_info=False)
            self.auth.logout()
            self.logger.debug('Call count: {0}'.format(call_count))
            sys.exit(3)

        except vim.fault.InvalidLogin as loginerr:
            self.logger.error(loginerr.msg, exc_info=False)
            sys.exit(2)

        except KeyboardInterrupt as err:
            self.logger.error(err, exc_info=False)
            self.auth.logout()
            self.logger.debug('Call count: {0}'.format(call_count))
            sys.exit(1)
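
The subcommand names (create, mount, power, umount, upload, add, reconfig,
drs, query) come straight from the dispatch above; the flag spellings below
are assumptions about the project's argparser, shown only to illustrate the
flow:

    # vctools mount --datastore iso-ds01 --path isos/boot --name web01 web02
    # vctools power --power on --name web01
    # vctools query --datastores --cluster cluster01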
Example 10
    def cfg_checker(cfg, auth, opts):
        """
        Checks config for a valid configuration, and prompts user if
        information is missing

        Args:
            cfg  (obj): Yaml object
            auth (obj): Authenticated session object
            opts (obj): Parsed argparse options
        """
        clusters = Query.create_container(
            auth.session, auth.session.content.rootFolder,
            [vim.ComputeResource], True
        )
        if 'vmconfig' in cfg:

            # name
            if 'name' in cfg['vmconfig']:
                name = cfg['vmconfig']['name']
            else:
                name = Prompts.name()
            # guestid
            if 'guestId' in cfg['vmconfig']:
                guestid = cfg['vmconfig']['guestId']
            else:
                guestid = Prompts.guestids()
                print('\n%s guestid selected.' % (guestid))
            # cluster
            if 'cluster' in cfg['vmconfig']:
                cluster = cfg['vmconfig']['cluster']
                cluster_obj = Query.get_obj(clusters.view, cluster)
            else:
                cluster = Prompts.clusters(auth.session)
                cluster_obj = Query.get_obj(clusters.view, cluster)
                print('\n%s cluster selected.' % (cluster))
            # datastore
            if 'datastore' in cfg['vmconfig']:
                datastore = cfg['vmconfig']['datastore']
            else:
                datastore = Prompts.datastores(auth.session, cluster)
                print('\n%s datastore selected.' % (datastore))
            # datacenter
            if not opts.datacenter:
                datacenter = Prompts.datacenters(auth.session)
                print('\n%s datacenter selected.' % (datacenter))
            else:
                datacenter = opts.datacenter
            # nics
            if 'nics' in cfg['vmconfig']:
                nics = cfg['vmconfig']['nics']
                print('nics: %s' % (nics))
            else:
                nics = Prompts.networks(cluster_obj)
                print('\n%s networks selected.' % (','.join(nics)))
            # folder
            if 'folder' in cfg['vmconfig']:
                folder = cfg['vmconfig']['folder']
            else:
                folder = Prompts.folders(auth.session, datacenter)
                print('\n%s folder selected.' % (folder))
        else:
            name = Prompts.name()
            guestid = Prompts.guestids()
            print('\n%s selected.' % (guestid))
            cluster = Prompts.clusters(auth.session)
            cluster_obj = Query.get_obj(clusters.view, cluster)
            print('\n%s selected.' % (cluster))
            datastore = Prompts.datastores(auth.session, cluster)
            print('\n%s selected.' % (datastore))
            datacenter = Prompts.datacenters(auth.session)
            print('\n%s selected.' % (datacenter))
            nics = Prompts.networks(cluster_obj)
            print('\n%s selected.' % (','.join(nics)))
            folder = Prompts.folders(auth.session, datacenter)
            print('\n%s selected.' % (folder))

        output = {
            'name': name,
            'guestId': guestid,
            'cluster': cluster,
            'datastore': datastore,
            'datacenter': datacenter,
            'nics': nics,
            'folder': folder
        }

        return output
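
A minimal cfg that satisfies every lookup in the checker without prompting
(values are illustrative; opts.datacenter must also be set, or the datacenter
prompt still fires):

    cfg = {
        'vmconfig': {
            'name': 'web01',
            'guestId': 'rhel7_64Guest',
            'cluster': 'cluster01',
            'datastore': 'vm-ds01',
            'nics': ['vlan100'],
            'folder': 'linux/webservers',
        }
    }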
Example 11
    def drs_rule(self):
        """
        Method messes with DRS rules.
        Currently only Anti Affinity rules, and only add or delete.

        For safety, it has a concept of a vctools prefix.  The prefix lives in the
        rc file, or can be declared by a flag.  This is so you "can't" delete a
        rule that was not created by vctools.

        Args:
            cluster (str): cluster to modify
            drs_type (str): currently only anti-affinity
            function (add|delete): operation mode
            name (str): name of the rule
            vms (list): list of vms (used for add; ignored for delete)
        """
        cluster = self.opts.cluster
        drs_type = self.opts.drs_type
        name = self.opts.name
        vms = self.opts.vms
        function = self.opts.function

        self.logger.debug(
            '%s %s %s %s %s', cluster, drs_type, name, vms, function
        )

        # containers we need
        clusters = Query.create_container(
            self.auth.session, self.auth.session.content.rootFolder,
            [vim.ComputeResource], True
        )
        virtual_machines = Query.create_container(
            self.auth.session, self.auth.session.content.rootFolder,
            [vim.VirtualMachine], True
        )

        # our cluster object
        cluster_obj = Query.get_obj(clusters.view, cluster)

        if drs_type == 'anti-affinity':

            if function == 'add':

                vm_obj_list = []
                for vm_obj in vms:
                    vm_obj_list.append(Query.get_obj(virtual_machines.view, vm_obj))

                # check to see if this rule name is in use
                if Query.is_anti_affinity_rule(cluster_obj, name):
                    raise ValueError('Error: rule name "%s" is already in use' % name)

                # check to see vms are in the right cluster
                for vm_obj in vm_obj_list:
                    if not Query.is_vm_in_cluster(cluster_obj, vm_obj):
                        raise ValueError(
                            'Error: the vm "%s" is not in the stated cluster' % vm_obj.name
                        )

                # check to see if the vms already have DRS rules
                for vm_obj in vm_obj_list:
                    match = 0
                    for rule in cluster_obj.configuration.rule:
                        if hasattr(rule, 'vm'):
                            for rulevm in rule.vm:
                                if vm_obj == rulevm:
                                    match = 1
                    if match != 0:
                        raise ValueError(
                            'Error: the vm "%s" is already in a DRS rule' % vm_obj.name
                        )

                new_rule = vim.ClusterAntiAffinityRuleSpec()
                new_rule.name = name

                new_rule.userCreated = True
                new_rule.enabled = True
                for vm_obj in vm_obj_list:
                    new_rule.vm.append(vm_obj)

                rule_spec = vim.cluster.RuleSpec(info=new_rule, operation='add')
                config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])
                Tasks.task_monitor(cluster_obj.ReconfigureComputeResource_Task(
                    config_spec, modify=True), False)

                self.logger.info('new AA DRS rule on %s: %s', cluster, name)

            if function == 'delete':
                # delete an anti-affinity rule: check to see if this rule
                # name is in use, and delete it if found
                found = False
                for existing_rule in cluster_obj.configuration.rule:
                    if existing_rule.name == name:
                        found = True
                        # doublecheck this is an AA rule
                        if isinstance(existing_rule, vim.cluster.AntiAffinityRuleSpec):
                            rule_spec = vim.cluster.RuleSpec(
                                removeKey=existing_rule.key, operation='remove')
                            config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])
                            Tasks.task_monitor(cluster_obj.ReconfigureComputeResource_Task(
                                config_spec, modify=True), False)
                            self.logger.info('Deleted AA DRS rule on %s: %s', cluster, name)
                        else:
                            raise ValueError(
                                'Error: rule name "%s" not an AntiAffinity rule' % name
                            )
                if not found:
                    raise ValueError('Error: rule name "%s" not found' % name)
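
A hypothetical options setup for adding an anti-affinity rule (the attribute
names match what drs_rule() reads; clustercfg is assumed to be a ClusterConfig
instance):

    clustercfg.opts.cluster = 'cluster01'
    clustercfg.opts.drs_type = 'anti-affinity'
    clustercfg.opts.function = 'add'
    clustercfg.opts.name = 'vctools-web-aa'   # prefixed per the docstring's convention
    clustercfg.opts.vms = ['web01', 'web02']
    clustercfg.drs_rule()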
Example 12
    def nic_config(cls, edit=False, **kwargs):
        """
        Method returns configured object for network interface.

        kwargs:
            container (obj):  ContainerView object.
            network (str):    Name of network to add to VM.
            connected (bool): Indicates that the device is currently
                connected. Valid only while the virtual machine is running.
            start_connected (bool):
                Specifies whether or not to connect the device when the
                virtual machine starts.
            allow_guest_control (bool):
                Allows the guest to control whether the connectable device
                is connected.
            driver (str): Name of the network adapter driver class.
            switch_type (str): Use "standard" or "distributed" switch for
                networking.
        Returns:
            nic (obj): A configured object for a network device. This should
                be appended to the ConfigSpec deviceChange attribute.
        """
        key = kwargs.get('key', None)
        controller = kwargs.get('controller', None)
        container = kwargs.get('container', None)
        mac_address = kwargs.get('mac_address', None)
        network = kwargs.get('network', None)
        connected = kwargs.get('connected', True)
        start_connected = kwargs.get('start_connected', True)
        allow_guest_control = kwargs.get('allow_guest_control', True)
        unit = kwargs.get('unit', None)
        address_type = kwargs.get('address_type', 'assigned')
        driver = kwargs.get('driver', 'VirtualVmxnet3')
        switch_type = kwargs.get('switch_type', 'standard')

        nic = vim.vm.device.VirtualDeviceSpec()
        nic.device = getattr(vim.vm.device, driver)()

        if edit:
            nic.operation = 'edit'
            nic.device.key = key
            nic.device.controllerKey = controller
            nic.device.macAddress = mac_address
            nic.device.unitNumber = unit
            nic.device.addressType = address_type
        else:
            nic.operation = 'add'

        if switch_type == 'distributed':
            network_obj = Query.get_obj(container, network)
            dvs = network_obj.config.distributedVirtualSwitch
            criteria = vim.dvs.PortCriteria()
            criteria.connected = False
            criteria.inside = True
            criteria.portgroupKey = network_obj.key
            dvports = dvs.FetchDVPorts(criteria)

            if dvports:
                # pylint: disable=line-too-long
                nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
                nic.device.backing.port = vim.dvs.PortConnection()
                nic.device.backing.port.portgroupKey = dvports[0].portgroupKey
                nic.device.backing.port.switchUuid = dvports[0].dvsUuid
                nic.device.backing.port.portKey = dvports[0].key
            else:
                cls.logger.error(
                    'No available distributed virtual port found, so network config failed!'
                )
                cls.logger.debug('%s', dvports)

        elif switch_type == 'standard':
            nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
            nic.device.backing.network = Query.get_obj(container, network)
            nic.device.backing.deviceName = network

        nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
        nic.device.connectable.connected = connected
        nic.device.connectable.startConnected = start_connected
        nic.device.connectable.allowGuestControl = allow_guest_control

        return nic
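
Against a distributed portgroup the call differs only in the switch_type
kwarg (hypothetical names as before):

    nic_spec = VMConfig.nic_config(
        container=cluster_obj.network,
        network='dv-vlan100',
        switch_type='distributed',
    )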
Example 13
    def create_wrapper(self, **spec):
        """
        Wrapper method for creating VMs. If certain information was
        not provided in the yaml config (like a datastore), then the client
        will be prompted to select one inside the cfg_checker method.

        Args:
            yaml_cfg (file): A yaml file containing the necessary information
                for creating a new VM. This file will override the defaults set
                in the dotrc file.
        """

        # create a copy before manipulating the data for vsphere
        server_cfg = copy.deepcopy(spec)

        cluster = spec['vmconfig']['cluster']
        datastore = spec['vmconfig']['datastore']
        folder = spec['vmconfig']['folder']

        del server_cfg['general']['passwd']

        self.logger.info('vmconfig %s', server_cfg)
        cluster_obj = Query.get_obj(self.clusters.view, cluster)

        # list of cdrom and disk devices
        devices = []

        # add the cdrom device
        devices.append(self.cdrom_config())

        scsis = []
        if isinstance(spec['vmconfig']['disks'], dict):
            for scsi, disks in spec['vmconfig']['disks'].iteritems():
                scsis.append(self.scsi_config(scsi))
                devices.append(scsis[scsi][1])
                for disk in enumerate(disks):
                    disk_cfg_opts = {}
                    disk_cfg_opts.update({
                        'container': cluster_obj.datastore,
                        'datastore': datastore,
                        'size': int(disk[1]) * (1024 * 1024),
                        'controller': scsis[scsi][0],
                        'unit': disk[0],
                    })
                    devices.append(self.disk_config(**disk_cfg_opts))
        else:
            # attach up to four disks, each on its own scsi adapter
            for scsi, disk in enumerate(spec['vmconfig']['disks']):
                scsis.append(self.scsi_config(scsi))
                devices.append(scsis[scsi][1])
                disk_cfg_opts = {}
                disk_cfg_opts.update({
                    'container': cluster_obj.datastore,
                    'datastore': datastore,
                    'size': int(disk) * (1024 * 1024),
                    'controller': scsis[scsi][0],
                    'unit': 0,
                })
                devices.append(self.disk_config(**disk_cfg_opts))

        # configure each network and add to devices
        for nic in spec['vmconfig']['nics']:
            nic_cfg_opts = {}
            nic_cfg_opts.update({
                'container': cluster_obj.network,
                'network': nic
            })
            devices.append(self.nic_config(**nic_cfg_opts))

        spec['vmconfig'].update({'deviceChange': devices})

        folder = Query.folders_lookup(self.datacenters.view,
                                      self.opts.datacenter, folder)

        # delete keys that vSphere does not understand, so we can pass it a
        # dictionary to build the VM.
        del spec['vmconfig']['disks']
        del spec['vmconfig']['nics']
        del spec['vmconfig']['folder']
        del spec['vmconfig']['datastore']
        del spec['vmconfig']['datacenter']
        del spec['vmconfig']['cluster']

        pool = cluster_obj.resourcePool

        self.logger.debug(
            '%s %s %s %s %s', folder, datastore, pool, devices, spec
        )
        self.create(folder, datastore, pool, **spec['vmconfig'])

        return server_cfg
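
create_wrapper() accepts vmconfig['disks'] in two shapes, with sizes in GB: a
flat list (up to four disks, one SCSI adapter each) or a dict keyed by SCSI
adapter number. Illustrative values:

    disks_as_list = [50, 100]                  # one disk per adapter, unit 0
    disks_as_dict = {0: [50, 100], 1: [200]}   # adapter -> list of disk sizes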
Example 14
    def cfg_checker(cfg, auth, opts):
        """
        Checks config for a valid configuration, and prompts user if
        information is missing

        Args:
            cfg  (obj): Yaml object
            auth (obj): Authenticated session object
            opts (obj): Parsed argparse options
        """
        clusters = Query.create_container(auth.session,
                                          auth.session.content.rootFolder,
                                          [vim.ComputeResource], True)
        if 'vmconfig' in cfg:

            # name
            if 'name' in cfg['vmconfig']:
                name = cfg['vmconfig']['name']
            else:
                name = Prompts.name()
            # guestid
            if 'guestId' in cfg['vmconfig']:
                guestid = cfg['vmconfig']['guestId']
            else:
                guestid = Prompts.guestids()
                print('\n%s selected.' % (guestid))
            # cluster
            if 'cluster' in cfg['vmconfig']:
                cluster = cfg['vmconfig']['cluster']
                cluster_obj = Query.get_obj(clusters.view, cluster)
            else:
                cluster = Prompts.clusters(auth.session)
                cluster_obj = Query.get_obj(clusters.view, cluster)
                print('\n%s selected.' % (cluster))
            # datastore
            if 'datastore' in cfg['vmconfig']:
                datastore = cfg['vmconfig']['datastore']
            else:
                datastore = Prompts.datastores(auth.session, cluster)
                print('\n%s selected.' % (datastore))
            # datacenter
            if not opts.datacenter:
                datacenter = Prompts.datacenters(auth.session)
                print('\n%s selected.' % (datacenter))
            else:
                datacenter = opts.datacenter
            # nics
            if 'nics' in cfg['vmconfig']:
                nics = cfg['vmconfig']['nics']
                print('nics: %s' % (nics))
            else:
                nics = Prompts.networks(cluster_obj)
                print('\n%s selected.' % (','.join(nics)))
            # folder
            if 'folder' in cfg['vmconfig']:
                folder = cfg['vmconfig']['folder']
            else:
                folder = Prompts.folders(auth.session, datacenter)
                print('\n%s selected.' % (folder))
        else:
            name = Prompts.name()
            guestid = Prompts.guestids()
            print('\n%s selected.' % (guestid))
            cluster = Prompts.clusters(auth.session)
            cluster_obj = Query.get_obj(clusters.view, cluster)
            print('\n%s selected.' % (cluster))
            datastore = Prompts.datastores(auth.session, cluster)
            print('\n%s selected.' % (datastore))
            datacenter = Prompts.datacenters(auth.session)
            print('\n%s selected.' % (datacenter))
            nics = Prompts.networks(cluster_obj)
            print('\n%s selected.' % (','.join(nics)))
            folder = Prompts.folders(auth.session, datacenter)
            print('\n%s selected.' % (folder))

        output = {
            'name': name,
            'guestId': guestid,
            'cluster': cluster,
            'datastore': datastore,
            'nics': nics,
            'folder': folder
        }

        return output
Example 15
    def create_wrapper(self, **spec):
        """
        Wrapper method for creating VMs. If certain information was
        not provided in the yaml config (like a datastore), then the client
        will be prompted to select one inside the cfg_checker method.

        Args:
            yaml_cfg (file): A yaml file containing the necessary information
                for creating a new VM. This file will override the defaults set
                in the dotrc file.
        """

        # create a copy before manipulating the data for vsphere
        server_cfg = copy.deepcopy(spec)

        cluster = spec['vmconfig']['cluster']
        datastore = spec['vmconfig']['datastore']
        folder = spec['vmconfig']['folder']

        if server_cfg.get('general', None):
            del server_cfg['general']['passwd']

        self.logger.info('vmconfig %s', server_cfg)
        cluster_obj = Query.get_obj(self.clusters.view, cluster)

        # list of cdrom and disk devices
        devices = []

        # add the cdrom device
        devices.append(self.cdrom_config())

        scsis = []
        if isinstance(spec['vmconfig']['disks'], dict):
            for scsi, disks in spec['vmconfig']['disks'].items():
                scsis.append(self.scsi_config(scsi))
                devices.append(scsis[scsi][1])
                for disk in enumerate(disks):
                    disk_cfg_opts = {}
                    disk_cfg_opts.update({
                        'container': cluster_obj.datastore,
                        'datastore': datastore,
                        'size': int(disk[1]) * (1024 * 1024),
                        'controller': scsis[scsi][0],
                        'unit': disk[0],
                    })
                    devices.append(self.disk_config(**disk_cfg_opts))
        else:
            # attach up to four disks, each on its own scsi adapter
            for scsi, disk in enumerate(spec['vmconfig']['disks']):
                scsis.append(self.scsi_config(scsi))
                devices.append(scsis[scsi][1])
                disk_cfg_opts = {}
                disk_cfg_opts.update({
                    'container': cluster_obj.datastore,
                    'datastore': datastore,
                    'size': int(disk) * (1024 * 1024),
                    'controller': scsis[scsi][0],
                    'unit': 0,
                })
                devices.append(self.disk_config(**disk_cfg_opts))

        # configure each network and add to devices
        for nic in spec['vmconfig']['nics']:
            nic_cfg_opts = {}

            if spec['vmconfig'].get('switch_type', None) == 'distributed':
                nic_cfg_opts.update({'switch_type': 'distributed'})

            nic_cfg_opts.update({'container': cluster_obj.network, 'network': nic})
            devices.append(self.nic_config(**nic_cfg_opts))

        spec['vmconfig'].update({'deviceChange': devices})

        if self.opts.datacenter:
            folder = Query.folders_lookup(
                self.datacenters.view, self.opts.datacenter, folder
            )
        else:
            folder = Query.folders_lookup(
                self.datacenters.view, spec['vmconfig']['datacenter'], folder
            )

        # delete keys that vSphere does not understand, so we can pass it a
        # dictionary to build the VM.
        del spec['vmconfig']['disks']
        del spec['vmconfig']['nics']
        del spec['vmconfig']['folder']
        del spec['vmconfig']['datastore']
        del spec['vmconfig']['datacenter']
        del spec['vmconfig']['cluster']

        if spec['vmconfig'].get('switch_type', None):
            del spec['vmconfig']['switch_type']

        pool = cluster_obj.resourcePool

        self.logger.debug(
            '%s %s %s %s %s', folder, datastore, pool, devices, spec
        )
        self.create(folder, datastore, pool, **spec['vmconfig'])

        return server_cfg
Exemplo n.º 23
0
    def disk_config(cls, edit=False, **kwargs):
        """
        Method returns configured VirtualDisk object

        Kwargs:
            container (obj): Cluster container object
            datastore (str): Name of datastore for the disk files location.
            size (int):      Integer of disk in kilobytes
            key  (int):      Integer value of scsi device
            unit (int):      unitNumber of device.
            mode (str):      The disk persistence mode.
            thin (bool):     If True, then it enables thin provisioning

        Returns:
            disk (obj): A configured object for a VMDK Disk.  this should
                be appended to ConfigSpec devices attribute.
        """
        # capacityInKB is deprecated but also a required field. See pyVmomi bug #218

        container = kwargs.get('container', None)
        datastore = kwargs.get('datastore', None)
        size = kwargs.get('size', None)
        key = kwargs.get('key', None)
        unit = kwargs.get('unit', 0)
        mode = kwargs.get('mode', 'persistent')
        thin = kwargs.get('thin', True)
        controller = kwargs.get('controller', None)
        filename = kwargs.get('filename', None)

        disk = vim.vm.device.VirtualDeviceSpec()

        if edit:
            disk.operation = 'edit'

            disk.device = vim.vm.device.VirtualDisk()
            disk.device.capacityInKB = size
            disk.device.key = key
            # controllerKey is tied to SCSI Controller
            disk.device.controllerKey = controller
            disk.device.unitNumber = unit
            disk.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
            disk.device.backing.fileName = filename
            disk.device.backing.diskMode = mode

        else:
            disk.operation = 'add'
            disk.fileOperation = 'create'

            disk.device = vim.vm.device.VirtualDisk()
            disk.device.capacityInKB = size
            # controllerKey is tied to SCSI Controller
            disk.device.controllerKey = controller
            disk.device.unitNumber = unit
            disk.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
            # '[datastore]' with no path lets vSphere choose the VMDK location
            disk.device.backing.fileName = '[' + datastore + ']'
            disk.device.backing.datastore = Query.get_obj(container, datastore)
            disk.device.backing.diskMode = mode
            disk.device.backing.thinProvisioned = thin
            disk.device.backing.eagerlyScrub = False

        return disk
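
A minimal usage sketch for the add branch above, assuming the method lives on
a class named VMConfig and that cluster_obj and controller_key have already
been resolved (e.g. via Query); the datastore name is a placeholder:

from pyVmomi import vim

# hypothetical call: add a thin-provisioned 20 GB disk as SCSI unit 1
new_disk = VMConfig.disk_config(
    container=cluster_obj.datastore,  # datastore container of the cluster
    datastore='ds-prod-01',           # placeholder datastore name
    size=20 * 1024 * 1024,            # 20 GB expressed in kilobytes
    controller=controller_key,        # key of the VM's SCSI controller
    unit=1,
)
config_spec = vim.vm.ConfigSpec(deviceChange=[new_disk])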
Example #24
    def main(self):
        """
        The main entry point: it inspects the parsed argparse options and
        dispatches to the matching code blocks.
        """

        try:
            call_count = 0

            self.auth = Auth(self.opts.host)
            self.auth.login(
                self.opts.user, self.opts.passwd, self.opts.domain, self.opts.passwd_file
            )

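            # scrub the password from the parsed options once the session exists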
            self.opts.passwd = None
            self.logger.debug(self.opts)
            virtual_machines_container = Query.create_container(
                self.auth.session, self.auth.session.content.rootFolder,
                [vim.VirtualMachine], True
            )

            self.vmcfg = VMConfigHelper(self.auth, self.opts, argparser.dotrc)
            self.clustercfg = ClusterConfig(self.auth, self.opts, argparser.dotrc)

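            # snapshot the session's API call count so it can be reported at exit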
            call_count = self.auth.session.content.sessionManager.currentSession.callCount

            if not self.opts.datacenter:
                self.opts.datacenter = Prompts.datacenters(self.auth.session)

            if self.opts.cmd == 'create':
                if self.opts.config:
                    for cfg in self.opts.config:
                        spec = self.vmcfg.dict_merge(
                            argparser.dotrc, yaml.load(cfg, Loader=yaml.FullLoader)
                        )
                        cfgcheck_update = CfgCheck.cfg_checker(spec, self.auth, self.opts)
                        spec['vmconfig'].update(
                            self.vmcfg.dict_merge(spec['vmconfig'], cfgcheck_update)
                        )
                        spec = self.vmcfg.pre_create_hooks(**spec)
                        spec = self.vmcfg.create_wrapper(**spec)
                        self.vmcfg.post_create_hooks(**spec)
                        filename = spec['vmconfig']['name'] + '.yaml'
                        server_cfg = {}
                        server_cfg['vmconfig'] = {}
                        server_cfg['vmconfig'].update(spec['vmconfig'])
                        if spec.get('mkbootiso', None):
                            server_cfg['mkbootiso'] = {}
                            server_cfg['mkbootiso'].update(spec['mkbootiso'])

                        # write the rendered config where vctools was invoked,
                        # closing the file handle instead of leaking it
                        cfg_path = os.path.join(os.environ['OLDPWD'], filename)
                        with open(cfg_path, 'w') as cfg_file:
                            cfg_file.write(yaml.dump(server_cfg, default_flow_style=False))

            if self.opts.cmd == 'mount':
                self.vmcfg.mount_wrapper(self.opts.datastore, self.opts.path, *self.opts.name)

            if self.opts.cmd == 'power':
                self.vmcfg.power_wrapper(self.opts.power, *self.opts.name)

            if self.opts.cmd == 'umount':
                self.vmcfg.umount_wrapper(*self.opts.name)

            if self.opts.cmd == 'upload':
                self.vmcfg.upload_wrapper(
                    self.opts.datastore, self.opts.dest,
                    self.opts.verify_ssl, *self.opts.iso
                )

            if self.opts.cmd == 'add':
                vm_obj = Query.get_obj(virtual_machines_container.view, self.opts.name)

                # nics
                if self.opts.device == 'nic':
                    self.vmcfg.add_nic_recfg(vm_obj)

            if self.opts.cmd == 'reconfig':
                host = Query.get_obj(virtual_machines_container.view, self.opts.name)
                if self.opts.cfgs:
                    self.logger.info(
                        'reconfig: %s cfgs: %s', host.name,
                        ' '.join('%s=%s' % (k, v) for k, v in self.opts.cfgs.items())
                    )
                    self.vmcfg.reconfig(host, **self.opts.cfgs)
                if self.opts.folder:
                    self.vmcfg.folder_recfg()
                if self.opts.device == 'disk':
                    self.vmcfg.disk_recfg()
                if self.opts.device == 'nic':
                    self.vmcfg.nic_recfg()

            if self.opts.cmd == 'drs':
                if not self.opts.cluster:
                    self.opts.cluster = Prompts.clusters(self.auth.session)
                self.clustercfg.drs_rule()

            if self.opts.cmd == 'query':
                datacenters_container = Query.create_container(
                    self.auth.session, self.auth.session.content.rootFolder,
                    [vim.Datacenter], True
                )
                clusters_container = Query.create_container(
                    self.auth.session, self.auth.session.content.rootFolder,
                    [vim.ClusterComputeResource], True
                )

                if self.opts.anti_affinity_rules:
                    cluster = self.opts.cluster or Prompts.clusters(self.auth.session)
                    anti_affinity_rules = Query.return_anti_affinity_rules(
                        clusters_container.view, cluster
                    )
                    if not anti_affinity_rules:
                        print('No anti-affinity rules defined.')
                    else:
                        print('Anti-affinity rules:')

                        for key, val in sorted(anti_affinity_rules.items()):
                            print('{0}: {1}'.format(key, ' '.join(sorted(val))))

                if self.opts.datastores:
                    cluster = self.opts.cluster or Prompts.clusters(self.auth.session)
                    datastores = Query.return_datastores(clusters_container.view, cluster)
                    for row in datastores:
                        print('{0:30}\t{1:10}\t{2:10}\t{3:6}\t{4:10}\t{5:6}'.format(*row))

                if self.opts.folders:
                    datacenter = self.opts.datacenter or Prompts.datacenters(self.auth.session)
                    folders = Query.list_vm_folders(datacenters_container.view, datacenter)
                    folders.sort()
                    for folder in folders:
                        print(folder)
                if self.opts.clusters:
                    clusters = Query.list_obj_attrs(clusters_container, 'name')
                    clusters.sort()
                    for cluster in clusters:
                        print(cluster)
                if self.opts.networks:
                    cluster_name = self.opts.cluster or Prompts.clusters(self.auth.session)
                    cluster = Query.get_obj(clusters_container.view, cluster_name)
                    networks = Query.list_obj_attrs(cluster.network, 'name', view=False)
                    networks.sort()
                    for net in networks:
                        print(net)
                if self.opts.vms:
                    vms = Query.list_vm_info(datacenters_container.view, self.opts.datacenter)
                    for key, value in vms.items():
                        print(key, value)
                if self.opts.vmconfig:
                    for name in self.opts.vmconfig:
                        virtmachine = Query.get_obj(virtual_machines_container.view, name)
                        self.logger.debug(virtmachine.config)
                        if self.opts.createcfg:
                            vm_cfg = Query.vm_config(
                                virtual_machines_container.view, name, self.opts.createcfg
                            )
                        else:
                            vm_cfg = Query.vm_config(virtual_machines_container.view, name)
                        print(yaml.dump(vm_cfg, default_flow_style=False))
                if self.opts.vm_by_datastore:
                    cluster = self.opts.cluster
                    datastore = self.opts.datastore
                    # prompt for anything not supplied on the command line
                    if not cluster:
                        cluster = Prompts.clusters(self.auth.session)
                    if not datastore:
                        datastore = Prompts.datastores(self.auth.session, cluster)
                        print()

                    vms = Query.vm_by_datastore(clusters_container.view, cluster, datastore)
                    for vm_name in vms:
                        print(vm_name)

                if self.opts.vm_guest_ids:
                    for guest_id in Query.list_guestids():
                        print(guest_id)

            self.auth.logout()
            self.logger.debug('Call count: %s', call_count)

        except ValueError as err:
            self.logger.error(err, exc_info=False)
            self.auth.logout()
            self.logger.debug('Call count: %s', call_count)
            sys.exit(3)

        except vim.fault.InvalidLogin as loginerr:
            self.logger.error(loginerr.msg, exc_info=False)
            sys.exit(2)

        except ssl.CertificateError as err:
            self.logger.error(err, exc_info=False)
            sys.exit(2)

        except KeyboardInterrupt as err:
            self.logger.error(err, exc_info=False)
            self.auth.logout()
            self.logger.debug('Call count: %s', call_count)
            sys.exit(1)
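
Stripped of the per-command dispatch, the session handling in main() reduces
to the pattern below; the host and user are placeholders, Auth is the wrapper
class used above, and the except blocks above approximate the finally:

from getpass import getpass

auth = Auth('vcenter.example.com')           # placeholder vCenter host
auth.login('admin', getpass(), None, None)   # user, passwd, domain, passwd_file
try:
    pass  # per-command work goes here, mirroring the if blocks above
finally:
    auth.logout()                            # always release the vCenter session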
Example #25
    def drs_rule(self):
        """
        Method manages DRS rules. Currently it handles only anti-affinity
        rules, and only the add and delete operations.

        For safety, it has a concept of a vctools prefix. The prefix lives in
        the rc file, or can be declared by a flag, so you "can't" delete a
        rule that was not created by vctools.

        Args:
            cluster (str): Cluster to modify.
            drs_type (str): Currently only anti-affinity.
            function (add|delete): Operation mode.
            name (str): Name of the rule.
            vms (list): List of VMs (used by add; ignored by delete).

        Returns:
            True if successful.
        """
        cluster = self.opts.cluster
        drs_type = self.opts.drs_type
        name = self.opts.name
        vms = self.opts.vms
        function = self.opts.function

        self.logger.debug('%s %s %s %s %s', cluster, drs_type, name, vms, function)

        # containers we need
        clusters = Query.create_container(self.auth.session,
                                          self.auth.session.content.rootFolder,
                                          [vim.ComputeResource], True)
        virtual_machines = Query.create_container(
            self.auth.session, self.auth.session.content.rootFolder,
            [vim.VirtualMachine], True)

        # our cluster object
        cluster_obj = Query.get_obj(clusters.view, cluster)

        if drs_type == 'anti-affinity':

            if function == 'add':

                vm_obj_list = [
                    Query.get_obj(virtual_machines.view, vm_name)
                    for vm_name in vms
                ]

                # check to see if this rule name is in use
                if Query.is_anti_affinity_rule(cluster_obj, name):
                    raise ValueError(
                        'Error: rule name "%s" is already in use' % name)

                # check that the vms are in the stated cluster
                for vm_obj in vm_obj_list:
                    if not Query.is_vm_in_cluster(cluster_obj, vm_obj):
                        raise ValueError(
                            'Error: the vm "%s" is not in the stated cluster' %
                            vm_obj.name)

                # check to see if the vms already belong to a DRS rule
                for vm_obj in vm_obj_list:
                    in_rule = False
                    for rule in cluster_obj.configuration.rule:
                        if hasattr(rule, 'vm'):
                            for rulevm in rule.vm:
                                if vm_obj == rulevm:
                                    in_rule = True
                    if in_rule:
                        raise ValueError(
                            'Error: the vm "%s" is already in a DRS rule' %
                            vm_obj.name)

                new_rule = vim.ClusterAntiAffinityRuleSpec()
                new_rule.name = name

                new_rule.userCreated = True
                new_rule.enabled = True
                for vm_obj in vm_obj_list:
                    new_rule.vm.append(vm_obj)

                rule_spec = vim.cluster.RuleSpec(info=new_rule,
                                                 operation='add')
                config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])
                Tasks.task_monitor(
                    cluster_obj.ReconfigureComputeResource_Task(config_spec,
                                                                modify=True),
                    False)

                self.logger.info('new AA DRS rule on %s: %s', cluster, name)

            if function == 'delete':
                # delete an anti-affinity rule: check whether the rule name is
                # in use, and delete it if found
                found = False
                for existing_rule in cluster_obj.configuration.rule:
                    if existing_rule.name == name:
                        found = True
                        # doublecheck this is an AA rule
                        if isinstance(existing_rule,
                                      vim.cluster.AntiAffinityRuleSpec):
                            rule_spec = vim.cluster.RuleSpec(
                                removeKey=existing_rule.key,
                                operation='remove')
                            config_spec = vim.cluster.ConfigSpecEx(
                                rulesSpec=[rule_spec])
                            Tasks.task_monitor(
                                cluster_obj.ReconfigureComputeResource_Task(
                                    config_spec, modify=True), False)
                            self.logger.info('Deleted AA DRS rule on %s: %s',
                                             cluster, name)
                        else:
                            raise ValueError(
                                'Error: rule name "%s" not an AntiAffinity rule'
                                % name)
                if not found:
                    raise ValueError('Error: rule name "%s" not found' % name)
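
For reference, the add path above boils down to the pyVmomi sequence below;
cluster_obj, vm_a, and vm_b are assumed to be already-resolved managed
objects, and the rule name is a placeholder:

from pyVmomi import vim

# build the anti-affinity rule itself
rule = vim.ClusterAntiAffinityRuleSpec()
rule.name = 'keep-apart'
rule.userCreated = True
rule.enabled = True
rule.vm.append(vm_a)   # VMs that must not share an ESXi host
rule.vm.append(vm_b)

# wrap the rule in a spec and reconfigure the cluster with it
rule_spec = vim.cluster.RuleSpec(info=rule, operation='add')
config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])
task = cluster_obj.ReconfigureComputeResource_Task(config_spec, modify=True)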