def clusters(cls, session):
    """
    Prompt the user to select a cluster.

    Args:
        session (obj): Auth session object

    Returns:
        str: Name of the selected cluster
    """
    clusters_choices = Query.create_container(
        session, session.content.rootFolder, [vim.ComputeResource], True
    )
    cluster_names = Query.list_obj_attrs(clusters_choices, 'name')
    cluster_names.sort()

    for num, opt in enumerate(cluster_names, start=1):
        print('%s: %s' % (num, opt))

    while True:
        try:
            val = int(raw_input('\nPlease select number: ').strip())
        except ValueError:
            # non-numeric input previously crashed with an uncaught
            # ValueError; re-prompt instead.
            print('Invalid number.')
            continue
        # enumeration starts at 1, so valid answers are 1..len; the old
        # check (val <= len) let 0 and negatives index from the end.
        if 1 <= val <= len(cluster_names):
            selected_cluster = cluster_names[val - 1]
            break
        print('Invalid number.')

    cls.logger.info(selected_cluster)
    return selected_cluster
def umount_wrapper(self, *names):
    """
    Unmount the attached ISO from multiple VMs.

    Args:
        names (tuple): VM names in vCenter.
    """
    for name in names:
        print('Umount ISO from %s' % (name))
        host = Query.get_obj(self.virtual_machines.view, name)
        key, controller = Query.get_key(host, 'CD/DVD')
        self.logger.info('ISO on %s', name)
        cdrom_cfg = []
        cdrom_cfg_opts = {
            'umount': True,
            'key': key,
            'controller': controller,
        }
        cdrom_cfg.append(self.cdrom_config(**cdrom_cfg_opts))
        config = {'deviceChange': cdrom_cfg}
        # pass a format string: logger.debug(host, config) treated `host`
        # as the format string and silently dropped `config`.
        self.logger.debug('%s %s', host, config)
        self.reconfig(host, **config)
def folders(cls, session, datacenter):
    """
    Prompt the user to select a folder from a datacenter.

    Args:
        session (obj): Auth session object
        datacenter (str): Name of datacenter

    Returns:
        str: Name of the selected folder
    """
    datacenters = Query.create_container(
        session, session.content.rootFolder, [vim.Datacenter], True
    )
    folders = Query.list_vm_folders(datacenters.view, datacenter)
    folders.sort()

    for num, opt in enumerate(folders, start=1):
        print('%s: %s' % (num, opt))

    while True:
        try:
            val = int(raw_input('\nPlease select number: ').strip())
        except ValueError:
            # non-numeric input previously crashed; re-prompt instead.
            print('Invalid number.')
            continue
        # enumeration starts at 1, so valid answers are 1..len; the old
        # check (val <= len) let 0 and negatives index from the end.
        if 1 <= val <= len(folders):
            selected_folder = folders[val - 1]
            break
        print('Invalid number.')

    cls.logger.info(selected_folder)
    return selected_folder
def clusters(cls, session):
    """
    Prompt the user to select a cluster.

    Args:
        session (obj): Auth session object

    Returns:
        str: Name of the selected cluster
    """
    clusters_choices = Query.create_container(
        session, session.content.rootFolder, [vim.ComputeResource], True
    )
    cluster_names = Query.list_obj_attrs(clusters_choices, 'name')
    cluster_names.sort()

    for num, opt in enumerate(cluster_names, start=1):
        print('%s: %s' % (num, opt))

    while True:
        try:
            val = int(raw_input('\nPlease select number: ').strip())
        except ValueError:
            # non-numeric input previously crashed with an uncaught
            # ValueError; re-prompt instead.
            print('Invalid number.')
            continue
        # enumeration starts at 1, so valid answers are 1..len; the old
        # check (val <= len) let 0 and negatives index from the end.
        if 1 <= val <= len(cluster_names):
            selected_cluster = cluster_names[val - 1]
            break
        print('Invalid number.')

    cls.logger.info(selected_cluster)
    return selected_cluster
def mount_wrapper(self, datastore, path, *names):
    """
    Mount an ISO on multiple VMs.

    Args:
        datastore (str): Name of datastore where the ISO is located.
        path (str): Path inside datastore where the ISO is located.
        names (tuple): VM names in vCenter.
    """
    for name in names:
        host = Query.get_obj(self.virtual_machines.view, name)
        print('Mounting [%s] %s on %s' % (datastore, path, name))
        cdrom_cfg = []
        key, controller = Query.get_key(host, 'CD/DVD')
        # NOTE(review): 'iso_name' is set to the VM name, not an ISO
        # filename -- confirm against cdrom_config's expected contract.
        cdrom_cfg_opts = {
            'datastore': datastore,
            'iso_path': path,
            'iso_name': name,
            'key': key,
            'controller': controller,
        }
        cdrom_cfg.append(self.cdrom_config(**cdrom_cfg_opts))
        config = {'deviceChange': cdrom_cfg}
        # pass a format string: the old call treated cdrom_cfg_opts as
        # the format string and silently dropped `config`.
        self.logger.debug('%s %s', cdrom_cfg_opts, config)
        self.reconfig(host, **config)
def umount_wrapper(self, *names):
    """
    Unmount the attached ISO from multiple VMs.

    Args:
        names (tuple): VM names in vCenter.
    """
    for name in names:
        print('Umount ISO from %s' % (name))
        host = Query.get_obj(self.virtual_machines.view, name)
        key, controller = Query.get_key(host, 'CD/DVD')
        self.logger.info('ISO on %s', name)
        cdrom_cfg = []
        cdrom_cfg_opts = {
            'umount': True,
            'key': key,
            'controller': controller,
        }
        cdrom_cfg.append(self.cdrom_config(**cdrom_cfg_opts))
        config = {'deviceChange': cdrom_cfg}
        # pass a format string: logger.debug(host, config) treated `host`
        # as the format string and silently dropped `config`.
        self.logger.debug('%s %s', host, config)
        self.reconfig(host, **config)
def nic_recfg(self):
    """ Reconfigure a VM network adapter. """
    devices = []
    edit = True
    host = Query.get_obj(self.virtual_machines.view, self.opts.name)
    nic_cfg_opts = {}
    label = self.opts.nic_prefix + ' ' + str(self.opts.nic_id)
    try:
        key, controller = Query.get_key(host, label)
    except IOError:
        # the label was not found; the old `pass` left key/controller
        # undefined and triggered a NameError below. Bail out instead.
        self.logger.error('%s: no device labeled "%s"', host.name, label)
        return
    if self.opts.nic_id:
        for item in host.config.hardware.device:
            if label == item.deviceInfo.label:
                if self.opts.network:
                    nic_cfg_opts.update({
                        'key': key,
                        'controller': controller,
                        'container': host.runtime.host.network,
                        'network': self.opts.network,
                        'mac_address': item.macAddress,
                        'unit': item.unitNumber,
                    })
                    if self.opts.driver == 'e1000':
                        nic_cfg_opts.update({'driver': 'VirtualE1000'})
                    devices.append(
                        self.nic_config(edit=edit, **nic_cfg_opts)
                    )
    if devices:
        self.logger.info(
            '%s label: %s %s network: %s', host.name,
            self.opts.nic_prefix, self.opts.nic_id, self.opts.network
        )
        self.reconfig(host, **{'deviceChange': devices})
def folder_recfg(self):
    """ Relocate a VM into a different folder. """
    vm_obj = Query.get_obj(self.virtual_machines.view, self.opts.name)
    target = Query.folders_lookup(
        self.datacenters.view, self.opts.datacenter, self.opts.folder
    )
    self.logger.info('%s folder: %s', vm_obj.name, self.opts.folder)
    self.mvfolder(vm_obj, target)
def mount_wrapper(self, datastore, path, *names):
    """
    Mount an ISO on multiple VMs.

    Args:
        datastore (str): Name of datastore where the ISO is located.
        path (str): Path inside datastore where the ISO is located.
        names (tuple): VM names in vCenter.
    """
    for name in names:
        host = Query.get_obj(self.virtual_machines.view, name)
        print('Mounting [%s] %s on %s' % (datastore, path, name))
        cdrom_cfg = []
        key, controller = Query.get_key(host, 'CD/DVD')
        # NOTE(review): 'iso_name' is set to the VM name, not an ISO
        # filename -- confirm against cdrom_config's expected contract.
        cdrom_cfg_opts = {
            'datastore': datastore,
            'iso_path': path,
            'iso_name': name,
            'key': key,
            'controller': controller,
        }
        cdrom_cfg.append(self.cdrom_config(**cdrom_cfg_opts))
        config = {'deviceChange': cdrom_cfg}
        # pass a format string: the old call treated cdrom_cfg_opts as
        # the format string and silently dropped `config`.
        self.logger.debug('%s %s', cdrom_cfg_opts, config)
        self.reconfig(host, **config)
def folders(cls, session, datacenter):
    """
    Prompt the user to select a folder from a datacenter.

    Args:
        session (obj): Auth session object
        datacenter (str): Name of datacenter

    Returns:
        str: Name of the selected folder
    """
    datacenters = Query.create_container(
        session, session.content.rootFolder, [vim.Datacenter], True
    )
    folders = Query.list_vm_folders(datacenters.view, datacenter)
    folders.sort()

    for num, opt in enumerate(folders, start=1):
        print('%s: %s' % (num, opt))

    while True:
        try:
            val = int(raw_input('\nPlease select number: ').strip())
        except ValueError:
            # non-numeric input previously crashed; re-prompt instead.
            print('Invalid number.')
            continue
        # enumeration starts at 1, so valid answers are 1..len; the old
        # check (val <= len) let 0 and negatives index from the end.
        if 1 <= val <= len(folders):
            selected_folder = folders[val - 1]
            break
        print('Invalid number.')

    cls.logger.info(selected_folder)
    return selected_folder
def folder_recfg(self):
    """ Relocate a VM into a different folder. """
    vm_obj = Query.get_obj(self.virtual_machines.view, self.opts.name)
    target = Query.folders_lookup(
        self.datacenters.view, self.opts.datacenter, self.opts.folder
    )
    self.logger.info('%s folder: %s', vm_obj.name, self.opts.folder)
    self.mvfolder(vm_obj, target)
def disk_recfg(self):
    """ Reconfigure (grow) a VM disk. Shrinking is refused. """
    devices = []
    edit = True
    host = Query.get_obj(self.virtual_machines.view, self.opts.name)
    disk_cfg_opts = {}
    # GB -> KB multiplier (capacityInKB is expressed in kilobytes)
    tokbytes = 1024 * 1024
    label = self.opts.disk_prefix + ' ' + str(self.opts.disk_id)
    try:
        key, controller = Query.get_key(host, label)
    except IOError:
        # the label was not found; the old `pass` left key/controller
        # undefined and triggered a NameError below. Bail out instead.
        self.logger.error('%s: no device labeled "%s"', host.name, label)
        return
    if self.opts.disk_id:
        for item in host.config.hardware.device:
            if label == item.deviceInfo.label:
                disk_new_size = self.opts.sizeGB * tokbytes
                current_size = item.capacityInKB
                current_size_gb = int(current_size / (1024 * 1024))
                if disk_new_size == current_size:
                    # dropped the pointless ''.format() on this literal
                    raise ValueError(
                        'New size and existing size are equal'
                    )
                if disk_new_size < current_size:
                    raise ValueError(
                        'Size {0} does not exceed {1}'.format(
                            disk_new_size, current_size
                        )
                    )
                disk_delta = disk_new_size - current_size
                ds_capacity_kb = item.backing.datastore.summary.capacity / 1024
                ds_free_kb = item.backing.datastore.summary.freeSpace / 1024
                # refuse the grow if it would leave the datastore with
                # less than 10% free space
                threshold_pct = 0.10
                if (ds_free_kb - disk_delta) / ds_capacity_kb < threshold_pct:
                    raise ValueError(
                        '{0} {1} disk space low, aborting.'.format(
                            host.resourcePool.parent.name,
                            item.backing.datastore.name
                        )
                    )
                disk_cfg_opts.update({
                    'size': disk_new_size,
                    'key': key,
                    'controller': controller,
                    'unit': item.unitNumber,
                    'filename': item.backing.fileName,
                })
    if disk_cfg_opts:
        devices.append(self.disk_config(edit=edit, **disk_cfg_opts))
        self.logger.info(
            '%s label: %s %s current_size: %s new_size: %s',
            host.name, self.opts.disk_prefix, self.opts.disk_id,
            current_size_gb, self.opts.sizeGB
        )
        self.reconfig(host, **{'deviceChange': devices})
def disk_recfg(self):
    """ Reconfigure (grow) a VM disk. Shrinking is refused. """
    devices = []
    edit = True
    host = Query.get_obj(self.virtual_machines.view, self.opts.name)
    disk_cfg_opts = {}
    # GB -> KB multiplier (capacityInKB is expressed in kilobytes)
    tokbytes = 1024 * 1024
    label = self.opts.disk_prefix + ' ' + str(self.opts.disk_id)
    try:
        key, controller = Query.get_key(host, label)
    except IOError:
        # the label was not found; the old `pass` left key/controller
        # undefined and triggered a NameError below. Bail out instead.
        self.logger.error('%s: no device labeled "%s"', host.name, label)
        return
    if self.opts.disk_id:
        for item in host.config.hardware.device:
            if label == item.deviceInfo.label:
                disk_new_size = self.opts.sizeGB * tokbytes
                current_size = item.capacityInKB
                current_size_gb = int(current_size / (1024 * 1024))
                if disk_new_size == current_size:
                    # dropped the pointless ''.format() on this literal
                    raise ValueError(
                        'New size and existing size are equal'
                    )
                if disk_new_size < current_size:
                    raise ValueError(
                        'Size {0} does not exceed {1}'.format(
                            disk_new_size, current_size
                        )
                    )
                disk_delta = disk_new_size - current_size
                ds_capacity_kb = item.backing.datastore.summary.capacity / 1024
                ds_free_kb = item.backing.datastore.summary.freeSpace / 1024
                # refuse the grow if it would leave the datastore with
                # less than 10% free space
                threshold_pct = 0.10
                if (ds_free_kb - disk_delta) / ds_capacity_kb < threshold_pct:
                    raise ValueError(
                        '{0} {1} disk space low, aborting.'.format(
                            host.resourcePool.parent.name,
                            item.backing.datastore.name
                        )
                    )
                disk_cfg_opts.update({
                    'size': disk_new_size,
                    'key': key,
                    'controller': controller,
                    'unit': item.unitNumber,
                    'filename': item.backing.fileName,
                })
    if disk_cfg_opts:
        devices.append(self.disk_config(edit=edit, **disk_cfg_opts))
        self.logger.info(
            '%s label: %s %s current_size: %s new_size: %s',
            host.name, self.opts.disk_prefix, self.opts.disk_id,
            current_size_gb, self.opts.sizeGB
        )
        self.reconfig(host, **{'deviceChange': devices})
def datastores(cls, session, cluster):
    """
    Prompt the user to select a datastore from a cluster.

    Args:
        session (obj): Auth session object
        cluster (str): Name of cluster

    Returns:
        str: Name of the selected datastore
    """
    clusters = Query.create_container(
        session, session.content.rootFolder, [vim.ComputeResource], True
    )
    datastores = Query.return_datastores(clusters.view, cluster)
    # column layout shared by the header row and the option rows
    row_fmt = '{0:30}\t{1:10}\t{2:10}\t{3:6}\t{4:10}\t{5:6}'
    print('\n')
    # the first row is header information, so it is not an option
    if (len(datastores) - 1) == 0:
        print('No Datastores Found.')
        sys.exit(1)
    else:
        print('%s Datastores Found.\n' % (len(datastores) - 1))
    for num, opt in enumerate(datastores):
        if num == 0:
            print('\t%s' % (row_fmt.format(*opt)))
        else:
            print('%s: %s' % (num, row_fmt.format(*opt)))
    while True:
        try:
            val = int(raw_input('\nPlease select number: ').strip())
        except ValueError:
            # non-numeric input previously crashed; re-prompt instead.
            print('Invalid number')
            continue
        if 0 < val <= (len(datastores) - 1):
            break
        print('Invalid number')
    datastore = datastores[val][0]
    cls.logger.info(datastore)
    return datastore
def __init__(self, auth, opts, dotrc):
    VMConfig.__init__(self)
    self.auth = auth
    self.opts = opts
    self.dotrc = dotrc
    # container views over every object type this class operates on,
    # all rooted at the same inventory root folder
    root = self.auth.session.content.rootFolder
    self.datacenters = Query.create_container(
        self.auth.session, root, [vim.Datacenter], True
    )
    self.clusters = Query.create_container(
        self.auth.session, root, [vim.ComputeResource], True
    )
    self.folders = Query.create_container(
        self.auth.session, root, [vim.Folder], True
    )
    self.virtual_machines = Query.create_container(
        self.auth.session, root, [vim.VirtualMachine], True
    )
def nic_config(cls, edit=False, **kwargs):
    """
    Return a configured VirtualDeviceSpec for a network interface.

    Kwargs:
        container (obj): ContainerView object.
        network (str): Name of network to add to VM.
        connected (bool): Indicates that the device is currently
            connected. Valid only while the virtual machine is running.
        start_connected (bool): Specifies whether or not to connect the
            device when the virtual machine starts.
        allow_guest_control (bool): Allows the guest to control whether
            the connectable device is connected.
        driver (str): A str that represents a network adapter driver

    Returns:
        nic (obj): A configured object for a Network device. this
            should be appended to ConfigSpec devices attribute.
    """
    key = kwargs.get('key', None)
    controller = kwargs.get('controller', None)
    container = kwargs.get('container', None)
    mac_address = kwargs.get('mac_address', None)
    network = kwargs.get('network', None)
    connected = kwargs.get('connected', True)
    start_connected = kwargs.get('start_connected', True)
    # BUG FIX: previously read the misspelled key 'allow_get_control',
    # silently ignoring the documented kwarg. The misspelling is still
    # honored as a fallback for backward compatibility.
    allow_guest_control = kwargs.get(
        'allow_guest_control', kwargs.get('allow_get_control', True)
    )
    unit = kwargs.get('unit', None)
    address_type = kwargs.get('address_type', 'assigned')
    driver = kwargs.get('driver', 'VirtualVmxnet3')

    nic = vim.vm.device.VirtualDeviceSpec()
    nic.device = getattr(vim.vm.device, driver)()
    if edit:
        nic.operation = 'edit'
        nic.device.key = key
        nic.device.controllerKey = controller
        nic.device.macAddress = mac_address
        nic.device.unitNumber = unit
        nic.device.addressType = address_type
    else:
        nic.operation = 'add'
    nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
    nic.device.backing.network = Query.get_obj(container, network)
    nic.device.backing.deviceName = network
    nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
    nic.device.connectable.connected = connected
    nic.device.connectable.startConnected = start_connected
    nic.device.connectable.allowGuestControl = allow_guest_control
    return nic
def datastores(cls, session, cluster):
    """
    Prompt the user to select a datastore from a cluster.

    Args:
        session (obj): Auth session object
        cluster (str): Name of cluster

    Returns:
        str: Name of the selected datastore
    """
    clusters = Query.create_container(
        session, session.content.rootFolder, [vim.ComputeResource], True
    )
    datastores = Query.return_datastores(clusters.view, cluster)
    # column layout shared by the header row and the option rows
    row_fmt = '{0:30}\t{1:10}\t{2:10}\t{3:6}\t{4:10}\t{5:6}'
    print('\n')
    # the first row is header information, so it is not an option
    if (len(datastores) - 1) == 0:
        print('No Datastores Found.')
        sys.exit(1)
    else:
        print('%s Datastores Found.\n' % (len(datastores) - 1))
    for num, opt in enumerate(datastores):
        if num == 0:
            print('\t%s' % (row_fmt.format(*opt)))
        else:
            print('%s: %s' % (num, row_fmt.format(*opt)))
    while True:
        try:
            val = int(raw_input('\nPlease select number: ').strip())
        except ValueError:
            # non-numeric input previously crashed; re-prompt instead.
            print('Invalid number')
            continue
        if 0 < val <= (len(datastores) - 1):
            break
        print('Invalid number')
    datastore = datastores[val][0]
    cls.logger.info(datastore)
    return datastore
def nic_config(cls, edit=False, **kwargs):
    """
    Return a configured VirtualDeviceSpec for a network interface.

    Kwargs:
        container (obj): ContainerView object.
        network (str): Name of network to add to VM.
        connected (bool): Indicates that the device is currently
            connected. Valid only while the virtual machine is running.
        start_connected (bool): Specifies whether or not to connect the
            device when the virtual machine starts.
        allow_guest_control (bool): Allows the guest to control whether
            the connectable device is connected.
        driver (str): A str that represents a network adapter driver

    Returns:
        nic (obj): A configured object for a Network device. this
            should be appended to ConfigSpec devices attribute.
    """
    key = kwargs.get('key', None)
    controller = kwargs.get('controller', None)
    container = kwargs.get('container', None)
    mac_address = kwargs.get('mac_address', None)
    network = kwargs.get('network', None)
    connected = kwargs.get('connected', True)
    start_connected = kwargs.get('start_connected', True)
    # BUG FIX: previously read the misspelled key 'allow_get_control',
    # silently ignoring the documented kwarg. The misspelling is still
    # honored as a fallback for backward compatibility.
    allow_guest_control = kwargs.get(
        'allow_guest_control', kwargs.get('allow_get_control', True)
    )
    unit = kwargs.get('unit', None)
    address_type = kwargs.get('address_type', 'assigned')
    driver = kwargs.get('driver', 'VirtualVmxnet3')

    nic = vim.vm.device.VirtualDeviceSpec()
    nic.device = getattr(vim.vm.device, driver)()
    if edit:
        nic.operation = 'edit'
        nic.device.key = key
        nic.device.controllerKey = controller
        nic.device.macAddress = mac_address
        nic.device.unitNumber = unit
        nic.device.addressType = address_type
    else:
        nic.operation = 'add'
    nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
    nic.device.backing.network = Query.get_obj(container, network)
    nic.device.backing.deviceName = network
    nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
    nic.device.connectable.connected = connected
    nic.device.connectable.startConnected = start_connected
    nic.device.connectable.allowGuestControl = allow_guest_control
    return nic
def __init__(self, auth, opts, dotrc):
    VMConfig.__init__(self)
    self.auth = auth
    self.opts = opts
    self.dotrc = dotrc
    # container views over every object type this class operates on,
    # all rooted at the same inventory root folder
    root = self.auth.session.content.rootFolder
    self.datacenters = Query.create_container(
        self.auth.session, root, [vim.Datacenter], True
    )
    self.clusters = Query.create_container(
        self.auth.session, root, [vim.ComputeResource], True
    )
    self.folders = Query.create_container(
        self.auth.session, root, [vim.Folder], True
    )
    self.virtual_machines = Query.create_container(
        self.auth.session, root, [vim.VirtualMachine], True
    )
def power_wrapper(self, state, *names):
    """
    Change the power state on multiple VMs.

    Args:
        state (str): choices: on, off, reset, reboot, shutdown
        names (tuple): VM names in vCenter.
    """
    for name in names:
        host = Query.get_obj(self.virtual_machines.view, name)
        print('%s changing power state to %s' % (name, state))
        # pass a format string: logger.debug(host, state) treated `host`
        # as the format string and silently dropped `state`.
        self.logger.debug('%s %s', host, state)
        self.power(host, state)
def upload_wrapper(self, datastore, dest, verify_ssl, *isos):
    """
    Upload multiple ISOs into a datastore.

    Args:
        datastore (str): Name of the destination datastore.
        dest (str): Folder inside the datastore to upload into.
        verify_ssl (bool): Whether to verify the server certificate.
        isos (tuple): absolute paths of local ISOs to upload.
    """
    for iso in isos:
        print(
            'Uploading ISO: %s, file size: %s, remote location: [%s] %s' % (
                iso, Query.disk_size_format(os.path.getsize(iso)),
                datastore, dest
            )
        )
        self.logger.info(
            'Uploading ISO: %s, file size: %s, remote location: [%s] %s',
            iso, Query.disk_size_format(os.path.getsize(iso)),
            datastore, dest
        )
        upload_args = {
            'host': self.opts.host,
            'cookie': self.auth.session._stub.cookie,
            'datacenter': self.opts.datacenter,
            'dest_folder': dest,
            'datastore': datastore,
            'iso': iso,
            'verify': verify_ssl,
        }
        result = self.upload_iso(**upload_args)
        # pass a format string: the old call dropped upload_args.
        self.logger.debug('%s %s', result, upload_args)
        # BUG FIX: `result == 200 or 201` was always truthy, so the
        # failure branch was unreachable. Test membership instead.
        if result in (200, 201):
            self.logger.info(
                'result: %s %s uploaded successfully', result, iso
            )
        else:
            self.logger.error('result: %s %s upload failed', result, iso)
def power_wrapper(self, state, *names):
    """
    Change the power state on multiple VMs.

    Args:
        state (str): choices: on, off, reset, reboot, shutdown
        names (tuple): VM names in vCenter.
    """
    for name in names:
        host = Query.get_obj(self.virtual_machines.view, name)
        print('%s changing power state to %s' % (name, state))
        # pass a format string: logger.debug(host, state) treated `host`
        # as the format string and silently dropped `state`.
        self.logger.debug('%s %s', host, state)
        self.power(host, state)
def nic_recfg(self):
    """ Reconfigure a VM network adapter. """
    devices = []
    edit = True
    host = Query.get_obj(self.virtual_machines.view, self.opts.name)
    nic_cfg_opts = {}
    label = self.opts.nic_prefix + ' ' + str(self.opts.nic_id)
    try:
        key, controller = Query.get_key(host, label)
    except IOError:
        # the label was not found; the old `pass` left key/controller
        # undefined and triggered a NameError below. Bail out instead.
        self.logger.error('%s: no device labeled "%s"', host.name, label)
        return
    if self.opts.nic_id:
        for item in host.config.hardware.device:
            if label == item.deviceInfo.label:
                if self.opts.network:
                    nic_cfg_opts.update({
                        'key': key,
                        'controller': controller,
                        'container': host.runtime.host.network,
                        'network': self.opts.network,
                        'mac_address': item.macAddress,
                        'unit': item.unitNumber,
                    })
                    if self.opts.driver == 'e1000':
                        nic_cfg_opts.update({'driver': 'VirtualE1000'})
                    devices.append(
                        self.nic_config(edit=edit, **nic_cfg_opts)
                    )
    if devices:
        self.logger.info(
            '%s label: %s %s network: %s', host.name,
            self.opts.nic_prefix, self.opts.nic_id, self.opts.network
        )
        self.reconfig(host, **{'deviceChange': devices})
def upload_wrapper(self, datastore, dest, verify_ssl, *isos):
    """
    Upload multiple ISOs into a datastore.

    Args:
        datastore (str): Name of the destination datastore.
        dest (str): Folder inside the datastore to upload into.
        verify_ssl (bool): Whether to verify the server certificate.
        isos (tuple): absolute paths of local ISOs to upload.
    """
    for iso in isos:
        print(
            'Uploading ISO: %s, file size: %s, remote location: [%s] %s' % (
                iso, Query.disk_size_format(os.path.getsize(iso)),
                datastore, dest
            )
        )
        self.logger.info(
            'Uploading ISO: %s, file size: %s, remote location: [%s] %s',
            iso, Query.disk_size_format(os.path.getsize(iso)),
            datastore, dest
        )
        upload_args = {
            'host': self.opts.host,
            'cookie': self.auth.session._stub.cookie,
            'datacenter': self.opts.datacenter,
            'dest_folder': dest,
            'datastore': datastore,
            'iso': iso,
            'verify': verify_ssl,
        }
        result = self.upload_iso(**upload_args)
        # pass a format string: the old call dropped upload_args.
        self.logger.debug('%s %s', result, upload_args)
        # BUG FIX: `result == 200 or 201` was always truthy, so the
        # failure branch was unreachable. Test membership instead.
        if result in (200, 201):
            self.logger.info(
                'result: %s %s uploaded successfully', result, iso
            )
        else:
            self.logger.error('result: %s %s upload failed', result, iso)
def guestids(cls):
    """
    Prompt the user to select a guest ID (supported OS).

    Returns:
        str: The selected guest ID
    """
    guestids = Query.list_guestids()
    for num, guestid in enumerate(guestids, start=1):
        print('%s:%s' % (num, guestid))
    while True:
        try:
            val = int(raw_input('\nPlease select number: ').strip())
        except ValueError:
            # non-numeric input previously crashed; re-prompt instead.
            print('Invalid number.')
            continue
        # enumeration starts at 1, so valid answers are 1..len; the old
        # check (val <= len) let 0 and negatives index from the end.
        if 1 <= val <= len(guestids):
            selected_guestid = guestids[val - 1]
            break
        print('Invalid number.')
    cls.logger.info(selected_guestid)
    return selected_guestid
def guestids(cls):
    """
    Prompt the user to select a guest ID (supported OS).

    Returns:
        str: The selected guest ID
    """
    guestids = Query.list_guestids()
    for num, guestid in enumerate(guestids, start=1):
        print('%s:%s' % (num, guestid))
    while True:
        try:
            val = int(raw_input('\nPlease select number: ').strip())
        except ValueError:
            # non-numeric input previously crashed; re-prompt instead.
            print('Invalid number.')
            continue
        # enumeration starts at 1, so valid answers are 1..len; the old
        # check (val <= len) let 0 and negatives index from the end.
        if 1 <= val <= len(guestids):
            selected_guestid = guestids[val - 1]
            break
        print('Invalid number.')
    cls.logger.info(selected_guestid)
    return selected_guestid
def disk_config(cls, edit=False, **kwargs):
    """
    Build and return a VirtualDeviceSpec for a VMDK disk.

    Kwargs:
        container (obj): Cluster container object
        datastore (str): Name of datastore for the disk files location.
        size (int): Integer of disk in kilobytes
        key (int): Integer value of scsi device
        unit (int): unitNumber of device.
        mode (str): The disk persistence mode.
        thin (bool): If True, then it enables thin provisioning

    Returns:
        disk (obj): A configured object for a VMDK Disk. this should
            be appended to ConfigSpec devices attribute.
    """
    # capacityInKB is deprecated but also a required field. See pyVmomi bug #218
    container = kwargs.get('container', None)
    datastore = kwargs.get('datastore', None)
    size = kwargs.get('size', None)
    key = kwargs.get('key', None)
    unit = kwargs.get('unit', 0)
    mode = kwargs.get('mode', 'persistent')
    thin = kwargs.get('thin', True)
    controller = kwargs.get('controller', None)
    filename = kwargs.get('filename', None)

    spec = vim.vm.device.VirtualDeviceSpec()
    if edit:
        # editing an existing disk: point at its current backing file
        spec.operation = 'edit'
        spec.device = vim.vm.device.VirtualDisk()
        spec.device.capacityInKB = size
        spec.device.key = key
        # controllerKey is tied to SCSI Controller
        spec.device.controllerKey = controller
        spec.device.unitNumber = unit
        spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
        spec.device.backing.fileName = filename
        spec.device.backing.diskMode = mode
    else:
        # creating a new disk inside the named datastore
        spec.operation = 'add'
        spec.fileOperation = 'create'
        spec.device = vim.vm.device.VirtualDisk()
        spec.device.capacityInKB = size
        # controllerKey is tied to SCSI Controller
        spec.device.controllerKey = controller
        spec.device.unitNumber = unit
        spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
        spec.device.backing.fileName = '[' + datastore + ']'
        spec.device.backing.datastore = Query.get_obj(container, datastore)
        spec.device.backing.diskMode = mode
        spec.device.backing.thinProvisioned = thin
        spec.device.backing.eagerlyScrub = False
    return spec
def cfg_checker(cfg, auth, opts):
    """
    Check the config for required values, prompting the user for any
    information that is missing.

    Args:
        cfg (obj): Yaml object
        auth (obj): Auth session object
        opts (obj): Parsed CLI options

    Returns:
        dict: resolved vm config (name, guestId, cluster, datastore,
            nics, folder)
    """
    clusters = Query.create_container(
        auth.session, auth.session.content.rootFolder,
        [vim.ComputeResource], True
    )
    if 'vmconfig' in cfg:
        # name
        if 'name' in cfg['vmconfig']:
            name = cfg['vmconfig']['name']
        else:
            name = Prompts.name()
        # guestid
        if 'guestId' in cfg['vmconfig']:
            guestid = cfg['vmconfig']['guestId']
        else:
            guestid = Prompts.guestids()
            print('\n%s selected.' % (guestid))
        # cluster
        if 'cluster' in cfg['vmconfig']:
            cluster = cfg['vmconfig']['cluster']
            cluster_obj = Query.get_obj(clusters.view, cluster)
        else:
            cluster = Prompts.clusters(auth.session)
            cluster_obj = Query.get_obj(clusters.view, cluster)
            print('\n%s selected.' % (cluster))
        # datastore
        if 'datastore' in cfg['vmconfig']:
            datastore = cfg['vmconfig']['datastore']
        else:
            datastore = Prompts.datastores(auth.session, cluster)
            print('\n%s selected.' % (datastore))
        # datacenter
        if not opts.datacenter:
            datacenter = Prompts.datacenters(auth.session)
            print('\n%s selected.' % (datacenter))
        else:
            datacenter = opts.datacenter
        # nics
        if 'nics' in cfg['vmconfig']:
            nics = cfg['vmconfig']['nics']
            print('nics: %s' % (nics))
        else:
            nics = Prompts.networks(cluster_obj)
            print('\n%s selected.' % (','.join(nics)))
        # folder
        if 'folder' in cfg['vmconfig']:
            folder = cfg['vmconfig']['folder']
        else:
            folder = Prompts.folders(auth.session, datacenter)
            print('\n%s selected.' % (folder))
    else:
        name = Prompts.name()
        guestid = Prompts.guestids()
        print('\n%s selected.' % (guestid))
        cluster = Prompts.clusters(auth.session)
        # BUG FIX: cluster_obj was never assigned on this path, causing
        # a NameError at Prompts.networks(cluster_obj) below.
        cluster_obj = Query.get_obj(clusters.view, cluster)
        print('\n%s selected.' % (cluster))
        datastore = Prompts.datastores(auth.session, cluster)
        print('\n%s selected.' % (datastore))
        datacenter = Prompts.datacenters(auth.session)
        print('\n%s selected.' % (datacenter))
        nics = Prompts.networks(cluster_obj)
        print('\n%s selected.' % (','.join(nics)))
        folder = Prompts.folders(auth.session, datacenter)
        print('\n%s selected.' % (folder))
    # NOTE(review): `datacenter` is resolved but not returned -- confirm
    # whether callers expect it in this dict.
    output = {
        'name': name,
        'guestId': guestid,
        'cluster': cluster,
        'datastore': datastore,
        'nics': nics,
        'folder': folder
    }
    return output
def networks(cls, net_obj):
    """
    Prompt the user to select one or more networks. Since multiple
    networks can be added to a VM, the user is re-prompted until they
    quit. Networks should be selected in the order the interfaces are
    wanted on the VM; e.g. the first selection becomes eth0.

    Args:
        net_obj (cls): class that has a network managed object attribute

    Returns:
        list: the selected network names, in selection order
    """
    # BUG FIX: getattr without a default raises AttributeError when the
    # attribute is missing, so the intended ValueError below was
    # unreachable. Supply a default so the check actually runs.
    if getattr(net_obj, 'network', None):
        networks = Query.list_obj_attrs(net_obj.network, 'name', view=False)
        networks.sort()
    else:
        raise ValueError(
            'network managed object not found in %s' % (type(net_obj))
        )
    print('\n')
    print('%s Networks Found.\n' % (len(networks)))
    for num, opt in enumerate(networks, start=1):
        print('%s: %s' % (num, opt))
    selected_networks = []
    while True:
        if selected_networks:
            print('selected: ' + ','.join(selected_networks))
        val = raw_input(
            '\nPlease select number:\n(Q)uit (S)how Networks\n'
        ).strip()
        # the selection may be a number or a menu letter; try the
        # numeric interpretation first.
        try:
            # enumeration starts at 1, so valid answers are 1..len; the
            # old check (val <= len) let 0 and negatives index from the
            # end of the list.
            if 1 <= int(val) <= len(networks):
                selected_networks.append(networks[int(val) - 1])
                continue
            else:
                print('Invalid number.')
                continue
        except ValueError:
            if val == 'Q':
                break
            elif val == 'S':
                for num, opt in enumerate(networks, start=1):
                    print('%s: %s' % (num, opt))
            else:
                print('Invalid option.')
                continue
    cls.logger.info(selected_networks)
    return selected_networks
def drs_rule(self):
    """ Method messes with DRS rules. Currently only Anti Affinity rules,
        and only add or delete. For safety, it has a concept of a vctools
        prefix. The prefix lives in the rc file, or can be declared by a
        flag. This is so you "can't" delete a rule that was not created
        by vctools.

        Args:
            cluster (str): cluster to modify
            type (str): currently only anti-affinity
            oper (add|delete): operation mode
            name (str): name of the rule
            vms (list): list of vms (to add, not used for delete)

        Returns true if successful.
    """
    cluster = self.opts.cluster
    drs_type = self.opts.drs_type
    name = self.opts.name
    vms = self.opts.vms
    function = self.opts.function
    # NOTE(review): logger.debug is given several positional args; the
    # first is treated as the format string, so the rest are likely
    # dropped -- confirm and consider '%s %s %s %s %s' form.
    self.logger.debug(cluster, drs_type, name, vms, function)
    # containers we need
    clusters = Query.create_container(
        self.auth.session, self.auth.session.content.rootFolder,
        [vim.ComputeResource], True)
    virtual_machines = Query.create_container(
        self.auth.session, self.auth.session.content.rootFolder,
        [vim.VirtualMachine], True)
    # our cluster object
    cluster_obj = Query.get_obj(clusters.view, cluster)
    if drs_type == 'anti-affinity':
        if function == 'add':
            # resolve each VM name to its managed object
            vm_obj_list = []
            for vm_obj in vms:
                vm_obj_list.append(
                    Query.get_obj(virtual_machines.view, vm_obj))
            # check to see if this rule name is in use
            if Query.is_anti_affinity_rule(cluster_obj, name):
                raise ValueError(
                    'Error: rule name "%s" is already in use' % name)
            # check to see vms are in the right cluster
            for vm_obj in vm_obj_list:
                if not Query.is_vm_in_cluster(cluster_obj, vm_obj):
                    raise ValueError(
                        'Error: the vm "%s" is not in the stated cluster'
                        % vm_obj.name)
            # check to see if the vms already have DRS rules; a VM may
            # belong to at most one rule
            for vm_obj in vm_obj_list:
                match = 0
                for rule in cluster_obj.configuration.rule:
                    if hasattr(rule, 'vm'):
                        for rulevm in rule.vm:
                            if vm_obj == rulevm:
                                match = 1
                if match != 0:
                    raise ValueError(
                        'Error: the vm "%s" is already in a DRS rule'
                        % vm_obj.name)
            # all checks passed: build and apply the new rule
            new_rule = vim.ClusterAntiAffinityRuleSpec()
            new_rule.name = name
            new_rule.userCreated = True
            new_rule.enabled = True
            for vm_obj in vm_obj_list:
                new_rule.vm.append(vm_obj)
            rule_spec = vim.cluster.RuleSpec(info=new_rule, operation='add')
            config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])
            Tasks.task_monitor(
                cluster_obj.ReconfigureComputeResource_Task(
                    config_spec, modify=True), False)
            self.logger.info('new AA DRS rule on %s: %s', cluster, name)
        if function == 'delete':
            #Delete an AntiAffinity Rule
            # check to see if this rule name is in use, and delete if found
            found = False
            for existing_rule in cluster_obj.configuration.rule:
                if existing_rule.name == name:
                    found = True
                    # doublecheck this is an AA rule
                    if isinstance(existing_rule,
                                  vim.cluster.AntiAffinityRuleSpec):
                        rule_spec = vim.cluster.RuleSpec(
                            removeKey=existing_rule.key, operation='remove')
                        config_spec = vim.cluster.ConfigSpecEx(
                            rulesSpec=[rule_spec])
                        Tasks.task_monitor(
                            cluster_obj.ReconfigureComputeResource_Task(
                                config_spec, modify=True), False)
                        self.logger.info(
                            'Deleted AA DRS rule on %s: %s', cluster, name)
                    else:
                        raise ValueError(
                            'Error: rule name "%s" not an AntiAffinity rule'
                            % name)
            if not found:
                raise ValueError('Error: rule name "%s" not found' % name)
def create_wrapper(self, **spec):
    """
    Wrapper method for creating VMs. If certain information was not provided
    in the yaml config (like a datastore), then the client will be prompted to
    select one inside the cfg_checker method.

    Args:
        yaml_cfg (file): A yaml file containing the necessary information for
            creating a new VM. This file will override the defaults set in the
            dotrc file.

    Returns:
        server_cfg (dict): sanitized copy of the spec (password removed),
            suitable for writing back out as a record of the build.
    """
    # create a copy before manipulating the data for vsphere
    server_cfg = copy.deepcopy(spec)

    cluster = spec['vmconfig']['cluster']
    datastore = spec['vmconfig']['datastore']
    folder = spec['vmconfig']['folder']

    if server_cfg.get('general', None):
        # fix: pop() instead of del so a config whose 'general' section has
        # no 'passwd' key does not raise KeyError
        server_cfg['general'].pop('passwd', None)

    self.logger.info('vmconfig %s', server_cfg)
    cluster_obj = Query.get_obj(self.clusters.view, cluster)

    # list of cdrom and disk devices
    devices = []

    # add the cdrom device
    devices.append(self.cdrom_config())

    scsis = []
    if isinstance(spec['vmconfig']['disks'], dict):
        # dict form: {scsi_adapter_number: [disk sizes in GB]}
        # NOTE(review): indexing scsis[scsi] assumes the dict keys are the
        # consecutive integers 0..n in iteration order — confirm with callers
        for scsi, disks in spec['vmconfig']['disks'].items():
            scsis.append(self.scsi_config(scsi))
            devices.append(scsis[scsi][1])
            for disk in enumerate(disks):
                disk_cfg_opts = {}
                disk_cfg_opts.update(
                    {
                        'container' : cluster_obj.datastore,
                        'datastore' : datastore,
                        # sizes are provided in GB; vSphere expects KB
                        'size' : int(disk[1]) * (1024*1024),
                        'controller' : scsis[scsi][0],
                        'unit' : disk[0],
                    }
                )
                devices.append(self.disk_config(**disk_cfg_opts))
    else:
        # attach up to four disks, each on its own scsi adapter
        for scsi, disk in enumerate(spec['vmconfig']['disks']):
            scsis.append(self.scsi_config(scsi))
            devices.append(scsis[scsi][1])
            disk_cfg_opts = {}
            disk_cfg_opts.update(
                {
                    'container' : cluster_obj.datastore,
                    'datastore' : datastore,
                    'size' : int(disk) * (1024*1024),
                    'controller' : scsis[scsi][0],
                    'unit' : 0,
                }
            )
            devices.append(self.disk_config(**disk_cfg_opts))

    # configure each network and add to devices
    for nic in spec['vmconfig']['nics']:
        nic_cfg_opts = {}
        if spec['vmconfig'].get('switch_type', None) == 'distributed':
            nic_cfg_opts.update({'switch_type' : 'distributed'})
        nic_cfg_opts.update({'container' : cluster_obj.network, 'network' : nic})
        devices.append(self.nic_config(**nic_cfg_opts))

    spec['vmconfig'].update({'deviceChange':devices})

    # the --datacenter flag overrides the yaml value
    if self.opts.datacenter:
        folder = Query.folders_lookup(
            self.datacenters.view, self.opts.datacenter, folder)
    else:
        folder = Query.folders_lookup(
            self.datacenters.view, spec['vmconfig']['datacenter'], folder)

    # delete keys that vSphere does not understand, so we can pass it a
    # dictionary to build the VM.
    del spec['vmconfig']['disks']
    del spec['vmconfig']['nics']
    del spec['vmconfig']['folder']
    del spec['vmconfig']['datastore']
    del spec['vmconfig']['datacenter']
    del spec['vmconfig']['cluster']
    if spec['vmconfig'].get('switch_type', None):
        del spec['vmconfig']['switch_type']

    pool = cluster_obj.resourcePool
    self.logger.debug(folder, datastore, pool, devices, spec)
    self.create(folder, datastore, pool, **spec['vmconfig'])

    return server_cfg
def create():
    """
    POST /mkbootiso/mkbootiso <json>

    Create ISOs on remote server. This route supports basic Anaconda
    configuration options to create an ISO with specific network information.
    This information can be used for automating server installations with
    static IPs.

    Dependencies:
        python 2.7+
        genisoimage

    Preparation:
        Download an ISO from a vendor that supports Anaconda.

        Mount it using the loop option:
            mount -o loop rhel-server-7.2-x86_64-boot.iso /mnt/tmp/rhel7/

        Copy necessary contents to a folder. In this example, only isolinux/
        is needed. Copying only mandatory files will keep the size down to
        save bandwidth and disk space.
            cp -a /mnt/tmp/rhel7/isolinux/ /opt/isos/rhel7/

    Permissions:
        The Apache user should have write permissions to files inside
        isolinux/, and write permissions to the output directories.

    Example:
        curl -i -k -H "Content-Type: application/json" -X POST \\
        https://hostname.domain.com/mkbootiso/mkbootiso \\
        -d @- << EOF
        {
            "source" : "/opt/isos/rhel7",
            "ks" : "http://ks.domain.com/rhel7-ks.cfg",
            "options" : {
                "biosdevname" : "0",
                "gateway" : "10.10.10.1",
                "hostname" : "hostname.domain.com",
                "ip" : "10.10.10.10",
                "nameserver" : "4.2.2.2",
                "net.ifnames" : "0",
                "netmask" : "255.255.255.0"
            },
            "output": "/tmp"
        }
        EOF
    """
    # GET returns this route's docstring as self-documentation
    if request.method == 'GET':
        return textwrap.dedent(create.__doc__)

    if request.method == 'POST':
        data = request.get_json()

        # render the isolinux boot menu with the kickstart URL and any
        # extra kernel boot options supplied by the caller
        label = """
        default vesamenu.c32
        display boot.msg
        timeout 5
        label iso created by {0}
        menu default
        kernel vmlinuz
        append initrd=initrd.img {1} {2}
        """.format(
            __name__, 'ks=' + data['ks'],
            # fix: items() instead of py2-only iteritems() so the route also
            # runs under python 3
            ' '.join("%s=%s" % (key, val) for (key, val) in data['options'].items()))

        # update the iso boot configuration in place
        with open(data['source'] + '/isolinux/isolinux.cfg', 'w') as iso_cfg:
            iso_cfg.write(textwrap.dedent(label))

        # default the output filename to <hostname>.iso
        if not data.get('filename', None):
            data.update({'filename': data['options']['hostname'] + '.iso'})

        cmd = """
        /usr/bin/genisoimage -quiet -J -T -o {0} -b isolinux/isolinux.bin
        -c isolinux/boot.cat -no-emul-boot -boot-load-size 4
        -boot-info-table -R -m TRANS.TBL -graft-points {1}""".format(
            data['output'] + '/' + data['filename'], data['source'])

        # create the iso.
        # fix: pipe stderr as well — previously communicate() always returned
        # stderr=None because it was never captured, so genisoimage errors
        # could not be logged.
        create_iso = subprocess.Popen(
            cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            shell=False)
        stdout, stderr = create_iso.communicate()

        if stdout:
            mkbootiso.logger.info(stdout)
        if stderr:
            mkbootiso.logger.error(stderr)

        if create_iso.returncode == 0:
            iso_size = Query.disk_size_format(
                os.stat(data['output'] + '/' + data['filename']).st_size)
            return '{0} {1}\n'.format(data['output'] + '/' + data['filename'], iso_size)

    return None
def main(self):
    """
    This is the main method, which parses all the argparse options and runs
    the necessary code blocks if True.

    Raises:
        SystemExit: exit code 3 on ValueError, 2 on invalid login,
            1 on keyboard interrupt.
    """
    # fix: initialize before the try block — every except handler logs
    # call_count, which previously raised NameError if the failure happened
    # before the session was established (e.g. a login error or Ctrl-C).
    call_count = 0
    try:
        self.auth = Auth(self.opts.host)
        self.auth.login(
            self.opts.user, self.opts.passwd, self.opts.domain, self.opts.passwd_file)
        # drop the password from memory as soon as login is done
        self.opts.passwd = None
        self.logger.debug(self.opts)

        virtual_machines_container = Query.create_container(
            self.auth.session, self.auth.session.content.rootFolder,
            [vim.VirtualMachine], True)

        self.vmcfg = VMConfigHelper(self.auth, self.opts, argparser.dotrc)
        self.clustercfg = ClusterConfig(self.auth, self.opts, argparser.dotrc)
        call_count = self.auth.session.content.sessionManager.currentSession.callCount

        if self.opts.cmd == 'create':
            if self.opts.config:
                for cfg in self.opts.config:
                    # merge dotrc defaults with the yaml config, then fill in
                    # anything missing via interactive prompts
                    spec = self.vmcfg.dict_merge(argparser.dotrc, yaml.load(cfg))
                    cfgcheck_update = CfgCheck.cfg_checker(spec, self.auth, self.opts)
                    spec['vmconfig'].update(
                        self.vmcfg.dict_merge(spec['vmconfig'], cfgcheck_update))
                    spec = self.vmcfg.pre_create_hooks(**spec)
                    spec = self.vmcfg.create_wrapper(**spec)
                    self.vmcfg.post_create_hooks(**spec)
                    # write a sanitized record of the build next to the cwd
                    filename = spec['vmconfig']['name'] + '.yaml'
                    server_cfg = {}
                    server_cfg['vmconfig'] = {}
                    server_cfg['vmconfig'].update(spec['vmconfig'])
                    if spec.get('mkbootiso', None):
                        server_cfg['mkbootiso'] = {}
                        server_cfg['mkbootiso'].update(spec['mkbootiso'])
                    # fix: use a context manager so the file handle is closed
                    # (previously file=open(...) leaked the handle)
                    with open(filename, 'w') as cfg_file:
                        print(yaml.dump(server_cfg, default_flow_style=False),
                              file=cfg_file)

        if self.opts.cmd == 'mount':
            self.vmcfg.mount_wrapper(self.opts.datastore, self.opts.path,
                                     *self.opts.name)

        if self.opts.cmd == 'power':
            self.vmcfg.power_wrapper(self.opts.power, *self.opts.name)

        if self.opts.cmd == 'umount':
            self.vmcfg.umount_wrapper(*self.opts.name)

        if self.opts.cmd == 'upload':
            self.vmcfg.upload_wrapper(self.opts.datastore, self.opts.dest,
                                      self.opts.verify_ssl, *self.opts.iso)

        if self.opts.cmd == 'add':
            hostname = Query.get_obj(virtual_machines_container.view, self.opts.name)
            # nics
            if self.opts.device == 'nic':
                self.vmcfg.add_nic_recfg(hostname)

        if self.opts.cmd == 'reconfig':
            host = Query.get_obj(virtual_machines_container.view, self.opts.name)
            if self.opts.cfgs:
                self.logger.info(
                    'reconfig: %s cfgs: %s', host.name,
                    ' '.join('%s=%s' % (k, v)
                             for k, v in self.opts.cfgs.iteritems()))
                self.vmcfg.reconfig(host, **self.opts.cfgs)
            if self.opts.folder:
                self.vmcfg.folder_recfg()
            if self.opts.device == 'disk':
                self.vmcfg.disk_recfg()
            if self.opts.device == 'nic':
                self.vmcfg.nic_recfg()

        if self.opts.cmd == 'drs':
            if not self.opts.cluster:
                self.opts.cluster = Prompts.clusters(self.auth.session)
            self.clustercfg.drs_rule()

        if self.opts.cmd == 'query':
            datacenters_container = Query.create_container(
                self.auth.session, self.auth.session.content.rootFolder,
                [vim.Datacenter], True)
            clusters_container = Query.create_container(
                self.auth.session, self.auth.session.content.rootFolder,
                [vim.ClusterComputeResource], True)

            if self.opts.anti_affinity_rules:
                if self.opts.cluster:
                    anti_affinity_rules = Query.return_anti_affinity_rules(
                        clusters_container.view, self.opts.cluster)
                else:
                    cluster = Prompts.clusters(self.auth.session)
                    anti_affinity_rules = Query.return_anti_affinity_rules(
                        clusters_container.view, cluster)
                if not anti_affinity_rules:
                    print('No antiaffinity rules defined.')
                else:
                    print('Antiaffinity rules:')
                    for key, val in sorted(anti_affinity_rules.iteritems()):
                        print('{0}: {1}'.format(key, ' '.join(sorted(val))))

            if self.opts.datastores:
                if self.opts.cluster:
                    datastores = Query.return_datastores(
                        clusters_container.view, self.opts.cluster)
                else:
                    cluster = Prompts.clusters(self.auth.session)
                    datastores = Query.return_datastores(
                        clusters_container.view, cluster)
                for row in datastores:
                    print('{0:30}\t{1:10}\t{2:10}\t{3:6}\t{4:10}\t{5:6}'.format(*row))

            if self.opts.folders:
                if self.opts.datacenter:
                    folders = Query.list_vm_folders(
                        datacenters_container.view, self.opts.datacenter)
                    folders.sort()
                    for folder in folders:
                        print(folder)
                else:
                    datacenter = Prompts.datacenters(self.auth.session)
                    folders = Query.list_vm_folders(
                        datacenters_container.view, datacenter)
                    folders.sort()
                    for folder in folders:
                        print(folder)

            if self.opts.clusters:
                clusters = Query.list_obj_attrs(clusters_container, 'name')
                clusters.sort()
                for cluster in clusters:
                    print(cluster)

            if self.opts.networks:
                if self.opts.cluster:
                    cluster = Query.get_obj(clusters_container.view, self.opts.cluster)
                    networks = Query.list_obj_attrs(cluster.network, 'name', view=False)
                    networks.sort()
                    for net in networks:
                        print(net)
                else:
                    cluster_name = Prompts.clusters(self.auth.session)
                    cluster = Query.get_obj(clusters_container.view, cluster_name)
                    networks = Query.list_obj_attrs(cluster.network, 'name', view=False)
                    networks.sort()
                    for net in networks:
                        print(net)

            if self.opts.vms:
                vms = Query.list_vm_info(datacenters_container.view,
                                         self.opts.datacenter)
                for key, value in vms.iteritems():
                    print(key, value)

            if self.opts.vmconfig:
                for name in self.opts.vmconfig:
                    virtmachine = Query.get_obj(
                        virtual_machines_container.view, name)
                    self.logger.debug(virtmachine.config)
                    if self.opts.createcfg:
                        print(
                            yaml.dump(Query.vm_config(
                                virtual_machines_container.view, name,
                                self.opts.createcfg),
                                      default_flow_style=False))
                    else:
                        print(
                            yaml.dump(Query.vm_config(
                                virtual_machines_container.view, name),
                                      default_flow_style=False))

            if self.opts.vm_by_datastore:
                if self.opts.cluster and self.opts.datastore:
                    vms = Query.vm_by_datastore(clusters_container.view,
                                                self.opts.cluster,
                                                self.opts.datastore)
                    for vm_name in vms:
                        print(vm_name)
                else:
                    # fix: seed from the options so cluster/datastore are
                    # always bound — previously, supplying exactly one of
                    # --cluster/--datastore left the other name undefined
                    # and raised UnboundLocalError below.
                    cluster = self.opts.cluster
                    datastore = self.opts.datastore
                    if not cluster:
                        cluster = Prompts.clusters(self.auth.session)
                    if not datastore:
                        datastore = Prompts.datastores(
                            self.auth.session, cluster)
                        print()
                    vms = Query.vm_by_datastore(clusters_container.view,
                                                cluster, datastore)
                    for vm_name in vms:
                        print(vm_name)

            if self.opts.vm_guest_ids:
                for guest_id in Query.list_guestids():
                    print(guest_id)

        self.auth.logout()
        self.logger.debug('Call count: {0}'.format(call_count))
    except ValueError as err:
        self.logger.error(err, exc_info=False)
        self.auth.logout()
        self.logger.debug('Call count: {0}'.format(call_count))
        sys.exit(3)
    except vim.fault.InvalidLogin as loginerr:
        self.logger.error(loginerr.msg, exc_info=False)
        sys.exit(2)
    except KeyboardInterrupt as err:
        self.logger.error(err, exc_info=False)
        self.auth.logout()
        self.logger.debug('Call count: {0}'.format(call_count))
        sys.exit(1)
def cfg_checker(cfg, auth, opts):
    """
    Checks config for a valid configuration, and prompts user if
    information is missing.

    Args:
        cfg (obj): Yaml object
        auth (obj): Auth session object
        opts (obj): argparse options (datacenter may override prompting)

    Returns:
        output (dict): name, guestId, cluster, datastore, datacenter,
            nics and folder — each taken from the config when present,
            otherwise from an interactive prompt.
    """
    clusters = Query.create_container(
        auth.session, auth.session.content.rootFolder, [vim.ComputeResource], True
    )
    if 'vmconfig' in cfg:
        # name
        if 'name' in cfg['vmconfig']:
            name = cfg['vmconfig']['name']
        else:
            name = Prompts.name()
        # guestid
        if 'guestId' in cfg['vmconfig']:
            guestid = cfg['vmconfig']['guestId']
        else:
            guestid = Prompts.guestids()
            print('\n%s guestid selected.' % (guestid))
        # cluster (the managed object is needed later for network prompts)
        if 'cluster' in cfg['vmconfig']:
            cluster = cfg['vmconfig']['cluster']
            cluster_obj = Query.get_obj(clusters.view, cluster)
        else:
            cluster = Prompts.clusters(auth.session)
            cluster_obj = Query.get_obj(clusters.view, cluster)
            print('\n%s cluster selected.' % (cluster))
        # datastore
        if 'datastore' in cfg['vmconfig']:
            datastore = cfg['vmconfig']['datastore']
        else:
            datastore = Prompts.datastores(auth.session, cluster)
            print('\n%s datastore selected.' % (datastore))
        # datacenter (the command-line flag takes precedence over prompting)
        if not opts.datacenter:
            datacenter = Prompts.datacenters(auth.session)
            print('\n%s datacenter selected.' % (datacenter))
        else:
            datacenter = opts.datacenter
        # nics
        if 'nics' in cfg['vmconfig']:
            nics = cfg['vmconfig']['nics']
            print('nics: %s' % (nics))
        else:
            nics = Prompts.networks(cluster_obj)
            print('\n%s networks selected.' % (','.join(nics)))
        # folder
        if 'folder' in cfg['vmconfig']:
            folder = cfg['vmconfig']['folder']
        else:
            folder = Prompts.folders(auth.session, datacenter)
            print('\n%s folder selected.' % (folder))
    else:
        # no vmconfig section at all: prompt for everything
        name = Prompts.name()
        guestid = Prompts.guestids()
        print('\n%s selected.' % (guestid))
        cluster = Prompts.clusters(auth.session)
        print('\n%s selected.' % (cluster))
        # fix: cluster_obj was never assigned in this branch, so the
        # Prompts.networks() call below raised NameError
        cluster_obj = Query.get_obj(clusters.view, cluster)
        datastore = Prompts.datastores(auth.session, cluster)
        print('\n%s selected.' % (datastore))
        datacenter = Prompts.datacenters(auth.session)
        print('\n%s selected.' % (datacenter))
        nics = Prompts.networks(cluster_obj)
        print('\n%s selected.' % (','.join(nics)))
        folder = Prompts.folders(auth.session, datacenter)
        print('\n%s selected.' % (folder))
    output = {
        'name': name, 'guestId': guestid, 'cluster': cluster,
        'datastore': datastore, 'datacenter': datacenter, 'nics': nics,
        'folder': folder
    }
    return output
def networks(cls, net_obj): """ Method will prompt user to select a networks. Since multiple networks can be added to a VM, it will prompt the user to exit or add more. The networks should be selected in the order of which they want the interfaces set on the VM. For example, the first network selected will be configured on eth0 on the VM. Args: session (obj): Auth session object net_obj (cls): class has network managed object attribute multiple (bool): Allow for method to accept multiple inputs, otherwise it will return the first selection Returns: selected_networks (list): A list of selected networks """ if getattr(net_obj, 'network'): networks = Query.list_obj_attrs(net_obj.network, 'name', view=False) networks.sort() else: raise ValueError('network managed object not found in %s' % (type(net_obj))) print('\n') print('%s Networks Found.\n' % (len(networks))) for num, opt in enumerate(networks, start=1): print('%s: %s' % (num, opt)) selected_networks = [] while True: if selected_networks: print('selected: ' + ','.join(selected_networks)) val = raw_input( '\nPlease select number:\n(Q)uit (S)how Networks\n').strip() # need to test whether selection is an integer or not. try: if int(val) <= len(networks): # need to substract 1 since we start enumeration at 1. val = int(val) - 1 selected_networks.append(networks[val]) continue else: print('Invalid number.') continue except ValueError: if val == 'Q': break elif val == 'S': for num, opt in enumerate(networks, start=1): print('%s: %s' % (num, opt)) else: print('Invalid option.') continue cls.logger.info(selected_networks) return selected_networks
def create_wrapper(self, **spec):
    """
    Wrapper method for creating VMs. If certain information was not provided
    in the yaml config (like a datastore), then the client will be prompted to
    select one inside the cfg_checker method.

    Args:
        yaml_cfg (file): A yaml file containing the necessary information for
            creating a new VM. This file will override the defaults set in the
            dotrc file.

    Returns:
        server_cfg (dict): sanitized copy of the spec (password removed).
    """
    # create a copy before manipulating the data for vsphere
    server_cfg = copy.deepcopy(spec)

    cluster = spec['vmconfig']['cluster']
    datastore = spec['vmconfig']['datastore']
    folder = spec['vmconfig']['folder']

    # fix: guard the section and pop the key — the unconditional
    # del server_cfg['general']['passwd'] raised KeyError whenever the
    # config had no 'general' section or no stored password
    if server_cfg.get('general', None):
        server_cfg['general'].pop('passwd', None)

    self.logger.info('vmconfig %s', server_cfg)
    cluster_obj = Query.get_obj(self.clusters.view, cluster)

    # list of cdrom and disk devices
    devices = []

    # add the cdrom device
    devices.append(self.cdrom_config())

    scsis = []
    if isinstance(spec['vmconfig']['disks'], dict):
        # dict form: {scsi_adapter_number: [disk sizes in GB]}
        # NOTE(review): indexing scsis[scsi] assumes dict keys are the
        # consecutive integers 0..n in iteration order — confirm with callers
        for scsi, disks in spec['vmconfig']['disks'].iteritems():
            scsis.append(self.scsi_config(scsi))
            devices.append(scsis[scsi][1])
            for disk in enumerate(disks):
                disk_cfg_opts = {}
                disk_cfg_opts.update({
                    'container': cluster_obj.datastore,
                    'datastore': datastore,
                    # sizes are provided in GB; vSphere expects KB
                    'size': int(disk[1]) * (1024 * 1024),
                    'controller': scsis[scsi][0],
                    'unit': disk[0],
                })
                devices.append(self.disk_config(**disk_cfg_opts))
    else:
        # attach up to four disks, each on its own scsi adapter
        for scsi, disk in enumerate(spec['vmconfig']['disks']):
            scsis.append(self.scsi_config(scsi))
            devices.append(scsis[scsi][1])
            disk_cfg_opts = {}
            disk_cfg_opts.update({
                'container': cluster_obj.datastore,
                'datastore': datastore,
                'size': int(disk) * (1024 * 1024),
                'controller': scsis[scsi][0],
                'unit': 0,
            })
            devices.append(self.disk_config(**disk_cfg_opts))

    # configure each network and add to devices
    for nic in spec['vmconfig']['nics']:
        nic_cfg_opts = {}
        nic_cfg_opts.update({
            'container': cluster_obj.network,
            'network': nic
        })
        devices.append(self.nic_config(**nic_cfg_opts))

    spec['vmconfig'].update({'deviceChange': devices})

    folder = Query.folders_lookup(self.datacenters.view, self.opts.datacenter, folder)

    # delete keys that vSphere does not understand, so we can pass it a
    # dictionary to build the VM.
    del spec['vmconfig']['disks']
    del spec['vmconfig']['nics']
    del spec['vmconfig']['folder']
    del spec['vmconfig']['datastore']
    del spec['vmconfig']['datacenter']
    del spec['vmconfig']['cluster']

    pool = cluster_obj.resourcePool
    self.logger.debug(folder, datastore, pool, devices, spec)
    self.create(folder, datastore, pool, **spec['vmconfig'])

    return server_cfg
def nic_config(cls, edit=False, **kwargs):
    """
    Method returns configured object for network interface.

    kwargs:
        container (obj): ContainerView object.
        network (str): Name of network to add to VM.
        connected (bool): Indicates that the device is currently
            connected. Valid only while the virtual machine is running.
        start_connected (bool): Specifies whether or not to connect the
            device when the virtual machine starts.
        allow_guest_control (bool): Allows the guest to control whether
            the connectable device is connected.
        driver (str): A str that represents a network adapter driver
        switch_type (str): Use "standard" or "distributed" switch for
            networking.

    Returns:
        nic (obj): A configured object for a Network device. this should
            be appended to ConfigSpec devices attribute.
    """
    key = kwargs.get('key', None)
    controller = kwargs.get('controller', None)
    container = kwargs.get('container', None)
    mac_address = kwargs.get('mac_address', None)
    network = kwargs.get('network', None)
    connected = kwargs.get('connected', True)
    start_connected = kwargs.get('start_connected', True)
    # fix: the option was read from the misspelled key 'allow_get_control',
    # silently ignoring callers that passed allow_guest_control. Read the
    # correct key first; keep the old misspelling as a fallback so any
    # existing caller that used it keeps working.
    allow_guest_control = kwargs.get(
        'allow_guest_control', kwargs.get('allow_get_control', True))
    unit = kwargs.get('unit', None)
    address_type = kwargs.get('address_type', 'assigned')
    driver = kwargs.get('driver', 'VirtualVmxnet3')
    switch_type = kwargs.get('switch_type', 'standard')

    nic = vim.vm.device.VirtualDeviceSpec()
    # instantiate the requested adapter driver class, e.g. VirtualVmxnet3
    nic.device = getattr(vim.vm.device, driver)()

    if edit:
        nic.operation = 'edit'
        nic.device.key = key
        nic.device.controllerKey = controller
        nic.device.macAddress = mac_address
        nic.device.unitNumber = unit
        nic.device.addressType = address_type
    else:
        nic.operation = 'add'

        if switch_type == 'distributed':
            network_obj = Query.get_obj(container, network)
            dvs = network_obj.config.distributedVirtualSwitch
            # find a free port inside the portgroup on the DVS
            criteria = vim.dvs.PortCriteria()
            criteria.connected = False
            criteria.inside = True
            criteria.portgroupKey = network_obj.key
            dvports = dvs.FetchDVPorts(criteria)
            if dvports:
                # pylint: disable=line-too-long
                nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
                nic.device.backing.port = vim.dvs.PortConnection()
                nic.device.backing.port.portgroupKey = dvports[0].portgroupKey
                nic.device.backing.port.switchUuid = dvports[0].dvsUuid
                nic.device.backing.port.portKey = dvports[0].key
            else:
                cls.logger.error(
                    'No available distributed virtual port found, so network config failed!'
                )
                cls.logger.debug('%s', dvports)
        elif switch_type == 'standard':
            nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
            nic.device.backing.network = Query.get_obj(container, network)
            nic.device.backing.deviceName = network

    nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
    nic.device.connectable.connected = connected
    nic.device.connectable.startConnected = start_connected
    nic.device.connectable.allowGuestControl = allow_guest_control

    return nic
def drs_rule(self):
    """
    Method messes with DRS rules. Currently only Anti Affinity rules, and only
    add or delete. For safety, it has a concept of a vctools prefix. The prefix
    lives in the rc file, or can be declared by a flag. This is so you "can't"
    delete a rule that was not created by vctools.

    Args:
        cluster (str): cluster to modify
        type (str): currently only anti-affinity
        oper (add|delete): operation mode
        name (str): name of the rule
        vms (list): list of vms (to add, not used for delete)

    Returns true if successful.
    """
    # all inputs come from argparse options on self.opts
    cluster = self.opts.cluster
    drs_type = self.opts.drs_type
    name = self.opts.name
    vms = self.opts.vms
    function = self.opts.function
    self.logger.debug(cluster, drs_type, name, vms, function)
    # container views we need: all compute resources and all VMs
    clusters = Query.create_container(
        self.auth.session, self.auth.session.content.rootFolder,
        [vim.ComputeResource], True
    )
    virtual_machines = Query.create_container(
        self.auth.session, self.auth.session.content.rootFolder,
        [vim.VirtualMachine], True
    )
    # resolve the named cluster to its managed object
    cluster_obj = Query.get_obj(clusters.view, cluster)
    if drs_type == 'anti-affinity':
        if function == 'add':
            # resolve each VM name to its managed object
            vm_obj_list = []
            for vm_obj in vms:
                vm_obj_list.append(Query.get_obj(virtual_machines.view, vm_obj))
            # check to see if this rule name is in use
            if Query.is_anti_affinity_rule(cluster_obj, name):
                raise ValueError('Error: rule name "%s" is already in use' % name)
            # check to see vms are in the right cluster
            for vm_obj in vm_obj_list:
                if not Query.is_vm_in_cluster(cluster_obj, vm_obj):
                    raise ValueError(
                        'Error: the vm "%s" is not in the stated cluster' % vm_obj.name
                    )
            # check to see if the vms already have DRS rules; a VM may only
            # participate in one rule at a time
            for vm_obj in vm_obj_list:
                match = 0
                for rule in cluster_obj.configuration.rule:
                    if hasattr(rule, 'vm'):
                        for rulevm in rule.vm:
                            if vm_obj == rulevm:
                                match = 1
                if match != 0:
                    raise ValueError(
                        'Error: the vm "%s" is already in a DRS rule' % vm_obj.name
                    )
            # build the new anti-affinity rule and apply it via a
            # cluster reconfigure task
            new_rule = vim.ClusterAntiAffinityRuleSpec()
            new_rule.name = name
            new_rule.userCreated = True
            new_rule.enabled = True
            for vm_obj in vm_obj_list:
                new_rule.vm.append(vm_obj)
            rule_spec = vim.cluster.RuleSpec(info=new_rule, operation='add')
            config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])
            Tasks.task_monitor(cluster_obj.ReconfigureComputeResource_Task(
                config_spec, modify=True), False)
            self.logger.info('new AA DRS rule on %s: %s', cluster, name)
        if function == 'delete':
            # delete an AntiAffinity rule: find it by name, then remove by key
            found = False
            for existing_rule in cluster_obj.configuration.rule:
                if existing_rule.name == name:
                    found = True
                    # doublecheck this is an AA rule before removing it
                    if isinstance(existing_rule, vim.cluster.AntiAffinityRuleSpec):
                        rule_spec = vim.cluster.RuleSpec(
                            removeKey=existing_rule.key, operation='remove')
                        config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])
                        Tasks.task_monitor(cluster_obj.ReconfigureComputeResource_Task(
                            config_spec, modify=True), False)
                        self.logger.info('Deleted AA DRS rule on %s: %s', cluster, name)
                    else:
                        raise ValueError(
                            'Error: rule name "%s" not an AntiAffinity rule' % name
                        )
            if not found:
                raise ValueError('Error: rule name "%s" not found' % name)
def main(self):
    """
    This is the main method, which parses all the argparse options and runs
    the necessary code blocks if True.

    Exits with code 3 on ValueError, 2 on invalid login or certificate
    error, and 1 on keyboard interrupt.
    """
    try:
        # defined first so the except handlers below can always log it,
        # even when the failure happens before the session is established
        call_count = 0
        self.auth = Auth(self.opts.host)
        self.auth.login(
            self.opts.user, self.opts.passwd, self.opts.domain, self.opts.passwd_file
        )
        # drop the password from memory as soon as login is done
        self.opts.passwd = None
        self.logger.debug(self.opts)
        virtual_machines_container = Query.create_container(
            self.auth.session, self.auth.session.content.rootFolder,
            [vim.VirtualMachine], True
        )
        self.vmcfg = VMConfigHelper(self.auth, self.opts, argparser.dotrc)
        self.clustercfg = ClusterConfig(self.auth, self.opts, argparser.dotrc)
        call_count = self.auth.session.content.sessionManager.currentSession.callCount

        # datacenter is needed by several subcommands; prompt once up front
        if not self.opts.datacenter:
            self.opts.datacenter = Prompts.datacenters(self.auth.session)

        if self.opts.cmd == 'create':
            if self.opts.config:
                for cfg in self.opts.config:
                    # merge dotrc defaults with the yaml config, then fill in
                    # anything missing via interactive prompts
                    spec = self.vmcfg.dict_merge(
                        argparser.dotrc, yaml.load(cfg, Loader=yaml.FullLoader)
                    )
                    cfgcheck_update = CfgCheck.cfg_checker(spec, self.auth, self.opts)
                    spec['vmconfig'].update(
                        self.vmcfg.dict_merge(spec['vmconfig'], cfgcheck_update)
                    )
                    spec = self.vmcfg.pre_create_hooks(**spec)
                    spec = self.vmcfg.create_wrapper(**spec)
                    self.vmcfg.post_create_hooks(**spec)
                    # write a sanitized record of the build into the directory
                    # the user launched from
                    # NOTE(review): OLDPWD is only set by shell `cd`; this
                    # raises KeyError when invoked outside such a shell —
                    # confirm expected invocation
                    filename = spec['vmconfig']['name'] + '.yaml'
                    server_cfg = {}
                    server_cfg['vmconfig'] = {}
                    server_cfg['vmconfig'].update(spec['vmconfig'])
                    if spec.get('mkbootiso', None):
                        server_cfg['mkbootiso'] = {}
                        server_cfg['mkbootiso'].update(spec['mkbootiso'])
                    print(
                        yaml.dump(server_cfg, default_flow_style=False),
                        file=open(os.path.join(os.environ['OLDPWD'], filename), 'w')
                    )

        if self.opts.cmd == 'mount':
            self.vmcfg.mount_wrapper(self.opts.datastore, self.opts.path, *self.opts.name)

        if self.opts.cmd == 'power':
            self.vmcfg.power_wrapper(self.opts.power, *self.opts.name)

        if self.opts.cmd == 'umount':
            self.vmcfg.umount_wrapper(*self.opts.name)

        if self.opts.cmd == 'upload':
            self.vmcfg.upload_wrapper(
                self.opts.datastore, self.opts.dest, self.opts.verify_ssl, *self.opts.iso
            )

        if self.opts.cmd == 'add':
            hostname = Query.get_obj(virtual_machines_container.view, self.opts.name)
            # nics
            if self.opts.device == 'nic':
                self.vmcfg.add_nic_recfg(hostname)

        if self.opts.cmd == 'reconfig':
            host = Query.get_obj(virtual_machines_container.view, self.opts.name)
            if self.opts.cfgs:
                self.logger.info(
                    'reconfig: %s cfgs: %s', host.name,
                    ' '.join('%s=%s' % (k, v) for k, v in self.opts.cfgs.items())
                )
                self.vmcfg.reconfig(host, **self.opts.cfgs)
            if self.opts.folder:
                self.vmcfg.folder_recfg()
            if self.opts.device == 'disk':
                self.vmcfg.disk_recfg()
            if self.opts.device == 'nic':
                self.vmcfg.nic_recfg()

        if self.opts.cmd == 'drs':
            if not self.opts.cluster:
                self.opts.cluster = Prompts.clusters(self.auth.session)
            self.clustercfg.drs_rule()

        if self.opts.cmd == 'query':
            datacenters_container = Query.create_container(
                self.auth.session, self.auth.session.content.rootFolder,
                [vim.Datacenter], True
            )
            clusters_container = Query.create_container(
                self.auth.session, self.auth.session.content.rootFolder,
                [vim.ClusterComputeResource], True
            )

            if self.opts.anti_affinity_rules:
                if self.opts.cluster:
                    anti_affinity_rules = Query.return_anti_affinity_rules(
                        clusters_container.view, self.opts.cluster
                    )
                else:
                    cluster = Prompts.clusters(self.auth.session)
                    anti_affinity_rules = Query.return_anti_affinity_rules(
                        clusters_container.view, cluster
                    )
                if not anti_affinity_rules:
                    print('No antiaffinity rules defined.')
                else:
                    print('Antiaffinity rules:')
                    for key, val in sorted(anti_affinity_rules.items()):
                        print('{0}: {1}'.format(key, ' '.join(sorted(val))))

            if self.opts.datastores:
                if self.opts.cluster:
                    datastores = Query.return_datastores(
                        clusters_container.view, self.opts.cluster
                    )
                else:
                    cluster = Prompts.clusters(self.auth.session)
                    datastores = Query.return_datastores(clusters_container.view, cluster)
                for row in datastores:
                    print('{0:30}\t{1:10}\t{2:10}\t{3:6}\t{4:10}\t{5:6}'.format(*row))

            if self.opts.folders:
                if self.opts.datacenter:
                    folders = Query.list_vm_folders(
                        datacenters_container.view, self.opts.datacenter
                    )
                    folders.sort()
                    for folder in folders:
                        print(folder)
                else:
                    datacenter = Prompts.datacenters(self.auth.session)
                    folders = Query.list_vm_folders(datacenters_container.view, datacenter)
                    folders.sort()
                    for folder in folders:
                        print(folder)

            if self.opts.clusters:
                clusters = Query.list_obj_attrs(clusters_container, 'name')
                clusters.sort()
                for cluster in clusters:
                    print(cluster)

            if self.opts.networks:
                if self.opts.cluster:
                    cluster = Query.get_obj(clusters_container.view, self.opts.cluster)
                    networks = Query.list_obj_attrs(cluster.network, 'name', view=False)
                    networks.sort()
                    for net in networks:
                        print(net)
                else:
                    cluster_name = Prompts.clusters(self.auth.session)
                    cluster = Query.get_obj(clusters_container.view, cluster_name)
                    networks = Query.list_obj_attrs(cluster.network, 'name', view=False)
                    networks.sort()
                    for net in networks:
                        print(net)

            if self.opts.vms:
                vms = Query.list_vm_info(datacenters_container.view, self.opts.datacenter)
                for key, value in vms.items():
                    print(key, value)

            if self.opts.vmconfig:
                for name in self.opts.vmconfig:
                    virtmachine = Query.get_obj(virtual_machines_container.view, name)
                    self.logger.debug(virtmachine.config)
                    if self.opts.createcfg:
                        # limit the dump to the create-config subset
                        print(
                            yaml.dump(
                                Query.vm_config(
                                    virtual_machines_container.view, name,
                                    self.opts.createcfg
                                ),
                                default_flow_style=False
                            )
                        )
                    else:
                        print(
                            yaml.dump(
                                Query.vm_config(virtual_machines_container.view, name),
                                default_flow_style=False
                            )
                        )

            if self.opts.vm_by_datastore:
                if self.opts.cluster and self.opts.datastore:
                    vms = Query.vm_by_datastore(
                        clusters_container.view, self.opts.cluster, self.opts.datastore
                    )
                    for vm_name in vms:
                        print(vm_name)
                else:
                    # NOTE(review): if exactly one of --cluster/--datastore is
                    # set, the other local name stays unbound here and the
                    # Query call below raises UnboundLocalError — confirm and
                    # seed cluster/datastore from self.opts
                    if not self.opts.cluster:
                        cluster = Prompts.clusters(self.auth.session)
                    if not self.opts.datastore:
                        datastore = Prompts.datastores(self.auth.session, cluster)
                        print()
                    vms = Query.vm_by_datastore(clusters_container.view, cluster, datastore)
                    for vm_name in vms:
                        print(vm_name)

            if self.opts.vm_guest_ids:
                for guest_id in Query.list_guestids():
                    print(guest_id)

        self.auth.logout()
        self.logger.debug('Call count: {0}'.format(call_count))
    except ValueError as err:
        self.logger.error(err, exc_info=False)
        self.auth.logout()
        self.logger.debug('Call count: {0}'.format(call_count))
        sys.exit(3)
    except vim.fault.InvalidLogin as loginerr:
        self.logger.error(loginerr.msg, exc_info=False)
        sys.exit(2)
    except ssl.CertificateError as err:
        self.logger.error(err, exc_info=False)
        sys.exit(2)
    except KeyboardInterrupt as err:
        self.logger.error(err, exc_info=False)
        self.auth.logout()
        self.logger.debug('Call count: {0}'.format(call_count))
        sys.exit(1)
def disk_config(cls, edit=False, **kwargs): """ Method returns configured VirtualDisk object Kwargs: container (obj): Cluster container object datastore (str): Name of datastore for the disk files location. size (int): Integer of disk in kilobytes key (int): Integer value of scsi device unit (int): unitNumber of device. mode (str): The disk persistence mode. thin (bool): If True, then it enables thin provisioning Returns: disk (obj): A configured object for a VMDK Disk. this should be appended to ConfigSpec devices attribute. """ # capacityInKB is deprecated but also a required field. See pyVmomi bug #218 container = kwargs.get('container', None) datastore = kwargs.get('datastore', None) size = kwargs.get('size', None) key = kwargs.get('key', None) unit = kwargs.get('unit', 0) mode = kwargs.get('mode', 'persistent') thin = kwargs.get('thin', True) controller = kwargs.get('controller', None) filename = kwargs.get('filename', None) disk = vim.vm.device.VirtualDeviceSpec() if edit: disk.operation = 'edit' disk.device = vim.vm.device.VirtualDisk() disk.device.capacityInKB = size disk.device.key = key # controllerKey is tied to SCSI Controller disk.device.controllerKey = controller disk.device.unitNumber = unit disk.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo( ) disk.device.backing.fileName = filename disk.device.backing.diskMode = mode else: disk.operation = 'add' disk.fileOperation = 'create' disk.device = vim.vm.device.VirtualDisk() disk.device.capacityInKB = size # controllerKey is tied to SCSI Controller disk.device.controllerKey = controller disk.device.unitNumber = unit disk.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo( ) disk.device.backing.fileName = '[' + datastore + ']' disk.device.backing.datastore = Query.get_obj(container, datastore) disk.device.backing.diskMode = mode disk.device.backing.thinProvisioned = thin disk.device.backing.eagerlyScrub = False return disk