def refresh(self):
    """
    Reload the Scalable Persistent Memory state from iLO.

    Populates self._config, self._pending_config, self._drives and
    self._regions from the REST helpers, then (when a validator and CHIF
    library are available) replaces the theoretical maximum persistent
    memory value with a validated one.

    :raises NoChangesFoundOrMadeError: when any required resource cannot
        be retrieved.
    """
    helpers = self._restHelpers
    self._config = helpers.getConfigResource()
    self._pending_config = helpers.getPendingConfigResource()
    current_bios = helpers.getCurrentBiosSettings()
    pending_bios = helpers.getPendingBiosSettings()
    chassis = helpers.getChassis()
    drives_data = helpers.getChassisDrives(chassis)

    # NOTE: pending BIOS settings are intentionally not part of this
    # check; everything else is required.
    required = (self._config, self._pending_config, current_bios,
                chassis, drives_data)
    if not all(required):
        raise NoChangesFoundOrMadeError(
            u"Unable to retrieve Scalable Persistent Memory configuration.")

    self._drives = helpers.generateDriveConfiguration(
        self._config, current_bios, pending_bios, drives_data)
    if not self._drives:
        raise NoChangesFoundOrMadeError(
            u"Unable to retrieve backup storage device information.")

    self._regions = helpers.generateRegionConfiguration(
        self._config, self._pending_config, current_bios)
    if not self._regions:
        raise NoChangesFoundOrMadeError(
            u"Unable to retrieve Scalable Persistent Memory information")

    if self._validatorObject and self._chif_lib:
        # Update the region object's theoretical values with validated ones.
        configured_pmem_gib = (self._regions.maxPmemGiB -
                               self._regions.availableSizeGiB)
        max_pmem_gib, backup_boot_sec = \
            self._validatorObject.calculateMaxPmemGiBAndBackupTime(
                self._chif_lib, configured_pmem_gib, self._config,
                self._drives.selectedDrives)
        self._regions.maxPmemGiB = max_pmem_gib
        self._regions.backupBootSec = backup_boot_sec
def replace_drive(self, scalable_pmem_config, old_drive, new_drive):
    """ Replaces a backup storage drive.

    :param scalable_pmem_config: the Scalable Persistent Memory configuration
    :param old_drive: the drive to be replaced
    :param new_drive: the replacement drive
    :raises NoChangesFoundOrMadeError: when the replacement is invalid or
        the user declines the confirmation prompt
    """
    backup_drives = scalable_pmem_config.drives.selectedDrives

    # The drive being replaced must currently be selected for backup,
    # and the replacement must not already be selected.
    if old_drive not in backup_drives:
        raise NoChangesFoundOrMadeError(
            u"Device {} is not configured for backup storage".format(
                old_drive.generatedId))
    if new_drive in backup_drives:
        raise NoChangesFoundOrMadeError(
            u"Device {} is already configured for backup storage".format(
                new_drive.generatedId))

    backup_drives.remove(old_drive)
    backup_drives.append(new_drive)

    config_data = scalable_pmem_config.config_resource

    # New backup storage drives must adhere to the drive policy requirements.
    is_valid, error_msg = self._validator.checkLogicalNvdimmDrivePolicies(
        config_data, backup_drives)
    if not is_valid:
        raise NoChangesFoundOrMadeError(error_msg)

    # New backup storage drives must support the current logical NVDIMM
    # configuration.
    max_pmem = self._validator.calculateMaxPmemGiB(
        self._chif_lib, config_data, backup_drives)
    allocated_pmem = scalable_pmem_config.regions.totalSizeGiB
    if allocated_pmem > max_pmem:
        raise NoChangesFoundOrMadeError(
            u"The new backup storage devices must support "
            u"the current logical NVDIMM configuration.")

    # Actual drive replacement occurring with data at risk: confirm first.
    data_at_risk = (old_drive.currentMode == Drive.MODE_NVDIMM and
                    scalable_pmem_config.hasActiveConfiguredRegions)
    if data_at_risk and self._rdmc.interactive:
        sys.stdout.write(
            u"\nAll backup storage devices will be initialized upon restart."
            u"\nData on any existing logical NVDIMMs will be lost.\n")
        reply = raw_input(u"\nConfirm changes [y/N]? ")
        if reply.lower() != 'y':
            raise NoChangesFoundOrMadeError(u"No changes have been made")

    self._rest_helpers.setDrives(new_drives=[new_drive],
                                 old_drives=[old_drive])
def delete_existing_chunks_and_tasks(self, memory_chunk_tasks, memory_chunks):
    """
    Delete existing Memory Chunks and Pending Configuration Tasks

    :param memory_chunk_tasks: Pending Configuration Tasks
    :type memory_chunk_tasks: list
    :memory_chunks: Memory Chunks in the existing configuration
    :type memory_chunks: list
    :returns: None
    :raises NoChangesFoundOrMadeError: when a chunk deletion fails
    """
    # Delete any pending configuration tasks
    if memory_chunk_tasks:
        self.auxcommands["clearpmmpendingconfig"].delete_tasks(
            memory_chunk_tasks)
    # Delete any existing configuration
    if memory_chunks:
        # Build the helper once instead of once per chunk (fix: was
        # constructed inside the loop).
        rest_helpers = RestHelpers(rdmcObject=self.rdmc)
        for chunk in memory_chunks:
            data_id = chunk.get("@odata.id")
            resp = rest_helpers.delete_resource(data_id)
            if not resp:
                # Fix: "occured" -> "occurred" (matches the spelling used
                # by the sibling implementation of this function).
                raise NoChangesFoundOrMadeError(
                    "Error occurred while deleting "
                    "existing configuration")
    return None
def commitfunction(self, options=None):
    """ Main commit worker function

    :param options: command line options
    :type options: list.
    :raises NoChangesFoundOrMadeError: when the commit makes no changes
    """
    # Fix: options was dereferenced (options.encode) before any None
    # check, which crashed when the default options=None was used.
    if options and options.encode and options.user and options.password:
        options.user = Encryption.decode_credentials(options.user)
        options.password = Encryption.decode_credentials(options.password)

    self.commitvalidation(options)

    sys.stdout.write(u"Committing changes...\n")

    if options:
        if options.biospassword:
            self._rdmc.app.update_bios_password(options.biospassword)

    # Fix: removed the no-op `try/except Exception, excp: raise excp`
    # wrapper (Python-2-only syntax that simply re-raised unchanged).
    if not self._rdmc.app.commit(verbose=self._rdmc.opts.verbose):
        raise NoChangesFoundOrMadeError("No changes found or made "
                                        "during commit operation.")
def warn_existing_chunks_and_tasks(memory_chunk_tasks, memory_chunks):
    """
    Checks for existing Memory Chunks and Pending Configuration Task resources on
    a server where a user is trying to apply a pre-defined configuration

    :param memory_chunk_tasks: Pending Configuration Tasks
    :type memory_chunk_tasks: list
    :memory_chunks: Memory Chunks in the existing configuration
    :type memory_chunks: list
    :returns: None
    :raises NoChangesFoundOrMadeError: when either resource exists and
        the user has not supplied --force
    """
    # Nothing on the server to clobber: proceed silently.
    if not (memory_chunks or memory_chunk_tasks):
        return None

    # Existing configuration warning.
    if memory_chunks:
        sys.stdout.write(
            "\nWarning: Existing configuration found. Proceeding with applying a new "
            "configuration will result in overwriting the current configuration and "
            "cause data loss.\n")

    # Pending configuration tasks warning.
    if memory_chunk_tasks:
        sys.stdout.write(
            "\nWarning: Pending configuration tasks found. Proceeding with applying "
            "a new configuration will result in overwriting the pending "
            "configuration tasks.\n")

    # Line feed for proper formatting, then abort.
    sys.stdout.write("\n")
    raise NoChangesFoundOrMadeError(
        "Found one or more of Existing Configuration or "
        "Pending Configuration Tasks. Please use the "
        "'--force | -f' flag with the same command to "
        "approve these changes.")
def commitfunction(self, options=None):
    """ Main commit worker function

    :param options: command line options
    :type options: list.
    :raises NoChangesFoundOrMadeError: when nothing is selected to commit
    :raises FailureDuringCommitError: when one or more types fail to commit
    """
    self.commitvalidation()

    self.rdmc.ui.printer("Committing changes...\n")

    if options:
        if options.biospassword:
            self.rdmc.app.current_client.bios_password = options.biospassword
    try:
        failure = False
        commit_opp = self.rdmc.app.commit()
        # The commit generator alternates yields: a path, then a failure
        # flag for that path — hence the next() inside the loop.
        # TODO(review): confirm this yield protocol against app.commit().
        for path in commit_opp:
            if self.rdmc.opts.verbose:
                self.rdmc.ui.printer(
                    'Changes are being made to path: %s\n' % path)
            if next(commit_opp):
                failure = True
    except NothingSelectedError:
        raise NoChangesFoundOrMadeError(
            "No changes found or made during commit operation.")
    else:
        if failure:
            raise FailureDuringCommitError('One or more types failed to commit. Run the '\
                            'status command to see uncommitted data. '\
                            'if you wish to discard failed changes refresh the '\
                            'type using select with the --refresh flag.')

    # Fix: options.reboot was read outside the `if options:` guard and
    # raised AttributeError when options was None.
    if options and options.reboot:
        self.auxcommands['reboot'].run(options.reboot)

    self.auxcommands['logout'].run("")
def validateAllConfigurationPolicies(self, scalable_pmem_config,
                                     output_as_json=False):
    """Validate all configuration policies, aborting on failure.

    :param scalable_pmem_config: the Scalable Persistent Memory configuration
    :param output_as_json: when True, report failures as JSON
    :raises NoChangesFoundOrMadeError: when any policy is violated
    """
    isValid, overallMessage, messages = \
        scalable_pmem_config.validateConfigurationPolicies()
    if isValid:
        return

    if output_as_json:
        UI().print_out_json({
            "IsValid": isValid,
            "OverallMessage": overallMessage,
            "Messages": messages
        })
        raise NoChangesFoundOrMadeError(u"")

    # Human-readable report: overall message plus up to 3 details.
    sys.stdout.write(u"\n\n{}:\n".format(overallMessage))
    self.printLimitedMessageList(messages, 3)
    sys.stdout.write(u"\n\n")
    raise NoChangesFoundOrMadeError(u"Unable to continue")
def validateFeatureEnabledByUser(self, scalable_pmem_config,
                                 output_as_json=False):
    """Verify the Scalable PMEM feature is enabled, aborting if not.

    :param scalable_pmem_config: the Scalable Persistent Memory configuration
    :param output_as_json: when True, report failures as JSON
    :raises NoChangesFoundOrMadeError: when the feature is disabled
    """
    isEnabled, overallMessage, messages = scalable_pmem_config.isEnabledByUser
    if isEnabled:
        return

    if output_as_json:
        UI().print_out_json({
            "IsEnabled": isEnabled,
            "OverallMessage": overallMessage,
            "Messages": messages
        })
        raise NoChangesFoundOrMadeError(u"")

    # Human-readable report: overall message plus up to 3 details.
    sys.stdout.write(u"\n\n{}:\n".format(overallMessage))
    self.printLimitedMessageList(messages, 3)
    sys.stdout.write(u"\n\n")
    raise NoChangesFoundOrMadeError(u"Unable to continue")
def configure_pmm(self, options):
    """
    Applies selected configuration to the PMMs

    :param options: options specified by user
    :returns: None
    :raises NoContentsFoundForOperationError: when no Memory Domain
        Resources can be retrieved
    :raises InvalidCommandLineError: when interleave is requested for a
        100% volatile configuration
    :raises NoChangesFoundOrMadeError: when a POST fails
    """
    # Retrieve Memory Chunks and Task Resources from server.
    (task_members, domain_members, memory_chunks) = \
        self._rest_helpers.retrieve_task_members_and_mem_domains()

    # Filter Task Resources to include only Pending Configuration Tasks.
    memory_chunk_tasks = self._rest_helpers.filter_task_members(task_members)

    if not domain_members:
        raise NoContentsFoundForOperationError(
            "Failed to retrieve Memory Domain Resources")

    # Dict with input config data
    config_data = {"size": options.memorymode,
                   "pmeminterleave": options.interleave,
                   "proc": options.proc}

    # Check for given processor id list is valid or not.
    if options.proc:
        config_data["proc"] = self.get_valid_processor_list(
            config_data["proc"], domain_members)
        memory_chunks = self.filter_memory_chunks(
            memory_chunks, domain_members, config_data["proc"])

    # In case of 100% Volatile, interleaving memory regions is not applicable.
    if config_data["size"] == 100 and config_data["pmeminterleave"] and \
            config_data["pmeminterleave"].lower() == "on":
        # Fix: the message literal was broken across a physical line,
        # which is a SyntaxError; rejoined as adjacent string literals.
        raise InvalidCommandLineError(
            "Selected configuration is invalid. "
            "Interleaving not supported in 100% volatile.")

    if options.force:
        self.delete_existing_chunks_and_tasks(memory_chunk_tasks,
                                              memory_chunks)
    else:
        self.warn_existing_chunks_and_tasks(memory_chunk_tasks,
                                            memory_chunks)

    for member in domain_members:
        proc_id = member['Id'].encode('ascii', 'ignore')
        # If given proc list is not empty, applies configuration to
        # selected processors only.
        if not config_data["proc"] or (proc_id in config_data["proc"]):
            path = member['MemoryChunks'].get('@odata.id').encode(
                'ascii', 'ignore')
            data = self.get_post_data(config_data,
                                      member['InterleavableMemorySets'])
            for body in data:
                resp = self._rest_helpers.post_resource(path, body)
                if resp is None:
                    raise NoChangesFoundOrMadeError(
                        "Error occurred while applying configuration")

    # Display warning
    sys.stdout.write("\n***WARNING: Configuration changes require reboot "
                     "to take effect***\n")
    # Display pending configuration
    self._show_pmem_config.show_pending_config(
        type("MyOptions", (object, ), dict(json=False)))
    return None
def commitfunction(self, options=None):
    """ Main commit worker function

    :param options: command line options
    :type options: list.
    :raises NoChangesFoundOrMadeError: when the commit makes no changes
    """
    self.commitvalidation()
    sys.stdout.write("Committing changes...\n")

    committed = self._rdmc.app.commit(verbose=self._rdmc.opts.verbose)
    if not committed:
        raise NoChangesFoundOrMadeError("No changes found or made "
                                        "during commit operation.")

    self.logoutobj.logoutfunction("")
def apply_predefined_config(self, options):
    """
    Apply the selected pre-defined configuration to Persistent Memory Modules.

    :param options: option specified by the user.
    :returns: None
    :raises NoContentsFoundForOperationError: when no Memory Domain
        Resources can be retrieved
    :raises NoChangesFoundOrMadeError: when a POST fails
    """
    # Retrieve Memory Chunks and Task Resources from server
    (task_members, domain_members, memory_chunks) = \
        self._rest_helpers.retrieve_task_members_and_mem_domains()
    # Filter Task Resources to include only Pending Configuration Tasks
    memory_chunk_tasks = self._rest_helpers.filter_task_members(
        task_members)

    if options.force:
        self.delete_existing_chunks_and_tasks(memory_chunk_tasks,
                                              memory_chunks)
    else:
        self.warn_existing_chunks_and_tasks(memory_chunk_tasks,
                                            memory_chunks)

    if not domain_members:
        # Fix: "retrive" -> "retrieve" in the user-facing message.
        raise NoContentsFoundForOperationError(
            "Failed to retrieve Memory Domain Resources")

    # Get the user specified configID (None when no name matches).
    config_data = next((config_id for config_id in self.config_ids
                        if config_id.get("name").lower() ==
                        options.config.lower()), None)

    for proc in domain_members:
        path = proc['MemoryChunks'].get('@odata.id').encode(
            'ascii', 'ignore')
        data = self.get_post_data(config_data,
                                  proc['InterleavableMemorySets'])
        for body in data:
            resp = self._rest_helpers.post_resource(path, body)
            if resp is None:
                # Fix: "occured" -> "occurred".
                raise NoChangesFoundOrMadeError(
                    "Error occurred while applying configuration")

    # display warning
    sys.stdout.write(
        "\n***WARNING: Configuration changes require reboot to take effect***\n"
    )
    # display pending configuration
    self._show_pmem_config.show_pending_config(
        type("MyOptions", (object, ), dict(json=False)))
def failNoChifLibrary():
    """Print guidance on obtaining the proprietary CHIF library, then abort.

    :raises NoChangesFoundOrMadeError: always
    """
    banner = u"""
------------------------------------------------------------------------------
Scalable Persistent Memory configuration requires the CHIF library, which is
not available as open-source.

For full functionality, please obtain the latest version from the Hewlett
Packard Enterprise product support site:

hpe.com/us/en/product-catalog/detail/pip.restful-interface-tool.7630408.html
------------------------------------------------------------------------------
"""
    sys.stdout.write(banner)
    raise NoChangesFoundOrMadeError("Unable to continue")
def removeAllDrives(self):
    """ setlogicalnvdimmdrives command worker function: deselect every
    backup storage drive.

    :returns: ReturnCodes.SUCCESS
    :raises NoChangesFoundOrMadeError: when logical NVDIMMs are configured
    """
    config = self.common_setup()

    # Drives cannot be removed out from under configured logical NVDIMMs.
    if config.hasConfiguredRegions:
        raise NoChangesFoundOrMadeError(
            u"Backup drives cannot be removed while logical NVDIMMs "
            "are configured")

    self._restHelpers.setDrives(old_drives=config.drives.selectedDrives)
    #self._restHelpers.enableConfiguration()

    config.refresh()
    self._helpers.displayDrivesConfiguration(config)

    return ReturnCodes.SUCCESS
def delete_tasks(self, memory_chunk_tasks, verbose=False):
    """
    Function to delete pending configuration tasks

    :param memory_chunk_tasks: Pending configuration tasks.
    :type memory_chunk_tasks: list
    :param verbose: Toggles verbose mode, which print task IDs as
                    individual tasks are deleted.
    :type verbose: Boolean
    :returns: None
    :raises NoChangesFoundOrMadeError: when a task deletion fails
    """
    for task in memory_chunk_tasks:
        data_id = task.get("@odata.id")
        task_id = task.get("Id")
        resp = self._rest_helpers.delete_resource(data_id)
        if resp:
            if verbose:
                sys.stdout.write("Deleted Task #{}".format(task_id) + "\n")
        else:
            # Fix: "occured" -> "occurred" in the user-facing message.
            raise NoChangesFoundOrMadeError("Error occurred while deleting "
                                            "task #{}".format(task_id))
    return None
def delete_existing_chunks_and_tasks(self, memory_chunk_tasks, memory_chunks):
    """
    Delete existing Memory Chunks and Pending Configuration Tasks

    :param memory_chunk_tasks: Pending Configuration Tasks
    :type memory_chunk_tasks: list
    :memory_chunks: Memory Chunks in the existing configuration
    :type memory_chunks: list
    :returns: None
    :raises NoChangesFoundOrMadeError: when a chunk deletion fails
    """
    # Clear pending configuration tasks first.
    if memory_chunk_tasks:
        ClearPendingConfigCommand(self._rdmc).delete_tasks(memory_chunk_tasks)

    # Then remove each existing memory chunk resource.
    if memory_chunks:
        for data_id in (chunk.get("@odata.id") for chunk in memory_chunks):
            if not self._rest_helpers.delete_resource(data_id):
                raise NoChangesFoundOrMadeError(
                    "Error occurred while deleting existing configuration")

    return None
def confirmChanges(self, message):
    """Display *message* and require an interactive 'y' to proceed.

    :param message: text shown to the user before the prompt
    :raises NoChangesFoundOrMadeError: when the user does not confirm
    """
    sys.stdout.write(message)
    reply = raw_input(u"\nConfirm changes [y/N]? ")
    if not reply.lower() == 'y':
        raise NoChangesFoundOrMadeError(u"No changes have been made")
def autoselectdrives(self, pmem_size_GiB, confirm):
    """ function to perform the automatic selection of backup drives

    :param pmem_size_GiB: requested scalable persistent memory size
    :type pmem_size_GiB: int
    :param confirm: whether or not to automatically confirm the selected drives
    :type confirm: Boolean
    :raises InvalidCommandLineError: on a configured system or when no
        drive combination supports the requested size
    :raises NoChangesFoundOrMadeError: when the user does not confirm
    """
    scalable_pmem_config = ScalablePersistentMemoryConfig(
        self._restHelpers, self._validator, self._chif_lib)
    scalable_pmem_config.refresh()

    # pre-validation
    self._helpers.validateAllConfigurationPolicies(scalable_pmem_config)

    # make sure this is an un-configured system
    if scalable_pmem_config.isConfiguredSystem:
        raise InvalidCommandLineError(u"This operation is not supported on "
                                      "a configured system")

    # get policies from the config resource (may be absent)
    policies = resolve_pointer(scalable_pmem_config.config_resource,
                               "/Attributes/Policy", None)
    sameModel = policies.get("SameModelNVMe", False) if policies else False
    sameSize = policies.get("SameSizeNVMe", False) if policies else False

    # Separate the supported drives into groups, based on model or size.
    # If same model or size, then order doesn't matter; else the drives
    # should be sorted largest to smallest.
    drive_groups = self.sort_drives(sameModel, sameSize,
                                    scalable_pmem_config.drives.supportedDrives)

    # Grow a candidate set one drive at a time within each group until a
    # combination both supports the requested size and passes the drive
    # policies, or every group has been exhausted.
    found = False
    candidate_drives = []
    for group in drive_groups:
        candidate_drives = []
        for drive in group:
            candidate_drives.append(drive)
            # Maximum supported by this candidate set may differ from the
            # requested capacity.
            max_supported = self._validator.calculateMaxPmemGiB(
                self._chif_lib, scalable_pmem_config.config_resource,
                candidate_drives)
            if max_supported >= pmem_size_GiB:
                policies_ok, _ = self._validator.checkLogicalNvdimmDrivePolicies(
                    scalable_pmem_config.config_resource, candidate_drives)
                if policies_ok:
                    found = True
                    break
        if found:
            break

    if not found:
        # TODO: more info? maybe build a list of reasons why certain
        # drives will not work
        raise InvalidCommandLineError(u"Requested size of {} GiB is not "
                                      "supported by the installed backup "
                                      "devices".format(pmem_size_GiB))

    # show the user which drives were chosen
    summary_drive_list = ["{:15} ({} GB)".format(d.formattedLocation,
                                                 d.sizeGB)
                          for d in candidate_drives]
    sys.stdout.write(u"\nThe following backup devices have been "
                     "automatically selected for Scalable PMEM:\n")
    self._helpers.printLimitedMessageList(summary_drive_list, 99)
    sys.stdout.write(u"\n")

    # Make sure the user confirms the changes.
    if not confirm:
        if self._rdmc.interactive:
            # TODO: timeout
            reply = raw_input(
                u"\nConfirm changes? Y(y) to confirm. N(n) to cancel: ")
            if reply in ('y', 'Y'):
                confirm = True
            else:
                raise NoChangesFoundOrMadeError(
                    u"No changes have been made")
        else:
            raise NoChangesFoundOrMadeError(u"No changes have been made. "
                                            "To confirm the changes, specify --confirm")

    if confirm:
        # if all is valid, configure the related BIOS setting
        self._restHelpers.setDrives(new_drives=candidate_drives)
        #self._restHelpers.enableConfiguration()
        scalable_pmem_config.refresh()
        self._helpers.displayDrivesConfiguration(scalable_pmem_config)
def setDrives(self, options=None):
    """ Main setlogicalnvdimmdrives command worker function

    :param options: command options
    :type options: options.
    :returns: ReturnCodes.SUCCESS
    :raises InvalidCommandLineError: on missing/duplicate/unknown device IDs
    :raises NoChangesFoundOrMadeError: when the requested drive set is
        not a valid configuration
    """
    if not options.driveId:
        raise InvalidCommandLineError(u"No device IDs specified")
    if len(options.driveId) > len(set(options.driveId)):
        raise InvalidCommandLineError(u"Duplicate device IDs specified")

    scalable_pmem_config = self.common_setup()

    # Resolve every user-supplied ID to a drive object.
    selected_drives = []
    for drive_id in options.driveId:
        drive = scalable_pmem_config.drives.findDrive(drive_id)
        if not drive:
            raise InvalidCommandLineError(
                u"Invalid device ID: {}".format(drive_id))
        selected_drives.append(drive)

    # While logical NVDIMMs are configured, drives may be added but not
    # removed.
    if scalable_pmem_config.hasConfiguredRegions:
        currently_selected = set(scalable_pmem_config.drives.selectedDrives)
        if not currently_selected.issubset(selected_drives):
            raise NoChangesFoundOrMadeError(u"Backup devices cannot be "
                                            "removed while logical NVDIMMs are configured")

    invalid_header = u"\nThe set of devices specified is not a valid configuration:\n"

    # check the configuration policies
    policies_ok, policies_message = self._validator.checkLogicalNvdimmDrivePolicies(
        scalable_pmem_config.config_resource, selected_drives)
    if not policies_ok:
        sys.stdout.write(invalid_header)
        sys.stdout.write(policies_message)
        sys.stdout.write(u"\n\n")
        raise NoChangesFoundOrMadeError(u"Unable to continue")

    # The maximum persistent memory supported by the new drive set must
    # cover the TOTAL of the pmem regions in the current/pending settings.
    max_pmem = self._validator.calculateMaxPmemGiB(
        self._chif_lib, scalable_pmem_config.config_resource,
        selected_drives)
    allocated_pmem = scalable_pmem_config.regions.totalSizeGiB
    if allocated_pmem != 0 and max_pmem < allocated_pmem:
        sys.stdout.write(invalid_header)
        sys.stdout.write(
            u"\nScalable Persistent Memory supported by requested configuration: {} GiB"
            u"\nAllocated Scalable Persistent Memory: {} GiB.".format(
                max_pmem, allocated_pmem))
        sys.stdout.write(u"\n\n")
        raise NoChangesFoundOrMadeError(u"Unable to continue")

    # if all is valid, configure the related BIOS setting
    if self._rdmc.interactive:
        self._helpers.confirmBeforeConfigCausesDataLoss(scalable_pmem_config)

    self._restHelpers.setDrives(
        new_drives=selected_drives,
        old_drives=scalable_pmem_config.drives.selectedDrives)

    scalable_pmem_config.refresh()
    self._helpers.displayDrivesConfiguration(scalable_pmem_config)

    return ReturnCodes.SUCCESS