    def test_open_from_cache(self):
        # First, store example in cache
        fsconf, fs = create_lustrefs("../conf/models/example.lmf")
        self.assertTrue(fsconf)
        self.assertTrue(fs)
        # Try to re-read them
        fsconf, fs = open_lustrefs(fsconf.get_fs_name())
        self.assertTrue(fsconf)
        self.assertTrue(fs)
        fsconf.unregister_fs()
    def setUp(self):
        FSUtilsTest.setUp(self)
        tmpfile = makeTempFile("""
fs_name: param
nid_map: nodes=foo[1-10] nids=foo[1-10]@tcp
client: node=foo[7-10]
mgt: node=foo1 dev=/dev/sda
mdt: node=foo1 dev=/dev/sdb
ost: node=foo2 dev=/dev/sdc ha_node=foo3 ha_node=foo4   #index 0
ost: node=foo2 dev=/dev/sdd ha_node=foo4 ha_node=foo3   #index 1
ost: node=foo3 dev=/dev/sdc ha_node=foo2 ha_node=foo4   #index 2
ost: node=foo3 dev=/dev/sdd ha_node=foo4 ha_node=foo2   #index 3
router: node=foo[5-6]
mount_path: /param
""")
        self.fsconf, self.fs = create_lustrefs(tmpfile.name)
    def setUp(self):
        FSUtilsTest.setUp(self)
        tmpfile = makeTempFile("""
fs_name: param
nid_map: nodes=foo[1-13] nids=foo[1-13]@tcp
client: node=foo[8-11]
mgt: node=foo1 dev=/dev/sda
mdt: node=foo1 dev=/dev/sdb
ost: node=foo2 dev=/dev/sdc ha_node=foo3 ha_node=foo4   #index 0
ost: node=foo2 dev=/dev/sdd ha_node=foo4 ha_node=foo3   #index 1
ost: node=foo3 dev=/dev/sdc ha_node=foo2 ha_node=foo4   #index 2
ost: node=foo3 dev=/dev/sdd ha_node=foo4 ha_node=foo2   #index 3
ost: node=foo5 dev=/dev/sde ha_node=foo6 ha_node=foo7   #index 4
ost: node=foo5 dev=/dev/sdf ha_node=foo7 ha_node=foo6   #index 5
ost: node=foo6 dev=/dev/sde ha_node=foo5 ha_node=foo7   #index 6
ost: node=foo6 dev=/dev/sdf ha_node=foo7 ha_node=foo5   #index 7
router: node=foo[12-13]
mount_path: /param
""")
        self.fsconf, self.fs = create_lustrefs(tmpfile.name)
    def test_create_example(self):
        fsconf, fs = create_lustrefs("../conf/models/example.lmf")
        self.assertTrue(fsconf)
        self.assertTrue(fs)
        fsconf.unregister_fs()
    def execute(self):
        # Option sanity check
        self.forbidden(self.options.fsnames, "-f, see -m")
        self.forbidden(self.options.labels, "-l")
        self.forbidden(self.options.indexes, "-i")
        self.forbidden(self.options.failover, "-F")

        rc = RC_OK

        if not self.options.model:
            raise CommandHelpException("Lustre model file path "
                                       "(-m <model_file>) argument required.",
                                       self)

        eh = FSGlobalEventHandler(self)

        # Use this Shine.FSUtils convenience function.
        lmf = self.get_lmf_path()
        if lmf:
            print "Using Lustre model file %s" % lmf
        else:
            raise CommandHelpException(
                "Lustre model file for ``%s'' not found:"
                " please use filename or full LMF path.\n"
                "Your default model files directory (lmf_dir) is: %s"
                % (self.options.model, Globals().get_lmf_dir()), self)

        install_nodes = self.options.nodes
        excluded_nodes = self.options.excludes

        fs_conf, fs = create_lustrefs(self.get_lmf_path(),
                                      event_handler=eh,
                                      nodes=install_nodes,
                                      excluded=excluded_nodes)

        # Register the filesystem in backend
        print "Registering FS %s to backend..." % fs.fs_name
        if self.options.dryrun:
            rc = 0
        else:
            rc = self.register_fs(fs_conf)
        if rc:
            msg = "Error: failed to register FS to backend (rc=%d)" % rc
            print >> sys.stderr, msg
        else:
            print "Filesystem %s registered." % fs.fs_name

        # Helper message.
        # If the user specified nodes which were not used, warn about it.
        actual_nodes = fs.components.managed().servers()
        if not self.check_valid_list(fs_conf.get_fs_name(), actual_nodes,
                                     "install"):
            return RC_FAILURE

        # Install file system configuration files; normally, this should
        # not be done by the Shine.Lustre.FileSystem object itself, but as
        # all proxy methods are currently handled by it, it is more
        # convenient this way...
        try:
            fs.install(fs_conf.get_cfg_filename(),
                       dryrun=self.options.dryrun)
            tuning_conf = Globals().get_tuning_file()
            if tuning_conf:
                fs.install(tuning_conf, dryrun=self.options.dryrun)
        except FSRemoteError, error:
            print "WARNING: Due to error, installation skipped on %s" \
                  % error.nodes
            rc = RC_FAILURE
    def execute(self):
        # Option sanity check
        self.forbidden(self.options.fsnames, "-f, see -m")
        self.forbidden(self.options.labels, "-l")
        self.forbidden(self.options.indexes, "-i")
        self.forbidden(self.options.failover, "-F")

        rc = RC_OK

        # Check the model file path
        lmf = self.lmfpath()

        # Load next model
        newconf = open_model(lmf)
        newfsconf = newconf._fs
        newfsconf.setup_target_devices(update_mode=True)
        neweh = self.GLOBAL_EH(self)
        newfs = instantiate_lustrefs(newconf, nodes=self.options.nodes,
                                     excluded=self.options.excludes,
                                     event_handler=neweh)
        newfs.set_debug(self.options.debug)

        # Load current registered FS
        oldeh = self.GLOBAL_EH(self)
        oldconf, oldfs = open_lustrefs(newfsconf.fs_name,
                                       nodes=self.options.nodes,
                                       excluded=self.options.excludes,
                                       event_handler=oldeh)
        oldfs.set_debug(self.options.debug)

        # Compare them
        actions = oldconf._fs.compare(newfsconf)

        # Convert Configuration objects to ComponentGroup
        # for old filesystem
        oldcomps = ComponentGroup()
        for action in ('unmount', 'stop', 'remove'):
            if action in actions:
                actions[action] = convert_comparison(
                    oldconf, oldfs, actions[action]).managed()
                if len(actions[action]) == 0:
                    del actions[action]
                else:
                    for comp in actions[action]:
                        if comp not in oldcomps:
                            oldcomps.add(comp)

        # for new filesystem
        for action in ('format', 'start', 'mount'):
            if action in actions:
                # XXX: Do we need to add .managed() here?
                actions[action] = convert_comparison(newconf, newfs,
                                                     actions[action])

        self.display_changes(actions)

        # XXX: Update message with node list
        if not self.ask_confirm("Update `%s': do you want to continue?"
                                % oldfs.fs_name):
            return RC_FAILURE

        # Will call the handle_pre() method defined by the event handler.
        if hasattr(oldeh, 'pre'):
            oldeh.pre(oldfs)

        #
        # UNINSTALL unused components from the old filesystem version.
        #
        try:
            # Check status of removed components
            if len(oldcomps):
                self._precheck(oldfs, oldfs.status, 'verify', comps=oldcomps)

            # Unmount what will be removed or remounted
            if 'unmount' in actions:
                comps = actions['unmount']
                self._apply(oldfs, oldfs.umount, 'unmount', comps, OFFLINE)

            # Stop what needs to be stopped or will be removed
            if 'stop' in actions:
                self._apply(oldfs, oldfs.stop, 'stop', actions['stop'],
                            OFFLINE)

            # Remove conf on now unused nodes
            # XXX: This does not take _precheck() status into account.
            oldservers = oldcomps.managed().allservers()
            newservers = newfs.components.managed().allservers()
            removedsrvs = oldservers.difference(newservers)
            if len(removedsrvs) > 0:
                self.__verbose("Remove configuration from %s" % removedsrvs)
                self._remove(oldfs, oldfs.remove, "uninstall", removedsrvs)

        except CannotApplyError as exp:
            self.__warning(str(exp))
            print("Please fix the error or disable %s and restart the"
                  " update command" % exp.elements)
            return 1

        # Unregister from backend
        if 'remove' in actions:
            self.__verbose("Remove target(s) %s from backend."
                           % actions['remove'].labels())
            for comp in actions['remove'].filter(supports='dev'):
                tgtlist = [oldconf.get_target_from_tag_and_type(
                                              comp.tag, comp.TYPE.upper())]
                if not self.options.dryrun:
                    oldconf.unregister_targets(tgtlist)

        #
        # NewFS
        #

        # Register the new conf
        self.__debug("Create new filesystem version")
        # XXX: Replace that with a simple fs save.
        newconf, newfs = create_lustrefs(lmf,
                                         nodes=self.options.nodes,
                                         excluded=self.options.excludes,
                                         event_handler=neweh,
                                         update_mode=True)
        newfs.set_debug(self.options.debug)

        # Register the new targets in backend
        if 'format' in actions:
            self.__verbose("Register target(s) %s into backend."
                           % actions['format'].labels())
            for comp in actions['format']:
                tgtlist = [newconf.get_target_from_tag_and_type(
                                              comp.tag, comp.TYPE.upper())]
                if not self.options.dryrun:
                    oldconf.register_targets(tgtlist)

        # Will call the handle_pre() method defined by the event handler.
        if hasattr(neweh, 'pre'):
            neweh.pre(newfs)

        # Update with new conf
        # Note: For user convenience, we always copy configuration; this
        # could help when nodes are misinstalled.
        self._copy(newfs, newconf.get_cfg_filename())
        if Globals().get_tuning_file():
            self._copy(newfs, Globals().get_tuning_file())

        next_actions = []

        # Tunefs if needed
        if 'tunefs' in actions or 'writeconf' in actions:
            next_actions.append("Need to run `tunefs' on all components.")
            next_actions.append(self._next_action_cmd('tunefs', newfs))

        # Reformat if needed
        if 'reformat' in actions:
            next_actions.append("Need to `reformat' all targets.")
            next_actions.append(self._next_action_cmd('format', newfs))

        # Format if needed
        if 'format' in actions:
            # XXX: Check if everything is already stopped?
            next_actions.append("You can now `format' %d new target(s)"
                                % len(actions['format']))
            next_actions.append(
                self._next_action_cmd('format', newfs,
                                      '-l %s' % actions['format'].labels()))

        # Start if needed
        if 'start' in actions:
            next_actions.append("You can now `start' %d new component(s)"
                                % len(actions['start']))
            next_actions.append(
                self._next_action_cmd('start', newfs,
                                      '-l %s' % actions['start'].labels()))

        # Mount if needed
        if 'mount' in actions:
            next_actions.append("You can now `mount' the needed %d client(s)"
                                % len(actions['mount']))
            next_actions.append(
                self._next_action_cmd('mount', newfs,
                                      '-n %s' % actions['mount'].servers()))

        # Tune if needed
        if 'tune' in actions:
            next_actions.append("Need to run `tune' on all components.")
            next_actions.append(self._next_action_cmd('tune', newfs))

        # Print this header only if there are other actions to be performed
        if next_actions:
            print()
            print("NEXT ACTIONS (should be done manually)")
            for txt in next_actions:
                print(">%s" % txt)

        print("Update is finished.")
        return rc