def test_label(self):
    """Selecting by label keeps exactly the one matching component."""
    # shine -f param -l param-MDT0000
    fsconf, fs = open_lustrefs(self.fsconf.get_fs_name(),
                               labels=NodeSet("param-MDT0000"))
    components = self.complist(fs)
    self.assertEqual(len(components), 1)
    self.assert_comp(components[0], MDT.TYPE, 0)
def test_target_index_unknown_failover(self):
    """An unknown failover node yields an empty selection."""
    # shine -f param -t ost -i 1 -F foo5
    fsconf, fs = open_lustrefs(self.fsconf.get_fs_name(),
                               target_types="ost",
                               failover=NodeSet("foo5"),
                               indexes=RangeSet("1"))
    components = self.complist(fs)
    self.assertEqual(len(components), 0)
def test_target_only(self):
    """Selecting by target type returns every OST of the filesystem."""
    # shine -f param -t ost
    fsconf, fs = open_lustrefs(self.fsconf.get_fs_name(),
                               target_types="ost")
    components = self.complist(fs)
    self.assertEqual(len(components), 8)
    for component in components:
        self.assertEqual(component.TYPE, OST.TYPE)
def test_nodes_target(self):
    """Combining -t and -n keeps only the OSTs hosted on the node."""
    # shine -f param -t ost -n foo2
    fsconf, fs = open_lustrefs(self.fsconf.get_fs_name(),
                               target_types="ost", nodes=NodeSet("foo2"))
    components = self.complist(fs)
    self.assertEqual(len(components), 2)
    for position, device in enumerate(("/dev/sdc", "/dev/sdd")):
        self.assert_comp(components[position], OST.TYPE, position, device)
def test_target_index_failover(self):
    # shine -f param -t ost -F foo2
    # NOTE(review): the method name and the previous comment mentioned
    # "-i 1", but no 'indexes=' argument is passed below, and the
    # assertions (indexes 2 and 3) match the index-less failover-only
    # selection -- TODO confirm the intended scope of this test.
    fsconf, fs = open_lustrefs(self.fsconf.get_fs_name(),
                               target_types="ost",
                               failover=NodeSet("foo2"))
    comps = self.complist(fs)
    self.assertEqual(len(comps), 2)
    self.assert_comp(comps[0], OST.TYPE, 2, "/dev/sdc")
    self.assert_comp(comps[1], OST.TYPE, 3, "/dev/sdd")
def _open_fs(self, fsname, eh):
    """Open filesystem `fsname`, filtered by nodes/excludes/labels.

    No target-type selection is applied (second argument is None).
    """
    return open_lustrefs(fsname, None,
                         nodes=self.options.nodes,
                         excluded=self.options.excludes,
                         labels=self.options.labels,
                         event_handler=eh)
def test_target_index_unknown_failover(self):
    """An unknown failover node yields an empty selection."""
    # shine -f param -t ost -i 1 -F foo5
    fsconf, fs = open_lustrefs(self.fsconf.get_fs_name(),
                               target_types='ost',
                               failover=NodeSet('foo5'),
                               indexes=RangeSet("1"))
    components = self.complist(fs)
    self.assertEqual(len(components), 0)
def test_target_only(self):
    """Selecting by target type returns every OST of the filesystem."""
    # shine -f param -t ost
    fsconf, fs = open_lustrefs(self.fsconf.get_fs_name(),
                               target_types='ost')
    components = self.complist(fs)
    self.assertEqual(len(components), 8)
    for component in components:
        self.assertEqual(component.TYPE, OST.TYPE)
def test_target_index(self):
    """Combining -t and -i keeps only the OST with that index."""
    # shine -f param -t ost -i 1
    fsconf, fs = open_lustrefs(self.fsconf.get_fs_name(),
                               target_types='ost',
                               indexes=RangeSet("1"))
    components = self.complist(fs)
    self.assertEqual(len(components), 1)
    self.assert_comp(components[0], OST.TYPE, 1)
def test_target_index_failover(self):
    # shine -f param -t ost -F foo2
    # NOTE(review): the method name and the previous comment mentioned
    # "-i 1", but no 'indexes=' argument is passed below, and the
    # assertions (indexes 2 and 3) match the index-less failover-only
    # selection -- TODO confirm the intended scope of this test.
    fsconf, fs = open_lustrefs(self.fsconf.get_fs_name(),
                               target_types='ost',
                               failover=NodeSet('foo2'))
    comps = self.complist(fs)
    self.assertEqual(len(comps), 2)
    self.assert_comp(comps[0], OST.TYPE, 2, "/dev/sdc")
    self.assert_comp(comps[1], OST.TYPE, 3, "/dev/sdd")
def _open_fs(self, fsname, eh):
    """Open `fsname` applying every command-line selection option."""
    fs_conf, fs = open_lustrefs(fsname, self.options.targets,
                                nodes=self.options.nodes,
                                excluded=self.options.excludes,
                                failover=self.options.failover,
                                indexes=self.options.indexes,
                                labels=self.options.labels,
                                event_handler=eh)
    return fs_conf, fs
def test_nodes_target(self):
    """Combining -t and -n keeps only the OSTs hosted on the node."""
    # shine -f param -t ost -n foo2
    fsconf, fs = open_lustrefs(self.fsconf.get_fs_name(),
                               target_types='ost', nodes=NodeSet('foo2'))
    components = self.complist(fs)
    self.assertEqual(len(components), 2)
    for position, device in enumerate(("/dev/sdc", "/dev/sdd")):
        self.assert_comp(components[position], OST.TYPE, position, device)
def test_target_index_nodes_failover(self):
    # shine -f param -t ost -n foo3 -F foo3 -i 1
    # (comment fixed: the call below selects nodes foo3 -- not foo2 --
    # and index 1 -- not 3 -- as the arguments show)
    fsconf, fs = open_lustrefs(self.fsconf.get_fs_name(),
                               target_types='ost',
                               nodes=NodeSet('foo3'),
                               failover=NodeSet('foo3'),
                               indexes=RangeSet('1'))
    comps = self.complist(fs)
    self.assertEqual(len(comps), 1)
    self.assert_comp(comps[0], OST.TYPE, 1, "/dev/sdd")
def test_exclude(self):
    """Excluding nodes removes their components from the selection."""
    # shine -f param -x foo[3,5,8-10]
    fsconf, fs = open_lustrefs(self.fsconf.get_fs_name(),
                               excluded=NodeSet("foo[3,5,8-10]"))
    comps = self.complist(fs)
    self.assertEqual(len(comps), 6)
    for position, comptype in ((0, Client.TYPE), (1, MDT.TYPE),
                               (2, MGT.TYPE), (5, Router.TYPE)):
        self.assertEqual(comps[position].TYPE, comptype)
    for position, device in enumerate(("/dev/sdc", "/dev/sdd"), start=3):
        self.assert_comp(comps[position], OST.TYPE, None, device)
def test_open_from_cache(self):
    """A filesystem registered in cache can be re-opened by name."""
    # Store the example model in the cache first.
    fs_conf, fs_obj = create_lustrefs("../conf/models/example.lmf")
    self.assertTrue(fs_conf)
    self.assertTrue(fs_obj)
    # Then re-read it back from the cache by its name.
    fs_conf, fs_obj = open_lustrefs(fs_conf.get_fs_name())
    self.assertTrue(fs_conf)
    self.assertTrue(fs_obj)
    # Clean up the cache entry.
    fs_conf.unregister_fs()
def _open_fs(self, fsname, eh):
    """Open `fsname` in extended mode with every selection option.

    Status command needs to open the filesystem in extended mode.
    See FSUtils.instantiate_lustrefs() for the use of this argument.
    """
    return open_lustrefs(fsname, self.options.targets,
                         nodes=self.options.nodes,
                         excluded=self.options.excludes,
                         failover=self.options.failover,
                         indexes=self.options.indexes,
                         labels=self.options.labels,
                         event_handler=eh,
                         extended=True)
def test_exclude(self):
    """Excluding nodes removes their components from the selection."""
    # shine -f param -x foo[3,5,8-10]
    fsconf, fs = open_lustrefs(self.fsconf.get_fs_name(),
                               excluded=NodeSet("foo[3,5,8-10]"))
    comps = self.complist(fs)
    self.assertEqual(len(comps), 9)
    self.assertEqual(comps[0].TYPE, Client.TYPE)
    self.assertEqual(comps[1].TYPE, MDT.TYPE)
    self.assertEqual(comps[2].TYPE, MGT.TYPE)
    for position, device in enumerate(("/dev/sdc", "/dev/sdd",
                                       "/dev/sde", "/dev/sdf"), start=3):
        self.assert_comp(comps[position], OST.TYPE, None, device)
    self.assertEqual(comps[7].TYPE, Router.TYPE)
    self.assertEqual(comps[8].TYPE, Router.TYPE)
def test_action_enabled_nodes(self):
    """Extended selection with -n flags action_enabled per server."""
    # shine failover extended selection with nodes
    fsconf, fs = open_lustrefs(self.fsconf.get_fs_name(),
                               nodes=NodeSet("foo[1,3]"), extended=True)
    comps = self.complist(fs)
    self.assertEqual(len(comps), 6)
    self.assertEqual(comps[0].TYPE, MDT.TYPE)
    self.assertEqual(comps[1].TYPE, MGT.TYPE)
    for position, device in enumerate(("/dev/sdc", "/dev/sdd",
                                       "/dev/sdc", "/dev/sdd"), start=2):
        self.assert_comp(comps[position], OST.TYPE, None, device)
    # Only components whose server matches the node selection are enabled.
    for position in (0, 1, 4, 5):
        self.assertTrue(comps[position].server.action_enabled)
    for position in (2, 3):
        self.assertFalse(comps[position].server.action_enabled)
def test_action_enabled_exclude(self):
    """Extended selection with -x flags action_enabled per server."""
    # shine failover extended selection with exclude
    fsconf, fs = open_lustrefs(self.fsconf.get_fs_name(),
                               excluded=NodeSet("foo[1,2-5,9-11]"),
                               extended=True)
    comps = self.complist(fs)
    self.assertEqual(len(comps), 7)
    self.assertEqual(comps[0].TYPE, Client.TYPE)
    for position, device in enumerate(("/dev/sde", "/dev/sdf",
                                       "/dev/sde", "/dev/sdf"), start=1):
        self.assert_comp(comps[position], OST.TYPE, None, device)
    self.assertEqual(comps[5].TYPE, Router.TYPE)
    self.assertEqual(comps[6].TYPE, Router.TYPE)
    # Components on excluded servers have their actions disabled.
    for position in (0, 3, 4, 5):
        self.assertTrue(comps[position].server.action_enabled)
    for position in (1, 2):
        self.assertFalse(comps[position].server.action_enabled)
def execute(self):
    """Update a registered filesystem against a new model file (-m).

    Loads the new model, compares it with the currently registered
    configuration, asks for confirmation, then unmounts, stops and
    uninstalls the components the new model no longer declares.
    """
    # Option sanity check
    self.forbidden(self.options.fsnames, "-f, see -m")
    self.forbidden(self.options.labels, "-l")
    self.forbidden(self.options.indexes, "-i")
    self.forbidden(self.options.failover, "-F")

    # NOTE(review): 'rc' is assigned but never returned in this version
    # of the method -- TODO confirm the success path return value.
    rc = RC_OK

    # Check
    lmf = self.lmfpath()

    # Load next model
    newconf = open_model(lmf)
    newfsconf = newconf._fs
    newfsconf.setup_target_devices(update_mode=True)
    neweh = self.GLOBAL_EH(self)
    newfs = instantiate_lustrefs(newconf, nodes=self.options.nodes,
                                 excluded=self.options.excludes,
                                 event_handler=neweh)
    newfs.set_debug(self.options.debug)

    # Load current registered FS
    oldeh = self.GLOBAL_EH(self)
    oldconf, oldfs = open_lustrefs(newfsconf.fs_name,
                                   nodes=self.options.nodes,
                                   excluded=self.options.excludes,
                                   event_handler=oldeh)
    oldfs.set_debug(self.options.debug)

    # Compare them
    actions = oldconf._fs.compare(newfsconf)

    # Convert Configuration objects to ComponentGroup
    # for old filesystem
    oldcomps = ComponentGroup()
    for action in ('unmount', 'stop', 'remove'):
        if action in actions:
            actions[action] = convert_comparison(
                oldconf, oldfs, actions[action]).managed()
            if len(actions[action]) == 0:
                del actions[action]
            else:
                # Collect each component only once across action lists.
                for comp in actions[action]:
                    if comp not in oldcomps:
                        oldcomps.add(comp)

    # for new filesystem
    for action in ('format', 'start', 'mount'):
        if action in actions:
            # XXX: Do we need to add .managed() here?
            actions[action] = convert_comparison(newconf, newfs,
                                                 actions[action])

    self.display_changes(actions)

    # XXX: Update message with node list
    if not self.ask_confirm(
            "Update `%s': do you want to continue?" % oldfs.fs_name):
        return RC_FAILURE

    # Will call the handle_pre() method defined by the event handler.
    if hasattr(oldeh, 'pre'):
        oldeh.pre(oldfs)

    #
    # UNINSTALL unused component for old filesystem version.
    #
    try:
        # Check status of removed components
        if len(oldcomps):
            self._precheck(oldfs, oldfs.status, 'verify', comps=oldcomps)

        # Unmount what will be removed or remounted
        if 'unmount' in actions:
            comps = actions['unmount']
            self._apply(oldfs, oldfs.umount, 'unmount', comps, OFFLINE)

        # Stop what need to be stopped or will be removed
        if 'stop' in actions:
            self._apply(oldfs, oldfs.stop, 'stop', actions['stop'], OFFLINE)

        # Remove conf on now unused nodes
        # XXX: This does not take _precheck() status into account.
        oldservers = oldcomps.managed().allservers()
        newservers = newfs.components.managed().allservers()
        removedsrvs = oldservers.difference(newservers)
        if len(removedsrvs) > 0:
            self.__verbose("Remove configuration from %s" % removedsrvs)
            self._remove(oldfs, oldfs.remove, "uninstall", removedsrvs)

    except CannotApplyError, exp:
        self.__warning(str(exp))
        # Due to %-before-+ precedence this prints
        # "...restart the update command".
        print "Please fix the error or disable %s and restart the update" \
              % exp.elements + " command"
        return 1
def execute(self):
    """Update a registered filesystem against a new model file (-m).

    Loads the new model, compares it with the currently registered
    configuration, asks for confirmation, then unmounts and stops the
    components affected by the update and uninstalls the configuration
    from servers the new model no longer uses.
    """
    # Option sanity check
    self.forbidden(self.options.fsnames, "-f, see -m")
    self.forbidden(self.options.labels, "-l")
    self.forbidden(self.options.indexes, "-i")
    self.forbidden(self.options.failover, "-F")

    # NOTE(review): 'rc' is assigned but never returned in this version
    # of the method -- TODO confirm the success path return value.
    rc = RC_OK

    # Check
    lmf = self.lmfpath()

    # Load next model
    newconf = open_model(lmf)
    newfsconf = newconf._fs
    newfsconf.setup_target_devices(update_mode=True)
    neweh = self.GLOBAL_EH(self)
    newfs = instantiate_lustrefs(newconf, nodes=self.options.nodes,
                                 excluded=self.options.excludes,
                                 event_handler=neweh)
    newfs.set_debug(self.options.debug)

    # Load current registered FS
    oldeh = self.GLOBAL_EH(self)
    oldconf, oldfs = open_lustrefs(newfsconf.fs_name,
                                   nodes=self.options.nodes,
                                   excluded=self.options.excludes,
                                   event_handler=oldeh)
    oldfs.set_debug(self.options.debug)

    # Compare them
    actions = oldconf._fs.compare(newfsconf)

    # Convert Configuration objects to ComponentGroup
    # for old filesystem
    oldcomps = ComponentGroup()
    for action in ('unmount', 'stop'):
        if action in actions:
            actions[action] = convert_comparison(oldconf, oldfs,
                                                 actions[action]).managed()
            if len(actions[action]) == 0:
                del actions[action]
            else:
                oldcomps.update(actions[action])

    # for new filesystem
    for action in ('format', 'start', 'mount'):
        if action in actions:
            # XXX: Do we need to add .managed() here?
            actions[action] = convert_comparison(newconf, newfs,
                                                 actions[action])

    self.display_changes(actions)

    # XXX: Update message with node list
    if not self.ask_confirm("Update `%s': do you want to continue?"
                            % oldfs.fs_name):
        return RC_FAILURE

    # Will call the handle_pre() method defined by the event handler.
    if hasattr(oldeh, 'pre'):
        oldeh.pre(oldfs)

    #
    # UNINSTALL unused component for old filesystem version.
    #
    try:
        # Check status of removed components
        if len(oldcomps):
            self._precheck(oldfs, oldfs.status, 'verify', comps=oldcomps)

        # Unmount what will be removed
        if 'unmount' in actions:
            comps = actions['unmount']
            self._apply(oldfs, oldfs.umount, 'unmount', comps, OFFLINE)

        # Stop what will be removed
        if 'stop' in actions:
            self._apply(oldfs, oldfs.stop, 'stop', actions['stop'], OFFLINE)

        # Remove conf on now unused nodes
        # XXX: This does not take _precheck() status into account.
        oldservers = oldcomps.managed().allservers()
        newservers = newfs.components.managed().allservers()
        removedsrvs = oldservers.difference(newservers)
        if len(removedsrvs) > 0:
            self.__verbose("Remove configuration from %s" % removedsrvs)
            self._remove(oldfs, oldfs.remove, "uninstall", removedsrvs)

    except CannotApplyError, exp:
        self.__warning(str(exp))
        # Due to %-before-+ precedence this prints
        # "...restart the update command".
        print "Please fix the error or disable %s and restart the update" \
              % exp.elements + " command"
        return 1
def execute(self):
    """Update a registered filesystem to match a new model file (-m).

    Compares the currently registered configuration with the new model,
    asks for confirmation, unmounts/stops/uninstalls the components the
    new model no longer declares, re-registers the filesystem, and
    prints the manual actions (tunefs/format/start/mount/tune) still to
    be performed.

    Returns RC_OK on success, RC_FAILURE if the user aborts, or 1 when
    a CannotApplyError interrupts the uninstall phase.
    """
    # Option sanity check
    self.forbidden(self.options.fsnames, "-f, see -m")
    self.forbidden(self.options.labels, "-l")
    self.forbidden(self.options.indexes, "-i")
    self.forbidden(self.options.failover, "-F")

    rc = RC_OK

    # Check
    lmf = self.lmfpath()

    # Load next model
    newconf = open_model(lmf)
    newfsconf = newconf._fs
    newfsconf.setup_target_devices(update_mode=True)
    neweh = self.GLOBAL_EH(self)
    newfs = instantiate_lustrefs(newconf, nodes=self.options.nodes,
                                 excluded=self.options.excludes,
                                 event_handler=neweh)
    newfs.set_debug(self.options.debug)

    # Load current registered FS
    oldeh = self.GLOBAL_EH(self)
    oldconf, oldfs = open_lustrefs(newfsconf.fs_name,
                                   nodes=self.options.nodes,
                                   excluded=self.options.excludes,
                                   event_handler=oldeh)
    oldfs.set_debug(self.options.debug)

    # Compare them
    actions = oldconf._fs.compare(newfsconf)

    # Convert Configuration objects to ComponentGroup
    # for old filesystem
    oldcomps = ComponentGroup()
    for action in ('unmount', 'stop', 'remove'):
        if action in actions:
            actions[action] = convert_comparison(
                oldconf, oldfs, actions[action]).managed()
            if len(actions[action]) == 0:
                del actions[action]
            else:
                # Collect each component only once across action lists.
                for comp in actions[action]:
                    if comp not in oldcomps:
                        oldcomps.add(comp)

    # for new filesystem
    for action in ('format', 'start', 'mount'):
        if action in actions:
            # XXX: Do we need to add .managed() here?
            actions[action] = convert_comparison(newconf, newfs,
                                                 actions[action])

    self.display_changes(actions)

    # XXX: Update message with node list
    if not self.ask_confirm(
            "Update `%s': do you want to continue?" % oldfs.fs_name):
        return RC_FAILURE

    # Will call the handle_pre() method defined by the event handler.
    if hasattr(oldeh, 'pre'):
        oldeh.pre(oldfs)

    #
    # UNINSTALL unused component for old filesystem version.
    #
    try:
        # Check status of removed components
        if len(oldcomps):
            self._precheck(oldfs, oldfs.status, 'verify', comps=oldcomps)

        # Unmount what will be removed or remounted
        if 'unmount' in actions:
            comps = actions['unmount']
            self._apply(oldfs, oldfs.umount, 'unmount', comps, OFFLINE)

        # Stop what need to be stopped or will be removed
        if 'stop' in actions:
            self._apply(oldfs, oldfs.stop, 'stop', actions['stop'], OFFLINE)

        # Remove conf on now unused nodes
        # XXX: This does not take _precheck() status into account.
        oldservers = oldcomps.managed().allservers()
        newservers = newfs.components.managed().allservers()
        removedsrvs = oldservers.difference(newservers)
        if len(removedsrvs) > 0:
            self.__verbose("Remove configuration from %s" % removedsrvs)
            self._remove(oldfs, oldfs.remove, "uninstall", removedsrvs)

    except CannotApplyError as exp:
        self.__warning(str(exp))
        # Due to %-before-+ precedence this prints
        # "...restart the update command".
        print("Please fix the error or disable %s and restart the update"
              % exp.elements + " command")
        return 1

    # Unregister from backend
    if 'remove' in actions:
        self.__verbose("Remove target(s) %s from backend."
                       % actions['remove'].labels())
        for comp in actions['remove'].filter(supports='dev'):
            tgtlist = [oldconf.get_target_from_tag_and_type(
                comp.tag, comp.TYPE.upper())]
            if not self.options.dryrun:
                oldconf.unregister_targets(tgtlist)

    #
    # NewFS
    #

    # Register the new conf
    self.__debug("Create new filesystem version")
    # XXX: Replace that with a simple fs save.
    newconf, newfs = create_lustrefs(lmf, nodes=self.options.nodes,
                                     excluded=self.options.excludes,
                                     event_handler=neweh,
                                     update_mode=True)
    newfs.set_debug(self.options.debug)

    # Register the new targets in backend
    if 'format' in actions:
        self.__verbose("Register target(s) %s into backend."
                       % actions['format'].labels())
        for comp in actions['format']:
            tgtlist = [newconf.get_target_from_tag_and_type(
                comp.tag, comp.TYPE.upper())]
            if not self.options.dryrun:
                # NOTE(review): targets looked up in 'newconf' are
                # registered through 'oldconf' -- confirm this is not
                # meant to be newconf.register_targets().
                oldconf.register_targets(tgtlist)

    # Will call the handle_pre() method defined by the event handler.
    if hasattr(neweh, 'pre'):
        neweh.pre(newfs)

    # Update with new conf
    # Note: For user convenience, we always copy configuration, this could
    # help when nodes are misinstalled.
    self._copy(newfs, newconf.get_cfg_filename())
    if Globals().get_tuning_file():
        self._copy(newfs, Globals().get_tuning_file())

    next_actions = []

    # Tunefs if needed
    if 'tunefs' in actions or 'writeconf' in actions:
        next_actions.append("Need to run `tunefs' on all components.")
        next_actions.append(self._next_action_cmd('tunefs', newfs))

    # Reformat if needed
    if 'reformat' in actions:
        next_actions.append("Need to `reformat' all targets.")
        next_actions.append(self._next_action_cmd('format', newfs))

    # Format if needed
    if 'format' in actions:
        # XXX: Check if everything is already stopped?
        next_actions.append("You can now `format' %d new target(s)"
                            % len(actions['format']))
        next_actions.append(
            self._next_action_cmd('format', newfs,
                                  '-l %s' % actions['format'].labels()))

    # Start if needed
    if 'start' in actions:
        next_actions.append("You can now `start' %d new component(s)"
                            % len(actions['start']))
        next_actions.append(
            self._next_action_cmd('start', newfs,
                                  '-l %s' % actions['start'].labels()))

    # Mount if needed
    if 'mount' in actions:
        next_actions.append("You can now `mount' the needed %d client(s)"
                            % len(actions['mount']))
        next_actions.append(
            self._next_action_cmd('mount', newfs,
                                  '-n %s' % actions['mount'].servers()))

    # Tune if needed
    if 'tune' in actions:
        # Typo fixed: message previously read "Need tu run".
        next_actions.append("Need to run `tune' on all components.")
        next_actions.append(self._next_action_cmd('tune', newfs))

    # Print this line only if there is other actions to be performed
    if next_actions:
        print()
        print("NEXT ACTIONS (should be done manually)")
        for txt in next_actions:
            print(">%s" % txt)

    print("Update is finished.")

    return rc