def needed_modules(self):
    """Return the list of kernel modules this component requires."""
    conf = Globals()
    # Only the 2.4 series (2.4 <= version < 2.5) still relies on the
    # fsfilt layer; everything else loads ldiskfs directly.
    is_v24_series = (not conf.lustre_version_is_smaller('2.4')
                     and conf.lustre_version_is_smaller('2.5'))
    if is_v24_series:
        # lustre 2.4 needs fsfilt_ldiskfs
        return ['lustre', 'fsfilt_ldiskfs']
    return ['lustre', 'ldiskfs']
def test_tty(self):
    """setup with a fake tty attached"""
    sys.stdout = FakeFile(True)
    # config.color == auto
    Globals().replace('color', 'auto')
    tbl = setup_table(DummyOptions('always', True))
    self.assertTrue(tbl.color)
    tbl = setup_table(DummyOptions('never', True))
    self.assertFalse(tbl.color)
    tbl = setup_table(DummyOptions('auto', True))
    self.assertTrue(tbl.color)
    # config.color == never
    Globals().replace('color', 'never')
    tbl = setup_table(DummyOptions('always', True))
    self.assertTrue(tbl.color)
    tbl = setup_table(DummyOptions('never', True))
    self.assertFalse(tbl.color)
    tbl = setup_table(DummyOptions('auto', True))
    self.assertFalse(tbl.color)
    # config.color == always  (comment was a copy/paste of 'never' above)
    Globals().replace('color', 'always')
    tbl = setup_table(DummyOptions('always', True))
    self.assertTrue(tbl.color)
    tbl = setup_table(DummyOptions('never', True))
    self.assertFalse(tbl.color)
    tbl = setup_table(DummyOptions('auto', True))
    self.assertTrue(tbl.color)
def testLoadExample(self):
    """Values from the shipped example shine.conf load correctly."""
    saved_default = Globals.DEFAULT_CONF_FILE
    Globals.DEFAULT_CONF_FILE = "../conf/shine.conf"
    conf = Globals()
    self.assertEqual(conf.get('backend'), 'None')
    self.assertEqual(conf.get('storage_file'), '/etc/shine/storage.conf')
    Globals.DEFAULT_CONF_FILE = saved_default
def setup_table(options, fmt=None):
    """
    Return a TextTable already setup based on display command line
    options like color and header.
    """
    tbl = TextTable(fmt)
    conf_color = Globals()['color']
    if options.color == 'always':
        tbl.color = True
    elif options.color == 'auto':
        # 'auto' on the command line defers to the configuration value;
        # a configured 'auto' finally depends on stdout being a tty.
        tbl.color = (conf_color == 'always'
                     or (conf_color == 'auto' and sys.stdout.isatty()))
    else:
        # options.color == 'never'
        tbl.color = False
    tbl.show_header = options.header
    return tbl
def get_lmf_path(self):
    """
    Return the LMF file path.
    Perform some basic checks and add (if needed) the path of the
    base directory.
    """
    model = self.options.model
    # A file that exists at the given location always wins.
    if os.path.isfile(model):
        return model
    # Otherwise look inside the configured default LMF directory,
    # but only for relative paths.
    lmf_dir = Globals().get_lmf_dir()
    if not os.path.isabs(model) and os.path.isdir(lmf_dir):
        candidate = os.path.join(lmf_dir, model)
        if os.path.isfile(candidate):
            return candidate
        # Last chance: the '.lmf' extension may simply be missing.
        base, ext = os.path.splitext(model)
        if not ext:
            candidate = os.path.join(lmf_dir, "%s.lmf" % base)
            if os.path.isfile(candidate):
                return candidate
    # Failed
    return None
def _cache_path(cls, fsname):
    """Build and check a cache file path from filesystem name."""
    conf_dir = os.path.expandvars(Globals().get_conf_dir())
    if os.path.exists(conf_dir):
        return "%s/%s.xmf" % (os.path.normpath(conf_dir), fsname)
    raise ConfigException("Cache directory does not exist '%s'" % conf_dir)
def cmd_show_conf(self):
    """Show shine.conf"""
    tbl = setup_table(self.options, "%param %value")
    # One row per global configuration parameter.
    for param, value in Globals().as_dict().items():
        tbl.append({'param': param, 'value': str(value)})
    print(str(tbl))
    return 0
def __init__(self):
    """Apply project-wide defaults to the local ClusterShell task."""
    fanout = Globals().get_ssh_fanout()
    tsk = task_self()
    tsk.set_info("print_debug", print_csdebug)
    # Only override the fanout when a positive value is configured.
    if fanout > 0:
        tsk.set_info("fanout", fanout)
def test_lustre_version(self):
    """lustre_version_is_smaller() follows the configured version"""
    conf = Globals()
    # Without a configured version, nothing compares as smaller.
    self.assertFalse(conf.lustre_version_is_smaller('1.6.5'))
    conf.add('lustre_version', '1.8.5')
    self.assertFalse(conf.lustre_version_is_smaller('1.6.7'))
    self.assertTrue(conf.lustre_version_is_smaller('2.0.0.1'))
def test_format_target_mdt_quota_v2x(self):
    """test command line format v2.x (MDT with quota)"""
    Globals().replace('lustre_version', '2.0.0.1')
    self.fs.new_target(self.srv1, 'mgt', 0, '/dev/root')
    target = self.fs.new_target(self.srv2, 'mdt', 0, '/dev/root')
    target.full_check(mountdata=False)
    # For 2.x (< 2.4) the quota type is passed as an mdd --param.
    action = Format(target, quota=True, quota_type='ug')
    self.check_cmd_format(action,
                          '--mdt --index=0 '
                          '"--mgsnode=localhost@tcp" '
                          '"--param=mdd.quota_type=ug" /dev/root')
def _start_status_client(self, fs_name):
    """Open (creating on first use) the shelve status file for fs_name."""
    status_dir = Globals().get_status_dir()
    if not os.path.exists(status_dir):
        # First use: the status directory has to be created.
        os.mkdir(status_dir)
    status_path = os.path.join(status_dir, fs_name)
    self.status_clients[fs_name] = shelve.open(status_path)
def test_format_target_ost_quota_v24(self):
    """test command line format v2.4 and above (OST with quota)"""
    Globals().replace('lustre_version', '2.4')
    self.fs.new_target(self.srv1, 'mgt', 0, '/dev/root')
    target = self.fs.new_target(self.srv2, 'ost', 0, '/dev/root')
    target.full_check(mountdata=False)
    # From 2.4 on, quota is not a format option: no --param expected.
    action = Format(target, quota=True, quota_type='ug')
    self.check_cmd_format(action,
                          '--ost --index=0 '
                          '"--mgsnode=localhost@tcp" /dev/root')
def cmd_show_storage(self):
    """Show storage info

    Lists every target device known to the selected storage backend,
    or a short notice when no backend is configured. Returns 0.
    """
    backend = BackendRegistry().selected()
    if not backend:
        # no backend? check to be sure
        # (fixed: the two message fragments concatenated without a
        # separator, producing "...configuration(backend=...)")
        assert Globals().get_backend() == "None", \
            "Error: please check your storage backend configuration " \
            "(backend=%s)" % Globals().get_backend()
        print("Storage backend is disabled, please check storage "
              "information as a per-filesystem basis with ``show info''.")
    else:
        backend.start()
        cnt = 0
        for tgt in ['mgt', 'mdt', 'ost']:
            for dev in backend.get_target_devices(tgt):
                print(dev)
                cnt += 1
        print("Total: %d devices" % cnt)
    return 0
def _run_actions(self):
    """
    Start actions run-loop.

    It clears all previous proxy errors and starts task run-loop. This
    launches all FSProxyAction prepared before by example.
    """
    self.proxy_errors = MsgTree()
    tsk = task_self()
    tsk.set_default("stderr_msgtree", False)
    tsk.set_info('connect_timeout', Globals().get_ssh_connect_timeout())
    tsk.resume()
def test_tunefs_target_quota_v24(self):
    """test command line tunefs quota (v2.4 and above)"""
    Globals().replace('lustre_version', '2.4')
    self.fs.new_target(self.srv1, 'mgt', 0, '/dev/root')
    # From 2.4 on, quota is no longer handled by tunefs: no --param.
    mdt = self.fs.new_target(self.srv1, 'mdt', 0, '/dev/root')
    self.check_cmd_tunefs(Tunefs(mdt, quota=True, quota_type='ug'),
                          '"--mgsnode=localhost@tcp" /dev/root')
    ost = self.fs.new_target(self.srv1, 'ost', 0, '/dev/sdb')
    self.check_cmd_tunefs(Tunefs(ost, quota=True, quota_type='ug'),
                          '"--mgsnode=localhost@tcp" /dev/sdb')
def get_tuning(cls, fs_conf, comps):
    """
    Tune class method: get TuningModel for a fs configuration.
    """
    # XXX: Even when no tuning.conf is defined we still build a tuning
    # model, because it is also used for quota. Be careful: this can be
    # very confusing for users, who may think tuning will be applied
    # when it is not.
    model = TuningModel()
    tuning_file = Globals().get_tuning_file()
    # Load the tuning configuration file when one is configured.
    if tuning_file:
        model.parse(filename=tuning_file)
    # Quota tuning parameters only apply before Lustre 2.4.
    if Globals().lustre_version_is_smaller('2.4'):
        cls._add_quota_tuning(model, fs_conf)
    cls._add_active_tuning(model, comps)
    return model
def test_tunefs_target_quota_v2x(self):
    """test command line tunefs quota (v2.x)"""
    Globals().replace('lustre_version', '2.0.0.1')
    self.fs.new_target(self.srv1, 'mgt', 0, self.block1)
    # For 2.x (< 2.4), quota type is passed as mdd/ost --param options.
    mdt = self.fs.new_target(self.srv1, 'mdt', 0, self.block1)
    expected = ('"--mgsnode=localhost@tcp" '
                '"--param=mdd.quota_type=ug" %s' % self.block1)
    self.check_cmd_tunefs(Tunefs(mdt, quota=True, quota_type='ug'),
                          expected)
    ost = self.fs.new_target(self.srv1, 'ost', 0, '/dev/sdb')
    self.check_cmd_tunefs(Tunefs(ost, quota=True, quota_type='ug'),
                          '"--mgsnode=localhost@tcp" '
                          '"--param=ost.quota_type=ug" /dev/sdb')
def copy_tuning(self, fs, comps=None):
    """Copy tuning.conf if defined."""
    if self.has_local_flag():
        return
    tuning_conf = Globals().get_tuning_file()
    if not tuning_conf:
        return
    servers = None
    if comps:
        # take into account -n and -x options
        servers = comps.allservers()
        if self.options.nodes is not None:
            servers.intersection_update(self.options.nodes)
        if self.options.excludes is not None:
            servers.difference_update(self.options.excludes)
    fs.install(tuning_conf, servers=servers)
def lmfpath(self): """Check LMF value and return a full LMF path""" if not self.options.model: raise CommandHelpException( "Lustre model file path " "(-m <model_file>) " "argument required.", self) lmf = self.get_lmf_path() if lmf: print "Using Lustre model file %s" % lmf else: raise CommandHelpException( "Lustre model file for ``%s'' not found:" " please use filename or full LMF path.\n Your default " "model file directory (lmf_dir) is: %s" % (self.options.model, Globals().get_lmf_dir()), self) return lmf
def _shell(self):
    """Create a command line and schedule it to be run by self.task"""
    # Build the command through the subclass-specific method.
    command = self._prepare_cmd()
    # Prepend a PATH extension when one is configured.
    cmd_path = Globals().get('command_path')
    if cmd_path:
        command.insert(0, "export PATH=%s:${PATH};" % cmd_path)
    cmdline = ' '.join(command)
    self.server.hdlr.log('detail', msg='[RUN] %s' % cmdline)
    if self.dryrun:
        # Dry-run mode: report success without executing anything.
        self.server.action_event(self, 'done')
        self.set_status(ACT_OK)
    else:
        self.task.shell(cmdline, handler=self)
def _mountdata_check(self, label_check=None):
    """Read device flags using 'tunefs.lustre'

    Runs 'tunefs.lustre --dryrun' on self.dev, stores the parsed flags
    in self._ldd_flags and the service name in self.ldd_svname. When
    label_check is given, raises DiskDeviceError if the on-disk service
    name does not match it. Also raises DiskDeviceError when the
    tunefs.lustre command fails.
    """
    cmd = "tunefs.lustre --dryrun %s" % self.dev
    # Honor the optional configured command path, as done for actions.
    path = Globals().get('command_path')
    if path:
        cmd = "export PATH=%s:${PATH}; %s" % (path, cmd)
    process = subprocess.Popen([cmd], stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE, shell=True)
    output = process.communicate()[0]
    if process.returncode > 0:
        raise DiskDeviceError(self, "Failed to run 'tunefs.lustre' to " +
                              "read flags (rc=%d)" % process.returncode)
    for line in output.splitlines():
        line = line.strip()
        if line.startswith('Flags:'):
            # Flags value is hexadecimal in tunefs.lustre output.
            self._ldd_flags = int(line.split(':')[1], 16)
        elif line.startswith('Target:'):
            # split(':', 1): the service name itself may contain ':'.
            self.ldd_svname = line.split(':', 1)[1].strip()
        elif line.startswith('Permanent disk data:'):
            # Stop before the permanent disk data section, which would
            # repeat the same fields.
            break
    if label_check:
        # Lustre 2.3 changed the label patterns.
        # fsname and svname could be separated by '-', ':' and '='
        # For compatibility reasons, we ignore ':' and '='.
        # The separator sits 8 chars from the end (e.g. ':OST0000').
        if len(self.ldd_svname) > 8 and self.ldd_svname[-8] in (':', '='):
            self.ldd_svname = "%s-%s" % (self.ldd_svname[:-8],
                                         self.ldd_svname[-7:])
        if self.ldd_svname != label_check:
            raise DiskDeviceError(self,
                                  "Found service %s != %s on %s"
                                  % (self.ldd_svname, label_check,
                                     self.dev))
def iter_fsname(self):
    """Iterate over the selected filesystem names."""
    opts = self.options
    if opts.labels and not opts.fsnames:
        # Labels such as 'fsname-OST0000' implicitly carry fs names:
        # extract them, keeping first-seen order without duplicates.
        names = []
        for label in opts.labels:
            fsname = str(label).split('-', 1)[0]
            if fsname not in names:
                names.append(fsname)
        opts.fsnames = names
    elif not opts.fsnames:
        # No explicit selection: default to every '<name>.xmf' file
        # found in the cache directory.
        opts.fsnames = []
        xmfdir = Globals().get_conf_dir()
        if os.path.isdir(xmfdir):
            for filename in os.listdir(xmfdir):
                name, ext = os.path.splitext(filename)
                if name and ext == '.xmf':
                    opts.fsnames.append(name)
    return iter(opts.fsnames)
def remove(self, servers=None, **kwargs):
    """
    Remove FS config files.

    Removes the local xmf cache file (when a local server is involved)
    and proxies the remove operation to distant servers. Returns an
    integer return code, or RUNTIME_ERROR on proxy errors.
    """
    result = 0

    if servers is None:
        # Get all possible servers
        servers = self.components.managed().allservers()

    # filter local server
    distant_servers = Server.distant_servers(servers)

    # If size is different, we have a local server in the list
    if len(distant_servers) < len(servers):
        # remove local fs configuration file
        fs_file = os.path.join(Globals().get_conf_dir(),
                               "%s.xmf" % self.fs_name)
        if os.path.exists(fs_file):
            self.hdlr.log('detail', msg='[DEL] %s' % fs_file)
            if not kwargs.get('dryrun', False):
                # BUGFIX: the old code did 'result = os.remove(...)',
                # but os.remove() returns None, so the method returned
                # None instead of the documented integer rc.
                os.remove(fs_file)

    if len(distant_servers) > 0:
        # Perform the remove operations on all targets for these nodes.
        self._proxy_action('remove', distant_servers, **kwargs).launch()

    # Run local actions and FSProxyAction
    self._run_actions()

    if len(self.proxy_errors) > 0:
        return RUNTIME_ERROR

    return result
def make_temp_conf(self, txt):
    """Write txt to a temporary file and use it as the storage file."""
    tmpfile = makeTempFile(txt)
    self._storagefile = tmpfile
    Globals().replace('storage_file', tmpfile.name)
def tearDown(self):
    """Restore global settings and clean up test filesystem state."""
    Globals().replace('storage_file', '/etc/shine/storage.conf')
    conf = self._conf
    if conf is not None:
        conf.unregister_fs()
    clean_tempdirs()
    Globals().replace('backend', 'None')
def setUp(self):
    """Prepare temp directories and select the 'File' backend."""
    self._conf = None
    setup_tempdirs()
    Globals().replace('backend', 'File')
tgtlist = [ newconf.get_target_from_tag_and_type( comp.tag, comp.TYPE.upper()) ] if not self.options.dryrun: oldconf.register_targets(tgtlist) # Will call the handle_pre() method defined by the event handler. if hasattr(neweh, 'pre'): neweh.pre(newfs) # Update with new conf # Note: For user convenience, we always copy configuration, this could # help when nodes are misinstalled. self._copy(newfs, newconf.get_cfg_filename()) if Globals().get_tuning_file(): self._copy(newfs, Globals().get_tuning_file()) next_actions = [] # Tunefs if needed if 'tunefs' in actions or 'writeconf' in actions: next_actions.append("Need to run `tunefs' on all components.") next_actions.append(self._next_action_cmd('tunefs', newfs)) # Reformat if needed if 'reformat' in actions: next_actions.append("Need to `reformat' all targets.") next_actions.append(self._next_action_cmd('format', newfs)) # Format if needed
def _prepare_cmd(self):
    """Build the command-line argument list for this target.

    Returns a list of option strings covering --param, --failnode and
    --network options, plus any extra options from the command line.
    """
    command = []

    # --mgsnode and specific --param
    if self.comp_is_mdt:
        command += self._mgsnids()
        if self.stripecount:
            command.append('--param=lov.stripecount=%d' % self.stripecount)
        if self.stripesize:
            command.append('--param=lov.stripesize=%d' % self.stripesize)
        # quota_type --param only exists before Lustre 2.4; the option
        # prefix changed from 'mdt' to 'mdd' with the 2.x series.
        if Globals().lustre_version_is_smaller('2.4') and \
                self.quota_type is not None:
            if Globals().lustre_version_is_smaller('2'):
                option = 'mdt.quota_type'
            else:
                option = 'mdd.quota_type'
            command.append('"--param=%s=%s"' % (option, self.quota_type))
    elif self.comp_is_ost:
        command += self._mgsnids()
        if Globals().lustre_version_is_smaller('2.4') and \
                self.quota_type is not None:
            command.append('"--param=ost.quota_type=%s"' % self.quota_type)

    # --failnode: NID(s) of failover partner
    target_nids = self.comp.get_nids()
    for nidlist in target_nids[1:]:
        # if 'network' is specified, restrict the list of partner
        if self.comp.network and (self.comp_is_mdt or self.comp_is_ost):
            # Parse network field
            match = re.match("^([a-z0-9]+?)(\d+)?$", self.comp.network)
            if not match:
                raise ValueError("Unrecognized network: %s"
                                 % self.comp.network)
            suffix = ((match.group(1), match.group(2) or '0'))

            # Analyze NID list.
            def _same_suffix(nid):
                """
                Returns true if NID suffix is equivalent to 'suffix'.
                If LNET network is not set, it is considered as '0'.
                That means 'x@tcp' and 'x@tcp0' is considered equivalent.
                """
                mo = re.match(r'.*@([a-z0-9]+?)(\d+)?$', nid)
                return mo and ((mo.group(1), mo.group(2) or '0') == suffix)

            nidlist = [nid for nid in nidlist if _same_suffix(nid)]

        # if there is still some matching partners, add them
        if len(nidlist) > 0:
            nids = ','.join(nidlist)
            command.append('"--failnode=%s"' % nids)

    # --network: restrict target to a specific LNET network
    if self.comp.network and (self.comp_is_mdt or self.comp_is_ost):
        command.append('--network=%s' % self.comp.network)

    # Generic --param
    if self.format_params and self.format_params.get(self.comp.TYPE):
        command.append('"--param=%s"'
                       % self.format_params.get(self.comp.TYPE))

    # Mount options from command line
    if self.addopts:
        command.append(self.addopts)

    return command
def execute(self):
    """Create and register a new filesystem from a Lustre model file."""
    # Option sanity check
    self.forbidden(self.options.fsnames, "-f, see -m")
    self.forbidden(self.options.labels, "-l")
    self.forbidden(self.options.indexes, "-i")
    self.forbidden(self.options.failover, "-F")

    rc = RC_OK

    if not self.options.model:
        # NOTE(review): missing space between "path" and "(-m ...)" in
        # this message — the sibling lmfpath() method includes one.
        raise CommandHelpException("Lustre model file path"
                                   "(-m <model_file>) argument required.",
                                   self)

    eh = FSGlobalEventHandler(self)

    # Use this Shine.FSUtils convenience function.
    lmf = self.get_lmf_path()
    if lmf:
        print "Using Lustre model file %s" % lmf
    else:
        raise CommandHelpException(
            "Lustre model file for ``%s'' not found:"
            " please use filename or full LMF path.\n"
            "Your default model files directory (lmf_dir) is: %s"
            % (self.options.model, Globals().get_lmf_dir()), self)

    install_nodes = self.options.nodes
    excluded_nodes = self.options.excludes

    fs_conf, fs = create_lustrefs(self.get_lmf_path(),
                                  event_handler=eh,
                                  nodes=install_nodes,
                                  excluded=excluded_nodes)

    # Register the filesystem in backend
    print "Registering FS %s to backend..." % fs.fs_name
    if self.options.dryrun:
        rc = 0
    else:
        rc = self.register_fs(fs_conf)
    if rc:
        msg = "Error: failed to register FS to backend (rc=%d)" % rc
        print >> sys.stderr, msg
    else:
        print "Filesystem %s registered." % fs.fs_name

    # Helper message.
    # If user specified nodes which were not used, warn him about it.
    actual_nodes = fs.components.managed().servers()
    if not self.check_valid_list(fs_conf.get_fs_name(),
                                 actual_nodes, "install"):
        return RC_FAILURE

    # Install file system configuration files; normally, this should
    # not be done by the Shine.Lustre.FileSystem object itself, but as
    # all proxy methods are currently handled by it, it is more
    # convenient this way...
    try:
        fs.install(fs_conf.get_cfg_filename(), dryrun=self.options.dryrun)
        tuning_conf = Globals().get_tuning_file()
        if tuning_conf:
            fs.install(tuning_conf, dryrun=self.options.dryrun)
    except FSRemoteError, error:
        print "WARNING: Due to error, installation skipped on %s" \
            % error.nodes
        rc = RC_FAILURE
def setUp(self):
    """Create a test filesystem and reset color detection to 'auto'."""
    Globals().replace('color', 'auto')
    self._fs = FileSystem('foofs')