def list_filesets(self, devices=None, filesetnames=None, update=False):
    """
    Get all the filesets for one or more specific devices

    @type devices: list of devices (if string: 1 device; if None: all found devices)
    @type filesetnames: report only on specific filesets (if string: 1 filesetname)

    set self.gpfslocalfilesets is dict with
        key = filesystemName
        value is dict with
            key = id
            value is dict with
                key = remaining header entries and corresponding values
    """
    if not update and self.gpfslocalfilesets:
        return self.gpfslocalfilesets

    opts = []

    if devices is None:
        # get all devices from all filesystems
        if self.gpfslocalfilesystems is None:
            self.list_filesystems()
        devices = self.gpfslocalfilesystems.keys()
    else:
        if isinstance(devices, str):
            devices = [devices]

    if filesetnames is not None:
        if isinstance(filesetnames, str):
            filesetnames = [filesetnames]

        filesetnamestxt = ','.join(filesetnames)
        opts.append(filesetnamestxt)

    self.log.debug("Looking up filesets for devices %s", devices)

    listm = Monoid([], lambda xs, ys: xs + ys)
    info = MonoidDict(listm)
    for device in devices:
        opts_ = copy.deepcopy(opts)
        opts_.insert(1, device)
        res = self._executeY('mmlsfileset', opts_)
        # for v3.5
        # filesystemName:filesetName:id:rootInode:status:path:parentId:created:inodes:dataInKB:comment:
        # filesetMode:afmTarget:afmState:afmMode:afmFileLookupRefreshInterval:afmFileOpenRefreshInterval:
        # afmDirLookupRefreshInterval:afmDirOpenRefreshInterval:afmAsyncDelay:reserved:afmExpirationTimeout:afmRPO:
        # afmLastPSnapId:inodeSpace:isInodeSpaceOwner:maxInodes:allocInodes:inodeSpaceMask:afmShowHomeSnapshots:
        # afmNumReadThreads:afmNumReadGWs:afmReadBufferSize:afmWriteBufferSize:afmReadSparseThreshold:
        # afmParallelReadChunkSize:afmParallelReadThreshold:snapId:
        self.log.debug("list_filesets res keys = %s", res.keys())
        for (key, value) in res.items():
            info[key] = value

    datakeys = list(info.keys())
    datakeys.remove('filesystemName')
    datakeys.remove('id')

    fss = nub(info.get('filesystemName', []))
    res = dict([(fs, {}) for fs in fss])

    # build structure
    for idx, (fs, qid) in enumerate(zip(info['filesystemName'], info['id'])):
        details = dict([(k, info[k][idx]) for k in datakeys])
        res[fs][qid] = details

    self.gpfslocalfilesets = res
    return res
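
# Illustrative sketch (not part of the GPFS interface): the nested dict built by
# list_filesets has the shape described in its docstring, i.e. filesystemName -> fileset
# id -> remaining mmlsfileset header fields. The filesystem name 'scratch', the fileset
# id '1' and the field values below are made-up example data, shown only to clarify how
# such a result is typically traversed.
def _example_traverse_filesets():
    """Walk a result shaped like the one list_filesets builds (hypothetical data)."""
    filesets = {
        'scratch': {  # key = filesystemName
            '1': {  # key = fileset id
                'filesetName': 'gvo00001',  # remaining header entries and their values
                'status': 'Linked',
                'path': '/scratch/gvo00001',
            },
        },
    }
    for fs_name, filesets_by_id in filesets.items():
        for fileset_id, details in filesets_by_id.items():
            print(fs_name, fileset_id, details['filesetName'], details['path'])
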
def list_quota(self, devices=None):
    """Get quota info for all filesystems for all USR, GRP and FILESET quota.

    set self.gpfslocalquotas to dict:
        key = deviceName,
        value is dict with
            key = quotaType (USR | GRP | FILESET)
            value is dict with
                key = id,
                value is dict with
                    key = remaining header entries and corresponding values as a NamedTuple

    - GPFS 3.5 has the following fields in the output lines of mmrepquota (colon separated):
        - filesystemName
        - quotaType
        - id
        - name
        - blockUsage
        - blockQuota
        - blockLimit
        - blockInDoubt
        - blockGrace
        - filesUsage
        - filesQuota
        - filesLimit
        - filesInDoubt
        - filesGrace
        - remarks
        - quota
        - defQuota
        - fid
        - filesetname

    - GPFS 3.5 is also able to list multiple (e.g., USR) lines in different filesets.
    """
    if devices is None:
        devices = self.list_filesystems().keys()
    elif isinstance(devices, str):
        devices = [devices]

    listm = Monoid([], lambda xs, ys: xs + ys)  # not exactly the fastest mappend for lists ...
    info = MonoidDict(listm)
    for device in devices:
        res = self._executeY('mmrepquota', ['-n', device], prefix=True)
        for (key, value) in res.items():
            info[key] = value

    datakeys = list(info.keys())
    datakeys.remove('filesystemName')
    datakeys.remove('quotaType')
    datakeys.remove('id')

    fss = nub(info.get('filesystemName', []))
    self.log.debug("Found the following filesystem names: %s", fss)

    quotatypes = nub(info.get('quotaType', []))
    quotatypesstruct = dict([(qt, MonoidDict(Monoid([], lambda xs, ys: xs + ys))) for qt in quotatypes])

    res = dict([(fs, copy.deepcopy(quotatypesstruct)) for fs in fss])

    # build structure
    for idx, (fs, qt, qid) in enumerate(zip(info['filesystemName'], info['quotaType'], info['id'])):
        details = dict([(k, info[k][idx]) for k in datakeys])
        if qt == 'FILESET':
            # GPFS fileset quotas have an empty filesetName field
            details['filesetname'] = details['name']
        res[fs][qt][qid] = [GpfsQuota(**details)]

    self.gpfslocalquotas = res
    return res
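
# Illustrative sketch (not part of the GPFS interface): list_quota builds a nested dict
# keyed on deviceName -> quotaType -> id, where each entry holds a list with a GpfsQuota
# namedtuple. The namedtuple below is a simplified hypothetical stand-in carrying only a
# few of the mmrepquota fields listed in the docstring; the device name 'scratch', the id
# and all values are made-up example data used only to show the traversal pattern.
def _example_traverse_quota():
    """Walk a result shaped like the one list_quota builds (hypothetical data)."""
    from collections import namedtuple

    ExampleQuota = namedtuple('ExampleQuota', ['name', 'blockUsage', 'blockQuota', 'filesetname'])

    quota = {
        'scratch': {  # key = deviceName
            'USR': {  # key = quotaType (USR | GRP | FILESET)
                '2540075': [ExampleQuota(name='vsc40075', blockUsage='1024',
                                         blockQuota='2048', filesetname='gvo00001')],
            },
        },
    }
    for device, per_type in quota.items():
        for quota_type, per_id in per_type.items():
            for quota_id, entries in per_id.items():
                for entry in entries:
                    print(device, quota_type, quota_id, entry.blockUsage, entry.blockQuota)
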