def test_map_uids_to_names(self, mock_getpwall):
    """Check that the remapping functions properly."""
    # Fake pwd entries; presumably index 0 holds the name field and
    # index 2 the uid field, so the map is entry[2] -> entry[0] — the
    # expected dict below encodes exactly that. TODO(review): confirm
    # against tools.map_uids_to_names.
    fake_pw_entries = [(1, 2, 3), (4, 5, 6), (7, 8, 9)]
    mock_getpwall.return_value = fake_pw_entries

    expected = {3: 1, 6: 4, 9: 7}
    self.assertEqual(tools.map_uids_to_names(), expected)
def main():
    """Main script.

    Checks quota usage on the configured GPFS storage systems: lists
    filesystems, filesets and quota via GPFS operations, reports any
    filesets and users exceeding their quota, and feeds per-storage
    warning/critical statistics to the monitoring epilogue.
    """
    options = {
        'nagios-check-interval-threshold': NAGIOS_CHECK_INTERVAL_THRESHOLD,
        'storage': ('the VSC filesystems that are checked by this script', None, 'extend', []),
        'write-cache': ('Write the data into the cache files in the FS', None, 'store_true', False),
        'account_page_url': ('Base URL of the account page', None, 'store',
                             'https://account.vscentrum.be/django'),
        'access_token': ('OAuth2 token to access the account page REST API', None, 'store', None),
        'host_institute': ('Name of the institute where this script is being run', str, 'store', GENT),
    }
    opts = ExtendedSimpleOption(options)
    logger = opts.log

    try:
        client = AccountpageClient(token=opts.options.access_token)

        user_id_map = map_uids_to_names()  # is this really necessary?
        gpfs = GpfsOperations()
        storage = VscStorage()

        # Only query the filesystems backing the requested storage names.
        target_filesystems = [storage[s].filesystem for s in opts.options.storage]

        filesystems = gpfs.list_filesystems(device=target_filesystems).keys()
        logger.debug("Found the following GPFS filesystems: %s", filesystems)

        filesets = gpfs.list_filesets(devices=target_filesystems)
        logger.debug("Found the following GPFS filesets: %s", filesets)

        quota = gpfs.list_quota(devices=target_filesystems)
        exceeding_filesets = {}
        exceeding_users = {}
        stats = {}

        for storage_name in opts.options.storage:
            logger.info("Processing quota for storage_name %s", storage_name)
            filesystem = storage[storage_name].filesystem
            replication_factor = storage[storage_name].data_replication_factor

            if filesystem not in filesystems:
                logger.error("Non-existent filesystem %s", filesystem)
                continue

            if filesystem not in quota:
                logger.error("No quota defined for storage_name %s [%s]", storage_name, filesystem)
                continue

            quota_storage_map = get_mmrepquota_maps(
                quota[filesystem],
                storage_name,
                filesystem,
                filesets,
                replication_factor,
            )

            exceeding_filesets[storage_name] = process_fileset_quota(
                storage, gpfs, storage_name, filesystem, quota_storage_map['FILESET'], client,
                dry_run=opts.options.dry_run, institute=opts.options.host_institute)
            exceeding_users[storage_name] = process_user_quota(
                storage, gpfs, storage_name, None, quota_storage_map['USR'], user_id_map, client,
                dry_run=opts.options.dry_run, institute=opts.options.host_institute)

            stats["%s_fileset_critical" % (storage_name,)] = QUOTA_FILESETS_CRITICAL
            if exceeding_filesets[storage_name]:
                stats["%s_fileset" % (storage_name,)] = 1
                # BUG FIX: count this storage's exceeding filesets, not the
                # entries in the per-storage dict (which counted storages).
                logger.warning("storage_name %s found %d filesets that are exceeding their quota",
                               storage_name, len(exceeding_filesets[storage_name]))
                for (e_fileset, e_quota) in exceeding_filesets[storage_name]:
                    logger.warning("%s has quota %s", e_fileset, e_quota)
            else:
                stats["%s_fileset" % (storage_name,)] = 0
                logger.debug("storage_name %s found no filesets that are exceeding their quota",
                             storage_name)

            stats["%s_users_warning" % (storage_name,)] = QUOTA_USERS_WARNING
            stats["%s_users_critical" % (storage_name,)] = QUOTA_USERS_CRITICAL
            if exceeding_users[storage_name]:
                stats["%s_users" % (storage_name,)] = len(exceeding_users[storage_name])
                logger.warning("storage_name %s found %d users who are exceeding their quota",
                               storage_name, len(exceeding_users[storage_name]))
                for (e_user_id, e_quota) in exceeding_users[storage_name]:
                    logger.warning("%s has quota %s", e_user_id, e_quota)
            else:
                stats["%s_users" % (storage_name,)] = 0
                logger.debug("storage_name %s found no users who are exceeding their quota",
                             storage_name)
    except Exception as err:
        logger.exception("critical exception caught: %s", err)
        # NOTE(review): presumably opts.critical() reports and exits; the
        # epilogue below is then only reached on success — confirm.
        opts.critical("Script failed in a horrible way")

    opts.epilogue("quota check completed", stats)