def main():
    """Main script."""

    options = {
        'nagios-check-interval-threshold': NAGIOS_CHECK_INTERVAL_THRESHOLD,
        'mail-report': ('mail a report to the hpc-admin list with job list for gracing or inactive users',
                        None, 'store_true', False),
        'access_token': ('OAuth2 token to access the account page REST API', None, 'store', None),
        'account_page_url': ('URL of the account page where we can find the REST API', None, 'store', None)
    }
    opts = ExtendedSimpleOption(options)

    try:
        now = datetime.datetime.utcnow()
        timestamp = now - datetime.timedelta(days=1)
        client = AccountpageClient(token=opts.options.access_token, url=opts.options.account_page_url + "/api/")
        active_users, inactive_users = client.get_accounts()


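        # collect active accounts whose expiry date falls within the next 7 days ("gracing" users)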
        grace_users = []
        for a in active_users:
            try:
                if a.expiry_date and datetime.datetime.strptime(a.expiry_date, "%Y-%m-%d") - now < datetime.timedelta(days=7):
                    grace_users.append(a)
            except AttributeError:
                logger.debug("Account %s does not have expiry date", a.vsc_id)


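        # fetch all jobs from PBS; queued jobs of gracing/inactive users and running jobs of inactive users are removed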
        pbs_query = PBSQuery()

        t = time.ctime()
        jobs = pbs_query.getjobs()  # we just get them all

        removed_queued = remove_queued_jobs(jobs, grace_users, inactive_users)
        removed_running = remove_running_jobs(jobs, inactive_users)

        if opts.options.mail_report and not opts.options.dry_run:
            if len(removed_queued) > 0 or len(removed_running) > 0:
                mail_report(t, removed_queued, removed_running)
    except Exception as err:
        logger.exception("critical exception caught: %s" % (err))
        opts.critical("Script failed in a horrible way")
        sys.exit(NAGIOS_EXIT_CRITICAL)
def main():
    """
    Main script. Sets the hard quota limit for either a user or a VO.
    """
    options = {
        'storage': ("The storage system's name", None, 'store', None),
        'fileset': ("The fileset where you want to adjust the quota", None, 'store', None),
        'user': ('process users', None, 'store', None),
        'vo': ('process vos', None, 'store', None),
        'size': ('the target quota (in KiB)', int, 'store', None),
        'original': ('show the original quota values', None, 'store_true', False),
        'access_token': ('OAuth2 token to access the account page REST API', None, 'store', None),
    }

    opts = SimpleOption(options)
    client = AccountpageClient(token=opts.options.access_token)

    fileset = opts.options.fileset
    storage = opts.options.storage

    if not opts.options.size:
        print("Error: no size given (use --size, in KiB)")
        sys.exit(1)

    # TODO: could use some love in allowing a unit to be appended and converting to KiB prior to uploading
    size = opts.options.size

    if opts.options.user:
        # quota/user/%(vsc_id)s/storage/%(storage)s/fileset/%(fileset)s/size/$
        vsc_id = opts.options.user
        original = client.account[vsc_id].quota
        upload = client.quota.user[vsc_id].storage[storage].fileset[fileset].size
    elif opts.options.vo:
        vsc_id = opts.options.vo
        original = client.vo[vsc_id].quota
        upload = client.quota.vo[vsc_id].storage[storage].fileset[fileset].size
    else:
        print("Error: either --user or --vo must be given")
        sys.exit(1)

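    # fetch the current quota values from the account page REST API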
    current = original.get()
    if current[0] in (200,):
        if opts.options.original:
            print("Original values: %s" % (current[1],))
    else:
        print("Error, could not get original quota values for the given parameters")
        print("Issue: %s" % (current[1],))
        sys.exit(-1)

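    # upload the new hard quota limit via a PUT request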
    result = upload.put(body={"hard": size})
    if result[0] in (200,):
        print("Request OK.")
        print("New values: %s" % (result[1],))
    else:
        print("Request failed")
        print("Issue: %s" % (result[1],))


# Example 3
def main():
    """
    Main script.
    - build the filter
    - fetch the users
    - process the users
    - write the new timestamp if everything went OK
    - write the nagios check file
    """

    options = {
        'nagios-check-interval-threshold':
        NAGIOS_CHECK_INTERVAL_THRESHOLD,
        'storage': ('storage systems on which to deploy users and vos', None,
                    'extend', []),
        'user': ('process users', None, 'store_true', False),
        'vo': ('process vos', None, 'store_true', False),
        'access_token': ('OAuth2 token to access the account page REST API',
                         None, 'store', None),
        'account_page_url':
        ('URL of the account page where we can find the REST API', None,
         'store', None),
        'host_institute':
        ('Name of the institute where this script is being run', str, 'store',
         GENT),
    }

    opts = ExtendedSimpleOption(options)
    stats = {}

    try:
        now = datetime.utcnow()
        client = AccountpageClient(token=opts.options.access_token,
                                   url=opts.options.account_page_url + "/api/")

        try:
            last_timestamp = read_timestamp(SYNC_TIMESTAMP_FILENAME)
        except Exception:
            logger.exception("Something broke reading the timestamp from %s" %
                             SYNC_TIMESTAMP_FILENAME)
            last_timestamp = "200901010000Z"

        logger.info("Last recorded timestamp was %s" % (last_timestamp))
        last_timestamp = convert_to_unix_timestamp(last_timestamp)

        (users_ok, users_fail) = ([], [])
        (quota_ok, quota_fail) = ([], [])
        if opts.options.user:
            ugent_changed_accounts = client.account.institute['gent'].modified[
                last_timestamp].get()[1]

            logger.info(
                "Found %d UGent accounts that have changed in the accountpage since %s"
                % (len(ugent_changed_accounts), last_timestamp))

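            # collect the vsc_ids of the changed accounts and deduplicate them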
            ugent_accounts = [u['vsc_id'] for u in ugent_changed_accounts]
            ugent_accounts = nub(ugent_accounts)

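            # deploy the changed users on each storage system and record per-storage sync statistics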
            for storage_name in opts.options.storage:
                (users_ok,
                 users_fail) = process_users(opts.options, ugent_accounts,
                                             storage_name, client,
                                             opts.options.host_institute)
                stats["%s_users_sync" % (storage_name, )] = len(users_ok)
                stats["%s_users_sync_fail" %
                      (storage_name, )] = len(users_fail)
                stats["%s_users_sync_fail_warning" %
                      (storage_name, )] = STORAGE_USERS_LIMIT_WARNING
                stats["%s_users_sync_fail_critical" %
                      (storage_name, )] = STORAGE_USERS_LIMIT_CRITICAL

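            # likewise, sync the changed user quota per storage system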
            for storage_name in opts.options.storage:
                storage_changed_quota = [
                    mkVscUserSizeQuota(q) for q in client.quota.user.
                    storage[storage_name].modified[last_timestamp].get()[1]
                ]
                storage_changed_quota = [
                    q for q in storage_changed_quota
                    if q.fileset.startswith('vsc')
                ]
                logger.info(
                    "Found %d accounts that have changed quota on storage %s in the accountpage since %s",
                    len(storage_changed_quota), storage_name, last_timestamp)
                (quota_ok, quota_fail) = process_users_quota(
                    opts.options, storage_changed_quota, storage_name, client,
                    opts.options.host_institute)
                stats["%s_quota_sync" % (storage_name, )] = len(quota_ok)
                stats["%s_quota_sync_fail" %
                      (storage_name, )] = len(quota_fail)
                stats["%s_quota_sync_fail_warning" %
                      (storage_name, )] = STORAGE_QUOTA_LIMIT_WARNING
                stats["%s_quota_sync_fail_critical" %
                      (storage_name, )] = STORAGE_QUOTA_LIMIT_CRITICAL

        (vos_ok, vos_fail) = ([], [])
        if opts.options.vo:
            ugent_changed_vos = client.vo.modified[last_timestamp].get()[1]
            ugent_changed_vo_quota = client.quota.vo.modified[
                last_timestamp].get()[1]

            ugent_vos = sorted(
                set([v['vsc_id'] for v in ugent_changed_vos] + [
                    v['virtual_organisation'] for v in ugent_changed_vo_quota
                ]))

            logger.info(
                "Found %d UGent VOs that have changed in the accountpage since %s"
                % (len(ugent_changed_vos), last_timestamp))
            logger.info(
                "Found %d UGent VOs that have changed quota in the accountpage since %s"
                % (len(ugent_changed_vo_quota), last_timestamp))
            logger.debug(
                "Found the following UGent VOs: {vos}".format(vos=ugent_vos))

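            # deploy the changed VOs on each storage system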
            for storage_name in opts.options.storage:
                (vos_ok, vos_fail) = process_vos(opts.options, ugent_vos,
                                                 storage_name, client,
                                                 last_timestamp,
                                                 opts.options.host_institute)
                stats["%s_vos_sync" % (storage_name, )] = len(vos_ok)
                stats["%s_vos_sync_fail" % (storage_name, )] = len(vos_fail)
                stats["%s_vos_sync_fail_warning" %
                      (storage_name, )] = STORAGE_VO_LIMIT_WARNING
                stats["%s_vos_sync_fail_critical" %
                      (storage_name, )] = STORAGE_VO_LIMIT_CRITICAL

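        # only advance the timestamp when every sync succeeded, so failures are picked up again on the next run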
        if not (users_fail or quota_fail or vos_fail):
            (_, ldap_timestamp) = convert_timestamp(now)
            if not opts.options.dry_run:
                write_timestamp(SYNC_TIMESTAMP_FILENAME, ldap_timestamp)
    except Exception as err:
        logger.exception("critical exception caught: %s" % (err))
        opts.critical("Script failed in a horrible way")
        sys.exit(NAGIOS_EXIT_CRITICAL)

    opts.epilogue("UGent users and VOs synchronised", stats)


# Example 4
def main():
    """
    Main script. The usual.
    """

    options = {
        "nagios-check-interval-threshold":
        NAGIOS_CHECK_INTERVAL_THRESHOLD,
        "access_token": ("OAuth2 token to access the account page REST API",
                         None, "store", None),
        "account_page_url": (
            "URL of the account page where we can find the REST API",
            str,
            "store",
            "https://apivsc.ugent.be/django",
        ),
        'host_institute':
        ('Name of the institute where this script is being run', str, 'store',
         GENT),
        "clusters": (
            "Cluster(s) (comma-separated) to sync for. "
            "Overrides <host_institute>_SLURM_COMPUTE_CLUSTERS that are in production.",
            "strlist",
            "store",
            [],
        ),
        'start_timestamp':
        ('Timestamp to start the sync from', str, 'store', None),
        'cluster_classes':
        ('Classes of clusters that should be synced, comma-separated',
         "strlist", 'store', [PRODUCTION, PILOT])
    }

    opts = ExtendedSimpleOption(options)
    stats = {}

    (last_timestamp, start_time) = retrieve_timestamp_with_default(
        SYNC_TIMESTAMP_FILENAME, start_timestamp=opts.options.start_timestamp)
    logging.info("Using timestamp %s", last_timestamp)
    logging.info("Using startime %s", start_time)

    try:
        client = AccountpageClient(token=opts.options.access_token,
                                   url=opts.options.account_page_url + "/api/")
        host_institute = opts.options.host_institute

        slurm_account_info = get_slurm_acct_info(SyncTypes.accounts)
        slurm_user_info = get_slurm_acct_info(SyncTypes.users)

        logging.debug("%d accounts found", len(slurm_account_info))
        logging.debug("%d users found", len(slurm_user_info))

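        # an explicit --clusters list overrides the default set of clusters for the requested cluster classes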
        if opts.options.clusters:
            clusters = opts.options.clusters
        else:
            clusters = [
                cs for p in opts.options.cluster_classes
                for cs in VSC_SLURM_CLUSTERS[host_institute][p]
            ]
        sacctmgr_commands = []

        # All users belong to a VO, so fetching the VOs is necessary.
        account_page_vos = [
            mkVo(v)
            for v in client.vo.institute[opts.options.host_institute].get()[1]
        ]

        # make sure the institutes and the default accounts (VOs) are there for each cluster
        institute_vos = dict([
            (v.vsc_id, v) for v in account_page_vos
            if v.vsc_id in INSTITUTE_VOS_BY_INSTITUTE[host_institute].values()
        ])
        sacctmgr_commands += slurm_institute_accounts(slurm_account_info,
                                                      clusters, host_institute,
                                                      institute_vos)

        # The VOs do not track active state of users, so we need to fetch all accounts as well
        active_accounts = set(
            [a["vsc_id"] for a in client.account.get()[1] if a["isactive"]])

        # dictionary mapping the VO vsc_id on a tuple with the VO members and the VO itself
        account_page_members = dict([(vo.vsc_id, (set(vo.members), vo))
                                     for vo in account_page_vos])

        # process all regular VOs
        sacctmgr_commands += slurm_vo_accounts(account_page_vos,
                                               slurm_account_info, clusters,
                                               host_institute)

        # process VO members
        sacctmgr_commands += slurm_user_accounts(account_page_members,
                                                 active_accounts,
                                                 slurm_user_info, clusters,
                                                 opts.options.dry_run)

        logging.info("Executing %d commands", len(sacctmgr_commands))

        if opts.options.dry_run:
            print("Commands to be executed:\n")
            print("\n".join([" ".join(c) for c in sacctmgr_commands]))
        else:
            execute_commands(sacctmgr_commands)

        if not opts.options.dry_run:
            (_, ldap_timestamp) = convert_timestamp(start_time)
            write_timestamp(SYNC_TIMESTAMP_FILENAME, ldap_timestamp)
            opts.epilogue("Accounts synced to slurm", stats)
        else:
            logging.info("Dry run done")

    except Exception as err:
        logging.exception("critical exception caught: %s", err)
        opts.critical("Script failed in a horrible way")
        sys.exit(NAGIOS_EXIT_CRITICAL)
def main():

    options = {
        'nagios-check-interval-threshold':
        NAGIOS_CHECK_INTERVAL_THRESHOLD,
        'start-timestamp':
        ("The timestamp form which to start, otherwise use the cached value",
         None, "store", None),
        'access_token':
        ('OAuth2 token identifying the user with the accountpage', None,
         'store', None),
        'account_page_url': ('url for the account page', None, 'store', None),
        'start_timestamp':
        ('Timestamp to start the sync from', str, 'store', None),
    }
    # get access_token from conf file
    ExtendedSimpleOption.CONFIGFILES_INIT = ['/etc/account_page.conf']
    opts = ExtendedSimpleOption(options)
    stats = {}

    # Creating this here because this is a singleton class
    _ = LdapQuery(VscConfiguration(VSC_CONF_DEFAULT_FILENAME))

    (last_timestamp, start_time) = retrieve_timestamp_with_default(
        SYNC_TIMESTAMP_FILENAME, start_timestamp=opts.options.start_timestamp)
    logging.info("Using timestamp %s", last_timestamp)
    logging.info("Using startime %s", start_time)

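    # fork: the child drops privileges to the apache user and performs the sync; the parent waits for it and reports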
    try:
        parent_pid = os.fork()
        logging.info("Forked.")
    except OSError:
        logging.exception("Could not fork")
        parent_pid = 1
    except Exception:
        logging.exception("Oops")
        parent_pid = 1

    if parent_pid == 0:
        try:
            global logger
            logger = fancylogger.getLogger(NAGIOS_HEADER)
            # drop privileges in the child
            try:
                apache_uid = pwd.getpwnam('apache').pw_uid
                apache_gid = grp.getgrnam('apache').gr_gid

                os.setgroups([])
                os.setgid(apache_gid)
                os.setuid(apache_uid)

                logging.info("Now running as %s" % (os.geteuid(), ))
            except OSError:
                logger.raiseException("Could not drop privileges")

            client = AccountpageClient(token=opts.options.access_token,
                                       url=opts.options.account_page_url +
                                       '/api/')
            syncer = LdapSyncer(client)
            last = last_timestamp
            altered_accounts = syncer.sync_altered_accounts(
                last, opts.options.dry_run)

            logging.debug("Altered accounts: %s", altered_accounts)

            altered_groups = syncer.sync_altered_groups(
                last, opts.options.dry_run)

            logging.debug("Altered groups: %s" % altered_groups)

            if not altered_accounts[ERROR] \
                    and not altered_groups[ERROR]:
                logging.info("Child process exiting correctly")
                sys.exit(0)
            else:
                logging.info("Child process exiting with status -1")
                logging.warning("Error occured in %s" % ([
                    "%s: %s\n" % (k, v) for (k, v) in [
                        ("altered accounts", altered_accounts[ERROR]),
                        ("altered groups", altered_groups[ERROR]),
                    ]
                ]))
                sys.exit(-1)
        except Exception:
            logging.exception("Child caught an exception")
            sys.exit(-1)

    else:
        # parent
        (_, result) = os.waitpid(parent_pid, 0)
        logging.info("Child exited with exit code %d" % (result, ))

        if not result and not opts.options.dry_run:
            (_, ldap_timestamp) = convert_timestamp(start_time)
            write_timestamp(SYNC_TIMESTAMP_FILENAME, ldap_timestamp)
            opts.epilogue("Synchronised LDAP users to the Django DB", stats)
        else:
            sys.exit(NAGIOS_EXIT_CRITICAL)
def main():

    options = {
        'nagios-check-interval-threshold':
        NAGIOS_CHECK_INTERVAL_THRESHOLD,
        'start-timestamp':
        ("The timestamp form which to start, otherwise use the cached value",
         None, "store", None),
        'access_token':
        ('OAuth2 token identifying the user with the accountpage', None,
         'store', None),
        'account_page_url': ('url for the account page', None, 'store', None),
    }
    # get access_token from conf file
    ExtendedSimpleOption.CONFIGFILES_INIT = ['/etc/account_page.conf']
    opts = ExtendedSimpleOption(options)
    stats = {}

    # Creating this here because this is a singleton class
    _ = LdapQuery(VscConfiguration(VSC_CONF_DEFAULT_FILENAME))

    last_timestamp = opts.options.start_timestamp
    if not last_timestamp:
        try:
            last_timestamp = read_timestamp(SYNC_TIMESTAMP_FILENAME)
        except Exception:
            _log.warning("Something broke reading the timestamp from %s",
                         SYNC_TIMESTAMP_FILENAME)
            last_timestamp = "201710230000Z"
            _log.warning(
                "We will resync from a hardcoded known-working timestamp a while back: %s",
                last_timestamp)

    _log.info("Using timestamp %s", last_timestamp)
    # record the start time before fetching, with a 10 s safety buffer, so accounts approved between
    # requesting the modified users and writing out the new timestamp are not missed on the next run
    start_time = datetime.datetime.now() + datetime.timedelta(seconds=-10)
    _log.info("start time %s", start_time)

    try:
        parent_pid = os.fork()
        _log.info("Forked.")
    except OSError:
        _log.exception("Could not fork")
        parent_pid = 1
    except Exception:
        _log.exception("Oops")
        parent_pid = 1

    if parent_pid == 0:
        try:
            global _log
            _log = fancylogger.getLogger(NAGIOS_HEADER)
            # drop privileges in the child
            try:
                apache_uid = pwd.getpwnam('apache').pw_uid
                apache_gid = grp.getgrnam('apache').gr_gid

                os.setgroups([])
                os.setgid(apache_gid)
                os.setuid(apache_uid)

                _log.info("Now running as %s" % (os.geteuid(), ))
            except OSError:
                _log.raiseException("Could not drop privileges")

            client = AccountpageClient(token=opts.options.access_token,
                                       url=opts.options.account_page_url +
                                       '/api/')
            syncer = LdapSyncer(client)
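            # convert the LDAP-style timestamp (YYYYMMDDHHMMSSZ) to seconds since the Unix epoch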
            last = int(
                (datetime.datetime.strptime(last_timestamp, "%Y%m%d%H%M%SZ") -
                 datetime.datetime(1970, 1, 1)).total_seconds())
            altered_accounts = syncer.sync_altered_accounts(
                last, opts.options.dry_run)

            _log.debug("Altered accounts: %s", altered_accounts)

            altered_groups = syncer.sync_altered_groups(
                last, opts.options.dry_run)

            _log.debug("Altered groups: %s" % altered_groups)

            if not altered_accounts[ERROR] \
                    and not altered_groups[ERROR]:
                _log.info("Child process exiting correctly")
                sys.exit(0)
            else:
                _log.info("Child process exiting with status -1")
                _log.warning("Error occured in %s" % ([
                    "%s: %s\n" % (k, v) for (k, v) in [
                        ("altered accounts", altered_accounts[ERROR]),
                        ("altered groups", altered_groups[ERROR]),
                    ]
                ]))
                sys.exit(-1)
        except Exception:
            _log.exception("Child caught an exception")
            sys.exit(-1)

    else:
        # parent
        (_, result) = os.waitpid(parent_pid, 0)
        _log.info("Child exited with exit code %d" % (result, ))

        if not result:
            if not opts.options.start_timestamp:
                (_, ldap_timestamp) = convert_timestamp(start_time)
                if not opts.options.dry_run:
                    write_timestamp(SYNC_TIMESTAMP_FILENAME, ldap_timestamp)
            else:
                _log.info(
                    "Not updating the timestamp, since one was provided on the command line"
                )
            opts.epilogue("Synchronised LDAP users to the Django DB", stats)
        else:
            _log.error("Child process failed; not updating the timestamp")
            sys.exit(NAGIOS_EXIT_CRITICAL)


# Example 7
def main():
    """
    Main script.
    - build the filter
    - fetch the users
    - process the users
    - write the new timestamp if everything went OK
    - write the nagios check file
    """

    options = {
        'nagios-check-interval-threshold':
        NAGIOS_CHECK_INTERVAL_THRESHOLD,
        'storage': ('storage systems on which to deploy users and vos', None,
                    'extend', []),
        'user': ('process users', None, 'store_true', False),
        'vo': ('process vos', None, 'store_true', False),
        'access_token': ('OAuth2 token to access the account page REST API',
                         None, 'store', None),
        'account_page_url':
        ('URL of the account page where we can find the REST API', None,
         'store', None),
        'host_institute':
        ('Name of the institute where this script is being run', str, 'store',
         GENT),
        'start_timestamp':
        ('Timestamp to start the sync from', str, 'store', None),
    }

    opts = ExtendedSimpleOption(options)
    stats = {}

    (last_timestamp, start_time) = retrieve_timestamp_with_default(
        SYNC_TIMESTAMP_FILENAME, start_timestamp=opts.options.start_timestamp)
    logging.info("Using timestamp %s", last_timestamp)
    logging.info("Using startime %s", start_time)

    try:
        client = AccountpageClient(token=opts.options.access_token,
                                   url=opts.options.account_page_url + "/api/")

        institute = opts.options.host_institute

        (users_ok, users_fail) = ([], [])
        (quota_ok, quota_fail) = ([], [])
        if opts.options.user:
            changed_accounts = client.account.institute[institute].modified[
                last_timestamp].get()[1]

            logging.info(
                "Found %d %s accounts that have changed in the accountpage since %s"
                % (len(changed_accounts), institute, last_timestamp))

            accounts = nub([u['vsc_id'] for u in changed_accounts])

            for storage_name in opts.options.storage:
                (users_ok, users_fail) = process_users(opts.options, accounts,
                                                       storage_name, client,
                                                       institute)
                stats["%s_users_sync" % (storage_name, )] = len(users_ok)
                stats["%s_users_sync_fail" %
                      (storage_name, )] = len(users_fail)
                stats["%s_users_sync_fail_warning" %
                      (storage_name, )] = STORAGE_USERS_LIMIT_WARNING
                stats["%s_users_sync_fail_critical" %
                      (storage_name, )] = STORAGE_USERS_LIMIT_CRITICAL

            for storage_name in opts.options.storage:
                storage_changed_quota = [
                    mkVscUserSizeQuota(q) for q in client.quota.user.
                    storage[storage_name].modified[last_timestamp].get()[1]
                ]
                storage_changed_quota = [
                    q for q in storage_changed_quota
                    if q.fileset.startswith('vsc')
                ]
                logging.info(
                    "Found %d accounts that have changed quota on storage %s in the accountpage since %s",
                    len(storage_changed_quota), storage_name, last_timestamp)
                (quota_ok, quota_fail) = process_users_quota(
                    opts.options, storage_changed_quota, storage_name, client,
                    institute)
                stats["%s_quota_sync" % (storage_name, )] = len(quota_ok)
                stats["%s_quota_sync_fail" %
                      (storage_name, )] = len(quota_fail)
                stats["%s_quota_sync_fail_warning" %
                      (storage_name, )] = STORAGE_QUOTA_LIMIT_WARNING
                stats["%s_quota_sync_fail_critical" %
                      (storage_name, )] = STORAGE_QUOTA_LIMIT_CRITICAL

        (vos_ok, vos_fail) = ([], [])
        if opts.options.vo:
            changed_vos = client.vo.institute[institute].modified[
                last_timestamp].get()[1]
            changed_vo_quota = client.quota.vo.modified[last_timestamp].get(
            )[1]

            vos = sorted(
                set([v['vsc_id'] for v in changed_vos] +
                    [v['virtual_organisation'] for v in changed_vo_quota]))

            logging.info(
                "Found %d %s VOs that have changed in the accountpage since %s"
                % (len(changed_vos), institute, last_timestamp))
            logging.info(
                "Found %d %s VOs that have changed quota in the accountpage since %s"
                % (len(changed_vo_quota), institute, last_timestamp))
            logging.debug("Found the following {institute} VOs: {vos}".format(
                institute=institute, vos=vos))

            for storage_name in opts.options.storage:
                (vos_ok, vos_fail) = process_vos(opts.options, vos,
                                                 storage_name, client,
                                                 last_timestamp, institute)
                stats["%s_vos_sync" % (storage_name, )] = len(vos_ok)
                stats["%s_vos_sync_fail" % (storage_name, )] = len(vos_fail)
                stats["%s_vos_sync_fail_warning" %
                      (storage_name, )] = STORAGE_VO_LIMIT_WARNING
                stats["%s_vos_sync_fail_critical" %
                      (storage_name, )] = STORAGE_VO_LIMIT_CRITICAL

        if not (users_fail or quota_fail
                or vos_fail) and not opts.options.dry_run:
            (_, ldap_timestamp) = convert_timestamp(start_time)
            write_timestamp(SYNC_TIMESTAMP_FILENAME, ldap_timestamp)
    except Exception as err:
        logging.exception("critical exception caught: %s" % (err))
        opts.critical("Script failed in a horrible way")
        sys.exit(NAGIOS_EXIT_CRITICAL)

    opts.epilogue("%s users and VOs synchronised" % institute, stats)


# Example 8
def main():
    """Main script"""

    options = {
        'nagios-check-interval-threshold':
        NAGIOS_CHECK_INTERVAL_THRESHOLD,
        'storage': ('the VSC filesystems that are checked by this script',
                    None, 'extend', []),
        'write-cache': ('Write the data into the cache files in the FS', None,
                        'store_true', False),
        'account_page_url': ('Base URL of the account page', None, 'store',
                             'https://account.vscentrum.be/django'),
        'access_token': ('OAuth2 token to access the account page REST API',
                         None, 'store', None),
        'host_institute':
        ('Name of the institute where this script is being run', str, 'store',
         GENT),
    }
    opts = ExtendedSimpleOption(options)
    logger = opts.log

    try:
        client = AccountpageClient(token=opts.options.access_token)

        user_id_map = map_uids_to_names()  # is this really necessary?
        gpfs = GpfsOperations()
        storage = VscStorage()

        target_filesystems = [
            storage[s].filesystem for s in opts.options.storage
        ]

        filesystems = gpfs.list_filesystems(device=target_filesystems).keys()
        logger.debug("Found the following GPFS filesystems: %s" %
                     (filesystems))

        filesets = gpfs.list_filesets(devices=target_filesystems)
        logger.debug("Found the following GPFS filesets: %s" % (filesets))

        quota = gpfs.list_quota(devices=target_filesystems)
        exceeding_filesets = {}
        exceeding_users = {}
        stats = {}

        for storage_name in opts.options.storage:

            logger.info("Processing quota for storage_name %s" %
                        (storage_name))
            filesystem = storage[storage_name].filesystem
            replication_factor = storage[storage_name].data_replication_factor

            if filesystem not in filesystems:
                logger.error("Non-existent filesystem %s" % (filesystem))
                continue

            if filesystem not in quota.keys():
                logger.error("No quota defined for storage_name %s [%s]" %
                             (storage_name, filesystem))
                continue

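            # build the per-fileset ('FILESET') and per-user ('USR') quota maps for this filesystem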
            quota_storage_map = get_mmrepquota_maps(
                quota[filesystem],
                storage_name,
                filesystem,
                filesets,
                replication_factor,
            )

            exceeding_filesets[storage_name] = process_fileset_quota(
                storage,
                gpfs,
                storage_name,
                filesystem,
                quota_storage_map['FILESET'],
                client,
                dry_run=opts.options.dry_run,
                institute=opts.options.host_institute)

            exceeding_users[storage_name] = process_user_quota(
                storage,
                gpfs,
                storage_name,
                None,
                quota_storage_map['USR'],
                user_id_map,
                client,
                dry_run=opts.options.dry_run,
                institute=opts.options.host_institute)

            stats["%s_fileset_critical" %
                  (storage_name, )] = QUOTA_FILESETS_CRITICAL
            if exceeding_filesets[storage_name]:
                stats["%s_fileset" % (storage_name, )] = 1
                logger.warning(
                    "storage_name %s found %d filesets that are exceeding their quota",
                    storage_name, len(exceeding_filesets[storage_name]))
                for (e_fileset, e_quota) in exceeding_filesets[storage_name]:
                    logger.warning("%s has quota %s" %
                                   (e_fileset, str(e_quota)))
            else:
                stats["%s_fileset" % (storage_name, )] = 0
                logger.debug(
                    "storage_name %s found no filesets that are exceeding their quota"
                    % storage_name)

            stats["%s_users_warning" % (storage_name, )] = QUOTA_USERS_WARNING
            stats["%s_users_critical" %
                  (storage_name, )] = QUOTA_USERS_CRITICAL
            if exceeding_users[storage_name]:
                stats["%s_users" % (storage_name, )] = len(
                    exceeding_users[storage_name])
                logger.warning(
                    "storage_name %s found %d users who are exceeding their quota"
                    % (storage_name, len(exceeding_users[storage_name])))
                for (e_user_id, e_quota) in exceeding_users[storage_name]:
                    logger.warning("%s has quota %s" %
                                   (e_user_id, str(e_quota)))
            else:
                stats["%s_users" % (storage_name, )] = 0
                logger.debug(
                    "storage_name %s found no users who are exceeding their quota"
                    % storage_name)

    except Exception as err:
        logger.exception("critical exception caught: %s" % (err))
        opts.critical("Script failed in a horrible way")
        sys.exit(NAGIOS_EXIT_CRITICAL)

    opts.epilogue("quota check completed", stats)


# Example 9
def main():
    # Collect all info

    # Note: debug option is provided by generaloption
    # Note: other settings, e.g. for each cluster, will be obtained from the configuration file
    options = {
        'nagios-check-interval-threshold':
        NAGIOS_CHECK_INTERVAL_THRESHOLD,
        'hosts':
        ('the hosts/clusters that should be contacted for job information',
         None, 'extend', []),
        'location': ('the location for storing the pickle file: delcatty, muk',
                     str, 'store', 'delcatty'),
        'access_token':
        ('the token that will allow authentication against the account page',
         None, 'store', None),
        'account_page_url': ('the URL at which the account page resides', None, 'store', None),
        'target_master':
        ('the master used to execute showq commands', None, 'store', None),
        'target_user':
        ('the user for ssh to the target master', None, 'store', None),
    }

    opts = ExtendedSimpleOption(options)

    try:
        rest_client = AccountpageClient(token=opts.options.access_token)

        gpfs = GpfsOperations()
        storage = VscStorage()
        storage_name = cluster_user_pickle_store_map[opts.options.location]
        login_mount_point = storage[storage_name].login_mount_point
        gpfs_mount_point = storage[storage_name].gpfs_mount_point

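        # read each cluster's master host and checkjob path from the configuration file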
        clusters = {}
        for host in opts.options.hosts:
            master = opts.configfile_parser.get(host, "master")
            checkjob_path = opts.configfile_parser.get(host, "checkjob_path")
            clusters[host] = {'master': master, 'path': checkjob_path}

        checkjob = SshCheckjob(opts.options.target_master,
                               opts.options.target_user,
                               clusters,
                               cache_pickle=True,
                               dry_run=opts.options.dry_run)

        (job_information, _, _) = checkjob.get_moab_command_information()

        active_users = job_information.keys()

        logger.debug("Active users: %s" % (active_users))
        logger.debug("Checkjob information: %s" % (job_information))

        nagios_user_count = 0
        nagios_no_store = 0

        stats = {}

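        # write each active user's checkjob information to a cache file on GPFS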
        for user in active_users:
            path = get_pickle_path(opts.options.location, user, rest_client)
            try:
                user_queue_information = CheckjobInfo(
                    {user: job_information[user]})
                store_on_gpfs(user, path, "checkjob", user_queue_information,
                              gpfs, login_mount_point, gpfs_mount_point,
                              ".checkjob.json.gz", opts.options.dry_run)
                nagios_user_count += 1
            except Exception:
                logger.exception("Could not store cache file for user %s" %
                                 (user))
                nagios_no_store += 1
        stats["store_users"] = nagios_user_count
        stats["store_fail"] = nagios_no_store
        stats["store_fail_critical"] = STORE_LIMIT_CRITICAL
    except Exception as err:
        logger.exception("critical exception caught: %s" % (err))
        opts.critical("Script failed in a horrible way")
        sys.exit(NAGIOS_EXIT_CRITICAL)


# Example 10
def main():
    """
    Main script. The usual.
    """

    options = {
        "nagios-check-interval-threshold":
        NAGIOS_CHECK_INTERVAL_THRESHOLD,
        "access_token": ("OAuth2 token to access the account page REST API",
                         None, "store", None),
        "account_page_url": (
            "URL of the account page where we can find the REST API",
            str,
            "store",
            "https://apivsc.ugent.be/django",
        ),
        "clusters": (
            "Cluster(s) (comma-separated) to sync for. "
            "Overrides GENT_SLURM_COMPUTE_CLUSTERS that are in production.",
            str,
            "store",
            None,
        ),
    }

    opts = ExtendedSimpleOption(options)
    stats = {}

    try:
        client = AccountpageClient(token=opts.options.access_token,
                                   url=opts.options.account_page_url + "/api/")

        last_timestamp = "201804010000Z"  # the beginning of time

        logging.info("Last recorded timestamp was %s" % (last_timestamp))

        slurm_account_info = get_slurm_acct_info(SyncTypes.accounts)
        slurm_user_info = get_slurm_acct_info(SyncTypes.users)

        logging.debug("%d accounts found", len(slurm_account_info))
        logging.debug("%d users found", len(slurm_user_info))

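        # an explicit --clusters list overrides the default GENT production compute clusters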
        if opts.options.clusters is not None:
            clusters = opts.options.clusters.split(",")
        else:
            clusters = [
                c for c in GENT_SLURM_COMPUTE_CLUSTERS
                if c in GENT_PRODUCTION_COMPUTE_CLUSTERS
            ]

        sacctmgr_commands = []

        # make sure the institutes and the default accounts (VOs) are there for each cluster
        sacctmgr_commands += slurm_institute_accounts(slurm_account_info,
                                                      clusters)

        # All users belong to a VO, so fetching the VOs is necessary.
        account_page_vos = [mkVo(v) for v in client.vo.get()[1]]

        # The VOs do not track active state of users, so we need to fetch all accounts as well
        active_accounts = set(
            [a["vsc_id"] for a in client.account.get()[1] if a["isactive"]])

        # dictionary mapping the VO vsc_id on a tuple with the VO members and the VO itself
        account_page_members = dict([(vo.vsc_id, (set(vo.members), vo))
                                     for vo in account_page_vos])

        # process all regular VOs
        sacctmgr_commands += slurm_vo_accounts(account_page_vos,
                                               slurm_account_info, clusters)

        # process VO members
        sacctmgr_commands += slurm_user_accounts(account_page_members,
                                                 active_accounts,
                                                 slurm_user_info, clusters,
                                                 opts.options.dry_run)

        logging.info("Executing %d commands", len(sacctmgr_commands))

        if opts.options.dry_run:
            print("Commands to be executed:\n")
            print("\n".join([" ".join(c) for c in sacctmgr_commands]))
        else:
            execute_commands(sacctmgr_commands)

    except Exception as err:
        logging.exception("critical exception caught: %s" % (err))
        opts.critical("Script failed in a horrible way")
        sys.exit(NAGIOS_EXIT_CRITICAL)

    if not opts.options.dry_run:
        opts.epilogue("Accounts synced to slurm", stats)
    else:
        logger.info("Dry run done")


# Example 11
def main():
    # Collect all info

    # Note: debug option is provided by generaloption
    # Note: other settings, e.g. for each cluster, will be obtained from the configuration file
    options = {
        'nagios-check-interval-threshold':
        NAGIOS_CHECK_INTERVAL_THRESHOLD,
        'hosts':
        ('the hosts/clusters that should be contacted for job information',
         None, 'extend', []),
        'information': ('the sort of information to store: user, vo, project',
                        None, 'store', 'user'),
        'location': ('the location for storing the pickle file: delcatty, muk',
                     str, 'store', 'delcatty'),
        'account_page_url':
        ('the URL at which the account page resides', None, 'store', None),
        'access_token':
        ('the token that will allow authentication against the account page',
         None, 'store', None),
        'target_master':
        ('the master used to execute showq commands', None, 'store', None),
        'target_user': ('the user for ssh to the target master', None, 'store',
                        None),
    }

    opts = ExtendedSimpleOption(options)

    try:
        rest_client = AccountpageClient(token=opts.options.access_token)

        gpfs = GpfsOperations()
        storage = VscStorage()
        storage_name = cluster_user_pickle_store_map[opts.options.location]
        login_mount_point = storage[storage_name].login_mount_point
        gpfs_mount_point = storage[storage_name].gpfs_mount_point

        clusters = {}
        for host in opts.options.hosts:
            master = opts.configfile_parser.get(host, "master")
            showq_path = opts.configfile_parser.get(host, "showq_path")
            clusters[host] = {'master': master, 'path': showq_path}

        logger.debug("clusters = %s" % (clusters, ))
        showq = SshShowq(opts.options.target_master,
                         opts.options.target_user,
                         clusters,
                         cache_pickle=True,
                         dry_run=opts.options.dry_run)

        logger.debug("Getting showq information ...")

        (queue_information, _, _) = showq.get_moab_command_information()
        timeinfo = time.time()

        active_users = queue_information.keys()

        logger.debug("Active users: %s" % (active_users))
        logger.debug("Queue information: %s" % (queue_information))

        # We need to determine which users should get an updated pickle. This depends on
        # - the active user set
        # - the information we want to provide on the cluster(set) where this script runs
        # At the same time, we need to determine the job information each user gets to see
        tup = (opts.options.information, active_users, queue_information,
               rest_client)
        (target_users, target_queue_information,
         user_map) = determine_target_information(*tup)

        nagios_user_count = 0
        nagios_no_store = 0

        stats = {}

        for user in target_users:
            try:
                path = get_pickle_path(opts.options.location, user,
                                       rest_client)
                user_queue_information = target_queue_information[user]
                user_queue_information['timeinfo'] = timeinfo
                store_on_gpfs(user, path, "showq",
                              (user_queue_information, user_map[user]), gpfs,
                              login_mount_point, gpfs_mount_point,
                              ".showq.json.gz", opts.options.dry_run)
                nagios_user_count += 1
            except Exception:
                logger.error("Could not store pickle file for user %s" %
                             (user))
                nagios_no_store += 1

        stats["store_users"] = nagios_user_count
        stats["store_fail"] = nagios_no_store
        stats["store_fail_critical"] = STORE_LIMIT_CRITICAL
    except Exception as err:
        logger.exception("critical exception caught: %s" % (err))
        opts.critical("Script failed in a horrible way")
        sys.exit(NAGIOS_EXIT_CRITICAL)


# Example 12
def get_vsc_record(username, vsc_token, logger=None):
    """
    Retrieve and update list of VSC users with data from VSC account page
    - username: (string) VSC ID or institute user of the VSC account
    - vsc_token: (string) access token to VSC account page
    - logger: (object) fancylogger object of the caller
    """
    if logger is None:
        logger = fancylogger.getLogger()

    vsc_api_client = AccountpageClient(token=vsc_token)

    # Get institute login of the VSC account attached to this username
    if username.startswith('vsc') and len(username) > 3 and username[3].isdigit():
        # VSC ID: query institute login to VSC account page
        logger.debug(f"[{username}] user treated as VSC ID")
        try:
            vsc_account = vsc_api_client.account[username].person.get()[1]
        except HTTPError as err:
            if err.code == 404:
                error_exit(
                    logger,
                    f"[{username}] VSC ID not found in VSC account page")
            else:
                error_exit(logger, f"[{username}] {err}")
        except (TimeoutError, URLError) as err:
            error_exit(
                logger,
                f"[{username}] connection to VSC account page failed: {err}")
        else:
            vsc_login = {
                'username': vsc_account['institute_login'],
                'site': vsc_account['institute']['name']
            }
            logger.debug(
                f"[{username}] VSC ID belongs to VSC account '{vsc_login['username']}'"
            )
    else:
        # Others: assume NetID from Brussels
        logger.debug(f"[{username}] user treated as NetID")
        vsc_login = {'username': username, 'site': BRUSSEL}

    # Retrieve user data from VSC account page
    try:
        vsc_account = vsc_api_client.account.institute[vsc_login['site']].id[
            vsc_login['username']].get()[1]
    except HTTPError as err:
        if err.code == 404:
            logger.debug(
                f"[{username}] with VSC account '{vsc_login['username']}' not found"
            )
            return None
        else:
            error_exit(logger, f"[{username}] {err}")
    except (TimeoutError, URLError) as err:
        error_exit(logger,
                   f"[{username}] connection to VSC account page failed: {err}")
    else:
        logger.debug(
            f"[{username}] user account record retrieved from VSC account '{vsc_login['username']}'"
        )

        user_record = {
            # only use first entry of research field
            'field': vsc_account['research_field'][0],
            'site':
            INSTITUTE_LONGNAME[vsc_account['person']['institute']['name']],
            'updated': date.today().isoformat(),
        }

        return user_record