Example #1
def main():
    """ Builds a zookeeper tree with ACLS on from a config file"""
    options = {
        'servers':('list of zk servers', 'strlist', 'store', None)
    }
    go = simple_option(options)

    rpasswd, rpath = get_rootinfo(go.configfile_remainder)
    znodes, users = parse_zkconfig(go.configfile_remainder)

    logger.debug("znodes: %s" % znodes)
    logger.debug("users: %s" % users)

    # Connect to zookeeper
    # initial authentication credentials and acl for admin on root level
    acreds = [('digest', 'root:' + rpasswd)]
    root_acl = make_digest_acl('root', rpasswd, all=True)

    # Create kazoo/zookeeper connection with root credentials
    servers = go.options.servers
    zkclient = VscKazooClient(servers, auth_data=acreds)

    # Iterate paths
    for path, attrs in znodes.iteritems():
        logger.debug("path %s attribs %s" % (path, attrs))
        acls = dict((arg, attrs[arg]) for arg in attrs if arg not in ('value', 'ephemeral', 'sequence', 'makepath'))
        acl_list = parse_acls(acls, users, root_acl)
        kwargs = dict((arg, attrs[arg]) for arg in attrs if arg in ('ephemeral', 'sequence', 'makepath'))
        if not zkclient.exists_znode(path):
            zkclient.make_znode(path, value=attrs.get('value', ''), acl=acl_list, **kwargs)
        else:
            logger.warning('node %s already exists' % path)
            zkclient.znode_acls(path, acl_list)

    zkclient.exit()
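
All of these scripts share the same `simple_option` convention: each key in the options dict is a long option name, mapped to a tuple of (help description, type, action, default value, optional short option) — the format spelled out in the comment in Example #17 below. A minimal, self-contained sketch of that pattern:

from vsc.utils.generaloption import simple_option

# {longopt: (help_description, type, action, default_value, shortopt)}
options = {
    'servers': ('list of zk servers', 'strlist', 'store', None),
    'verbose': ('print debug output', None, 'store_true', False, 'v'),
}
go = simple_option(options)
go.log.info("servers: %s" % go.options.servers)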
Example #2
def main():

    options = {
        'storage': ('the VSC filesystems that are checked by this script', 'strlist', 'store', []),
        'threshold': ('the allowed time difference between the cached quota and the time of running', None, 'store',
                      DEFAULT_ALLOWED_TIME_THRESHOLD),
        'fileset_prefixes': ('the filesets that we allow for showing QuotaUser', 'strlist', 'store', []),
        'vo': ('provide storage details for the VO you belong to', None, 'store_true', False)
    }
    opts = simple_option(options, config_files=['/etc/quota_information.conf'])

    storage = VscStorage()
    vsc = VSC(False)
    user_name = getpwuid(os.getuid())[0]

    vos = [g.gr_name for g in grp.getgrall()
                     if user_name in g.gr_mem
                     and g.gr_name.startswith('gvo')
                     and g.gr_name != vsc.default_vo]  # default VO has no quota associated with it

    opts.options.vo = opts.options.vo and vos

    now = time.time()

    print_user_quota(opts, storage, user_name, now)

    if opts.options.vo:
        print_vo_quota(opts, storage, vos, now)
Example #3
def main():
    opts = {
        'github-account': ("GitHub account where repository is located", None, 'store', 'hpcugent', 'a'),
        'github-user': ("GitHub user to use (for authenticated access)", None, 'store', 'boegel', 'u'),
        'repository': ("Repository to use", None, 'store', 'easybuild-easyconfigs', 'r'),
    }
    go = simple_option(go_dict=opts, descr="Script to print overview of pull requests for a GitHub repository")

    pickle_file = None
    if go.args:
        pickle_file = go.args[0]

    prs = fetch_pr_data(pickle_file, go.options.github_user, go.options.github_account, go.options.repository)

    html_file = HTML_FILE % go.options.repository
    print("Generating %s..." % html_file)
    handle = open(html_file, 'w')
    handle.write(HTML_HEADER)
    handle.write(gen_table_header())
    pr_cnt, table_rows, merged_today, last_update = gen_table_rows(prs)
    handle.write(table_rows)
    handle.write(HTML_FOOTER % {
        'merged_today': merged_today,
        'pr_cnt': pr_cnt,
        'repo': '%s/%s' % (go.options.github_account, go.options.repository),
        'timestamp': last_update, #datetime.now().strftime(format='%d %B %Y %H:%M:%S'),
    })
    handle.close()
Example #4
def main():
    """
    Main script.
    """

    options = {
        "jobid": ("The PBS_JOBID of the job for which we want information", None, "store", None),
        "information": (
            "Comma-separated list of the job info to print. " "Entries of the format input_key:output_key",
            None,
            "store",
            None,
        ),
    }
    opts = simple_option(options)

    if not opts.options.jobid:
        logger.error("jobid is a required option. Bailing.")
        sys.exit(1)

    pquery = PBSQuery()
    current_job = pquery.getjob(opts.options.jobid)

    s = transform_info(current_job, opts.options.information)

    print "\n".join(s)
Example #5
def main():

    options = {
        'storage': ('the VSC filesystems that are checked by this script', None, 'extend', []),
        'threshold': ('the allowed time difference between the cached quota and the time of running', None, 'store',
                      DEFAULT_ALLOWED_TIME_THRESHOLD),
    }
    opts = simple_option(options, config_files='/etc/quota_information.conf')

    storage = VscStorage()
    user_name = getpwuid(os.getuid())[0]
    now = time.time()

    for storage_name in opts.options.storage:

        mount_point = storage[storage_name].login_mount_point
        path_template = storage.path_templates[storage_name]['user']
        path = os.path.join(mount_point, path_template[0], path_template[1](user_name))

        cache = FileCache(path)
        (timestamp, quota) = cache.load('quota')

        if now - timestamp > opts.options.threshold:
            print "%s: WARNING: no recent quota information (age of data is %d minutes)" % (storage_name,

                                                                                               (now-timestamp)/60)
        else:
            for (fileset, qi) in quota.quota_map.items():
            print "%s: used %d MiB (%d%%) quota %d MiB in fileset %d" % (storage_name,
                                                           quota)


if __name__ == '__main__':
    main()
Example #6
def main():
    """ Start a new rsync client (destination or source) in a specified session """
    options = {
        # Zookeeper connection options:
        'servers'     : ('list of zk servers', 'strlist', 'store', None),
        'user'        : ('user with creation rights on zookeeper', None, 'store', 'root', 'u'),
        'passwd'      : ('password for user with creation rights', None, 'store', 'admin', 'p'),
        # Role options, define exactly one of these:
        'source'      : ('rsync source', None, 'store_true', False, 'S'),
        'destination' : ('rsync destination', None, 'store_true', False, 'D'),
        'pathsonly'   : ('Only do a test run of the pathlist building', None, 'store_true', False),
        'state'       : ('Only do the state', None, 'store_true', False),
        # Session options; should be the same on all clients of the session!
        'session'     : ('session name', None, 'store', 'default', 'N'),
        'netcat'      : ('run netcat test instead of rsync', None, 'store_true', False),
        'dryrun'      : ('run rsync in dry run mode', None, 'store_true', False, 'n'),
        'rsyncpath'   : ('rsync basepath', None, 'store', None, 'r'),  # May differ between sources and dests
        # Pathbuilding (Source clients and pathsonly ) specific options:
        'excludere'   : ('Exclude from pathbuilding', None, 'regex', re.compile(r'/\.snapshots(/.*|$)')),
        'depth'       : ('queue depth', "int", 'store', 3),
        # Source clients options; should be the same on all clients of the session!:
        'delete'      : ('run rsync with --delete', None, 'store_true', False),
        # Individual client options
        'daemon'      : ('daemonize client', None, 'store_true', False),
        'domain'      : ('substitute domain', None, 'store', None),
        'logfile'     : ('Output to logfile', None, 'store', '/tmp/zkrsync/%(session)s-%(rstype)s-%(pid)s.log'),
        'pidfile'     : ('Pidfile template', None, 'store', '/tmp/zkrsync/%(session)s-%(rstype)s-%(pid)s.pid'),
        # Individual Destination client specific options
        'rsyncport'   : ('force port on which rsyncd binds', "int", 'store', None),
        'startport'   : ('offset to look for rsyncd ports', "int", 'store', 4444)
    }

    go = simple_option(options)
    acreds, admin_acl, rstype = zkrsync_parse(go.options)
    if go.options.logfile:
        init_logging(go.options.logfile, go.options.session, rstype)

    kwargs = {
        'session'     : go.options.session,
        'default_acl' : [admin_acl],
        'auth_data'   : acreds,
        'rsyncpath'   : go.options.rsyncpath,
        'netcat'      : go.options.netcat,
        }

    if go.options.daemon:
        pidfile = init_pidfile(go.options.pidfile, go.options.session, rstype)
        zkrsdaemon = ZkrsDaemon(pidfile, rstype, go.options, kwargs)
        zkrsdaemon.start()
    else:
        start_zkrs(rstype, go.options, kwargs)
Example #7
def main(args):
    """Main script."""

    options = {
        'nagios': ('print out nagios information', None, 'store_true', False, 'n'),
        'nagios_check_filename': ('filename of where the nagios check data is stored', str, 'store', NAGIOS_CHECK_FILENAME),
        'nagios_check_interval_threshold': ('threshold of nagios checks timing out', None, 'store', NAGIOS_CHECK_INTERVAL_THRESHOLD),
        'mail-report': ('mail a report to the hpc-admin list with job list for gracing or inactive users',
                        None, 'store_true', False),
        'ha': ('high-availability master IP address', None, 'store', None),
        'dry-run': ('do not make any updates whatsoever', None, 'store_true', False),
    }
    opts = simple_option(options)

    nagios_reporter = NagiosReporter(NAGIOS_HEADER, NAGIOS_CHECK_FILENAME, NAGIOS_CHECK_INTERVAL_THRESHOLD)

    if opts.options.nagios:
        nagios_reporter.report_and_exit()
        sys.exit(0)  # not reached

    if not proceed_on_ha_service(opts.options.ha):
        logger.warning("Not running on the target host in the HA setup. Stopping.")
        nagios_reporter.cache(NAGIOS_EXIT_WARNING,
                              NagiosResult("Not running on the HA master."))
        sys.exit(NAGIOS_EXIT_WARNING)

    try:
        vsc_config = VscConfiguration()
        LdapQuery(vsc_config)

        grace_users = get_user_with_status('grace')
        inactive_users = get_user_with_status('inactive')

        pbs_query = PBSQuery()

        t = time.ctime()
        jobs = pbs_query.getjobs()  # we just get them all

        removed_queued = remove_queued_jobs(jobs, grace_users, inactive_users, opts.options.dry_run)
        removed_running = remove_running_jobs(jobs, inactive_users, opts.options.dry_run)

        if opts.options.mail_report and not opts.options.dry_run:
            if len(removed_queued) > 0 or len(removed_running) > 0:
                mail_report(t, removed_queued, removed_running)
    except Exception, err:
        logger.exception("Something went wrong: {err}".format(err=err))
        nagios_reporter.cache(NAGIOS_EXIT_CRITICAL,
                              NagiosResult("Script failed, check log file ({logfile})".format(logfile=PBS_CHECK_LOG_FILE)))
        sys.exit(NAGIOS_EXIT_CRITICAL)
Example #8
def main():
    """Yeah, so, erm. The main function and such."""

    options = {
        "summary": ("Give the summary", None, "store_true", False, "s"),
        "detail": ("Detailed information", None, "store_true", False),
        "virtualorganisation": ("Give VO details if available", None, "store_true", False, "v"),
        "running": ("Display running job information", None, "store_true", False, "r"),
        "idle": ("Display idle job information", None, "store_true", False, "i"),
        "blocked": ("Dispay blocked job information", None, "store_true", False, "b"),
        "hosts": ("Hosts/clusters to check", None, "extend", []),
        "location_environment": (
            "the location for storing the pickle file depending on the cluster",
            str,
            "store",
            "VSC_HOME",
        ),
    }

    opts = simple_option(options, config_files=["/etc/myshowq.conf"])

    if not (opts.options.running or opts.options.idle or opts.options.blocked):
        opts.options.running = True
        opts.options.idle = True
        opts.options.blocked = True

    my_uid = os.geteuid()
    my_name = pwd.getpwuid(my_uid)[0]

    (res, user_map) = readbuffer(
        my_name,
        opts.options.virtualorganisation,
        opts.options.running,
        opts.options.idle,
        opts.options.blocked,
        opts.options.location_environment,
    )

    if not res or len(res) == 0:
        print "no data"
        sys.exit(0)

    if opts.options.summary:
        showsummary(opts.options.hosts, res, user_map, my_name, opts.options.virtualorganisation)
    if opts.options.detail:
        showdetail(opts.options.hosts, res, user_map, my_name, opts.options.virtualorganisation)
Example #9
def main():

    options = {
        'jobid': ('Fully qualified identification of the job', None, 'store', None),
        'location_environment': ('the location for storing the pickle file depending on the cluster', str, 'store', 'VSC_SCRATCH_DELCATTY'),
    }
    opts = simple_option(options, config_files=['/etc/mycheckjob.conf'])

    storage = VscStorage()
    user_name = getpwuid(os.getuid())[0]

    mount_point = storage[opts.options.location_environment].login_mount_point
    path_template = storage.path_templates[opts.options.location_environment]['user']
    path = os.path.join(mount_point, path_template[0], path_template[1](user_name), ".checkjob.json.gz")

    checkjob_info = read_cache(path)

    print checkjob_info.display(opts.options.jobid)
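
The `path_templates` lookups here (and in Examples #11 and #15) treat element [0] as a fixed subdirectory and element [1] as a callable mapping a user name to a per-user subpath. A sketch of a structure that satisfies that usage — the directory names and lambda are purely illustrative, the real layout lives in VscStorage:

import os

# hypothetical layout: ('fixed subdir', callable user_name -> user subpath)
path_templates = {
    'VSC_SCRATCH_DELCATTY': {
        'user': ('gent', lambda user_name: os.path.join(user_name[-3:], user_name)),
    },
}
tmpl = path_templates['VSC_SCRATCH_DELCATTY']['user']
print(os.path.join('/scratch', tmpl[0], tmpl[1]('vsc40001'), '.checkjob.json.gz'))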
Example #10
def main():

    opts = {
        'github-account': ("GitHub account where repository is located", None, 'store', 'hpcugent', 'a'),
        'github-user': ("GitHub user to use (for authenticated access)", None, 'store', 'boegel', 'u'),
        'repository': ("Repository to use", None, 'store', 'easybuild-easyconfigs', 'r'),
    }
    go = simple_option(go_dict=opts, descr="Script to print overview of pull requests for a GitHub repository")

    github_token = fetch_github_token(go.options.github_user)
    github = RestClient(GITHUB_API_URL, username=go.options.github_user, token=github_token, user_agent='eb-pr-overview')

    downloading_msg = "Downloading PR data for %s/%s repo..." % (go.options.github_account, go.options.repository)
    print(downloading_msg)

    prs_data = fetch_prs_data(github, go.options.github_account, go.options.repository, downloading_msg)
    gh_repo = github.repos[go.options.github_account][go.options.repository]
    create_pr_overview(prs_data, gh_repo)
Example #11
def main():
    """Yeah, so, erm. The main function and such."""

    options = {
        "summary": ("Give the summary", None, "store_true", False, 's'),
        "detail": ("Detailed information", None, "store_true", False,),
        "virtualorganisation": ("Give VO details if available", None, "store_true", False, 'v'),
        "running": ("Display running job information", None, "store_true", False, 'r'),
        "idle": ("Display idle job information", None, "store_true", False, 'i'),
        "blocked": ("Dispay blocked job information", None, "store_true", False, 'b'),
        'hosts': ("Hosts/clusters to check", None, 'extend', []),
        'location_environment': ('the location for storing the pickle file depending on the cluster', str, 'store', 'VSC_SCRATCH_DELCATTY'),
    }

    opts = simple_option(options, config_files=['/etc/myshowq.conf'])

    if not (opts.options.running or opts.options.idle or opts.options.blocked):
        opts.options.running = True
        opts.options.idle = True
        opts.options.blocked = True

    storage = VscStorage()
    user_name = getpwuid(os.getuid())[0]
    now = time.time()

    mount_point = storage[opts.options.location_environment].login_mount_point
    path_template = storage.path_templates[opts.options.location_environment]['user']
    path = os.path.join(mount_point, path_template[0], path_template[1](user_name), ".showq.json.gz")

    (res, user_map) = read_cache(user_name,
                                 opts.options.virtualorganisation,
                                 opts.options.running,
                                 opts.options.idle,
                                 opts.options.blocked,
                                 path)

    if not res or len(res) == 0:
        print "no data"
        sys.exit(0)

    if opts.options.summary:
        showsummary(opts.options.hosts, res, user_map, user_name, opts.options.virtualorganisation)
    if opts.options.detail:
        showdetail(opts.options.hosts, res, user_map, user_name, opts.options.virtualorganisation)
Example #12
def main():
    opts = {
        "github-account": ("GitHub account where repository is located", None, "store", "hpcugent", "a"),
        "github-user": ("GitHub user to use (for authenticated access)", None, "store", "boegel", "u"),
        "repository": ("Repository to use", None, "store", "easybuild-easyconfigs", "r"),
    }
    go = simple_option(go_dict=opts, descr="Script to print overview of pull requests for a GitHub repository")

    pickle_file = None
    if go.args:
        pickle_file = go.args[0]

    prs = fetch_pr_data(pickle_file, go.options.github_user, go.options.github_account, go.options.repository)

    created_ats = [datetime_parser(pr["created_at"].split("T")[0]) for pr in prs]
    closed_ats = [datetime_parser((pr["closed_at"] or "T").split("T")[0] or "ENDNEXTMONTH") for pr in prs]

    print("Plotting...")
    plot_historic_PR_ages(created_ats, closed_ats, go.options.repository)
    plot_open_closed_PRs(created_ats, closed_ats, go.options.repository)
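
The date wrangling above relies on GitHub's ISO 8601 timestamps: splitting on "T" keeps just the date part, and a PR without a `closed_at` value falls through to the "ENDNEXTMONTH" sentinel. For example:

created_at = "2016-03-01T14:05:27Z"  # typical GitHub timestamp
print(created_at.split("T")[0])      # 2016-03-01

closed_at = None                     # PR is still open
print((closed_at or "T").split("T")[0] or "ENDNEXTMONTH")  # ENDNEXTMONTH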
Example #13
def main():

    options = {
        'jobid': ('Fully qualified identification of the job', None, 'store', None),
        'location_environment': ('the location for storing the pickle file depending on the cluster', str, 'store', 'VSC_HOME'),
    }

    opts = simple_option(options, config_files=['/etc/mycheckjob.conf'])

    my_uid = os.geteuid()
    my_name = pwd.getpwuid(my_uid)[0]

    path = checkjob_data_location(my_name, opts.options.location_environment)
    (timeinfo, checkjob) = read_checkjob_data(path)

    age = time.time() - timeinfo

    if age > MAXIMAL_AGE:
        print "Job information is older than %d minutes (%f hours). Information may not be relevant any longer" % (age / 60, age / 60.0 / 60.0)

    print checkjob.display(opts.options.jobid)
Example #14
def main():
    """Main function"""
    options = {
        'nagios_check_filename': ('filename of where the nagios check data is stored', str, 'store', NAGIOS_CHECK_FILENAME),
        'nagios_check_interval_threshold': ('threshold of nagios checks timing out', None, 'store', NAGIOS_CHECK_INTERVAL_THRESHOLD),
        'hosts': ('the hosts/clusters that should be contacted for job information', None, 'extend', []),
        'location': ('the location for storing the pickle file: gengar, muk', str, 'store', 'gengar'),
        'ha': ('high-availability master IP address', None, 'store', None),
        'dry-run': ('do not make any updates whatsoever', None, 'store_true', False),
    }

    opts = simple_option(options)

    nag = SimpleNagios(_cache=NAGIOS_CHECK_FILENAME)

    if opts.options.ha and not proceed_on_ha_service(opts.options.ha):
        _log.info("Not running on the target host in the HA setup. Stopping.")
        nag.ok("Not running on the HA master.")
    else:
        # parse config file
        clusters = {}
        for host in opts.options.hosts:
            master = opts.configfile_parser.get(host, "master")
            showq_path = opts.configfile_parser.get(host, "showq_path")
            mjobctl_path = opts.configfile_parser.get(host, "mjobctl_path")
            clusters[host] = {
                'master': master,
                'spath': showq_path,
                'mpath': mjobctl_path,
            }

        # process the new and previous data
        released_jobids, stats = process_hold(clusters, dry_run=opts.options.dry_run)

        # nagios state
        stats.update(RELEASEJOB_LIMITS)
        stats['message'] = "released %s jobs in hold" % len(released_jobids)
        nag._eval_and_exit(**stats)

    _log.info("Cached nagios state: %s %s" % (nag._final_state[0][1], nag._final_state[1]))
Example #15
def main():

    options = {
        "jobid": ("Fully qualified identification of the job", None, "store", None),
        "location_environment": (
            "the location for storing the pickle file depending on the cluster",
            str,
            "store",
            "VSC_SCRATCH_DELCATTY",
        ),
    }
    opts = simple_option(options, config_files=["/etc/mycheckjob.conf"])

    storage = VscStorage()
    user_name = getpwuid(os.getuid())[0]

    mount_point = storage[opts.options.location_environment].login_mount_point
    path_template = storage.path_templates[opts.options.location_environment]["user"]
    path = os.path.join(mount_point, path_template[0], path_template[1](user_name), ".checkjob.json.gz")

    checkjob_info = read_cache(path)

    print checkjob_info.display(opts.options.jobid)
Example #16
def main():
    """
    Main script.
    """

    options = {
        "jobid": ("The PBS_JOBID of the job for which we want information",
                  None, "store", None),
        "information":
        ("Comma-separated list of the job info to print. "
         "Entries of the format input_key:output_key", None, "store", None),
    }
    opts = simple_option(options)

    if not opts.options.jobid:
        logger.error("jobid is a required option. Bailing.")
        sys.exit(1)

    pquery = PBSQuery()
    current_job = pquery.getjob(opts.options.jobid)

    s = transform_info(current_job, opts.options.information)

    print "\n".join(s)
Example #17
# the License, or (at your option) any later version.
#
# vsc-base is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with vsc-base. If not, see <http://www.gnu.org/licenses/>.
#
"""
An example of how simple_option can do its magic.

Run it with -h and/or -H to see the help functions.

To see it do something, try
python examples/simple_option.py --info -L itjustworks

@author: Stijn De Weirdt (Ghent University)
"""
from vsc.utils.generaloption import simple_option

# dict = {longopt:(help_description,type,action,default_value,shortopt),}
options = {'long1':('1st long option', None, 'store', 'excellent', 'L')}

go = simple_option(options)

go.log.info("1st option %s" % go.options.long1)
go.log.debug("DEBUG 1st option %s" % go.options.long1)

Example #18
def main():
    """the main function"""
    fancylogger.logToScreen(enable=True, stdout=True)
    fancylogger.setLogLevelInfo()

    options = {
        'github-user': ('Your github username to use', None, 'store', None, 'g'),
        'closed-pr': ('Delete all gists from closed pull-requests', None, 'store_true', True, 'p'),
        'all': ('Delete all gists from Easybuild ', None, 'store_true', False, 'a'),
        'orphans': ('Delete all gists without a pull-request', None, 'store_true', False, 'o'),
    }

    go = simple_option(options)
    log = go.log

    if not (go.options.all or go.options.closed_pr or go.options.orphans):
        log.error("Please tell me what to do?")

    if go.options.github_user is None:
        eb_go = EasyBuildOptions(envvar_prefix='EASYBUILD', go_args=[])
        username = eb_go.options.github_user
        log.debug("Fetch github username from easybuild, found: %s", username)
    else:
        username = go.options.github_user

    if username is None:
        log.error("Could not find a github username")
    else:
        log.info("Using username = %s", username)

    token = fetch_github_token(username)

    gh = RestClient(GITHUB_API_URL, username=username, token=token)
    # ToDo: add support for pagination
    status, gists = gh.gists.get(per_page=100)

    if status != HTTP_STATUS_OK:
        log.error("Failed to get a lists of gists for user %s: error code %s, message = %s",
                  username, status, gists)
    else:
        log.info("Found %s gists", len(gists))

    regex = re.compile(r"(EasyBuild test report|EasyBuild log for failed build).*?(?:PR #(?P<PR>[0-9]+))?\)?$")

    pr_cache = {}
    num_deleted = 0

    for gist in gists:
        if not gist["description"]:
            continue
        re_pr_num = regex.search(gist["description"])
        delete_gist = False

        if re_pr_num:
            log.debug("Found a Easybuild gist (id=%s)", gist["id"])
            pr_num = re_pr_num.group("PR")
            if go.options.all:
                delete_gist = True
            elif pr_num and go.options.closed_pr:
                log.debug("Found Easybuild test report for PR #%s", pr_num)

                if pr_num not in pr_cache:
                    status, pr = gh.repos[GITHUB_EB_MAIN][GITHUB_EASYCONFIGS_REPO].pulls[pr_num].get()
                    if status != HTTP_STATUS_OK:
                        log.error("Failed to get pull-request #%s: error code %s, message = %s",
                                  pr_num, status, pr)
                    pr_cache[pr_num] = pr["state"]

                if pr_cache[pr_num] == "closed":
                    log.debug("Found report from closed PR #%s (id=%s)", pr_num, gist["id"])
                    delete_gist = True

            elif not pr_num and go.options.orphans:
                log.debug("Found Easybuild test report without PR (id=%s)", gist["id"])
                delete_gist = True

        if delete_gist:
            status, del_gist = gh.gists[gist["id"]].delete()

            if status != HTTP_DELETE_OK:
                log.error("Unable to remove gist (id=%s): error code %s, message = %s",
                          gist["id"], status, del_gist)
            else:
                log.info("Delete gist with id=%s", gist["id"])
                num_deleted += 1

    log.info("Deleted %s gists", num_deleted)
Example #19
def main():
    """Main script"""

    options = {
        'nagios': ('print out nagios information', None, 'store_true', False, 'n'),
        'nagios-check-filename': ('filename of where the nagios check data is stored', str, 'store', NAGIOS_CHECK_FILENAME),
        'nagios-check-interval-threshold': ('threshold of nagios checks timing out', None, 'store', NAGIOS_CHECK_INTERVAL_THRESHOLD),
        'storage': ('the VSC filesystems that are checked by this script', None, 'extend', []),
        'dry-run': ('do not make any updates whatsoever', None, 'store_true', False),
    }
    opts = simple_option(options)

    logger.info('started GPFS quota check run.')

    nagios_reporter = NagiosReporter(NAGIOS_HEADER,
                                     opts.options.nagios_check_filename,
                                     opts.options.nagios_check_interval_threshold)

    if opts.options.nagios:
        nagios_reporter.report_and_exit()
        sys.exit(0)  # not reached

    lockfile = TimestampedPidLockfile(QUOTA_CHECK_LOCK_FILE)
    lock_or_bork(lockfile, nagios_reporter)

    try:
        user_id_map = map_uids_to_names() # is this really necessary?
        LdapQuery(VscConfiguration())
        gpfs = GpfsOperations()
        filesystems = gpfs.list_filesystems().keys()
        logger.debug("Found the following GPFS filesystems: %s" % (filesystems))

        filesets = gpfs.list_filesets()
        logger.debug("Found the following GPFS filesets: %s" % (filesets))

        quota = gpfs.list_quota()

        for storage in opts.options.storage:

            logger.info("Processing quota for storage %s" % (storage))
            filesystem = opts.configfile_parser.get(storage, 'filesystem')

            if filesystem not in filesystems:
                logger.error("Non-existant filesystem %s" % (filesystem))
                continue

            if filesystem not in quota.keys():
                logger.error("No quota defined for storage %s [%s]" % (storage, filesystem))
                continue

            quota_storage_map = get_mmrepquota_maps(quota[filesystem], storage, filesystem, filesets)

            exceeding_filesets = process_fileset_quota(gpfs, storage, filesystem, quota_storage_map['FILESET'])
            exceeding_users = process_user_quota(gpfs, storage, filesystem, quota_storage_map['USR'], user_id_map)

            logger.warning("storage %s found %d filesets that are exceeding their quota: %s" % (storage,
                                                                                                len(exceeding_filesets),
                                                                                                exceeding_filesets))
            logger.warning("storage %s found %d users who are exceeding their quota: %s" % (storage,
                                                                                            len(exceeding_users),
                                                                                            exceeding_users))

            notify_exceeding_filesets(gpfs=gpfs,
                                      storage=storage,
                                      filesystem=filesystem,
                                      exceeding_items=exceeding_filesets,
                                      dry_run=opts.options.dry_run)
            notify_exceeding_users(gpfs=gpfs,
                                   storage=storage,
                                   filesystem=filesystem,
                                   exceeding_items=exceeding_users,
                                   dry_run=opts.options.dry_run)

        sys.exit(1)

    except Exception, err:
        logger.exception("critical exception caught: %s" % (err))
        if not opts.options.dry_run:
            nagios_reporter.cache(NAGIOS_EXIT_CRITICAL, NagiosResult("CRITICAL script failed - %s" % (err.message)))
        if not opts.options.dry_run:
            lockfile.release()
        sys.exit(1)
Example #20
def main():
    """The main."""

    # Note: debug option is provided by generaloption
    # Note: other settings, e.g., for each cluster, will be obtained from the configuration file
    options = {
        'nagios': ('print out nagios information', None, 'store_true', False, 'n'),
        'nagios-check-filename': ('filename of where the nagios check data is stored',
                                  str,
                                  'store',
                                  NAGIOS_CHECK_FILENAME),
        'nagios-check-interval-threshold': ('threshold of nagios checks timing out',
                                            None,
                                            'store',
                                            NAGIOS_CHECK_INTERVAL_THRESHOLD),
        'location': ('path to store the gzipped files', None, 'store', QUOTA_LOG_ZIP_PATH),
        'ha': ('high-availability master IP address', None, 'store', None),
        'dry-run': ('do not make any updates whatsoever', None, 'store_true', False),
    }

    opts = simple_option(options)

    nagios_reporter = NagiosReporter(NAGIOS_HEADER,
                                     opts.options.nagios_check_filename,
                                     opts.options.nagios_check_interval_threshold)
    if opts.options.nagios:
        logger.debug("Producing Nagios report and exiting.")
        nagios_reporter.report_and_exit()
        sys.exit(0)  # not reached

    if not proceed_on_ha_service(opts.options.ha):
        logger.warning("Not running on the target host in the HA setup. Stopping.")
        nagios_reporter.cache(NAGIOS_EXIT_WARNING,
                              NagiosResult("Not running on the HA master."))
        sys.exit(NAGIOS_EXIT_WARNING)

    lockfile = TimestampedPidLockfile(QUOTA_LOG_LOCK_FILE)
    lock_or_bork(lockfile, nagios_reporter)

    logger.info("starting quota_log run")

    filesystem_error = 0
    filesystem_ok = 0
    error = False

    try:
        gpfs = GpfsOperations()
        quota = gpfs.list_quota()

        for key in quota:
            try:
                filename = "gpfs_quota_%s_%s.gz" % (time.strftime("%Y%m%d-%H:%M"), key)
                path = os.path.join(opts.options.location, filename)
                zipfile = gzip.open(path, 'wb', 9)  # Compress to the max
                zipfile.write(json.dumps(quota[key]))
                zipfile.close()
                filesystem_ok += 1
                logger.info("Stored quota information for FS %s" % (key))
            except Exception, err:
                logger.exception("Failed storing quota information for FS %s" % (key))
                filesystem_error += 1
    except Exception, err:
        logger.exception("Failure obtaining GPFS quota")
        error = True
Example #21
def main():

    opts = {
        'dry-run': ("Dry run, don't actually post/push/merge anything", None,
                    'store_true', False, 'x'),
        'force': ("Use force to execute the specified action", None,
                  'store_true', False, 'f'),
        'github-account': ("GitHub account where repository is located", None,
                           'store', 'easybuilders', 'a'),
        'github-user': ("GitHub user to use (for authenticated access)", None,
                        'store', 'boegel', 'u'),
        'owner':
        ("Owner of the bot account that is used", None, 'store', None),
        'repository':
        ("Repository to use", None, 'store', 'easybuild-easyconfigs', 'r'),
        # actions
        'comment':
        ("Post a comment in the pull request", None, 'store', None, 'C'),
        'merge': ("Merge the pull request", None, 'store_true', False, 'M'),
        'review': ("Review the pull request", None, 'store_true', False, 'R'),
        'test':
        ("Submit job to upload test report", None, 'store_or_None', None, 'T'),
        'travis': ("Scan Travis test results, notify of failed tests in PRs",
                   None, 'store_true', False),
    }

    actions = ['comment', 'merge', 'review', 'test', 'travis']

    go = simple_option(go_dict=opts)
    init_build_options()

    # determine which action should be taken
    selected_action = None
    for action in sorted(actions):
        action_value = getattr(go.options, action)
        if isinstance(action_value, bool):
            if action_value:
                selected_action = (action, action_value)
                break
        elif action_value is not None:
            selected_action = (action, action_value)
            break  # FIXME: support multiple actions, loop over them (e.g. -C :jok,lgtm -T)

    if selected_action is None:
        avail_actions = ', '.join(
            ["%s (-%s)" % (a, a[0].upper()) for a in sorted(actions)])
        error("No action specified, pick one: %s" % avail_actions)
    else:
        info("Selected action: %s" % selected_action[0])

    global DRY_RUN
    DRY_RUN = go.options.dry_run
    force = go.options.force
    github_account = go.options.github_account
    github_user = go.options.github_user
    owner = go.options.owner
    repository = go.options.repository

    pr = None
    check_msg = None
    github_token = fetch_github_token(github_user)

    # prepare using GitHub API
    github = RestClient(GITHUB_API_URL,
                        username=github_user,
                        token=github_token,
                        user_agent='eb-pr-check')

    if selected_action[0] == 'travis':
        res = travis(github_account, repository, github_token, owner=owner)
        if res:
            for pr, pr_comment, check_msg in res:
                pr_data = fetch_pr_data(github, github_account, repository, pr)
                comment(github,
                        github_user,
                        repository,
                        pr_data,
                        pr_comment,
                        check_msg=check_msg,
                        verbose=DRY_RUN)
        else:
            print "Found no PRs to notify, all done here!"

    else:
        if len(go.args) == 1:
            pr = go.args[0]
        else:
            usage()

        print "Fetching PR information ",
        print "(using GitHub token for user '%s': %s)... " % (github_user, (
            'no', 'yes')[bool(github_token)]),
        sys.stdout.flush()
        pr_data = fetch_pr_data(github, github_account, repository, pr)
        print ''

        #print_raw_pr_info(pr_data)

        print_pr_summary(pr_data)

        if selected_action[0] == 'comment':
            comment(github,
                    github_user,
                    repository,
                    pr_data,
                    selected_action[1],
                    check_msg=check_msg)
        elif selected_action[0] == 'merge':
            merge(github,
                  github_user,
                  github_account,
                  repository,
                  pr_data,
                  force=force)
        elif selected_action[0] == 'review':
            review(pr_data)
        elif selected_action[0] == 'test':
            test(pr_data, selected_action[1])
        else:
            error("Handling action '%s' not implemented yet" %
                  selected_action[0])
Example #22
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with vsc-base. If not, see <http://www.gnu.org/licenses/>.
#
"""
Simple QA script

@author: Stijn De Weirdt (Ghent University)
"""
import os

from vsc.utils.run import run_qa, run_qalog, run_qastdout, run_async_to_stdout
from vsc.utils.generaloption import simple_option

go = simple_option(None)

SCRIPT_DIR = os.path.join(os.path.dirname(__file__), '..', 'test', 'runtests')
SCRIPT_QA = os.path.join(SCRIPT_DIR, 'qa.py')


def test_qa():
    qa_dict = {
        'Simple question:': 'simple answer',
    }
    ec, output = run_qa([SCRIPT_QA, 'simple'], qa=qa_dict)
    return ec, output


def test_qalog():
    qa_dict = {
Example #23
def main():
    """Main"""

    options = {
        'nagios': ('Report in nagios format', None, 'store_true', False, 'n'),
        'regex': ('Filter on regexp, data for first match', None, 'regex',
                  None, 'r'),
        'allregex': ('Combined with --regex/-r, return all data', None,
                     'store_true', False, 'A'),
        'anystate':
        ('Matches any state (eg down_on_error node will also list as error)',
         None, 'store_true', False, 'a'),
        'down': ('Down nodes', None, 'store_true', False, 'D'),
        'downonerror': ('Down on error nodes', None, 'store_true', False, 'E'),
        'offline': ('Offline nodes', None, 'store_true', False, 'o'),
        'offline_idle': ('Offline idle nodes', None, 'store_true', False, 'O'),
        'partial':
        ('Partial nodes (one or more running job(s), jobslot(s) available)',
         None, 'store_true', False, 'p'),
        'job-exclusive': ('Job-exclusive nodes (no jobslots available)', None,
                          'store_true', False, 'x'),
        'free': ('Free nodes (0 or more running jobs, jobslot(s) available)',
                 None, 'store_true', False, 'f'),
        'unknown': ('State unknown nodes', None, 'store_true', False, 'u'),
        'bad': ('Bad nodes (broken jobregex)', None, 'store_true', False, 'b'),
        'error': ('Error nodes', None, 'store_true', False, 'e'),
        'idle': ('Idle nodes (No running jobs, jobslot(s) available)', None,
                 'store_true', False, 'i'),
        'singlenodeinfo':
        (('Single (most-frequent) node information in key=value format'
          '(no combination with other options)'), None, 'store_true', False,
         'I'),
        'reportnodeinfo':
        ('Report node information (no combination with other options)', None,
         'store_true', False, 'R'),
        'moab': ('Use moab information (mdiag -n)', None, 'store_true', False,
                 'm'),
        'moabxml': ('Use xml moab data from file (for testing)', None, 'store',
                    None),
        'shorthost': ('Return (short) hostname', None, 'store_true', False,
                      's'),
        'invert': ('Return inverted selection', None, 'store_true', False,
                   'v'),
    }

    go = simple_option(options)

    if go.options.nagios and not go.options.debug:
        fancylogger.logToDevLog(enable=True)
        fancylogger.logToScreen(enable=False)
        fancylogger.setLogLevelInfo()

    all_states = ND_NAGIOS_CRITICAL + ND_NAGIOS_WARNING + ND_NAGIOS_OK
    report_states = []
    if go.options.down:
        report_states.append(ND_down)
    if go.options.downonerror:
        report_states.append(ND_down_on_error)
    if go.options.offline:
        report_states.append(ND_offline)
    if go.options.free:
        report_states.append(ND_free)
    if go.options.partial:
        report_states.append(ND_free_and_job)
    if go.options.job_exclusive:
        report_states.append(ND_job_exclusive)
    if go.options.unknown:
        report_states.append(ND_state_unknown)
    if go.options.bad:
        report_states.append(ND_bad)
    if go.options.error:
        report_states.append(ND_error)
    if go.options.idle:
        report_states.append(ND_idle)
    if go.options.offline_idle:
        report_states.append(ND_offline_idle)

    if len(report_states) == 0:
        report_states = all_states

    if go.options.singlenodeinfo or go.options.reportnodeinfo:
        nodeinfo = collect_nodeinfo()[2]
        if len(nodeinfo) == 0:
            _log.error('No nodeinfo found')
            sys.exit(1)

        ordered = sorted(nodeinfo.items(),
                         key=lambda x: len(x[1]),
                         reverse=True)

        if go.options.singlenodeinfo:
            if len(nodeinfo) > 1:
                msg = "Not all nodes have same parameters. Using most frequent ones."
                if go.options.reportnodeinfo:
                    _log.warning(msg)
                else:
                    _log.error(msg)

            # usage: export `./show_nodes -I` ; env |grep SHOWNODES_
            most_freq = ordered[0][0]
            msg = []
            msg.append("SHOWNODES_PPN=%d" % most_freq[0])
            msg.append("SHOWNODES_PHYSMEMMB=%d" % (most_freq[1] * 1024))
        else:
            msg = []
            for info, nodes in ordered:
                txt = "%d nodes with %d cores, %s MB physmem, %s GB swap and %s GB local disk" % (
                    len(nodes), info[0], info[1] * 1024, info[2], info[3])
                msg.append(txt)
                # print and _log are dumped to stdout at different moment, repeat the txt in the debug log
                _log.debug("Found %s with matching nodes: %s" % (txt, nodes))

        print "\n".join(msg)
        sys.exit(0)

    if go.options.moab:

        if go.options.moabxml:
            try:
                moabxml = open(go.options.moabxml).read()
            except (OSError, IOError):
                _log.error('Failed to read moab xml from %s' %
                           go.options.moabxml)
        else:
            moabxml = None
        nodes_dict = moab_get_nodes_dict(xml=moabxml)

        nodes = get_nodes(nodes_dict)
    else:
        nodes = get_nodes()

    nagiosexit = {
        NDNAG_WARNING: warning_exit,
        NDNAG_CRITICAL: critical_exit,
        NDNAG_OK: ok_exit,
    }

    nagios_res = {}
    detailed_res = {}
    nodes_found = []

    all_nodes = []

    for name, full_state in nodes:
        all_nodes.append(name)

        if go.options.regex and not go.options.regex.search(name):
            continue

        nagios_state = full_state['derived']['nagiosstate']
        if nagios_state not in nagios_res:
            nagios_res[nagios_state] = []

        state = full_state['derived']['state']
        states = full_state['derived']['states']

        if state == ND_free and ND_idle in states:
            state = ND_idle  # special case for idle
        if state == ND_offline and ND_idle in states:
            state = ND_offline_idle
        if state not in detailed_res:
            detailed_res[state] = []

        if go.options.anystate:
            states_to_check = states
        else:
            states_to_check = [state]

        # filter the allowed states
        if any(x for x in states_to_check if x in report_states):
            nagios_res[nagios_state].append(states)
            detailed_res[state].append(states)
            nodes_found.append(name)

            if go.options.regex and not go.options.allregex:
                break

    if go.options.invert:
        nodes_found = [x for x in all_nodes if x not in nodes_found]

    if go.options.regex and not go.options.allregex:
        # there should only be one node
        nagios_state, all_states = nagios_res.items()[0]
        states = all_states[0]
        if go.options.nagios:
            msg = "show_nodes - %s" % ",".join(states)
            nagiosexit[nagios_state](msg)
        else:
            txt = "%s %s" % (nagios_state, ",".join(states))
            print txt
    else:
        if go.options.nagios:
            msg = NagiosResult('show_nodes')
            txt = []
            total = 0
            for state in all_states:
                if state in detailed_res:
                    nr = len(detailed_res[state])
                else:
                    nr = 0
                total += nr
                setattr(msg, state, nr)
            msg.total = total

            reported_state = [str(NDNAG_OK), '']
            if ND_bad in detailed_res:
                reported_state[0] = NDNAG_CRITICAL
                msg.message += ' - %s bad nodes' % (len(detailed_res[ND_bad]))
            nagiosexit[reported_state[0]](msg)
        else:
            # just print the nodes
            if go.options.shorthost:
                nodes_found = [x.split('.')[0] for x in nodes_found]
            print ' '.join(nodes_found)
Example #24
def main():
    """ Start a new rsync client (destination or source) in a specified session """
    options = {
        # Zookeeper connection options:
        'servers'     : ('list of zk servers', 'strlist', 'store', None),
        'user'        : ('user with creation rights on zookeeper', None, 'store', 'root', 'u'),
        'passwd'      : ('password for user with creation rights', None, 'store', 'admin', 'p'),
        # Role options, define exactly one of these:
        'source'      : ('rsync source', None, 'store_true', False, 'S'),
        'destination' : ('rsync destination', None, 'store_true', False, 'D'),
        'pathsonly'   : ('Only do a test run of the pathlist building', None, 'store_true', False),
        'state'       : ('Only do the state', None, 'store_true', False),
        # Session options; should be the same on all clients of the session!
        'session'     : ('session name', None, 'store', 'default', 'N'),
        'netcat'      : ('run netcat test instead of rsync', None, 'store_true', False),
        'dryrun'      : ('run rsync in dry run mode', None, 'store_true', False, 'n'),
        'rsyncpath'   : ('rsync basepath', None, 'store', None, 'r'),  # May differ between sources and dests
        # Pathbuilding (Source clients and pathsonly ) specific options:
        'excludere'   : ('Exclude from pathbuilding', None, 'regex', re.compile(r'/\.snapshots(/.*|$)')),
        'depth'       : ('queue depth', "int", 'store', 4),
        # Source clients options; should be the same on all clients of the session!:
        'delete'      : ('run rsync with --delete', None, 'store_true', False),
        # Individual client options
        'domain'      : ('substitute domain', None, 'store', None),
        'logfile'     : ('Output to logfile', None, 'store', '/tmp/zkrsync/%(session)s-%(rstype)s-%(pid)s.log'),
        # Individual Destination client specific options
        'rsyncport'   : ('force port on which rsyncd binds', "int", 'store', None),
        'startport'   : ('offset to look for rsyncd ports', "int", 'store', 4444)
    }

    go = simple_option(options)
    acreds, admin_acl, rstype = zkrsync_parse(go.options)

    if go.options.logfile:
        logfile = go.options.logfile % {
            'session': go.options.session,
            'rstype': rstype,
            'pid': str(os.getpid())
        }
        logdir = os.path.dirname(logfile)
        if logdir:
            if not os.path.exists(logdir):
                os.makedirs(logdir)
            os.chmod(logdir, stat.S_IRWXU)

        fancylogger.logToFile(logfile)
        logger.debug('Logging to file %s:' % logfile)

    kwargs = {
        'session'     : go.options.session,
        'default_acl' : [admin_acl],
        'auth_data'   : acreds,
        'rsyncpath'   : go.options.rsyncpath,
        'netcat'      : go.options.netcat,
        }
    if go.options.state:
        rsyncP = RsyncSource(go.options.servers, **kwargs)
        logger.info('Progress: %s of %s paths remaining' % (rsyncP.len_paths(), rsyncP.paths_total))
        rsyncP.exit()
        sys.exit(0)

    elif go.options.pathsonly:
        kwargs['rsyncdepth'] = go.options.depth
        kwargs['excludere'] = go.options.excludere
        rsyncP = RsyncSource(go.options.servers, **kwargs)
        locked = rsyncP.acq_lock()
        if locked:
            starttime = time.time()
            rsyncP.build_pathqueue()
            endtime = time.time()
            timing = endtime - starttime
            pathqueue = rsyncP.path_queue
            logger.info('Building with depth %i took %f seconds walltime. there are %i paths in the Queue'
                         % (go.options.depth, timing, len(pathqueue)))
            rsyncP.delete(pathqueue.path, recursive=True)
            rsyncP.release_lock()
        else:
            logger.error('There is already a lock on the pathtree of this session')

        rsyncP.exit()
        sys.exit(0)

    elif rstype == CL_DEST:
        # Start zookeeper connection and rsync daemon
        kwargs['rsyncport'] = go.options.rsyncport
        kwargs['startport'] = go.options.startport
        kwargs['domain'] = go.options.domain
        rsyncD = RsyncDestination(go.options.servers, **kwargs)
        rsyncD.run()

        logger.debug('%s Ready' % rsyncD.get_whoami())
        rsyncD.exit()
        sys.exit(0)

    elif rstype == CL_SOURCE:
        # Start zookeeper connections
        kwargs['rsyncdepth'] = go.options.depth
        kwargs['dryrun'] = go.options.dryrun
        kwargs['delete'] = go.options.delete
        kwargs['excludere'] = go.options.excludere
        rsyncS = RsyncSource(go.options.servers, **kwargs)
        # Try to retrieve session lock
        locked = rsyncS.acq_lock()

        if locked:
            logger.debug('lock acquired')
            watchnode = rsyncS.start_ready_rwatch()
            if not watchnode:
                sys.exit(1)
            paths_total = rsyncS.build_pathqueue()
            todo_paths = paths_total
            while not rsyncS.isempty_pathqueue():
                if todo_paths != rsyncS.len_paths():  # Output progress state
                    todo_paths = rsyncS.len_paths()
                    logger.info('Progress: %s of %s paths remaining' % (todo_paths, paths_total))
                time.sleep(SLEEP_TIME)
            rsyncS.shutdown_all()
            rsyncS.exit()
            sys.exit(0)
        else:
            rsyncS.ready_with_stop_watch()
            logger.debug('ready to process paths')
            while not rsyncS.is_ready():
                logger.debug('trying to get a path out of Queue')
                rsyncS.rsync(TIME_OUT)

            logger.debug('%s Ready' % rsyncS.get_whoami())
            rsyncS.exit()
            sys.exit(0)
Example #25
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with vsc-base. If not, see <http://www.gnu.org/licenses/>.
#
"""
Simple QA script

@author: Stijn De Weirdt (Ghent University)
"""
import os

from vsc.utils.run import run_qa, run_qalog, run_qastdout, run_async_to_stdout
from vsc.utils.generaloption import simple_option

go = simple_option(None)

SCRIPT_DIR = os.path.join(os.path.dirname(__file__), '..', 'test', 'runtests')
SCRIPT_QA = os.path.join(SCRIPT_DIR, 'qa.py')


def test_qa():
    qa_dict = {
               'Simple question:': 'simple answer',
               }
    ec, output = run_qa([SCRIPT_QA, 'simple'], qa=qa_dict)
    return ec, output


def test_qalog():
    qa_dict = {
Example #26
def main():
    """ Start a new rsync client (destination or source) in a specified session """
    options = {
        # Zookeeper connection options:
        'servers': ('list of zk servers', 'strlist', 'store', None),
        'user':
        ('user with creation rights on zookeeper', None, 'store', 'root', 'u'),
        'passwd': ('password for user with creation rights', None, 'store',
                   'admin', 'p'),
        # Role options, define exactly one of these:
        'source': ('rsync source', None, 'store_true', False, 'S'),
        'destination': ('rsync destination', None, 'store_true', False, 'D'),
        'pathsonly': ('Only do a test run of the pathlist building', None,
                      'store_true', False),
        'state': ('Only do the state', None, 'store_true', False),
        # Session options; should be the same on all clients of the session!
        'session': ('session name', None, 'store', 'default', 'N'),
        'netcat':
        ('run netcat test instead of rsync', None, 'store_true', False),
        'dryrun':
        ('run rsync in dry run mode', None, 'store_true', False, 'n'),
        'rsyncpath': ('rsync basepath', None, 'store', None,
                      'r'),  # May differ between sources and dests
        # Pathbuilding (Source clients and pathsonly ) specific options:
        'rsubpaths':
        ('rsync subpaths, specified as <depth>_<path>, with deepest paths last',
         'strlist', 'store', None),
        'excludere': ('Exclude from pathbuilding', None, 'regex',
                      re.compile(r'/\.snapshots(/.*|$)')),
        'excl_usr':
        ('If set, exclude paths for this user only when using excludere', None,
         'store', 'root'),
        'depth': ('queue depth', "int", 'store', 3),
        # Source clients options; should be the same on all clients of the session!:
        'delete': ('run rsync with --delete', None, 'store_true', False),
        'checksum': ('run rsync with --checksum', None, 'store_true', False),
        'hardlinks': ('run rsync with --hard-links', None, 'store_true',
                      False),
        'inplace': ('run rsync with --inplace', None, 'store_true', False),
        # Individual client options
        'verifypath': ('Check basepath exists while running', None,
                       'store_false', True),
        'daemon': ('daemonize client', None, 'store_true', False),
        'domain': ('substitute domain', None, 'store', None),
        'done-file': ('cachefile to write state to when done', None, 'store',
                      None),
        'dropcache': ('run rsync with --drop-cache', None, 'store_true',
                      False),
        'logfile': ('Output to logfile', None, 'store',
                    '/tmp/zkrsync/%(session)s-%(rstype)s-%(pid)s.log'),
        'pidfile': ('Pidfile template', None, 'store',
                    '/tmp/zkrsync/%(session)s-%(rstype)s-%(pid)s.pid'),
        'timeout': ('run rsync with --timeout TIMEOUT', "int", 'store', 0),
        'verbose': ('run rsync with --verbose', None, 'store_true', False),
        # Individual Destination client specific options
        'rsyncport': ('force port on which rsyncd binds', "int", 'store',
                      None),
        'startport': ('offset to look for rsyncd ports', "int", 'store', 4444),
        # Arbitrary rsync options: comma-separated list. Use a colon to separate keys and values
        'arbitopts':
        ('Arbitrary rsync source client long name options: comma-separated list. '
         'Use a colon to separate keys and values. Beware these keys/values are not checked.',
         'strlist', 'store', None),
    }

    go = simple_option(options)
    acreds, admin_acl, rstype = zkrsync_parse(go.options)
    if go.options.logfile:
        init_logging(go.options.logfile, go.options.session, rstype)

    kwargs = {
        'session': go.options.session,
        'default_acl': [admin_acl],
        'auth_data': acreds,
        'rsyncpath': go.options.rsyncpath,
        'netcat': go.options.netcat,
        'verifypath': go.options.verifypath,
        'dropcache': go.options.dropcache,
    }

    if go.options.daemon:
        pidfile = init_pidfile(go.options.pidfile, go.options.session, rstype)
        zkrsdaemon = ZkrsDaemon(pidfile, rstype, go.options, kwargs)
        zkrsdaemon.start()
    else:
        start_zkrs(rstype, go.options, kwargs)
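
The arbitopts format described above (comma-separated entries, colon between key and value) could be parsed along these lines; the helper below is a hypothetical sketch, not the library's actual implementation:

def arbitopts_to_rsync_flags(arbitopts):
    """Turn entries like 'bwlimit:1000' or 'compress' into rsync long options."""
    flags = []
    for opt in arbitopts or []:
        if ':' in opt:
            key, value = opt.split(':', 1)  # split on the first colon only
            flags.append('--%s=%s' % (key, value))
        else:
            flags.append('--%s' % opt)
    return flags

# arbitopts_to_rsync_flags(['bwlimit:1000', 'compress']) -> ['--bwlimit=1000', '--compress']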
Exemplo n.º 27
0
        easyconfigs.append((timestamp, os.path.basename(spec), toolchain))

    print ''
    print 'found %d different toolchains' % len(toolchains)

    for toolchain in sorted(toolchains):
        # datestamp first for correct sorting! (see the tuple-sorting note after this example)
        ecs = sorted([(datestamp, ec) for (datestamp, ec, tc) in easyconfigs
                      if tc == toolchain])

        print '%s (%d)' % (toolchain, len(ecs))
        print '\toldest: %s (%s)' % (ecs[0][1], ecs[0][0])
        print '\tnewest: %s (%s)' % (ecs[-1][1], ecs[-1][0])


## MAIN ##

opts = {
    'easyconfigs-repo':
    ("Path to easyconfigs repository", None, 'store', '.', 'p'),
    'pattern':
    ("Filter pattern to use on easyconfig file names", None, 'store', '', 'f'),
}

go = simple_option(
    go_dict=opts,
    descr="Script to help figure out which toolchains to deprecate")

doit(go.options.easyconfigs_repo, go.options.pattern)
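
The "datestamp first" trick above works because Python sorts tuples element by element, so the date decides the order before the filename does. A quick illustration (filenames and dates are made up):

    >>> sorted([('2014-06-01', 'b.eb'), ('2013-01-15', 'a.eb')])
    [('2013-01-15', 'a.eb'), ('2014-06-01', 'b.eb')]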
Exemplo n.º 28
0
def main():
    """the main function"""
    fancylogger.logToScreen(enable=True, stdout=True)
    fancylogger.setLogLevelInfo()

    options = {
        'github-user': ('Your github username to use', None, 'store', None, 'g'),
        'closed-pr': ('Delete all gists from closed pull-requests', None, 'store_true', True, 'p'),
        'all': ('Delete all gists from EasyBuild', None, 'store_true', False, 'a'),
        'orphans': ('Delete all gists without a pull-request', None, 'store_true', False, 'o'),
    }

    go = simple_option(options)
    log = go.log

    if not (go.options.all or go.options.closed_pr or go.options.orphans):
        raise EasyBuildError("Please tell me what to do?")

    if go.options.github_user is None:
        eb_go = EasyBuildOptions(envvar_prefix='EASYBUILD', go_args=[])
        username = eb_go.options.github_user
        log.debug("Fetch github username from easybuild, found: %s", username)
    else:
        username = go.options.github_user

    if username is None:
        raise EasyBuildError("Could not find a github username")
    else:
        log.info("Using username = %s", username)

    token = fetch_github_token(username)

    gh = RestClient(GITHUB_API_URL, username=username, token=token)
    # ToDo: add support for pagination (see the hedged sketch after this example)
    status, gists = gh.gists.get(per_page=100)

    if status != HTTP_STATUS_OK:
        raise EasyBuildError("Failed to get a lists of gists for user %s: error code %s, message = %s",
                             username, status, gists)
    else:
        log.info("Found %s gists", len(gists))

    regex = re.compile(r"(EasyBuild test report|EasyBuild log for failed build).*?(?:PR #(?P<PR>[0-9]+))?\)?$")

    pr_cache = {}
    num_deleted = 0

    for gist in gists:
        if not gist["description"]:
            continue
        re_pr_num = regex.search(gist["description"])
        delete_gist = False

        if re_pr_num:
            log.debug("Found a Easybuild gist (id=%s)", gist["id"])
            pr_num = re_pr_num.group("PR")
            if go.options.all:
                delete_gist = True
            elif pr_num and go.options.closed_pr:
                log.debug("Found Easybuild test report for PR #%s", pr_num)

                if pr_num not in pr_cache:
                    status, pr = gh.repos[GITHUB_EB_MAIN][GITHUB_EASYCONFIGS_REPO].pulls[pr_num].get()
                    if status != HTTP_STATUS_OK:
                        raise EasyBuildError("Failed to get pull-request #%s: error code %s, message = %s",
                                             pr_num, status, pr)
                    pr_cache[pr_num] = pr["state"]

                if pr_cache[pr_num] == "closed":
                    log.debug("Found report from closed PR #%s (id=%s)", pr_num, gist["id"])
                    delete_gist = True

            elif not pr_num and go.options.orphans:
                log.debug("Found Easybuild test report without PR (id=%s)", gist["id"])
                delete_gist = True

        if delete_gist:
            status, del_gist = gh.gists[gist["id"]].delete()

            if status != HTTP_DELETE_OK:
                raise EasyBuildError("Unable to remove gist (id=%s): error code %s, message = %s",
                                     gist["id"], status, del_gist)
            else:
                log.info("Delete gist with id=%s", gist["id"])
                num_deleted += 1

    log.info("Deleted %s gists", num_deleted)
Exemplo n.º 29
0
def main():

    opts = {
        'dry-run': ("Dry run, don't actually post/push/merge anything", None, 'store_true', False, 'x'),
        'force': ("Use force to execute the specified action", None, 'store_true', False, 'f'),
        'github-account': ("GitHub account where repository is located", None, 'store', 'hpcugent', 'a'),
        'github-user': ("GitHub user to use (for authenticated access)", None, 'store', 'boegel', 'u'),
        'repository': ("Repository to use", None, 'store', 'easybuild-easyconfigs', 'r'),
        # actions
        'comment': ("Post a comment in the pull request", None, 'store', None, 'C'),
        'merge': ("Merge the pull request", None, 'store_true', False, 'M'),
        'review': ("Review the pull request", None, 'store_true', False, 'R'),
        'test': ("Submit job to upload test report", None, 'store_or_None', None, 'T'),
    }

    actions = ['comment', 'merge', 'review', 'test']

    go = simple_option(go_dict=opts, descr="Script to comment on, merge, review or test a pull request of a GitHub repository")

    # determine which action should be taken
    selected_action = None
    for action in sorted(actions):
        action_value = getattr(go.options, action)
        if isinstance(action_value, bool):
            if action_value:
                selected_action = (action, action_value)
                break
        elif action_value is not None:
            selected_action = (action, action_value)
            break  # FIXME: support multiple actions, loop over them (e.g. -C :jok,lgtm -T); see the sketch after this example

    if selected_action is None:
        avail_actions = ', '.join(["%s (-%s)" % (a, a[0].upper()) for a in sorted(actions)])
        error("No action specified, pick one: %s" % avail_actions)
    else:
        info("Selected action: %s" % selected_action[0])

    # prepare using GitHub API
    global DRY_RUN
    DRY_RUN = go.options.dry_run
    force = go.options.force
    github_account = go.options.github_account
    github_user = go.options.github_user
    repository = go.options.repository

    github_token = fetch_github_token(github_user)
    github = RestClient(GITHUB_API_URL, username=github_user, token=github_token, user_agent='eb-pr-check')

    if len(go.args) == 1:
        pr = go.args[0]
    else:
        usage()

    print "Fetching PR information ",
    print "(using GitHub token for user '%s': %s)... " % (github_user, ('no', 'yes')[bool(github_token)]),
    sys.stdout.flush()
    pr_data = fetch_pr_data(github, github_account, repository, pr)
    print ''

    #print_raw_pr_info(pr_data)

    print_pr_summary(pr_data)

    if selected_action[0] == 'comment':
        comment(github, github_user, repository, pr_data, selected_action[1])
    elif selected_action[0] == 'merge':
        merge(github, github_user, github_account, repository, pr_data, force=force)
    elif selected_action[0] == 'review':
        review(pr_data)
    elif selected_action[0] == 'test':
        test(pr_data, selected_action[1])
    else:
        error("Handling action '%s' not implemented yet" % selected_action[0])
Exemplo n.º 30
0
            "c",
        ),
        "index_name": ("Filename for the index/toc for the components.", None, "store", "mkdocs.yml", "i"),
        "remove_emails": ("Remove email addresses from generated md files.", None, "store_true", True, "r"),
        "remove_whitespace": ("Remove whitespace (\n\n\n) from md files.", None, "store_true", True, "w"),
        "remove_headers": (
            "Remove unneeded headers from files (MAINTAINER and AUTHOR).",
            None,
            "store_true",
            True,
            "R",
        ),
        "small_titles": ("Decrease the title size in the md files.", None, "store_true", True, "s"),
        "codify_paths": ("Put paths inside code tags.", None, "store_true", True, "p"),
    }
    GO = simple_option(OPTIONS)
    LOGGER.info("Starting main.")

    checkinputandcommands(GO.options.modules_location, GO.options.output_location, GO.options.maven_compile)

    if GO.options.maven_compile:
        LOGGER.info("Doing maven clean and compile.")
        mavencleancompile(GO.options.modules_location)
    else:
        LOGGER.info("Skipping maven clean and compile.")

    COMPS = listcomponents(GO.options.modules_location)
    PODS = listpods(GO.options.modules_location, COMPS)

    MDS = generatemds(PODS, GO.options.output_location)
    generatetoc(PODS, GO.options.output_location, GO.options.index_name)
Exemplo n.º 31
0
def main():
    """Main"""

    options = {
        'nagios': ('Report in nagios format', None, 'store_true', False, 'n'),
        'regex': ('Filter on regexp, data for first match', None, 'regex', None, 'r'),
        'allregex': ('Combined with --regex/-r, return all data', None, 'store_true', False, 'A'),
        'anystate': ('Matches any state (e.g. a down_on_error node will also be listed as error)',
                     None, 'store_true', False, 'a'),
        'down': ('Down nodes', None, 'store_true', False, 'D'),
        'downonerror': ('Down on error nodes', None, 'store_true', False, 'E'),
        'offline': ('Offline nodes', None, 'store_true', False, 'o'),
        'partial': ('Partial nodes (one or more running job(s), jobslot(s) available)', None, 'store_true', False, 'p'),
        'job-exclusive': ('Job-exclusive nodes (no jobslots available)', None, 'store_true', False, 'x'),
        'free': ('Free nodes (0 or more running jobs, jobslot(s) available)', None, 'store_true', False, 'f'),
        'unknown': ('State unknown nodes', None, 'store_true', False, 'u'),
        'bad': ('Bad nodes (broken jobregex)', None, 'store_true', False, 'b'),
        'error': ('Error nodes', None, 'store_true', False, 'e'),
        'idle': ('Idle nodes (No running jobs, jobslot(s) available)', None, 'store_true', False, 'i'),
        'singlenodeinfo': (('Single (most-frequent) node information in key=value format '
                            '(no combination with other options)'), None, 'store_true', False, 'I'),
        'reportnodeinfo': ('Report node information (no combination with other options)',
                           None, 'store_true', False, 'R'),
        'moab': ('Use moab information (mdiag -n)', None, 'store_true', False, 'm'),
        'moabxml': ('Use xml moab data from file (for testing)', None, 'store', None),
        'shorthost': ('Return (short) hostname', None, 'store_true', False, 's'),
        'invert': ('Return inverted selection', None, 'store_true', False, 'v'),
        }

    go = simple_option(options)

    if go.options.nagios and not go.options.debug:
        fancylogger.logToDevLog(enable=True)
        fancylogger.logToScreen(enable=False)
        fancylogger.setLogLevelInfo()

    all_states = ND_NAGIOS_CRITICAL + ND_NAGIOS_WARNING + ND_NAGIOS_OK
    report_states = []
    if go.options.down:
        report_states.append(ND_down)
    if go.options.downonerror:
        report_states.append(ND_down_on_error)
    if go.options.offline:
        report_states.append(ND_offline)
    if go.options.free:
        report_states.append(ND_free)
    if go.options.partial:
        report_states.append(ND_free_and_job)
    if go.options.job_exclusive:
        report_states.append(ND_job_exclusive)
    if go.options.unknown:
        report_states.append(ND_state_unknown)
    if go.options.bad:
        report_states.append(ND_bad)
    if go.options.error:
        report_states.append(ND_error)
    if go.options.idle:
        report_states.append(ND_idle)

    if len(report_states) == 0:
        report_states = all_states

    if go.options.singlenodeinfo or go.options.reportnodeinfo:
        nodeinfo = collect_nodeinfo()[2]
        if len(nodeinfo) == 0:
            _log.error('No nodeinfo found')
            sys.exit(1)

        ordered = sorted(nodeinfo.items(), key=lambda x: len(x[1]), reverse=True)

        if go.options.singlenodeinfo:
            if len(nodeinfo) > 1:
                msg = "Not all nodes have same parameters. Using most frequent ones."
                if go.options.reportnodeinfo:
                    _log.warning(msg)
                else:
                    _log.error(msg)

            # usage: export `./show_nodes -I` ; env |grep SHOWNODES_
            most_freq = ordered[0][0]
            msg = []
            msg.append("SHOWNODES_PPN=%d" % most_freq[0])
            msg.append("SHOWNODES_PHYSMEMMB=%d" % (most_freq[1] * 1024))
        else:
            msg = []
            for info, nodes in ordered:
                txt = "%d nodes with %d cores, %s MB physmem, %s GB swap and %s GB local disk" % (
                    len(nodes), info[0], info[1] * 1024, info[2], info[3])
                msg.append(txt)
                # print and _log are dumped to stdout at different moments, so repeat the txt in the debug log
                _log.debug("Found %s with matching nodes: %s" % (txt, nodes))

        print "\n".join(msg)
        sys.exit(0)

    if go.options.moab:

        if go.options.moabxml:
            try:
                moabxml = open(go.options.moabxml).read()
            except (OSError, IOError):
                _log.error('Failed to read moab xml from %s' % go.options.moabxml)
        else:
            moabxml = None
        nodes_dict = moab_get_nodes_dict(xml=moabxml)

        nodes = get_nodes(nodes_dict)
    else:
        nodes = get_nodes()

    nagiosexit = {
        NDNAG_WARNING: warning_exit,
        NDNAG_CRITICAL: critical_exit,
        NDNAG_OK: ok_exit,
    }

    nagios_res = {}
    detailed_res = {}
    nodes_found = []

    all_nodes = []

    for name, full_state in nodes:
        all_nodes.append(name)

        if go.options.regex and not go.options.regex.search(name):
            continue

        nagios_state = full_state['derived']['nagiosstate']
        if nagios_state not in nagios_res:
            nagios_res[nagios_state] = []

        state = full_state['derived']['state']
        states = full_state['derived']['states']

        if state == ND_free and ND_idle in states:
            state = ND_idle  # special case for idle
        if state not in detailed_res:
            detailed_res[state] = []

        if go.options.anystate:
            states_to_check = states
        else:
            states_to_check = [state]

        # filter the allowed states
        if any(x for x in states_to_check if x in report_states):
            nagios_res[nagios_state].append(states)
            detailed_res[state].append(states)
            nodes_found.append(name)

            if go.options.regex and not go.options.allregex:
                break

    if go.options.invert:
        nodes_found = [x for x in all_nodes if x not in nodes_found]

    if go.options.regex and not go.options.allregex:
        # there should only be one node
        nagios_state, all_states = nagios_res.items()[0]
        states = all_states[0]
        if go.options.nagios:
            msg = "show_nodes - %s" % ",".join(states)
            nagiosexit[nagios_state](msg)
        else:
            txt = "%s %s" % (nagios_state, ",".join(states))
            print txt
    else:
        if go.options.nagios:
            msg = NagiosResult('show_nodes')
            txt = []
            total = 0
            for state in all_states:
                if state in detailed_res:
                    nr = len(detailed_res[state])
                else:
                    nr = 0
                total += nr
                setattr(msg, state, nr)
            msg.total = total

            reported_state = [NDNAG_OK, '']
            if ND_bad in detailed_res:
                reported_state[0] = NDNAG_CRITICAL
                msg.message += ' - %s bad nodes' % (len(detailed_res[ND_bad]))
            nagiosexit[reported_state[0]](msg)
        else:
            # just print the nodes
            if go.options.shorthost:
                nodes_found = [x.split('.')[0] for x in nodes_found]
            print ' '.join(nodes_found)
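
The single-node-info path above picks the parameter tuple shared by the largest group of nodes; a small illustration with made-up data:

nodeinfo = {(16, 64): ['node1', 'node2', 'node3'], (8, 32): ['node4']}
ordered = sorted(nodeinfo.items(), key=lambda x: len(x[1]), reverse=True)
assert ordered[0][0] == (16, 64)  # the parameters shared by most nodes win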
Exemplo n.º 32
0
def main():
    # Collect all info

    # Note: debug option is provided by generaloption
    # Note: other settings, e.g. for each cluster, will be obtained from the configuration file
    options = {
        "nagios": ("print out nagion information", None, "store_true", False, "n"),
        "nagios_check_filename": (
            "filename of where the nagios check data is stored",
            str,
            "store",
            NAGIOS_CHECK_FILENAME,
        ),
        "nagios_check_interval_threshold": (
            "threshold of nagios checks timing out",
            None,
            "store",
            NAGIOS_CHECK_INTERVAL_THRESHOLD,
        ),
        "hosts": ("the hosts/clusters that should be contacted for job information", None, "extend", []),
        "information": ("the sort of information to store: user, vo, project", None, "store", "user"),
        "location": ("the location for storing the pickle file: gengar, muk", str, "store", "gengar"),
        "ha": ("high-availability master IP address", None, "store", None),
        "dry-run": ("do not make any updates whatsoever", None, "store_true", False),
    }

    opts = simple_option(options)

    if opts.options.debug:
        fancylogger.setLogLevelDebug()

    nagios_reporter = NagiosReporter(NAGIOS_HEADER, opts.options.nagios_check_filename,
                                     opts.options.nagios_check_interval_threshold)
    if opts.options.nagios:
        logger.debug("Producing Nagios report and exiting.")
        nagios_reporter.report_and_exit()
        sys.exit(0)  # not reached

    if not proceed_on_ha_service(opts.options.ha):
        logger.warning("Not running on the target host in the HA setup. Stopping.")
        nagios_reporter.cache(NAGIOS_EXIT_WARNING, NagiosResult("Not running on the HA master."))
        sys.exit(NAGIOS_EXIT_WARNING)

    lockfile = TimestampedPidLockfile(DSHOWQ_LOCK_FILE)
    lock_or_bork(lockfile, nagios_reporter)

    logger.info("starting dshowq run")

    clusters = {}
    for host in opts.options.hosts:
        master = opts.configfile_parser.get(host, "master")
        showq_path = opts.configfile_parser.get(host, "showq_path")
        clusters[host] = {"master": master, "path": showq_path}

    showq = Showq(clusters, cache_pickle=True, dry_run=opts.options.dry_run)

    (queue_information, reported_hosts, failed_hosts) = showq.get_moab_command_information()
    timeinfo = time.time()

    active_users = queue_information.keys()

    logger.debug("Active users: %s" % (active_users))
    logger.debug("Queue information: %s" % (queue_information))

    # We need to determine which users should get an updated pickle. This depends on
    # - the active user set
    # - the information we want to provide on the cluster(set) where this script runs
    # At the same time, we need to determine the job information each user gets to see
    (target_users, target_queue_information, user_map) = determine_target_information(
        opts.options.information, active_users, queue_information
    )

    nagios_user_count = 0
    nagios_no_store = 0

    LdapQuery(VscConfiguration())

    for user in target_users:
        if not opts.options.dry_run:
            try:
                (path, store) = get_pickle_path(opts.options.location, user)
                user_queue_information = target_queue_information[user]
                user_queue_information["timeinfo"] = timeinfo
                store(user, path, (user_queue_information, user_map[user]))
                nagios_user_count += 1
            except (UserStorageError, FileStoreError, FileMoveError), err:
                logger.error("Could not store pickle file for user %s" % (user))
                nagios_no_store += 1
        else:
            logger.info(
                "Dry run, not actually storing data for user %s at path %s"
                % (user, get_pickle_path(opts.options.location, user)[0])
            )
            logger.debug("Dry run, queue information for user %s is %s" % (user, target_queue_information[user]))
Exemplo n.º 33
0
def main():
    """ Start a new rsync client (destination or source) in a specified session """
    options = {
        # Zookeeper connection options:
        'servers'     : ('list of zk servers', 'strlist', 'store', None),
        'user'        : ('user with creation rights on zookeeper', None, 'store', 'root', 'u'),
        'passwd'      : ('password for user with creation rights', None, 'store', 'admin', 'p'),
        # Role options, define exactly one of these:
        'source'      : ('rsync source', None, 'store_true', False, 'S'),
        'destination' : ('rsync destination', None, 'store_true', False, 'D'),
        'pathsonly'   : ('Only do a test run of the pathlist building', None, 'store_true', False),
        'state'       : ('Only do the state', None, 'store_true', False),
        # Session options; should be the same on all clients of the session!
        'session'     : ('session name', None, 'store', 'default', 'N'),
        'netcat'      : ('run netcat test instead of rsync', None, 'store_true', False),
        'dryrun'      : ('run rsync in dry run mode', None, 'store_true', False, 'n'),
        'rsyncpath'   : ('rsync basepath', None, 'store', None, 'r'),  # May differ between sources and dests
        # Pathbuilding (Source clients and pathsonly) specific options:
        'rsubpaths'   : ('rsync subpaths, specified as <depth>_<path>, with deepest paths last', 'strlist', 'store', None),
        'excludere'   : ('Exclude from pathbuilding', None, 'regex', re.compile(r'/\.snapshots(/.*|$)')),
        'excl_usr'    : ('If set, exclude paths for this user only when using excludere', None, 'store', 'root'),
        'depth'       : ('queue depth', "int", 'store', 3),
        # Source clients options; should be the same on all clients of the session!:
        'delete'      : ('run rsync with --delete', None, 'store_true', False),
        'checksum'    : ('run rsync with --checksum', None, 'store_true', False),
        'hardlinks'   : ('run rsync with --hard-links', None, 'store_true', False),
        # Individual client options
        'verifypath'  : ('Check basepath exists while running', None, 'store_false', True),
        'daemon'      : ('daemonize client', None, 'store_true', False),
        'domain'      : ('substitute domain', None, 'store', None),
        'done-file'   : ('cachefile to write state to when done', None, 'store', None),
        'dropcache'   : ('run rsync with --drop-cache', None, 'store_true', False),
        'logfile'     : ('Output to logfile', None, 'store', '/tmp/zkrsync/%(session)s-%(rstype)s-%(pid)s.log'),
        'pidfile'     : ('Pidfile template', None, 'store', '/tmp/zkrsync/%(session)s-%(rstype)s-%(pid)s.pid'),
        'timeout'     : ('run rsync with --timeout TIMEOUT',  "int", 'store', 0),
        'verbose'     : ('run rsync with --verbose', None, 'store_true', False),
        # Individual Destination client specific options
        'rsyncport'   : ('force port on which rsyncd binds', "int", 'store', None),
        'startport'   : ('offset to look for rsyncd ports', "int", 'store', 4444),
        # Arbitrary rsync options: comma-separated list. Use a colon to separate keys and values
        'arbitopts'   : ('Arbitrary rsync source client long name options: comma-separated list. ' +
                             'Use a colon to separate keys and values. Beware: these keys/values are not checked.',
                             'strlist', 'store', None),

    }

    go = simple_option(options)
    acreds, admin_acl, rstype = zkrsync_parse(go.options)
    if go.options.logfile:
        init_logging(go.options.logfile, go.options.session, rstype)

    kwargs = {
        'session'     : go.options.session,
        'default_acl' : [admin_acl],
        'auth_data'   : acreds,
        'rsyncpath'   : go.options.rsyncpath,
        'netcat'      : go.options.netcat,
        'verifypath'  : go.options.verifypath,
        'dropcache'   : go.options.dropcache,
        }

    if go.options.daemon:
        pidfile = init_pidfile(go.options.pidfile, go.options.session, rstype)
        zkrsdaemon = ZkrsDaemon(pidfile, rstype, go.options, kwargs)
        zkrsdaemon.start()
    else:
        start_zkrs(rstype, go.options, kwargs)
Exemplo n.º 34
0
def main():
    # Collect all info

    # Note: debug option is provided by generaloption
    # Note: other settings, e.g. for each cluster, will be obtained from the configuration file
    options = {
        "nagios": ("print out nagios information", None, "store_true", False, "n"),
        "nagios_check_filename": (
            "filename of where the nagios check data is stored",
            str,
            "store",
            NAGIOS_CHECK_FILENAME,
        ),
        "nagios_check_interval_threshold": (
            "threshold of nagios checks timing out",
            None,
            "store",
            NAGIOS_CHECK_INTERVAL_THRESHOLD,
        ),
        "hosts": ("the hosts/clusters that should be contacted for job information", None, "extend", []),
        "location": ("the location for storing the pickle file: home, scratch", str, "store", "home"),
        "ha": ("high-availability master IP address", None, "store", None),
        "dry-run": ("do not make any updates whatsoever", None, "store_true", False),
    }

    opts = simple_option(options)

    if opts.options.debug:
        fancylogger.setLogLevelDebug()

    nagios_reporter = NagiosReporter(
        NAGIOS_HEADER, opts.options.nagios_check_filename, opts.options.nagios_check_interval_threshold
    )
    if opts.options.nagios:
        logger.debug("Producing Nagios report and exiting.")
        nagios_reporter.report_and_exit()
        sys.exit(0)  # not reached

    if not proceed_on_ha_service(opts.options.ha):
        logger.warning("Not running on the target host in the HA setup. Stopping.")
        nagios_reporter.cache(NAGIOS_EXIT_WARNING, NagiosResult("Not running on the HA master."))
        sys.exit(NAGIOS_EXIT_WARNING)

    lockfile = TimestampedPidLockfile(DCHECKJOB_LOCK_FILE)
    lock_or_bork(lockfile, nagios_reporter)

    logger.info("Starting dcheckjob")

    LdapQuery(VscConfiguration())

    clusters = {}
    for host in opts.options.hosts:
        master = opts.configfile_parser.get(host, "master")
        checkjob_path = opts.configfile_parser.get(host, "checkjob_path")
        clusters[host] = {"master": master, "path": checkjob_path}

    checkjob = Checkjob(clusters, cache_pickle=True, dry_run=True)  # note: dry_run is hardcoded here, unlike the --dry-run option used below

    (job_information, reported_hosts, failed_hosts) = checkjob.get_moab_command_information()
    timeinfo = time.time()

    active_users = job_information.keys()

    logger.debug("Active users: %s" % (active_users))
    logger.debug("Checkjob information: %s" % (job_information))

    nagios_user_count = 0
    nagios_no_store = 0

    for user in active_users:
        if not opts.options.dry_run:
            try:
                (path, store) = get_pickle_path(opts.options.location, user)
                user_queue_information = CheckjobInfo({user: job_information[user]})
                store(user, path, (timeinfo, user_queue_information))
                nagios_user_count += 1
            except (UserStorageError, FileStoreError, FileMoveError), _:
                logger.error("Could not store pickle file for user %s" % (user))
                nagios_no_store += 1
        else:
            logger.info(
                "Dry run, not actually storing data for user %s at path %s"
                % (user, get_pickle_path(opts.options.location, user)[0])
            )
            logger.debug("Dry run, queue information for user %s is %s" % (user, job_information[user]))
Exemplo n.º 35
0
            _log.error('Provided indices %s exceed available data items %s' % (indices, len(input)))
            sys.exit(1)

    ec, stdout = run_asyncloop(cmd=LOGSTASH_CMD+[cfg_file], input="\n".join(input + ['']))

    output = process(stdout, len(input))
    test(output, input, results)

if __name__ == '__main__':
    opts = {
        "last": ("Only test last data entry", None, "store_true", False, 'L'),
        "first": ("Only test first data entry", None, "store_true", False, 'F'),
        "entries": ("Indices of data entries to test", "strlist", "store", None, 'E'),
        "logstash-version": ("Logstash version to test with", None, "store", DEFAULT_LOGSTASH_VERSION, 'V'),
    }
    go = simple_option(opts)
    indices = None
    if go.options.first:
        indices = [0]
    elif go.options.last:
        indices = [-1]
    elif go.options.entries:
        indices = [int(x) for x in go.options.entries]

    _log = go.log


    cfg_name = 'logstash_%s.conf' % go.options.logstash_version
    cfg_file = os.path.join(os.path.dirname(os.getcwd()), 'tests', cfg_name)

    if not os.path.isfile(cfg_file):
Exemplo n.º 36
0
COMMON_PARAMS = {
    'ConfigureMake': ['configopts', 'buildopts', 'installopts'],
    # needs to be extended
}
DOC_FUNCTIONS = ['build_step', 'configure_step', 'install_step']

DEFAULT_EXAMPLE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'examples')
DEFAULT_MODULE = 'easybuild.easyblocks.generic'


options = {
    'out-file': ('Path to output file', 'string', 'store', None, 'o'),
    'examples': ('Path to dir that contains example files', 'string', 'store', DEFAULT_EXAMPLE_PATH, 'e'),
    'module': ('Name of module to load the easyblocks from', 'string', 'store', DEFAULT_MODULE, 'm')
}
so = simple_option(options)

config.init_build_options({'validate': False, 'external_modules_metadata': {}})

autogen_comment = [
    ".. This file is automatically generated using the %s script, " % os.path.basename(__file__),
    ".. and information and docstrings from easyblocks and the EasyBuild framework.",
    ".. Doo not edit this file manually, but update the docstrings and regenerate it.",
    '',
]

easyblocks_overview = gen_easyblocks_overview_rst(so.options.module, so.options.examples, COMMON_PARAMS, DOC_FUNCTIONS)

txt = '\n'.join(autogen_comment + easyblocks_overview)
if so.options.out_file:
    write_file(so.options.out_file, txt)
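# Hedged addition, not in the original snippet: without --out-file the generated
# text is silently dropped; printing it to stdout is a plausible fallback.
else:
    print txt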