Code Example #1
File: run.py | Project: ebagrenrut/easybuild-framework
    def test_run_cmd_log(self):
        """Test logging of executed commands."""
        fd, logfile = tempfile.mkstemp(suffix='.log', prefix='eb-test-')
        os.close(fd)

        regex = re.compile(
            'cmd "echo hello" exited with exit code [0-9]* and output:')

        # command output is not logged by default without debug logging
        init_logging(logfile, silent=True)
        self.assertTrue(run_cmd("echo hello"))
        stop_logging(logfile)
        self.assertEqual(len(regex.findall(read_file(logfile))), 0)
        write_file(logfile, '')

        init_logging(logfile, silent=True)
        self.assertTrue(run_cmd("echo hello", log_all=True))
        stop_logging(logfile)
        self.assertEqual(len(regex.findall(read_file(logfile))), 1)
        write_file(logfile, '')

        # with debugging enabled, exit code and output of command should only get logged once
        setLogLevelDebug()

        init_logging(logfile, silent=True)
        self.assertTrue(run_cmd("echo hello"))
        stop_logging(logfile)
        self.assertEqual(len(regex.findall(read_file(logfile))), 1)
        write_file(logfile, '')

        init_logging(logfile, silent=True)
        self.assertTrue(run_cmd("echo hello", log_all=True))
        stop_logging(logfile)
        self.assertEqual(len(regex.findall(read_file(logfile))), 1)
        write_file(logfile, '')
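
The pattern this test exercises boils down to a few fancylogger calls: attach a file handler, raise the log level to debug, and check what ends up in the file. A minimal standalone sketch, assuming only the public vsc-base fancylogger API (init_logging and run_cmd above are EasyBuild test helpers):

import os
import tempfile

from vsc.utils import fancylogger

fd, logfile = tempfile.mkstemp(suffix='.log')
os.close(fd)

fancylogger.logToFile(logfile)      # attach a file handler
fancylogger.setLogLevelDebug()      # the level controls how much gets logged

log = fancylogger.getLogger('example')
log.debug("only present in the log file because the level was raised to DEBUG")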
Code Example #2
File: fancylogger.py | Project: hpcugent/vsc-base
    def test_fancylogger_as_rootlogger_logging(self):
        """
        Test that, after installing fancylogger as the root logger,
        plain logging module calls are routed through fancylogger.
        """

        # test that logging.root is logging's root logger;
        # this is an assumption the fancyrootlogger code relies on
        orig_root = logging.getLogger()
        self.assertEqual(logging.root, orig_root,
                         msg='logging.root is the root logger')
        self.assertFalse(isinstance(logging.root, fancylogger.FancyLogger),
                         msg='logging.root is not a FancyLogger')


        stringfile = StringIO()
        sys.stderr = stringfile
        handler = fancylogger.logToScreen()
        fancylogger.setLogLevelDebug()
        logger = fancylogger.getLogger()

        self.assertEqual(logger.handlers, [self.handler, handler],
                         msg='active handler for root fancylogger')
        self.assertEqual(logger.level, fancylogger.getLevelInt('DEBUG'), msg='debug level set')

        msg = 'this is my string'
        logging.debug(msg)
        self.assertEqual(stringfile.getvalue(), '',
                         msg="logging.debug logs nothing before setroot, even with fancylogger at debug level")

        fancylogger.setroot()
        self.assertTrue(isinstance(logging.root, fancylogger.FancyLogger),
                         msg='logging.root is a FancyLogger after setroot')
        self.assertEqual(logging.root.level, fancylogger.getLevelInt('DEBUG'), msg='debug level set for root')
        self.assertEqual(logger.level, logging.NOTSET, msg='original root fancylogger level set to NOTSET')

        self.assertEqual(logging.root.handlers, [self.handler, handler],
                         msg='active handler for root logger from previous root fancylogger')
        self.assertEqual(logger.handlers, [], msg='no active handlers on previous root fancylogger')

        root_logger = logging.getLogger('')
        self.assertEqual(root_logger, logging.root,
                         msg="logging.getLogger('') returns the logging.root FancyLogger")

        frl = fancylogger.getLogger()
        self.assertEqual(frl, logging.root,
                         msg='fancylogger.getLogger() returns the logging.root FancyLogger')

        logging.debug(msg)
        self.assertTrue(msg in stringfile.getvalue(),
                        msg="logging.debug logs the message once fancylogger is the root logger")

        fancylogger.resetroot()
        self.assertEqual(logging.root, orig_root,
                         msg='logging.root is the original root logger after resetroot')

        # restore
        fancylogger.logToScreen(enable=False, handler=handler)
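
In application code, the setroot/resetroot sequence tested above reduces to a few lines. A minimal sketch, assuming the vsc.utils.fancylogger module path from vsc-base:

import logging

from vsc.utils import fancylogger

fancylogger.setLogLevelDebug()
fancylogger.setroot()      # install a FancyLogger as logging.root
logging.debug("handled by fancylogger's handlers from here on")
fancylogger.resetroot()    # restore the original root logger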
Code Example #3
File: cache.py | Project: stdweird/vsc-modules
def convert_lmod_cache_to_json():
    """Main conversion of Lmod lua cache to cluster and software mapping in JSON"""
    # reading the Lmod cache is extremely verbose, so enable debug logging only afterwards
    cachefile = os.path.join(get_lmod_conf()['dir'], CACHEFILENAME)
    mpathMapT, spiderT = get_lmod_cache(cachefile)

    setLogLevelDebug()
    clustermap, mpmap = cluster_maps(mpathMapT)
    softmap = software_map(spiderT, mpmap)
    write_json(clustermap, softmap)
Code Example #4
# assumed imports for this snippet; the logging names come from vsc-base's fancylogger
import os
import stat

from vsc.utils.fancylogger import disableDefaultHandlers, getLogger, logToFile, setLogLevelDebug


def make_worker_log(name, debug=False, logfn_name=None, disable_defaulthandlers=False):
    """Make a basic log object"""
    if logfn_name is None:
        logfn_name = name
    logfn = '/tmp/scoop_%s.log' % logfn_name

    if debug:
        setLogLevelDebug()

    logToFile(logfn, name=name)
    os.chmod(logfn, stat.S_IRUSR | stat.S_IWUSR)

    if disable_defaulthandlers:
        disableDefaultHandlers()

    _log = getLogger(name=name)

    return _log
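
A hypothetical usage of make_worker_log, assuming the function is importable from its defining module:

# creates /tmp/scoop_worker-1.log with owner-only permissions and debug logging enabled
log = make_worker_log('worker-1', debug=True)
log.debug("worker started")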
Code Example #5
parser.add_option("-l",
                  "--local",
                  action="store_true",
                  dest="local",
                  help="Use a local path, not on github.com (Default false)")

options, args = parser.parse_args()

# get and configure logger
log = fancylogger.getLogger(__name__)
if options.verbose == 1:
    fancylogger.setLogLevelWarning()
elif options.verbose == 2:
    fancylogger.setLogLevelInfo()
elif options.verbose >= 3:
    fancylogger.setLogLevelDebug()

if options.quiet:
    fancylogger.logToScreen(False)
else:
    fancylogger.logToScreen(True)

# other options
if not options.branch:
    options.branch = "develop"
if not options.username:
    options.username = "******"
if not options.repo:
    options.repo = "easybuild-easyconfigs"
if not options.path:
    options.path = "easybuild/easyconfigs"
Code Example #6
File: suite.py | Project: Spencerx/easybuild-framework
for test_fn in [fn, os.path.join(testdir, 'test')]:
    try:
        open(test_fn, 'w').write('test')
    except IOError as err:
        sys.stderr.write("ERROR: Can't write to temporary file %s, set $TMPDIR to a writeable directory (%s)\n" % (test_fn, err))
        sys.exit(1)
os.remove(fn)
shutil.rmtree(testdir)

# initialize logger for all the unit tests
fd, log_fn = tempfile.mkstemp(prefix='easybuild-tests-', suffix='.log')
os.close(fd)
os.remove(log_fn)
fancylogger.logToFile(log_fn)
log = fancylogger.getLogger()
setLogLevelDebug()

# call suite() for each module and then run them all
# note: make sure the options unit tests run first, to avoid running some of them with an already-initialized configuration
tests = [o, r, ef, ev, ebco, ep, e, mg, m, mt, f, run, a, robot, b, v, g, tcv, tc, t, c, s, l, f_c, sc]

SUITE = unittest.TestSuite([x.suite() for x in tests])

# uses XMLTestRunner if possible, so we can output an XML file that can be supplied to Jenkins
xml_msg = ""
try:
    import xmlrunner  # requires unittest-xml-reporting package
    xml_dir = 'test-reports'
    res = xmlrunner.XMLTestRunner(output=xml_dir, verbosity=1).run(SUITE)
    xml_msg = ", XML output of tests available in %s directory" % xml_dir
except ImportError as err:
    # assumed fallback (snippet is truncated here): run the suite with the standard text runner
    res = unittest.TextTestRunner(verbosity=1).run(SUITE)
Code Example #7
def main():
    # Core command line arguments
    cli_core = argparse.ArgumentParser(prog='accounting-report', add_help=False)
    cli_core.add_argument(
        '-v', '--version', action='version', version='%(prog)s from vsc-accounting-brussel v{}'.format(VERSION)
    )
    cli_core.add_argument(
        '-d', dest='debug', help='use debug log level', required=False, action='store_true'
    )
    cli_core.add_argument(
        '-i',
        dest='force_install',
        help='force (re)installation of any data files needed from package resources',
        required=False,
        action='store_true',
    )
    cli_core.add_argument(
        '-c',
        dest='config_file',
        help='path to configuration file (default: ~/.config/vsc-accounting/vsc-accounting.ini)',
        default='vsc-accounting.ini',
        required=False,
    )

    cli_core_args, cli_extra_args = cli_core.parse_known_args()

    # Debug level logs
    if cli_core_args.debug:
        fancylogger.setLogLevelDebug()
        logger.debug("Switched logging to debug verbosity")

    # Load configuration
    MainConf.load(cli_core_args.config_file)

    # Enforce (re)installation of data files
    if cli_core_args.force_install:
        dataparser.FORCE_INSTALL = True

    # Read nodegroup specs and default values
    try:
        nodegroups_spec = MainConf.get('nodegroups', 'specsheet')
        nodegroups_default = MainConf.get('nodegroups', 'default').split(',')
    except KeyError as err:
        error_exit(logger, err)
    else:
        nodegroups = DataFile(nodegroups_spec).contents

    # Reporting command line arguments
    cli = argparse.ArgumentParser(
        description='Generate accurate accounting reports about the computational resources used in an HPC cluster',
        parents=[cli_core],
    )
    cli.add_argument(
        '-s',
        dest='start_date',
        help='data retrieved from START_DATE [YYYY-MM-DD] at 00:00',
        required=True,
        type=valid_isodate,
    )
    cli.add_argument(
        '-e',
        dest='end_date',
        help='data retrieved until END_DATE [YYYY-MM-DD] at 00:00 (default: today)',
        default=date.today(),
        required=False,
        type=valid_isodate,
    )
    cli.add_argument(
        '-r',
        dest='resolution',
        help='time resolution of the accounting (default: day)',
        choices=['year', 'quarter', 'month', 'week', 'day'],
        default='day',
        required=False,
    )
    cli.add_argument(
        '-f',
        dest='report_format',
        help='format of the report document (default: SVG)',
        choices=['html', 'pdf', 'png', 'svg'],
        default='svg',
        required=False,
    )
    cli.add_argument(
        '-t', dest='csv', help='write report data table in a CSV file', required=False, action='store_true',
    )
    cli.add_argument(
        '-o',
        dest='output_dir',
        help='path to store output files (default: current working directory)',
        default=None,
        required=False,
        type=valid_dirpath,
    )
    cli.add_argument(
        '-u',
        dest="compute_units",
        help='compute time units (default: corehours)',
        choices=['corehours', 'coredays'],
        default='corehours',
        required=False,
    )
    cli.add_argument(
        '-n',
        dest='node_groups',
        help='node groups to include in the accounting report',
        choices=[*nodegroups],
        nargs='*',
        default=nodegroups_default,
        required=False,
    )
    cli.add_argument(
        'reports',
        help='accounting reports to generate',
        choices=[
            'compute-time',
            'compute-percent',
            'running-jobs',
            'unique-users',
            'peruser-compute',
            'peruser-percent',
            'peruser-jobs',
            'perfield-compute',
            'perfield-percent',
            'perfield-jobs',
            'persite-compute',
            'persite-percent',
            'persite-jobs',
            'top-users',
            'top-users-percent',
            'top-fields',
            'top-fields-percent',
            'top-sites',
            'top-sites-percent',
        ],
        nargs='+',
    )

    # Read command line arguments
    cli_args = cli.parse_args()

    # Set absolute path of output directory
    if cli_args.output_dir:
        basedir = os.path.abspath(os.path.expanduser(cli_args.output_dir))
    else:
        basedir = os.getcwd()
    logger.debug("Output directory set to: %s", basedir)

    # Convert time resolution to pandas DateOffset format
    pd_date_offsets = {'day': 'D', 'week': 'W-MON', 'month': 'MS', 'quarter': 'QS', 'year': 'AS'}
    date_offset = pd_date_offsets[cli_args.resolution]

    # Selection of node groups
    nodegroup_list = list(set(cli_args.node_groups))  # go through a set to remove duplicates

    # Account compute time on each node group in the requested period
    ComputeTime = ComputeTimeCount(
        cli_args.start_date, cli_args.end_date, date_offset, compute_units=cli_args.compute_units
    )

    for ng in nodegroup_list:
        logger.info("Processing jobs on %s nodes...", ng)
        ComputeTime.add_nodegroup(ng, nodegroups[ng]['cores'], nodegroups[ng]['hosts'])

    # Colors of each nodegroup
    plot_colors = {ng: nodegroups[ng]['color'] for ng in nodegroup_list}

    # Generate requested accounting reports
    report_save = [basedir, cli_args.report_format, cli_args.csv]
    report_generators = {
        'compute-time': (report.compute_time, [ComputeTime, plot_colors] + report_save),
        'compute-percent': (report.compute_percent, [ComputeTime, plot_colors] + report_save),
        'running-jobs': (report.global_measure, [ComputeTime, 'Running Jobs', plot_colors] + report_save),
        'unique-users': (report.global_measure, [ComputeTime, 'Unique Users', plot_colors] + report_save),
        'peruser-compute': (report.aggregates, [ComputeTime, 'User', 'Compute', False, plot_colors] + report_save),
        'peruser-percent': (report.aggregates, [ComputeTime, 'User', 'Compute', True, plot_colors] + report_save),
        'peruser-jobs': (report.aggregates, [ComputeTime, 'User', 'Jobs', False, plot_colors] + report_save),
        'perfield-compute': (report.aggregates, [ComputeTime, 'Field', 'Compute', False, plot_colors] + report_save),
        'perfield-percent': (report.aggregates, [ComputeTime, 'Field', 'Compute', True, plot_colors] + report_save),
        'perfield-jobs': (report.aggregates, [ComputeTime, 'Field', 'Jobs', False, plot_colors] + report_save),
        'persite-compute': (report.aggregates, [ComputeTime, 'Site', 'Compute', False, plot_colors] + report_save),
        'persite-percent': (report.aggregates, [ComputeTime, 'Site', 'Compute', True, plot_colors] + report_save),
        'persite-jobs': (report.aggregates, [ComputeTime, 'Site', 'Jobs', False, plot_colors] + report_save),
        'top-users': (report.top_users, [ComputeTime, False] + report_save),
        'top-users-percent': (report.top_users, [ComputeTime, True] + report_save),
        'top-fields': (report.top_fields, [ComputeTime, False] + report_save),
        'top-fields-percent': (report.top_fields, [ComputeTime, True] + report_save),
        'top-sites': (report.top_sites, [ComputeTime, False] + report_save),
        'top-sites-percent': (report.top_sites, [ComputeTime, True] + report_save),
    }

    for requested_report in cli_args.reports:
        report_generators[requested_report][0](*report_generators[requested_report][1])
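
The report_generators mapping above is a plain dispatch table: each report name maps to a (function, argument list) pair, so dispatching a request is a dict lookup instead of a long if/elif chain. The same pattern in miniature, with hypothetical names:

def greet(name):
    print("hello, %s" % name)

generators = {'greet': (greet, ['world'])}

func, args = generators['greet']   # look up the handler and its arguments
func(*args)                        # prints: hello, world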
Code Example #8
            "setpriority: prio not in allowed range MIN %s MAX %s" %
            (PRIO_MIN, PRIO_MAX))

    ec = _libc.setpriority(priority_which_t(which), id_t(who),
                           ctypes.c_int(prio))
    if ec == 0:
        _logger.debug("setpriority for which %s who %s prio %s" %
                      (which, who, prio))
    else:
        _logger.error("setpriority failed for which %s who %s prio %s" %
                      (which, who, prio))


if __name__ == '__main__':
    # some examples of usage
    setLogLevelDebug()

    cs = cpu_set_t()
    print "__bits", cs.__bits
    print "sizeof cpu_set_t", ctypes.sizeof(cs)
    x = sched_getaffinity()
    print "x", x
    hr_mask = "1-5,7,9,10-15"
    print hr_mask, x.convert_hr_bits(hr_mask)
    print x
    x.set_bits()
    print x

    sched_setaffinity(x)
    print sched_getaffinity()
Code Example #9
File: dshowq.py | Project: hpcugent/master-scripts
def main():
    # Collect all info

    # Note: the debug option is provided by generaloption
    # Note: other settings, e.g. for each cluster, will be obtained from the configuration file
    options = {
        "nagios": ("print out nagion information", None, "store_true", False, "n"),
        "nagios_check_filename": (
            "filename of where the nagios check data is stored",
            str,
            "store",
            NAGIOS_CHECK_FILENAME,
        ),
        "nagios_check_interval_threshold": (
            "threshold of nagios checks timing out",
            None,
            "store",
            NAGIOS_CHECK_INTERVAL_THRESHOLD,
        ),
        "hosts": ("the hosts/clusters that should be contacted for job information", None, "extend", []),
        "information": ("the sort of information to store: user, vo, project", None, "store", "user"),
        "location": ("the location for storing the pickle file: gengar, muk", str, "store", "gengar"),
        "ha": ("high-availability master IP address", None, "store", None),
        "dry-run": ("do not make any updates whatsoever", None, "store_true", False),
    }

    opts = simple_option(options)

    if opts.options.debug:
        fancylogger.setLogLevelDebug()

    nagios_reporter = NagiosReporter(NAGIOS_HEADER, NAGIOS_CHECK_FILENAME, NAGIOS_CHECK_INTERVAL_THRESHOLD)
    if opts.options.nagios:
        logger.debug("Producing Nagios report and exiting.")
        nagios_reporter.report_and_exit()
        sys.exit(0)  # not reached

    if not proceed_on_ha_service(opts.options.ha):
        logger.warning("Not running on the target host in the HA setup. Stopping.")
        nagios_reporter.cache(NAGIOS_EXIT_WARNING, NagiosResult("Not running on the HA master."))
        sys.exit(NAGIOS_EXIT_WARNING)

    lockfile = TimestampedPidLockfile(DSHOWQ_LOCK_FILE)
    lock_or_bork(lockfile, nagios_reporter)

    logger.info("starting dshowq run")

    clusters = {}
    for host in opts.options.hosts:
        master = opts.configfile_parser.get(host, "master")
        showq_path = opts.configfile_parser.get(host, "showq_path")
        clusters[host] = {"master": master, "path": showq_path}

    showq = Showq(clusters, cache_pickle=True, dry_run=opts.options.dry_run)

    (queue_information, reported_hosts, failed_hosts) = showq.get_moab_command_information()
    timeinfo = time.time()

    active_users = queue_information.keys()

    logger.debug("Active users: %s" % (active_users))
    logger.debug("Queue information: %s" % (queue_information))

    # We need to determine which users should get an updated pickle. This depends on
    # - the active user set
    # - the information we want to provide on the cluster(set) where this script runs
    # At the same time, we need to determine the job information each user gets to see
    (target_users, target_queue_information, user_map) = determine_target_information(
        opts.options.information, active_users, queue_information
    )

    nagios_user_count = 0
    nagios_no_store = 0

    LdapQuery(VscConfiguration())

    for user in target_users:
        if not opts.options.dry_run:
            try:
                (path, store) = get_pickle_path(opts.options.location, user)
                user_queue_information = target_queue_information[user]
                user_queue_information["timeinfo"] = timeinfo
                store(user, path, (user_queue_information, user_map[user]))
                nagios_user_count += 1
            except (UserStorageError, FileStoreError, FileMoveError) as err:
                logger.error("Could not store pickle file for user %s" % (user))
                nagios_no_store += 1
        else:
            logger.info(
                "Dry run, not actually storing data for user %s at path %s"
                % (user, get_pickle_path(opts.options.location, user)[0])
            )
            logger.debug("Dry run, queue information for user %s is %s" % (user, target_queue_information[user]))
Code Example #10
File: dcheckjob.py | Project: hpcugent/master-scripts
def main():
    # Collect all info

    # Note: the debug option is provided by generaloption
    # Note: other settings, e.g. for each cluster, will be obtained from the configuration file
    options = {
        "nagios": ("print out nagios information", None, "store_true", False, "n"),
        "nagios_check_filename": (
            "filename of where the nagios check data is stored",
            str,
            "store",
            NAGIOS_CHECK_FILENAME,
        ),
        "nagios_check_interval_threshold": (
            "threshold of nagios checks timing out",
            None,
            "store",
            NAGIOS_CHECK_INTERVAL_THRESHOLD,
        ),
        "hosts": ("the hosts/clusters that should be contacted for job information", None, "extend", []),
        "location": ("the location for storing the pickle file: home, scratch", str, "store", "home"),
        "ha": ("high-availability master IP address", None, "store", None),
        "dry-run": ("do not make any updates whatsoever", None, "store_true", False),
    }

    opts = simple_option(options)

    if opts.options.debug:
        fancylogger.setLogLevelDebug()

    nagios_reporter = NagiosReporter(
        NAGIOS_HEADER, opts.options.nagios_check_filename, opts.options.nagios_check_interval_threshold
    )
    if opts.options.nagios:
        logger.debug("Producing Nagios report and exiting.")
        nagios_reporter.report_and_exit()
        sys.exit(0)  # not reached

    if not proceed_on_ha_service(opts.options.ha):
        logger.warning("Not running on the target host in the HA setup. Stopping.")
        nagios_reporter.cache(NAGIOS_EXIT_WARNING, NagiosResult("Not running on the HA master."))
        sys.exit(NAGIOS_EXIT_WARNING)

    lockfile = TimestampedPidLockfile(DCHECKJOB_LOCK_FILE)
    lock_or_bork(lockfile, nagios_reporter)

    logger.info("Starting dcheckjob")

    LdapQuery(VscConfiguration())

    clusters = {}
    for host in opts.options.hosts:
        master = opts.configfile_parser.get(host, "master")
        checkjob_path = opts.configfile_parser.get(host, "checkjob_path")
        clusters[host] = {"master": master, "path": checkjob_path}

    checkjob = Checkjob(clusters, cache_pickle=True, dry_run=True)

    (job_information, reported_hosts, failed_hosts) = checkjob.get_moab_command_information()
    timeinfo = time.time()

    active_users = job_information.keys()

    logger.debug("Active users: %s" % (active_users))
    logger.debug("Checkjob information: %s" % (job_information))

    nagios_user_count = 0
    nagios_no_store = 0

    for user in active_users:
        if not opts.options.dry_run:
            try:
                (path, store) = get_pickle_path(opts.options.location, user)
                user_queue_information = CheckjobInfo({user: job_information[user]})
                store(user, path, (timeinfo, user_queue_information))
                nagios_user_count += 1
            except (UserStorageError, FileStoreError, FileMoveError):
                logger.error("Could not store pickle file for user %s" % (user))
                nagios_no_store += 1
        else:
            logger.info(
                "Dry run, not actually storing data for user %s at path %s"
                % (user, get_pickle_path(opts.options.location, user)[0])
            )
            logger.debug("Dry run, queue information for user %s is %s" % (user, job_information[user]))