Code example #1
 def test_load_classads(self):
     self.new_ads_verify(classad.parseAds(open("tests/test_multiple.ad")))
     self.new_ads_verify(
         classad.parseAds(open("tests/test_multiple.ad").read()))
     self.old_ads_verify(
         classad.parseOldAds(open("tests/test_multiple.old.ad")))
     self.old_ads_verify(
         classad.parseOldAds(open("tests/test_multiple.old.ad").read()))
Code example #2
File: classad_tests.py Project: blueskyll/condor
 def test_load_classads(self):
     self.new_ads_verify(classad.parseAds(open("tests/test_multiple.ad")))
     self.new_ads_verify(classad.parseAds(open("tests/test_multiple.ad").read()))
     with warnings.catch_warnings():
         warnings.simplefilter("ignore")
         self.old_ads_verify(classad.parseOldAds(open("tests/test_multiple.old.ad")))
         self.old_ads_verify(classad.parseOldAds(open("tests/test_multiple.old.ad").read()))
     self.old_ads_verify(classad.parseAds(open("tests/test_multiple.old.ad")))
     self.old_ads_verify(classad.parseAds(open("tests/test_multiple.old.ad").read()))
Code example #3
 def test_load_classads(self):
     self.new_ads_verify(classad.parseAds(open("tests/test_multiple.ad")))
     self.new_ads_verify(
         classad.parseAds(open("tests/test_multiple.ad").read()))
     with warnings.catch_warnings():
         warnings.simplefilter("ignore")
         self.old_ads_verify(
             classad.parseOldAds(open("tests/test_multiple.old.ad")))
         self.old_ads_verify(
             classad.parseOldAds(open("tests/test_multiple.old.ad").read()))
     self.old_ads_verify(
         classad.parseAds(open("tests/test_multiple.old.ad")))
     self.old_ads_verify(
         classad.parseAds(open("tests/test_multiple.old.ad").read()))
Code example #4
def get_provider_ad(provider, key_path):
    '''
    Return the ClassAd for the given OAuth provider, looked up by
    provider name in the key file at key_path.
    '''

    if not os.path.exists(key_path):
        raise Exception("Key file {0} doesn't exist".format(key_path))

    try:
        with open(key_path, 'r') as key_file:
            for ad in classad.parseAds(key_file):
                ad_provider = get_provider_str(ad['Provider'],
                                               ad.get('Handle', ''))
                if ad_provider == provider:
                    break
            else:  # for/else: reached only if no ad matched (loop ended without break)
                raise Exception("Provider {0} not in key file {1}".format(
                    provider, key_path))
    except IOError as ie:
        sys.stderr.write("Failed to open key file {0}: {1}\n".format(
            key_path, str(ie)))
        raise

    return ad
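A hedged usage sketch of the function above; the provider name and key path are illustrative only (get_provider_str is defined elsewhere in the credmon code):

# Hypothetical call: look up the ad for an assumed 'github' provider.
ad = get_provider_ad('github', '/var/lib/condor/oauth_credentials/a1b2c3d4')
print(ad['Provider'], ad.get('Handle', ''))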
Code example #5
def adjustMaxRetries(adjustJobIds, ad):
    """
    Edit the DAG file, raising the maximum allowed number of retries to the current
    retry count + CRAB_NumAutomJobRetries for the jobs listed in the adjustJobIds
    argument (or for all jobs if adjustJobIds is True). Raising the maximum allowed
    number of retries is a necessary condition for a job to be resubmitted.
    """
    printLog("Adjusting retries for job ids: {0}".format(adjustJobIds))
    if not adjustJobIds:
        return
    if not os.path.exists("RunJobs.dag"):
        return
    ## Get the latest retry count of each DAG node from the node status file.
    retriesDict = {}
    filenames = getGlob(ad, "node_state", "node_state.[1-9]*")
    for fn in filenames:
        with open(fn, 'r') as fd:
            for nodeStatusAd in classad.parseAds(fd):
                if nodeStatusAd['Type'] != "NodeStatus":
                    continue
                node = nodeStatusAd.get('Node', '')
                if (not node.startswith("Job")) or node.endswith("SubJobs"):
                    continue
                jobId = node[3:]
                retriesDict[jobId] = int(nodeStatusAd.get('RetryCount', -1))
    ## Search for the RETRY directives in the DAG file for the job ids passed in the
    ## resubmitJobIds argument and change the maximum retries to the current retry
    ## count + CRAB_NumAutomJobRetries.
    retry_re = re.compile(r'RETRY Job(\d+(?:-\d+)?) (\d+) ')  # e.g. "RETRY Job5 10 " or "RETRY Job1-3 10 "
    adjustAll = (adjustJobIds is True)
    numAutomJobRetries = int(ad.get('CRAB_NumAutomJobRetries', 2))
    filenames = getGlob(ad, "RunJobs.dag", "RunJobs[1-9]*.subdag")
    for fn in filenames:
        output = ""
        with open(fn, 'r') as fd:
            for line in fd.readlines():
                match_retry_re = retry_re.search(line)
                if match_retry_re:
                    jobId = match_retry_re.groups()[0]
                    if adjustAll or (jobId in adjustJobIds):
                        if jobId in retriesDict and retriesDict[jobId] != -1:
                            lastRetry = retriesDict[jobId]
                            ## The 1 is to account for the resubmission itself; then, if the job fails, we
                            ## allow up to numAutomJobRetries automatic retries from DAGMan.
                            maxRetries = lastRetry + (1 + numAutomJobRetries)
                        else:
                            try:
                                maxRetries = int(
                                    match_retry_re.groups()[1]) + (
                                        1 + numAutomJobRetries)
                            except ValueError:
                                maxRetries = numAutomJobRetries
                        printLog("Adjusted maxRetries for {0} to {1}".format(
                            jobId, maxRetries))
                        line = retry_re.sub(
                            r'RETRY Job%s %d ' % (jobId, maxRetries), line)
                output += line
        with open(fn, 'w') as fd:
            fd.write(output)
Code example #6
File: AdjustSites.py Project: belforte/CRABServer
def adjustMaxRetries(adjustJobIds, ad):
    """
    Edit the DAG file, raising the maximum allowed number of retries to the current
    retry count + CRAB_NumAutomJobRetries for the jobs listed in the adjustJobIds
    argument (or for all jobs if adjustJobIds is True). Raising the maximum allowed
    number of retries is a necessary condition for a job to be resubmitted.
    """
    printLog("Adjusting retries for job ids: {0}".format(adjustJobIds))
    if not adjustJobIds:
        return
    if not os.path.exists("RunJobs.dag"):
        return
    ## Get the latest retry count of each DAG node from the node status file.
    retriesDict = {}
    filenames = getGlob(ad, "node_state", "node_state.[1-9]*")
    for fn in filenames:
        with open(fn, 'r') as fd:
            for nodeStatusAd in classad.parseAds(fd):
                if nodeStatusAd['Type'] != "NodeStatus":
                    continue
                node = nodeStatusAd.get('Node', '')
                if (not node.startswith("Job")) or node.endswith("SubJobs"):
                    continue
                jobId = node[3:]
                retriesDict[jobId] = int(nodeStatusAd.get('RetryCount', -1))
    ## Search for the RETRY directives in the DAG file for the job ids passed in the
    ## resubmitJobIds argument and change the maximum retries to the current retry
    ## count + CRAB_NumAutomJobRetries.
    retry_re = re.compile(r'RETRY Job(\d+(?:-\d+)?) (\d+) ')  # e.g. "RETRY Job5 10 " or "RETRY Job1-3 10 "
    adjustAll = (adjustJobIds is True)
    numAutomJobRetries = int(ad.get('CRAB_NumAutomJobRetries', 2))
    filenames = getGlob(ad, "RunJobs.dag", "RunJobs[1-9]*.subdag")
    for fn in filenames:
        output = ""
        with open(fn, 'r') as fd:
            for line in fd.readlines():
                match_retry_re = retry_re.search(line)
                if match_retry_re:
                    jobId = match_retry_re.groups()[0]
                    if adjustAll or (jobId in adjustJobIds):
                        if jobId in retriesDict and retriesDict[jobId] != -1:
                            lastRetry = retriesDict[jobId]
                            ## The 1 is to account for the resubmission itself; then, if the job fails, we
                            ## allow up to numAutomJobRetries automatic retries from DAGMan.
                            maxRetries = lastRetry + (1 + numAutomJobRetries)
                        else:
                            try:
                                maxRetries = int(match_retry_re.groups()[1]) + (1 + numAutomJobRetries)
                            except ValueError:
                                maxRetries = numAutomJobRetries
                        printLog("Adjusted maxRetries for {0} to {1}".format(jobId, maxRetries))
                        line = retry_re.sub(r'RETRY Job%s %d ' % (jobId, maxRetries), line)
                output += line
        with open(fn, 'w') as fd:
            fd.write(output)
Code example #7
 def parseNodeStateV2(cls, fp, nodes):
     """
     HTCondor 8.1.6 updated the node state file to be classad-based.
     This is a more flexible format that allows future extensions but, unfortunately,
     also requires a separate parser.
     """
     taskStatus = nodes.setdefault("DagStatus", {})
     for ad in classad.parseAds(fp):
         if ad['Type'] == "DagStatus":
             taskStatus['Timestamp'] = ad.get('Timestamp', -1)
             taskStatus['NodesTotal'] = ad.get('NodesTotal', -1)
             taskStatus['DagStatus'] = ad.get('DagStatus', -1)
             continue
         if ad['Type'] != "NodeStatus":
             continue
         node = ad.get("Node", "")
         if not node.startswith("Job"):
             continue
         nodeid = node[3:]
         status = ad.get('NodeStatus', -1)
         retry = ad.get('RetryCount', -1)
         msg = ad.get("StatusDetails", "")
         if status == 1:  # STATUS_READY
             info = nodes.setdefault(nodeid, {})
             if info.get("State") == "transferring":
                 info["State"] = "cooloff"
             elif info.get('State') != "cooloff":
                 info['State'] = 'unsubmitted'
         elif status == 2:  # STATUS_PRERUN
             info = nodes.setdefault(nodeid, {})
             if retry == 0:
                 info['State'] = 'unsubmitted'
             else:
                 info['State'] = 'cooloff'
         elif status == 3:  # STATUS_SUBMITTED
             info = nodes.setdefault(nodeid, {})
             if msg == 'not_idle':
                 info.setdefault('State', 'running')
             else:
                 info.setdefault('State', 'idle')
         elif status == 4:  # STATUS_POSTRUN
             info = nodes.setdefault(nodeid, {})
             if info.get("State") != "cooloff":
                 info['State'] = 'transferring'
         elif status == 5:  # STATUS_DONE
             info = nodes.setdefault(nodeid, {})
             info['State'] = 'finished'
         elif status == 6:  # STATUS_ERROR
             info = nodes.setdefault(nodeid, {})
             # Older versions of HTCondor would put jobs into STATUS_ERROR
             # for a short time if the job was to be retried.  Hence, we had
             # some status parsing logic to try and guess whether the job would
             # be tried again in the near future.  This behavior is no longer
             # observed; STATUS_ERROR is terminal.
             info['State'] = 'failed'
Code example #8
 def parseNodeStateV2(cls, fp, nodes):
     """
     HTCondor 8.1.6 updated the node state file to be classad-based.
     This is a more flexible format that allows future extensions but, unfortunately,
     also requires a separate parser.
     """
     taskStatus = nodes.setdefault("DagStatus", {})
     for ad in classad.parseAds(fp):
         if ad['Type'] == "DagStatus":
             taskStatus['Timestamp'] = ad.get('Timestamp', -1)
             taskStatus['NodesTotal'] = ad.get('NodesTotal', -1)
             taskStatus['DagStatus'] = ad.get('DagStatus', -1)
             continue
         if ad['Type'] != "NodeStatus":
             continue
         node = ad.get("Node", "")
         if not node.startswith("Job"):
             continue
         nodeid = node[3:]
         status = ad.get('NodeStatus', -1)
         retry = ad.get('RetryCount', -1)
         msg = ad.get("StatusDetails", "")
         if status == 1: # STATUS_READY
             info = nodes.setdefault(nodeid, {})
             if info.get("State") == "transferring":
                 info["State"] = "cooloff"
             elif info.get('State') != "cooloff":
                 info['State'] = 'unsubmitted'
         elif status == 2: # STATUS_PRERUN
             info = nodes.setdefault(nodeid, {})
             if retry == 0:
                 info['State'] = 'unsubmitted'
             else:
                 info['State'] = 'cooloff'
         elif status == 3: # STATUS_SUBMITTED
             info = nodes.setdefault(nodeid, {})
             if msg == 'not_idle':
                 info.setdefault('State', 'running')
             else:
                 info.setdefault('State', 'idle')
         elif status == 4: # STATUS_POSTRUN
             info = nodes.setdefault(nodeid, {})
             if info.get("State") != "cooloff":
                 info['State'] = 'transferring'
         elif status == 5: # STATUS_DONE
             info = nodes.setdefault(nodeid, {})
             info['State'] = 'finished'
         elif status == 6: # STATUS_ERROR
             info = nodes.setdefault(nodeid, {})
             # Older versions of HTCondor would put jobs into STATUS_ERROR
             # for a short time if the job was to be retried.  Hence, we had
             # some status parsing logic to try and guess whether the job would
             # be tried again in the near future.  This behavior is no longer
             # observed; STATUS_ERROR is terminal.
             info['State'] = 'failed'
Code example #9
File: cache_status.py Project: emaszs/CRABServer
def parseNodeStateV2(fp, nodes):
    """
    HTCondor 8.1.6 updated the node state file to be classad-based.
    This is a more flexible format that allows future extensions but, unfortunately,
    also requires a separate parser.
    """
    taskStatus = nodes.setdefault("DagStatus", {})
    for ad in classad.parseAds(fp):
        if ad["Type"] == "DagStatus":
            taskStatus["Timestamp"] = ad.get("Timestamp", -1)
            taskStatus["NodesTotal"] = ad.get("NodesTotal", -1)
            taskStatus["DagStatus"] = ad.get("DagStatus", -1)
            continue
        if ad["Type"] != "NodeStatus":
            continue
        node = ad.get("Node", "")
        if not node.startswith("Job") or node.endswith("SubJobs"):
            continue
        nodeid = node[3:]
        status = ad.get("NodeStatus", -1)
        retry = ad.get("RetryCount", -1)
        msg = ad.get("StatusDetails", "")
        # use a copy: setdefault would otherwise hand every node the *same*
        # NODE_DEFAULTS dict (the later revisions below use copy.deepcopy too)
        info = nodes.setdefault(nodeid, copy.deepcopy(NODE_DEFAULTS))
        if status == 1:  # STATUS_READY
            if info.get("State") == "transferring":
                info["State"] = "cooloff"
            elif info.get("State") != "cooloff":
                info["State"] = "unsubmitted"
        elif status == 2:  # STATUS_PRERUN
            if retry == 0:
                info["State"] = "unsubmitted"
            else:
                info["State"] = "cooloff"
        elif status == 3:  # STATUS_SUBMITTED
            if msg == "not_idle":
                info.setdefault("State", "running")
            else:
                info.setdefault("State", "idle")
        elif status == 4:  # STATUS_POSTRUN
            if info.get("State") != "cooloff":
                info["State"] = "transferring"
        elif status == 5:  # STATUS_DONE
            info["State"] = "finished"
        elif status == 6:  # STATUS_ERROR
            # Older versions of HTCondor would put jobs into STATUS_ERROR
            # for a short time if the job was to be retried.  Hence, we had
            # some status parsing logic to try and guess whether the job would
            # be tried again in the near future.  This behavior is no longer
            # observed; STATUS_ERROR is terminal.
            if info["State"] != "killed":
                info["State"] = "failed"
Code example #10
File: AdjustSites.py Project: dciangot/CRABServer
def adjustMaxRetries(adjustJobIds, ad):
    """
    Edit the DAG file, raising the maximum allowed number of retries to the current
    retry count + CRAB_NumAutomJobRetries for the jobs listed in the adjustJobIds
    argument (or for all jobs if adjustJobIds is True). Raising the maximum allowed
    number of retries is a necessary condition for a job to be resubmitted.
    """
    if not adjustJobIds:
        return
    if not os.path.exists("RunJobs.dag"):
        return
    ## Get the latest retry count of each DAG node from the node status file.
    retriesDict = {}
    if os.path.exists("node_state"):
        with open("node_state", "r") as fd:
            for nodeStatusAd in classad.parseAds(fd):
                if nodeStatusAd["Type"] != "NodeStatus":
                    continue
                node = nodeStatusAd.get("Node", "")
                if not node.startswith("Job"):
                    continue
                jobId = node[3:]
                retriesDict[jobId] = int(nodeStatusAd.get("RetryCount", -1))
    ## Search for the RETRY directives in the DAG file for the job ids passed in the
    ## resubmitJobIds argument and change the maximum retries to the current retry
    ## count + CRAB_NumAutomJobRetries.
    retry_re = re.compile(r"RETRY Job([0-9]+) ([0-9]+) ")  # e.g. "RETRY Job5 10 "
    output = ""
    adjustAll = (adjustJobIds is True)
    numAutomJobRetries = int(ad.get("CRAB_NumAutomJobRetries", 2))
    with open("RunJobs.dag", "r") as fd:
        for line in fd.readlines():
            match_retry_re = retry_re.search(line)
            if match_retry_re:
                jobId = match_retry_re.groups()[0]
                if adjustAll or (jobId in adjustJobIds):
                    if jobId in retriesDict and retriesDict[jobId] != -1:
                        lastRetry = retriesDict[jobId]
                        ## The 1 is to account for the resubmission itself; then, if the job fails, we
                        ## allow up to numAutomJobRetries automatic retries from DAGMan.
                        maxRetries = lastRetry + (1 + numAutomJobRetries)
                    else:
                        try:
                            maxRetries = int(match_retry_re.groups()[1]) + (1 + numAutomJobRetries)
                        except ValueError:
                            maxRetries = numAutomJobRetries
                    line = retry_re.sub(r"RETRY Job%s %d " % (jobId, maxRetries), line)
            output += line
    with open("RunJobs.dag", "w") as fd:
        fd.write(output)
Code example #11
File: classad_tests.py Project: blueskyll/condor
 def test_parse_iter(self):  # Python 2 flavor of this test; a Python 3 port appears below
     tf = tempfile.TemporaryFile()
     tf.write("[foo = 1] [bar = 2]")
     tf.seek(0)
     ad_iter = classad.parseAds(tf)
     ad = ad_iter.next()
     self.assertEqual(len(ad), 1)
     self.assertEqual(ad["foo"], 1)
     self.assertEqual(" [bar = 2]", tf.read())
     tf = tempfile.TemporaryFile()
     tf.write("-----\nfoo = 1\n\nbar = 2\n")
     tf.seek(0)
     with warnings.catch_warnings():
         warnings.simplefilter("ignore")
         ad_iter = classad.parseOldAds(tf)
     ad = ad_iter.next()
     self.assertEqual(len(ad), 1)
     self.assertEqual(ad["foo"], 1)
     self.assertEqual("bar = 2\n", tf.read())
Code example #12
def key(key):
    # get the key id from the URL and validate it ($-anchored so trailing junk is rejected)
    if not re.match("[0-9a-fA-F]+$", key):
        raise Exception("Key id {0} is not a valid key".format(key))

    # check for the key file in the credmon directory
    cred_dir = get_cred_dir()
    key_path = os.path.join(cred_dir, key)
    if not os.path.exists(key_path):
        raise Exception("Key file {0} doesn't exist".format(key_path))

    # read in the key file, which is a list of classads
    providers = {}
    session['logged_in'] = False
    print('Creating new session from {0}'.format(key_path))
    with open(key_path, 'r') as key_file:

        # store the path to the key file since it will be accessed later in the session
        session['key_path'] = key_path

        # initialize data for each token provider
        for provider_ad in classad.parseAds(key_file):
            provider = get_provider_str(provider_ad['Provider'],
                                        provider_ad.get('Handle', ''))
            providers[provider] = {}
            providers[provider]['logged_in'] = False
            if 'Scopes' in provider_ad:
                providers[provider]['requested_scopes'] = [
                    s.rstrip().lstrip()
                    for s in provider_ad['Scopes'].split(',')
                ]
            if 'Audience' in provider_ad:
                providers[provider]['requested_resource'] = provider_ad[
                    'Audience']

        # the local username is global to the session, just grab it from the last classad
        session['local_username'] = provider_ad['LocalUser']

    session['providers'] = providers
    print('New session started for user {0}'.format(session['local_username']))
    return render_template('index.html')
Code example #13
 def test_parse_iter(self):
     tf = tempfile.TemporaryFile()
     tf.write(b"[foo = 1] [bar = 2]")
     tf.seek(0)
     if sys.version_info > (3, ):
         # reopen the binary temp file in text mode via its fd, keeping a
         # reference (tf_) so the original object is not closed prematurely
         tf, tf_ = open(tf.fileno()), tf
     ad_iter = classad.parseAds(tf)
     ad = next(ad_iter)
     self.assertEqual(len(ad), 1)
     self.assertEqual(ad["foo"], 1)
     self.assertEqual(" [bar = 2]", tf.read())
     tf = tempfile.TemporaryFile()
     tf.write(b"-----\nfoo = 1\n\nbar = 2\n")
     tf.seek(0)
     if sys.version_info > (3, ):
         # same fd-reopen trick as above, for the old-syntax input
         tf, tf_ = open(tf.fileno()), tf
     with warnings.catch_warnings():
         warnings.simplefilter("ignore")
         ad_iter = classad.parseOldAds(tf)
     ad = next(ad_iter)
     self.assertEqual(len(ad), 1)
     self.assertEqual(ad["foo"], 1)
     self.assertEqual("bar = 2\n", tf.read())
Code example #14

if __name__ == '__main__':

    # Start by parsing input arguments
    try:
        args = parse_args()
    except Exception:
        sys.exit(-1)

    gluster = GlusterPlugin()

    # Parse in the classads stored in the input file.
    # Each ad represents a single file to be transferred.
    try:
        infile_ads = classad.parseAds(open(args['infile'], 'r'))
    except Exception as err:
        try:
            with open(args['outfile'], 'w') as outfile:
                outfile_dict = get_error_dict(err)
                outfile.write(str(classad.ClassAd(outfile_dict)))
        except Exception:
            pass
        sys.exit(-1)

    # Now iterate over the list of classads and perform the transfers.
    try:
        with open(args['outfile'], 'w') as outfile:
            for ad in infile_ads:
                try:
                    if not args['upload']:
Code example #15
def test_parse_ads_from_string(ad_string):
    ads = list(classad.parseAds(ad_string))

    assert ads[0]["foo"] == "bar"
    assert ads[1]["foo"] == "wiz"
Code example #16
def test_parse_ads_from_file_like_object(ad_file):
    ads = list(classad.parseAds(ad_file.open(mode="r")))

    assert ads[0]["foo"] == "bar"
    assert ads[1]["foo"] == "wiz"
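Both tests above depend on pytest fixtures that the excerpt omits. A minimal sketch of what they might look like, assuming two ads whose foo attributes are "bar" and "wiz" (to match the assertions) and pytest's built-in tmp_path fixture:

import pytest

ADS = '[foo = "bar"] [foo = "wiz"]'  # assumed fixture content, matching the assertions

@pytest.fixture
def ad_string():
    # raw new-ClassAd text handed straight to classad.parseAds
    return ADS

@pytest.fixture
def ad_file(tmp_path):
    # returns a pathlib.Path, so the test can call ad_file.open(mode="r")
    path = tmp_path / "ads.txt"
    path.write_text(ADS)
    return path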
Code example #17
def main():
    """Main function
    """
    is_osg = htcondor.param.get('OSG_CONFIGURE_PRESENT',
                                '').lower() in ('true', 'yes', '1')

    # Create dict whose values are lists of ads specified in the relevant JOB_ROUTER_* variables
    parsed_jr_ads = {}
    for attr in ['JOB_ROUTER_DEFAULTS', 'JOB_ROUTER_ENTRIES']:
        try:
            config_val = htcondor.param[attr]
        except KeyError:
            error("Missing required %s configuration value" % attr)

        # store the ads (iterating through ClassAdStringIterator consumes them)
        try:
            parsed_jr_ads[attr] = list(classad.parseAds(config_val))
        except ValueError:
            # We shouldn't ever get here since classad.parseAds() only raises ValueError when it's given
            # non-string/non-file output and htcondor.param shouldn't contain such values
            error("Failed to parse %s configuration value" % attr)

        # If JRD or JRE can't be parsed, the job router can't function
        if not parsed_jr_ads[attr]:
            error("Could not read %s in the HTCondor-CE configuration." % attr)

        if attr == "JOB_ROUTER_ENTRIES":
            # Warn about routes we can find in the config that don't result in valid ads
            malformed_entry_names = find_malformed_entries(config_val)
            if malformed_entry_names:
                warn(
                    "Could not read JOB_ROUTER_ENTRIES in the HTCondor-CE configuration. "
                    + "Failed to parse the following routes: %s" %
                    ', '.join(malformed_entry_names))

            # Warn about routes specified by JOB_ROUTER_ROUTE_NAMES that don't appear in the parsed JRE.
            # The job router can function this way but it's likely a config error
            route_order = htcondor.param.get('JOB_ROUTER_ROUTE_NAMES', '')
            if route_order:
                missing_route_def = set(route_order).difference(
                    set(parse_route_names(config_val)))
                if missing_route_def:
                    warn(
                        "The following are specified in JOB_ROUTER_ROUTE_NAMES "
                        "but cannot be found in JOB_ROUTER_ENTRIES: %s" %
                        ', '.join(missing_route_def))

    # Find all eval_set_ attributes in the JOB_ROUTER_DEFAULTS
    eval_set_defaults = set([
        x.lstrip('eval_')  # NB: lstrip strips a character set, not a prefix; safe here only because 's' is not in 'eval_'
        for x in parsed_jr_ads['JOB_ROUTER_DEFAULTS'][0].keys()
        if x.startswith('eval_set_')
    ])

    # Find all default_ attributes used in expressions in the JOB_ROUTER_DEFAULTS
    default_attr = set([
        re.sub(r'.*(default_\w*).*', 'eval_set_\\1', str(x))
        for x in parsed_jr_ads['JOB_ROUTER_DEFAULTS'][0].values()
        if isinstance(x, classad.ExprTree) and "default_" in str(x)
    ])

    for entry in parsed_jr_ads['JOB_ROUTER_ENTRIES']:
        # Warn users if they've set_ attributes that would be overridden by eval_set in the JOB_ROUTER_DEFAULTS
        overridden_attr = eval_set_defaults.intersection(set(entry.keys()))
        if overridden_attr:
            warn(
                "%s in JOB_ROUTER_ENTRIES will be overridden by the JOB_ROUTER_DEFAULTS."
                % ', '.join(overridden_attr) +
                " Use the 'eval_set_' prefix instead.")

        # Ensure that users don't set the job environment in the Job Router
        if is_osg and any(x.endswith('environment') for x in entry.keys()):
            error(
                "Do not use the Job Router to set the environment. Place variables under "
                + "[Local Settings] in /etc/osg/config.d/40-localsettings.ini")

        # Warn users about eval_set_ default attributes in the ENTRIES since their
        # evaluation may occur after the eval_set_ expressions containing them in the
        # JOB_ROUTER_DEFAULTS
        no_effect_attr = default_attr.intersection(
            set([x for x in entry.keys() if x.startswith('eval_set_')]))
        if no_effect_attr:
            warn("%s in JOB_ROUTER_ENTRIES " % ', '.join(no_effect_attr) +
                 "may not have any effect. Use the 'set_' prefix instead.")

    # Warn users on OSG CEs if osg-configure has not been run
    if is_osg:
        try:
            htcondor.param['OSG_CONFIGURED']
        except KeyError:
            warn(
                "osg-configure has not been run, degrading the functionality "
                +
                "of the CE. Please run 'osg-configure -c' and restart condor-ce."
            )

    # Ensure that HTCondor back-ends have QUEUE_SUPER_USER_MAY_IMPERSONATE set correctly
    try:
        htcondor.param['JOB_ROUTER_SCHEDD2_NAME']
    except KeyError:
        pass
    else:
        os.environ['CONDOR_CONFIG'] = '/etc/condor/condor_config'
        htcondor.reload_config()
        su_attr = 'QUEUE_SUPER_USER_MAY_IMPERSONATE'
        if htcondor.param.get(su_attr, '') != '.*':
            error(
                "HTCondor batch system is improperly configured for use with HTCondor CE. "
                +
                "Please verify that '%s = .*' is set in your HTCondor configuration."
                % su_attr)
    finally:
        os.environ['CONDOR_CONFIG'] = '/etc/condor-ce/condor_config'
Code example #18
#!/usr/bin/python

import os
import subprocess
import sys

try:
    import classad
    import htcondor
except ImportError:
    sys.exit("ERROR: Could not load HTCondor Python bindings. "
             "Ensure the 'htcondor' and 'classad' modules are in PYTHONPATH")

jre    = classad.parseAds(htcondor.param['JOB_ROUTER_ENTRIES'])  # parse the config value, not the literal name
grs    = ( x["GridResource"] for x in jre )
rhosts = ( x.split()[1:3]    for x in grs )  # "batch <type> <user@host>" -> (<type>, <user@host>)

for batchtype, rhost in rhosts:
    subprocess.call(['bosco_cluster', '-o', os.getenv("OVERRIDE_DIR"),
                     rhost, batchtype])

コード例 #20
0
File: cache_status_jel.py Project: ddaina/CRABServer
def parseNodeStateV2(fp, nodes, level):
    """
    HTCondor 8.1.6 updated the node state file to be classad-based.
    This is a more flexible format that allows future extensions but, unfortunately,
    also requires a separate parser.
    """
    dagStatus = nodes.setdefault("DagStatus", {})
    dagStatus.setdefault("SubDagStatus", {})
    subDagStatus = dagStatus.setdefault("SubDags", {})
    for ad in classad.parseAds(fp):
        if ad['Type'] == "DagStatus":
            if level:
                statusDict = subDagStatus.setdefault(int(level), {})
                statusDict['Timestamp'] = ad.get('Timestamp', -1)
                statusDict['NodesTotal'] = ad.get('NodesTotal', -1)
                statusDict['DagStatus'] = ad.get('DagStatus', -1)
            else:
                dagStatus['Timestamp'] = ad.get('Timestamp', -1)
                dagStatus['NodesTotal'] = ad.get('NodesTotal', -1)
                dagStatus['DagStatus'] = ad.get('DagStatus', -1)
            continue
        if ad['Type'] != "NodeStatus":
            continue
        node = ad.get("Node", "")
        if node.endswith("SubJobs"):
            status = ad.get('NodeStatus', -1)
            dagname = "RunJobs{0}.subdag".format(
                nodeName2Re.match(node).group(1))
            # Add special state where we *expect* a submitted DAG for the
            # status command on the client
            if status == 5 and os.path.exists(
                    dagname) and os.stat(dagname).st_size > 0:
                status = 99
            dagStatus["SubDagStatus"][node] = status
            continue
        if not node.startswith("Job"):
            continue
        nodeid = node[3:]
        status = ad.get('NodeStatus', -1)
        retry = ad.get('RetryCount', -1)
        msg = ad.get("StatusDetails", "")
        info = nodes.setdefault(nodeid, copy.deepcopy(NODE_DEFAULTS))
        if status == 1:  # STATUS_READY
            if info.get("State") == "transferring":
                info["State"] = "cooloff"
            elif info.get('State') != "cooloff":
                info['State'] = 'unsubmitted'
        elif status == 2:  # STATUS_PRERUN
            if retry == 0:
                info['State'] = 'unsubmitted'
            else:
                info['State'] = 'cooloff'
        elif status == 3:  # STATUS_SUBMITTED
            if msg == 'not_idle':
                info.setdefault('State', 'running')
            else:
                info.setdefault('State', 'idle')
        elif status == 4:  # STATUS_POSTRUN
            if info.get("State") != "cooloff":
                info['State'] = 'transferring'
        elif status == 5:  # STATUS_DONE
            info['State'] = 'finished'
        elif status == 6:  # STATUS_ERROR
            # Older versions of HTCondor would put jobs into STATUS_ERROR
            # for a short time if the job was to be retried.  Hence, we had
            # some status parsing logic to try and guess whether the job would
            # be tried again in the near future.  This behavior is no longer
            # observed; STATUS_ERROR is terminal.
            info['State'] = 'failed'
Code example #20
 def test_load_classads(self):
     self.new_ads_verify(classad.parseAds(open("tests/test_multiple.ad")))
     self.new_ads_verify(classad.parseAds(open("tests/test_multiple.ad").read()))
     self.old_ads_verify(classad.parseOldAds(open("tests/test_multiple.old.ad")))
     self.old_ads_verify(classad.parseOldAds(open("tests/test_multiple.old.ad").read()))
Code example #21
try:
    import classad
    import htcondor
except ImportError:
    error(
        "Could not load HTCondor Python bindings. "
        "Please ensure that the 'htcondor' and 'classad' are in your PYTHONPATH"
    )

is_osg = htcondor.param.get('OSG_CONFIGURE_PRESENT',
                            '').lower() in ('true', 'yes', '1')

# Create dict whose values are lists of ads specified in the relevant JOB_ROUTER_* variables
JOB_ROUTER_CONFIG = {}
for attr in ['JOB_ROUTER_DEFAULTS', 'JOB_ROUTER_ENTRIES']:
    try:
        ads = classad.parseAds(htcondor.param[attr])
    except KeyError:
        error("Missing required configuration: %s" % attr)
    # store the ads (iterating through ClassAdStringIterator consumes them)
    JOB_ROUTER_CONFIG[attr] = list(ads)

# Verify job routes. classad.parseAds() silently skips malformed ads, so we compare the number of
# parsed ads against a proxy count taken from the unparsed string: its opening square brackets, "["
for attr, ads in JOB_ROUTER_CONFIG.items():
    if htcondor.param[attr].count('[') != len(ads):
        error(
            "Could not read %s in the HTCondor CE configuration. Please verify syntax correctness"
            % attr)

# Find all eval_set_ attributes in the JOB_ROUTER_DEFAULTS
Code example #22
File: cache_status.py Project: belforte/CRABServer
def parseNodeStateV2(fp, nodes, level):
    """
    HTCondor 8.1.6 updated the node state file to be classad-based.
    This is a more flexible format that allows future extensions but, unfortunately,
    also requires a separate parser.
    """
    dagStatus = nodes.setdefault("DagStatus", {})
    dagStatus.setdefault("SubDagStatus", {})
    subDagStatus = dagStatus.setdefault("SubDags", {})
    for ad in classad.parseAds(fp):
        if ad['Type'] == "DagStatus":
            if level:
                statusDict = subDagStatus.setdefault(int(level), {})
                statusDict['Timestamp'] = ad.get('Timestamp', -1)
                statusDict['NodesTotal'] = ad.get('NodesTotal', -1)
                statusDict['DagStatus'] = ad.get('DagStatus', -1)
            else:
                dagStatus['Timestamp'] = ad.get('Timestamp', -1)
                dagStatus['NodesTotal'] = ad.get('NodesTotal', -1)
                dagStatus['DagStatus'] = ad.get('DagStatus', -1)
            continue
        if ad['Type'] != "NodeStatus":
            continue
        node = ad.get("Node", "")
        if node.endswith("SubJobs"):
            status = ad.get('NodeStatus', -1)
            dagname = "RunJobs{0}.subdag".format(nodeName2Re.match(node).group(1))
            # Add special state where we *expect* a submitted DAG for the
            # status command on the client
            if status == 5 and os.path.exists(dagname) and os.stat(dagname).st_size > 0:
                status = 99
            dagStatus["SubDagStatus"][node] = status
            continue
        if not node.startswith("Job"):
            continue
        nodeid = node[3:]
        status = ad.get('NodeStatus', -1)
        retry = ad.get('RetryCount', -1)
        msg = ad.get("StatusDetails", "")
        info = nodes.setdefault(nodeid, copy.deepcopy(NODE_DEFAULTS))
        if status == 1: # STATUS_READY
            if info.get("State") == "transferring":
                info["State"] = "cooloff"
            elif info.get('State') != "cooloff":
                info['State'] = 'unsubmitted'
        elif status == 2: # STATUS_PRERUN
            if retry == 0:
                info['State'] = 'unsubmitted'
            else:
                info['State'] = 'cooloff'
        elif status == 3: # STATUS_SUBMITTED
            if msg == 'not_idle':
                info.setdefault('State', 'running')
            else:
                info.setdefault('State', 'idle')
        elif status == 4: # STATUS_POSTRUN
            if info.get("State") != "cooloff":
                info['State'] = 'transferring'
        elif status == 5: # STATUS_DONE
            info['State'] = 'finished'
        elif status == 6: # STATUS_ERROR
            # Older versions of HTCondor would put jobs into STATUS_ERROR
            # for a short time if the job was to be retried.  Hence, we had
            # some status parsing logic to try and guess whether the job would
            # be tried again in the near future.  This behavior is no longer
            # observed; STATUS_ERROR is terminal.
            info['State'] = 'failed'