Code example #1
#!/usr/bin/env python
import subprocess
from htcondor_dag import Dag, autorun

def bash(cmd):
    subprocess.check_call(["/bin/bash","-c","set -o pipefail; " + cmd])

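# bash2 is an identical wrapper to bash; it is used for the jobs that
# go into the DIAMOND sub-DAG below, while bash is used at the top level.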
def bash2(cmd):
    subprocess.check_call(["/bin/bash","-c","set -o pipefail; " + cmd])

autorun()

dag = Dag("htcondor_ex6")
diamond = dag.dag(id="DIAMOND", filename="diamond.dag")

d_bash = dag.defer(bash, output=None, retry=1)
d_bash2 = diamond.defer(bash2, output=None, retry=1)

# http://research.cs.wisc.edu/htcondor/manual/v7.8/2_10DAGMan_Applications.html#SECTION003107900000000000000

a = d_bash2("echo A")
b = d_bash2("echo B")
c = d_bash2("echo C")
d = d_bash2("echo D")
a.child(b,c)
d.parent(b,c)
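# The result is a diamond-shaped DAG: A fans out to B and C, and D waits for both.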

# splice into the normal top-level dag
x = d_bash("echo X")
y = d_bash("echo Y")
dag.write()
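
Running this script writes the DAG files out (presumably htcondor_ex6.dag for the top level, alongside the explicitly named diamond.dag); the top-level file is then submitted with condor_submit_dag, and DAGMan executes the diamond as a nested sub-DAG.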
Code example #2
#!/usr/bin/env python
import subprocess
from htcondor_dag import Dag, autorun

# Note: you can't set input=None because this is where htcondor_dag.py
# stores the pickled arguments used to call the function
def bash(cmd):
    subprocess.check_call(["/bin/bash","-c","set -o pipefail; " + cmd])

autorun()

dag = Dag("htcondor_ex5")
d_bash = dag.defer(bash, output=None, arguments=["one","\"two\"","spacey 'quoted' argument"],
                   environment={"one":1,"two":'"2"',"three":"spacey 'quoted' value"})

j1 = d_bash("tr 'a-z' 'A-Z' </etc/passwd >tmp1")
j2 = d_bash("tr 'a-z' 'A-Z' </etc/hosts >tmp2")
j3 = d_bash("cat tmp1 tmp2 >tmp.out").parent(j1, j2).var(job_files="tmp1,tmp2")
dag.write()
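
A side note on the bash() wrapper: subprocess.check_call raises CalledProcessError whenever the command exits non-zero, and "set -o pipefail" extends that to every stage of a pipeline, so a failing shell step surfaces as a failing HTCondor job (which DAGMan can then retry). A minimal local sketch of that behaviour:

import subprocess

def bash(cmd):
    subprocess.check_call(["/bin/bash", "-c", "set -o pipefail; " + cmd])

bash("true | cat")        # pipeline succeeds: no exception
try:
    bash("false | cat")   # fails despite cat succeeding, thanks to pipefail
except subprocess.CalledProcessError as e:
    print("pipeline failed with exit status %d" % e.returncode)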
Code example #3
#!/usr/bin/env python
from htcondor_dag import Dag, autorun

# Limit the number of concurrent jobs which run in a particular category,
# in this case only 3 at a time.

def adder(a, b):
    return a + b

autorun()
dag = Dag("htcondor_ex4")
dag.maxjobs["adder"] = 3

d_adder = dag.defer(adder, category="adder")
d_adder(1, 1)
d_adder(2, 2)
d_adder(3, 3)
d_adder(4, 4)
d_adder(5, 5)
d_adder(6, 6)
dag.write()
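
For orientation: category="adder" together with dag.maxjobs["adder"] = 3 presumably maps onto DAGMan's throttling directives in the generated file, i.e. a CATEGORY line per job plus a single "MAXJOBS adder 3" line, so at most three of the six adder jobs execute at any one time.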
Code example #4
#!/usr/bin/env python
from htcondor_dag import Dag, autorun, procid

# First we run a cluster of jobs, each of which returns a value.
# Then we run another job which prints all the results from the cluster.

def adder(a, b):
    return a + b

def printer(v):
    print(repr(v))

autorun()

dag = Dag("htcondor_ex3")

j1 = dag.defer(adder, processes=10)(procid, 5)
dag.defer(printer, output="result.txt")(j1)

dag.write()
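
To make the dataflow concrete: processes=10 runs adder as a ten-process cluster, and procid stands in for each process's ID (0 through 9), so the cluster computes adder(0, 5) up to adder(9, 5). A plain-Python sketch of the values involved (assuming the collected cluster results are what printer receives):

def adder(a, b):
    return a + b

# Hypothetical stand-in for the ten-process cluster:
results = [adder(p, 5) for p in range(10)]
print(repr(results))   # [5, 6, 7, 8, 9, 10, 11, 12, 13, 14]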
Code example #5
#!/usr/bin/env python
from htcondor_dag import Dag, autorun

# Two jobs, each writes text to its output file

def print_sum(a, b):
    print(a + b)

autorun(report_hostname=True)

dag = Dag('htcondor_ex1')
dag.defer(print_sum, output="res1.txt")(1, 2)
dag.defer(print_sum, output="res2.txt")(3, 4)
dag.write()
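
After both jobs complete, res1.txt should contain 3 and res2.txt should contain 7; report_hostname=True presumably also records which execute host ran each job.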
Code example #6
#!/usr/bin/env python
from htcondor_dag import Dag, autorun

# Two jobs write a python value to their output file; the
# third job waits for these jobs to complete, reads their values
# and writes text output.


def print_sum(a, b):
    print(a + b)

def adder(a, b):
    return a + b

autorun()

dag = Dag('htcondor_ex2')
d_print_sum = dag.defer(print_sum, request_memory=200, output="result.txt")
d_adder = dag.defer(adder, request_memory=100)

j1 = d_adder(1, 2)
j2 = d_adder(3, 4)
j3 = d_print_sum(j1, j2)
dag.write()
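
Passing the job handles j1 and j2 straight to d_print_sum presumably makes them parents of j3 and substitutes their return values when j3 runs, so print_sum receives 3 and 7. A hypothetical local equivalent:

def adder(a, b):
    return a + b

def print_sum(a, b):
    print(a + b)

print_sum(adder(1, 2), adder(3, 4))   # prints 10, i.e. what ends up in result.txt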