Code example #1
def testWriter():
    graph = WorkflowGraph()
    prod = TestProducer()
    cons1 = TestOneInOneOutWriter()
    graph.connect(prod, "output", cons1, "input")
    results = simple_process.process_and_return(graph, {prod: 5})
    tools.eq_({cons1.id: {"output": list(range(1, 6))}}, results)
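A note on the inputs argument used throughout these examples: passing an integer, as in {prod: 5}, appears to be shorthand for running the producer for that many iterations, which is why the expected output here is list(range(1, 6)); code example #13 obtains the same result by passing five empty input objects instead.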
Code example #2
File: aggregate_test.py Project: KNMI/wps_workflow
def graph_sum():
    prod = NumberProducer(1000)
    prod.name = 'NumberProducer'
    s = parallelSum()
    graph = WorkflowGraph()
    graph.connect(prod, 'output', s, 'input')
    return graph
Code example #3
def testConsumer():
    graph = WorkflowGraph()
    prod = TestProducer()
    cons = PrintDataConsumer()
    graph.connect(prod, "output", cons, "input")
    results = simple_process.process_and_return(graph, {prod: 10})
    tools.eq_({}, results)
Code example #4
def test_types():
    graph = WorkflowGraph()
    prod = TestProducer()
    cons = TestOneInOneOut()
    graph.connect(prod, "output", cons, "input")
    graph.propagate_types()
    tools.eq_(prod.outputconnections["output"]["type"], cons.inputconnections["input"]["type"])
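propagate_types() walks the connections of the graph and pushes each PE's declared output type to the inputs it feeds, so after the call the consumer's declared input type matches the producer's output type, which is exactly what the assertion verifies.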
Code example #5
File: aggregate_test.py Project: KNMI/wps_workflow
def testContinuousReduce():
    prod = NumberProducer()
    test = TestPE()
    graph = WorkflowGraph()
    graph.connect(prod, 'output', test, 'input')
    results = simple_process.process_and_return(graph, {prod: 5})
    tools.eq_({test.id: {'output': [[0] for i in range(5)]}}, results)
Code example #6
File: workflow.py Project: bird-house/malleefowl
def esgf_workflow(source, worker, monitor=None, headers=None):
    graph = WorkflowGraph()

    # TODO: configure limit
    esgsearch = EsgSearch(
        url=wps_url(),
        search_url=source.get('url', 'https://esgf-data.dkrz.de/esg-search'),
        constraints=source.get('constraints', source.get('facets')),  # facets for backward compatibility
        query=source.get('query'),
        limit=source.get('limit', 100),
        search_type='File',
        distrib=source.get('distrib'),
        replica=source.get('replica'),
        latest=source.get('latest'),
        temporal=source.get('temporal'),
        start=source.get('start'),
        end=source.get('end'))
    esgsearch.set_monitor(monitor, 0, 10)
    download = Download(url=wps_url(), headers=headers)
    download.set_monitor(monitor, 10, 50)
    doit = GenericWPS(headers=headers, **worker)
    doit.set_monitor(monitor, 50, 100)

    graph.connect(esgsearch, esgsearch.OUTPUT_NAME,
                  download, download.INPUT_NAME)
    graph.connect(download, download.OUTPUT_NAME, doit, doit.INPUT_NAME)

    result = simple_process.process(graph, inputs={esgsearch: [{}]})

    status_location = result.get((doit.id, doit.STATUS_LOCATION_NAME))[0]
    status = result.get((doit.id, doit.STATUS_NAME))[0]
    return dict(worker=dict(status_location=status_location, status=status))
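The three set_monitor calls split the overall progress reporting between the stages: the ESGF search covers 0-10%, the download 10-50% and the worker WPS process 50-100%.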
Code example #7
File: tupleCounter.py Project: Skemes/Research
import sys

def main():
    if len(sys.argv) < 5:
        print("Incorrect arguments provided. Proper format: "
              "python tupleCounter.py <inputFile> <numRepeats> <outputFile> <numCores>")
        sys.exit()
    inputFilename = sys.argv[1]
    numRepeats = int(sys.argv[2])
    outputFile = sys.argv[3]
    numCores = int(sys.argv[4])

    producer = TupleProducer(inputFilename, numRepeats)
    makeDicts = MakeDict()
    collector = CollectCounts(outputFile)

    graph = WorkflowGraph()
    graph.connect(producer, 'output', makeDicts, 'input')
    graph.connect(makeDicts, 'output', collector, 'input')

    from dispel4py.new.multi_process import process as multi_process
    import argparse

    args = argparse.Namespace()
    args.num = numCores
    args.simple = False

    multi_process(graph, {producer: 1}, args)
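Given the usage string above, the script would be invoked along the lines of python tupleCounter.py words.txt 3 counts.txt 4 (the file names here are hypothetical placeholders).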
Code example #8
def testIterative():
    graph = WorkflowGraph()
    prod = TestProducer()
    cons = TestIterative()
    graph.connect(prod, "output", cons, "input")
    results = simple_process.process_and_return(graph, {prod: 25})
    tools.eq_({cons.id: {"output": list(range(1, 26))}}, results)
Code example #9
File: aggregate.py Project: akrause2014/dispel4py
def parallelAvg(index=0):
    composite = WorkflowGraph()
    parAvg = AverageParallelPE(index)
    reduceAvg = AverageReducePE()
    composite.connect(parAvg, parAvg.OUTPUT_NAME, reduceAvg, reduceAvg.INPUT_NAME)
    composite.inputmappings = { 'input' : (parAvg, parAvg.INPUT_NAME) }
    composite.outputmappings = { 'output' : (reduceAvg, reduceAvg.OUTPUT_NAME) }
    return composite
Code example #10
File: aggregate.py Project: akrause2014/dispel4py
def parallelStdDev(index=0):
    composite = WorkflowGraph()
    parStdDev = StdDevPE(index)
    reduceStdDev = StdDevReducePE()
    composite.connect(parStdDev, parStdDev.OUTPUT_NAME, reduceStdDev, reduceStdDev.INPUT_NAME)
    composite.inputmappings = { 'input' : (parStdDev, parStdDev.INPUT_NAME) }
    composite.outputmappings = { 'output' : (reduceStdDev, reduceStdDev.OUTPUT_NAME) }
    return composite
Code example #11
File: aggregate_test.py Project: KNMI/wps_workflow
def graph_min_max():
    prod = NumberProducer(1000)
    mi = parallelMin()
    ma = parallelMax()
    graph = WorkflowGraph()
    graph.connect(prod, 'output', mi, 'input')
    graph.connect(prod, 'output', ma, 'input')
    return graph
Code example #12
def testOnetoAll():
    graph = WorkflowGraph()
    prod = t.TestProducer()
    cons = t.TestOneInOneOut()
    cons.numprocesses = 2
    cons.inputconnections['input']['grouping'] = 'all'
    graph.connect(prod, 'output', cons, 'input')
    return graph
Code example #13
def testWriter():
    graph = WorkflowGraph()
    prod = TestProducer()
    cons1 = TestOneInOneOutWriter()
    graph.connect(prod, 'output', cons1, 'input')
    results = simple_process.process_and_return(graph, {prod: [{}, {}, {}, {}, {}]})
    tools.eq_({ cons1.id : {'output': [1, 2, 3, 4, 5]} }, results)
Code example #14
def testWordCount():
    prod = RandomWordProducer()
    filt = RandomFilter()
    count = WordCounter()
    graph = WorkflowGraph()
    graph.connect(prod, "output", filt, "input")
    graph.connect(filt, "output", count, "input")
    simple_process.process(graph, inputs={prod: 100})
Code example #15
File: aggregate.py Project: akrause2014/dispel4py
def parallel_aggregate(instPE, reducePE):
    composite = WorkflowGraph()
    reducePE.inputconnections[AggregatePE.INPUT_NAME]['grouping'] = 'global'
    reducePE.numprocesses = 1
    composite.connect(instPE, AggregatePE.OUTPUT_NAME, reducePE, AggregatePE.INPUT_NAME)
    composite.inputmappings = { 'input' : (instPE, AggregatePE.INPUT_NAME) }
    composite.outputmappings = { 'output' : (reducePE, AggregatePE.OUTPUT_NAME) }
    return composite
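parallel_aggregate is the common pattern behind the composites above: it pins the reduce PE to a single process behind a 'global' grouping so the map PE can fan out freely. As a minimal sketch, the parallelSum composite used in code example #2 could plausibly be defined on top of this helper (SumPE and SumReducePE are assumed here by analogy with the Average and StdDev PEs in the neighbouring examples; the actual class names may differ):

def parallelSum(index=0):
    # map instances compute partial sums in parallel;
    # the single reduce instance merges them into the final sum
    return parallel_aggregate(SumPE(index), SumReducePE())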
Code example #16
def testTee():
    graph = WorkflowGraph()
    prod = TestProducer()
    cons1 = TestOneInOneOut()
    cons2 = TestOneInOneOut()
    graph.connect(prod, 'output', cons1, 'input')
    graph.connect(prod, 'output', cons2, 'input')
    multiprocess(graph, 3, [{}, {}, {}])
Code example #17
def testOnetoAll():
    graph = WorkflowGraph()
    prod = t.TestProducer()
    cons = t.TestOneInOneOut()
    cons.numprocesses = 2
    cons.inputconnections["input"]["grouping"] = "all"
    graph.connect(prod, "output", cons, "input")
    return graph
Code example #18
def testTwoPipelines():
    graph = WorkflowGraph()
    prod1 = TestProducer()
    cons1 = TestOneInOneOut()
    prod2 = TestProducer()
    cons2 = TestOneInOneOut()
    graph.connect(prod1, 'output', cons1, 'input')
    graph.connect(prod2, 'output', cons2, 'input')
    multiprocess(graph, 2, [{}, {}, {}, {}, {}])
Code example #19
def testPipeline():
    prod = TestProducer()
    cons1 = TestOneInOneOut()
    cons2 = TestOneInOneOut()
    graph = WorkflowGraph()
    graph.connect(prod, 'output', cons1, 'input')
    graph.connect(cons1, 'output', cons2, 'input')
    results = simple_process.process_and_return(graph, inputs={ prod : [ {}, {}, {}, {}, {} ] } )
    tools.eq_({ cons2.id : { 'output' : [1, 2, 3, 4, 5] } }, results)
Code example #20
def testPipeline():
    prod = TestProducer()
    cons1 = TestOneInOneOut()
    cons2 = TestOneInOneOut()
    graph = WorkflowGraph()
    graph.connect(prod, "output", cons1, "input")
    graph.connect(cons1, "output", cons2, "input")
    results = simple_process.process_and_return(graph, inputs={prod: 5})
    tools.eq_({cons2.id: {"output": list(range(1, 6))}}, results)
Code example #21
File: worker_mpi_test.py Project: Ravanon/dispel4py
def testTee():
    graph = WorkflowGraph()
    prod = TestProducer()
    cons1 = TestOneInOneOut()
    cons2 = TestOneInOneOut()
    graph.connect(prod, 'output', cons1, 'input')
    graph.connect(prod, 'output', cons2, 'input')
    return graph
Code example #22
def testPipeline():
    graph = WorkflowGraph()
    prod = TestProducer()
    prev = prod
    for i in range(5):
        cons = TestOneInOneOut()
        graph.connect(prev, 'output', cons, 'input')
        prev = cons
    results = simple_process.process(graph, {prod: [{}, {}, {}, {}, {}]})
    tools.eq_({(prev.id, 'output'): [1, 2, 3, 4, 5]}, results)
Code example #23
def test_input_iter_one():
    args = argparse.Namespace()
    args.file = None
    args.data = None
    args.iter = 1
    graph = WorkflowGraph()
    prod = TestProducer()
    graph.add(prod)
    inputs = p.create_inputs(args, graph)
    tools.eq_(inputs[prod.id], 1)
Code example #24
def testPipelineWithInputId():
    graph = WorkflowGraph()
    first = TestOneInOneOut()
    prev = first
    for i in range(5):
        cons = TestOneInOneOut()
        graph.connect(prev, 'output', cons, 'input')
        prev = cons
    results = simple_process.process(graph, { first.id: [{'input': 1}] } )
    tools.eq_({(prev.id, 'output'):[1]}, results)
Code example #25
def testTee():
    graph = WorkflowGraph()
    prod = TestProducer()
    cons1 = TestOneInOneOut()
    cons2 = TestOneInOneOut()
    graph.connect(prod, 'output', cons1, 'input')
    graph.connect(prod, 'output', cons2, 'input')
    args.num = 3
    process(graph, inputs={prod: [{}, {}, {}, {}, {}]}, args=args)
Code example #26
def testTee():
    graph = WorkflowGraph()
    prod = TestProducer()
    cons1 = TestOneInOneOut()
    cons2 = TestOneInOneOut()
    graph.connect(prod, 'output', cons1, 'input')
    graph.connect(prod, 'output', cons2, 'input')
    results = simple_process.process(graph, {prod: [{}, {}, {}, {}, {}]})
    tools.eq_({(cons1.id, 'output'): [1, 2, 3, 4, 5],
              (cons2.id, 'output'): [1, 2, 3, 4, 5]}, results)
Code example #27
File: provenance.py Project: KNMI/VERCE
def InitiateNewRun(
        graph,
        provRecorderClass,
        provImpClass=ProvenancePE,
        input=[],
        username=None,
        workflowId=None,
        description="",
        system_id=None,
        workflowName=None,
        w3c_prov=False,
        runId=None,
        clustersRecorders={},
        feedbackPEs=[]):

    if username is None or workflowId is None or workflowName is None:
        raise Exception("Missing values")
    if runId is None:
        runId = getUniqueId()

    newrun = NewWorkflowRun()

    newrun.parameters = {"input": input,
                         "username": username,
                         "workflowId": workflowId,
                         "description": description,
                         "system_id": system_id,
                         "workflowName": workflowName,
                         "runId": runId,
                         "mapping": sys.argv[1]
                         }
    _graph = WorkflowGraph()
    provrec0 = provRecorderClass(toW3C=w3c_prov)
    _graph.connect(newrun, "output", provrec0, provrec0.INPUT_NAME)

    # attachProvenanceRecorderPE(_graph,provRecorderClass,runId,username,w3c_prov)

    # newrun.provon=True
    simple_process.process(_graph, {'NewWorkflowRun': [{'input': 'None'}]})

    injectProv(graph, provImpClass)
    print("PREPARING PROVENANCE RECORDERS:")
    print("Provenance Recorders Clusters: "+str(clustersRecorders))
    print("PEs processing Recorders feedback: "+str(feedbackPEs))
    
    attachProvenanceRecorderPE(
        graph,
        provRecorderClass,
        runId,
        username,
        w3c_prov,
        clustersRecorders,
        feedbackPEs)
    return runId
Code example #28
File: rtxcor.py Project: andrejsim/dispel4py
def createWf():
    graph = WorkflowGraph()
    plot = CompMatrix(variables_number)
    mc = MaxClique(-0.01)
    plot.numprocesses = 4
    # plot.prov_cluster = "my"
    start = Start()
    # start.prov_cluster = "my"
    sources = {}

    for i in range(0, variables_number):
        sources[i] = Source(sampling_rate, i)
        sources[i].prov_cluster = "my"
    for h in range(0, variables_number):
        graph.connect(start, 'output', sources[h], 'iterations')
        for j in range(h + 1, variables_number):
            cc = CorrCoef(batch_size, (h, j))
            cc.prov_cluster = "mycc"
            plot._add_input('input' + '_' + str(h) + '_' + str(j), grouping=[1])
            graph.connect(sources[h], 'output', cc, 'input1')
            graph.connect(sources[j], 'output', cc, 'input2')
            graph.connect(cc, 'output', plot, 'input' + '_' + str(h) + '_' + str(j))
            cc.single = True
            # cc.numprocesses = 1
    graph.connect(plot, 'output', mc, 'matrix')

    return graph
Code example #29
def testPipeline():
    prod = TestProducer()
    cons1 = TestOneInOneOut()
    cons2 = TestOneInOneOut()
    graph = WorkflowGraph()
    graph.connect(prod, 'output', cons1, 'input')
    graph.connect(cons1, 'output', cons2, 'input')
    args = argparse.Namespace()
    args.num = 5
    args.simple = False
    process(graph, inputs={ prod : [ {}, {}, {}  ] }, args=args )
Code example #30
File: worker_mpi_test.py Project: Ravanon/dispel4py
def testSquare():
    graph = WorkflowGraph()
    prod = TestProducer(2)
    cons1 = TestOneInOneOut()
    cons2 = TestOneInOneOutWriter()
    last = TestTwoInOneOut()
    graph.connect(prod, 'output0', cons1, 'input')
    graph.connect(prod, 'output1', cons2, 'input')
    graph.connect(cons1, 'output', last, 'input0')
    graph.connect(cons2, 'output', last, 'input1')
    return graph
Code example #31
def testSquare():
    graph = WorkflowGraph()
    prod = TestProducer(2)
    cons1 = TestOneInOneOut()
    cons2 = TestOneInOneOut()
    last = TestTwoInOneOut()
    graph.connect(prod, 'output0', cons1, 'input')
    graph.connect(prod, 'output1', cons2, 'input')
    graph.connect(cons1, 'output', last, 'input0')
    graph.connect(cons2, 'output', last, 'input1')
    results = simple_process.process_and_return(graph, { prod : [{}]} )
    tools.eq_({last.id: {'output': ['1', '1']}}, results)
Code example #32
def testSquare():
    graph = WorkflowGraph()
    prod = t.TestProducer(2)
    cons1 = t.TestOneInOneOut()
    cons2 = t.TestOneInOneOut()
    last = t.TestTwoInOneOut()
    graph.connect(prod, 'output0', cons1, 'input')
    graph.connect(prod, 'output1', cons2, 'input')
    graph.connect(cons1, 'output', last, 'input0')
    graph.connect(cons2, 'output', last, 'input1')
    args.num = 4
    process(graph, inputs={prod: [{}]}, args=args)
Code example #33
File: aggregate.py Project: krischer/dispel4py
def parallelAvg(index=0):
    '''
    Creates an AVG composite PE that can be parallelised using a map-reduce pattern.
    '''
    composite = WorkflowGraph()
    parAvg = AverageParallelPE(index)
    reduceAvg = AverageReducePE()
    composite.connect(parAvg, parAvg.OUTPUT_NAME, reduceAvg,
                      reduceAvg.INPUT_NAME)
    composite.inputmappings = {'input': (parAvg, parAvg.INPUT_NAME)}
    composite.outputmappings = {'output': (reduceAvg, reduceAvg.OUTPUT_NAME)}
    return composite
Code example #34
def testNotEnoughProcesses():
    prod = TestProducer()
    cons1 = TestOneInOneOut()
    cons2 = TestOneInOneOut()
    graph = WorkflowGraph()
    graph.connect(prod, 'output', cons1, 'input')
    graph.connect(cons1, 'output', cons2, 'input')
    args = argparse.Namespace()
    args.num = 1
    args.simple = False
    args.results = True
    message = process(graph, inputs={prod: 5}, args=args)
    tools.ok_('Not enough processes' in message)
Code example #35
def testGrouping():
    words = t.RandomWordProducer()
    filter1 = t.RandomFilter()
    filter2 = t.RandomFilter()
    count = t.WordCounter()
    graph = WorkflowGraph()
    graph.connect(words, 'output', filter1, 'input')
    graph.connect(words, 'output', filter2, 'input')
    graph.connect(filter1, 'output', count, 'input')
    graph.connect(filter2, 'output', count, 'input')

    return graph
Code example #36
File: group_by.py Project: xpivan/dispel4py
def testGrouping():
    '''
    Creates the test graph.
    '''
    words = t.RandomWordProducer()
    cons1 = t.TestOneInOneOut()
    cons2 = t.TestOneInOneOut()
    cons3 = t.TestOneInOneOut()
    count = t.WordCounter()
    graph = WorkflowGraph()
    graph.connect(words, 'output', cons1, 'input')
    graph.connect(cons1, 'output', cons2, 'input')
    graph.connect(cons2, 'output', cons3, 'input')
    graph.connect(cons3, 'output', count, 'input')

    graph.partitions = [[words], [cons1, cons2, cons3], [count]]
    return graph
Code example #37
File: aggregate.py Project: krischer/dispel4py
def parallelStdDev(index=0):
    '''
    Creates a STDDEV composite PE that can be parallelised using a map-reduce pattern.
    '''
    composite = WorkflowGraph()
    parStdDev = StdDevPE(index)
    reduceStdDev = StdDevReducePE()
    composite.connect(parStdDev, parStdDev.OUTPUT_NAME, reduceStdDev,
                      reduceStdDev.INPUT_NAME)
    composite.inputmappings = {'input': (parStdDev, parStdDev.INPUT_NAME)}
    composite.outputmappings = {
        'output': (reduceStdDev, reduceStdDev.OUTPUT_NAME)
    }
    return composite
Code example #38
def testComposite():
    comp = CompositePE()
    cons1 = TestOneInOneOut()
    cons2 = TestOneInOneOut()
    comp.connect(cons1, 'output', cons2, 'input')
    comp._map_input('comp_input', cons1, 'input')
    comp._map_output('comp_output', cons2, 'output')
    prod = TestProducer()
    cons = TestOneInOneOut()
    graph = WorkflowGraph()
    graph.connect(prod, 'output', comp, 'comp_input')
    graph.connect(comp, 'comp_output', cons, 'input')
    graph.flatten()
    results = simple_process.process_and_return(graph, {prod: 10})
    tools.eq_({cons.id: {'output': list(range(1, 11))}}, results)
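Note that graph.flatten() expands the composite node into its constituent PEs before execution, so simple_process only ever sees a graph of ordinary PEs.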
Code example #39
def testParallelPipeline():
    '''
    Creates the parallel pipeline graph with partitioning information.

    :rtype: the created graph
    '''
    graph = WorkflowGraph()
    prod = t.TestProducer()
    cons1 = t.TestOneInOneOut()
    cons2 = t.TestOneInOneOut()
    cons3 = t.TestOneInOneOut()

    graph.connect(prod, 'output', cons1, 'input')
    graph.connect(cons1, 'output', cons2, 'input')
    graph.connect(cons1, 'output', cons3, 'input')

    graph.partitions = [[prod, cons1, cons2], [cons3]]

    return graph
Code example #40
def testSplitMerge():
    '''
    Creates the split/merge graph with 4 nodes.
    
    :rtype: the created graph
    '''
    graph = WorkflowGraph()
    prod = t.TestProducer(2)
    cons1 = t.TestOneInOneOut()
    cons2 = t.TestOneInOneOutWriter()
    last = t.TestTwoInOneOut()
    graph.connect(prod, 'output0', cons1, 'input')
    graph.connect(prod, 'output1', cons2, 'input')
    graph.connect(cons1, 'output', last, 'input0')
    graph.connect(cons2, 'output', last, 'input1')
    return graph
Code example #41
def testAlltoOne():
    '''
    Creates a graph with two consumer nodes and a global grouping.
    
    :rtype: the created graph
    '''
    graph = WorkflowGraph()
    prod = t.TestProducer()
    cons1 = t.TestOneInOneOut()
    cons2 = t.TestOneInOneOut()
    cons1.numprocesses = 5
    cons2.numprocesses = 5
    graph.connect(prod, 'output', cons1, 'input')
    cons2.inputconnections['input']['grouping'] = 'global'
    graph.connect(cons1, 'output', cons2, 'input')
    return graph
Code example #42
def testPipelineNotEnoughProcesses():
    prod = TestProducer()
    cons1 = TestOneInOneOut()
    cons2 = TestOneInOneOut()
    cons3 = TestOneInOneOut()
    cons4 = TestOneInOneOut()
    graph = WorkflowGraph()
    graph.connect(prod, 'output', cons1, 'input')
    graph.connect(cons1, 'output', cons2, 'input')
    graph.connect(cons2, 'output', cons3, 'input')
    graph.connect(cons3, 'output', cons4, 'input')
    args = argparse.Namespace()
    args.num = 4
    args.simple = False
    args.results = True
    result_queue = process(graph, inputs={prod: 10}, args=args)
    results = []
    item = result_queue.get()
    while item != STATUS_TERMINATED:
        name, output, data = item
        tools.eq_((cons4.id, 'output'), output)
        results.extend(data)
        item = result_queue.get()
    tools.eq_(Counter(range(1, 11)), Counter(results))
Code example #43
def testParallelPipeline():
    '''
    Creates a graph with 4 nodes.

    :rtype: the created graph
    '''
    graph = WorkflowGraph()
    prod = t.TestProducer()
    cons1 = t.TestOneInOneOut()
    cons2 = t.TestOneInOneOut()
    cons3 = t.TestOneInOneOut()

    graph.connect(prod, 'output', cons1, 'input')
    graph.connect(cons1, 'output', cons2, 'input')
    graph.connect(cons1, 'output', cons3, 'input')

    return graph
Code example #44
File: base.py Project: AndreiFrunze/wrangler
def create_iterative_chain(functions,
                           FunctionPE_class=SimpleFunctionPE,
                           name_prefix='PE_',
                           name_suffix=''):
    '''
    Creates a pipeline graph that applies a list of functions to the data,
    one after the other, wrapping each function in a processing element.
    :param functions: list of functions, or (function, parameters) pairs,
    that process data iteratively. Each function accepts one input
    parameter, data, and returns an output data block (or None).
    :param FunctionPE_class: class used to wrap each function as a PE
    :param name_prefix: prefix for the names of the generated PEs
    :param name_suffix: suffix for the names of the generated PEs
    :rtype: the created graph, with input and output mappings so that it
    can be used as a composite PE
    '''

    prev = None
    first = None
    graph = WorkflowGraph()

    for fn_desc in functions:
        try:
            fn = fn_desc[0]
            params = fn_desc[1]
        except TypeError:
            fn = fn_desc
            params = {}

        # print 'adding %s to chain' % fn.__name__
        pe = FunctionPE_class()
        pe.compute_fn = fn
        pe.params = params
        pe.name = name_prefix + fn.__name__ + name_suffix

        if prev:
            graph.connect(prev, IterativePE.OUTPUT_NAME, pe,
                          IterativePE.INPUT_NAME)
        else:
            first = pe
        prev = pe

    # Map inputs and outputs of the wrapper to the nodes in the subgraph
    graph.inputmappings = {'input': (first, IterativePE.INPUT_NAME)}
    graph.outputmappings = {'output': (prev, IterativePE.OUTPUT_NAME)}

    return graph
Code example #45
def testCreateChain():
    def add(a, b):
        return a + b

    def mult(a, b):
        return a * b

    def is_odd(a):
        return a % 2 == 1

    c = [(add, {'b': 1}), (mult, {'b': 3}), is_odd]
    chain = create_iterative_chain(c)
    prod = TestProducer()
    graph = WorkflowGraph()
    graph.connect(prod, 'output', chain, 'input')
    graph.flatten()
    results = simple_process.process_and_return(graph, {prod: 2})
    for key, value in results.items():
        tools.eq_({'output': [False, True]}, value)
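The expected value can be checked by hand: with {prod: 2} the producer emits 1 and 2; add(..., b=1) maps them to 2 and 3; mult(..., b=3) to 6 and 9; and is_odd turns those into False and True.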
Code example #46
def testTee():
    '''
    Creates a graph with two consumer nodes and a tee connection.

    :rtype: the created graph
    '''
    graph = WorkflowGraph()
    try:
        numIterations = int(sys.argv[4])
    except (IndexError, ValueError):
        numIterations = 1
    prod = t.NumberProducer(numIterations)
    cons1 = t.TestOneInOneOut()
    cons2 = t.TestOneInOneOut()
    cons3 = t.TestOneInOneOut()
    graph.connect(prod, 'output', cons1, 'input')
    graph.connect(cons1, 'output', cons2, 'input')
    graph.connect(cons1, 'output', cons3, 'input')
    return graph
Code example #47
def testPipeline():
    prod = TestProducer()
    cons1 = TestOneInOneOut()
    cons2 = TestOneInOneOut()
    graph = WorkflowGraph()
    graph.connect(prod, 'output', cons1, 'input')
    graph.connect(cons1, 'output', cons2, 'input')
    args = argparse.Namespace()
    args.num = 4
    args.simple = False
    args.results = True
    result_queue = process(graph, inputs={prod: 5}, args=args)
    results = []
    item = result_queue.get()
    while item != STATUS_TERMINATED:
        name, output, data = item
        tools.eq_(cons2.id, name)
        tools.eq_('output', output)
        results.append(data)
        item = result_queue.get()
    tools.eq_(list(range(1, 6)), results)
Code example #48
def testCompositeWithCreateParams():
    cons1 = TestOneInOneOut()
    cons2 = TestOneInOneOut()

    def create_graph(graph, connections):
        for i in range(connections):
            graph.connect(cons1, 'output', cons2, 'input')

    comp = CompositePE(create_graph, {'connections': 2})
    comp._map_input('comp_input', cons1, 'input')
    comp._map_output('comp_output', cons2, 'output')
    prod = TestProducer()
    cons = TestOneInOneOut()
    graph = WorkflowGraph()
    graph.connect(prod, 'output', comp, 'comp_input')
    graph.connect(comp, 'comp_output', cons, 'input')
    graph.flatten()
    results = simple_process.process_and_return(graph, {prod: 10})
    expected = []
    for i in range(1, 11):
        expected += [i, i]
    tools.eq_({cons.id: {'output': expected}}, results)
Code example #49
File: workflow.py Project: Ouranosinc/malleefowl
def solr_workflow(source, worker, monitor=None, headers=None):
    graph = WorkflowGraph()

    solrsearch = SolrSearch(url=source.get('url'),
                            query=source.get('query'),
                            filter_query=source.get('filter_query'))
    solrsearch.set_monitor(monitor, 0, 10)
    download = Download(url=wps_url(), headers=headers)
    download.set_monitor(monitor, 10, 50)
    doit = GenericWPS(headers=headers, **worker)
    doit.set_monitor(monitor, 50, 100)

    graph.connect(solrsearch, solrsearch.OUTPUT_NAME, download,
                  download.INPUT_NAME)
    graph.connect(download, download.OUTPUT_NAME, doit, doit.INPUT_NAME)

    result = simple_process.process_and_return(graph,
                                               inputs={solrsearch: [{}]})

    status_location = result[doit.id][doit.STATUS_LOCATION_NAME][0]
    status = result[doit.id][doit.STATUS_NAME][0]
    return dict(worker=dict(status_location=status_location, status=status))
Code example #50
def testSquare():
    graph = WorkflowGraph()
    prod = TestProducer(2)
    cons1 = TestOneInOneOut()
    cons2 = TestOneInOneOut()
    last = TestTwoInOneOut()
    graph.connect(prod, 'output0', cons1, 'input')
    graph.connect(prod, 'output1', cons2, 'input')
    graph.connect(cons1, 'output', last, 'input0')
    graph.connect(cons2, 'output', last, 'input1')
    args.num = 4
    args.results = True
    result_queue = process(graph, inputs={prod: 10}, args=args)
    results = []
    item = result_queue.get()
    while item != STATUS_TERMINATED:
        name, output, data = item
        tools.eq_(last.id, name)
        tools.eq_('output', output)
        results.append(data)
        item = result_queue.get()
    expected = {str(i): 2 for i in range(1, 11)}
    tools.eq_(expected, Counter(results))
Code example #51
    OUTPUT_NAME = 'output'

    def __init__(self):
        GenericPE.__init__(self)
        self.outputconnections = {
            ReadJSON.OUTPUT_NAME: {
                NAME: ReadJSON.OUTPUT_NAME
            }
        }

    def _process(self, inputs):

        self.write(ReadJSON.OUTPUT_NAME, input_json)


graph = WorkflowGraph()
read = ReadJSON()
watcher = WatchDirectory()

waveplot = WavePlot_INGV()
specfem2stream = Specfem3d2Stream()
seedtostream = StreamToSeedFile()
#kmlGenerator = kmlGenerator_INGV()

#os.environ['EVENT_PATH']='./'

controlInput = json.load(open(os.environ['JSON_OUT'] + "/jsonout_run_specfem"))

#kmlGenerator.appParameters={"stations_file":os.environ['RUN_PATH']+'/stations'}
solver_name = ""
if "SOLVER_NAME" in os.environ:
Code example #52
processes = [
    waveform_reader,
    (plot_stream, {
        "source": "waveform_reader",
        "output_dir": "./output-images",
        "tag": "observed-image"
    })
]

# processes.append((fn, params))
chain = create_iterative_chain(processes, FunctionPE_class=SimpleFunctionPE)

watcher = WatchDirectory(0)
watcher_xml = WatchDirectory(1)
downloadPE.name = "downloadPE"
graph = WorkflowGraph()
graph.add(downloadPE)

graph.connect(downloadPE, 'output', watcher, "input")
graph.connect(downloadPE, 'output', watcher_xml, "input")
graph.connect(watcher, 'output', chain, "input")
graph.connect(watcher_xml, 'output', xmlr, "input")

# injectProv(graph,SeismoPE)
# graph=attachProvenanceRecorderPE(graph,ProvenanceRecorderToFileBulk,username=os.environ['USER_NAME'],runId=os.environ['RUN_ID'])

# Store to local path
#ProvenancePE.PROV_PATH = os.environ['PROV_PATH']
#
# Size of the provenance bulk before sent to storage or sensor
#ProvenancePE.BULK_SIZE = 20
Code example #53
File: wordcount.py Project: xpivan/dispel4py
        self.log('%s: %s' % data)


def count_by_group():
    composite = CompositePE()
    count = CountByGroup()
    merge = CountByGroup()
    merge._add_input('input', grouping='global')
    composite.connect(count, 'output', merge, 'input')
    composite._map_input('input', count, 'input')
    composite._map_output('output', merge, 'output')
    return composite


from dispel4py.workflow_graph import WorkflowGraph

graph = WorkflowGraph()
textfile = SplitTextFile()
wordcount = WordCount()
# count = CountByGroup()
# merge = CountByGroup()
# merge._add_input('input', grouping='global')
count = count_by_group()
result = Print()

graph.connect(textfile, 'output', wordcount, 'input')
graph.connect(wordcount, 'output', count, 'input')
# graph.connect(count, 'output', merge, 'input')
# graph.connect(merge, 'output', result, 'input')
graph.connect(count, 'output', result, 'input')
Code example #54
# Instantiates the workflow components
sc = Source()
sc.name = 'PE_source'
divf = Div()
divf.name = 'PE_div'
crossp = CrossProd()
squaref = SimpleFunctionPE(square, {})
# Uncomment this line to associate this PE to the mycluster provenance-cluster
squaref = SimpleFunctionPE(square, {'prov_cluster': 'mycluster'})


# Initialise and compose the workflow graph
graph = WorkflowGraph()
graph.connect(sc, 'output', squaref, 'input')
graph.connect(sc, 'output', crossp, 'input1')
graph.connect(squaref, 'output', crossp, 'input2')
graph.connect(crossp, 'output', divf, 'input')


# Declare workflow inputs:
input_data = {"PE_source": [{"input": [10]}]}

# Location of the remote repository for runtime updates of the lineage
# traces. Shared among ProvenanceRecorder subtypes.
ProvenanceRecorder.REPOS_URL = 'http://verce-portal-dev.scai.fraunhofer.de/j2ep-1.0/prov/workflow/insert'

Code example #55
File: delayed_pipeline.py Project: xpivan/dispel4py
        Each node in the graph is executed as a separate MPI process.
        This graph has 3 nodes. For this reason we need at least 3 MPI
        processes to execute it.

    Output::

        Processes: {'TestDelayOneInOneOut2': [2, 3], 'TestProducer0': [4], \
'TestOneInOneOut1': [0, 1]}
        TestProducer0 (rank 4): Processed 10 iterations.
        TestOneInOneOut1 (rank 1): Processed 5 iterations.
        TestOneInOneOut1 (rank 0): Processed 5 iterations.
        TestDelayOneInOneOut2 (rank 3): Average processing time: 1.00058307648
        TestDelayOneInOneOut2 (rank 3): Processed 5 iterations.
        TestDelayOneInOneOut2 (rank 2): Average processing time: 1.00079641342
        TestDelayOneInOneOut2 (rank 2): Processed 5 iterations.

'''

from dispel4py.examples.graph_testing import testing_PEs as t
from dispel4py.workflow_graph import WorkflowGraph
from dispel4py.new.monitoring import ProcessTimingPE

prod = t.TestProducer()
cons1 = t.TestOneInOneOut()
''' adding a processing timer '''
cons2 = ProcessTimingPE(t.TestDelayOneInOneOut())
''' important: this is the graph_variable '''
graph = WorkflowGraph()
graph.connect(prod, 'output', cons1, 'input')
graph.connect(cons1, 'output', cons2, 'input')
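The sample output in the docstring corresponds to a run with five MPI processes; with the dispel4py command-line tool this would be launched along the lines of mpiexec -n 5 dispel4py mpi dispel4py.examples.graph_testing.delayed_pipeline -i 10 (the exact module path is an assumption based on the file name).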
Code example #56
def graph_stddev():
    prod = NumberProducer(1000)
    std = parallelStdDev()
    graph = WorkflowGraph()
    graph.connect(prod, 'output', std, 'input')
    return graph
Code example #57
def graph_count():
    prod = NumberProducer(1000)
    c = parallelCount()
    graph = WorkflowGraph()
    graph.connect(prod, 'output', c, 'input')
    return graph
Code example #58
                NAME: 'output',
                TYPE: ['timestamp', 'location', 'stream']
            }
        }

    def process(self, inputs):
        stream = read(
            '/Users/akrause/VERCE/data/laquila/20100501-20120930_fseed/TERO/20100501.fseed'
        )
        return {'output': [{}, {}, {'data': stream}]}


from dispel4py.workflow_graph import WorkflowGraph

controlParameters = {
    'runId': '12345',
    'username': '******',
    'outputdest': "./"
}

from dispel4py.seismo.obspy_stream import createProcessingComposite, INPUT_NAME, OUTPUT_NAME

chain = []
chain.append(PEMeanSub)
chain.append(PEDetrend)
composite = createProcessingComposite(chain,
                                      controlParameters=controlParameters)

producer = TestProducer()
graph = WorkflowGraph()
graph.connect(producer, 'output', composite, INPUT_NAME)
Code example #59
def create_partitioned(workflow_all):
    processes_all, inputmappings_all, outputmappings_all = assign_and_connect(
        workflow_all, len(workflow_all.graph.nodes()))
    proc_to_pe_all = {v[0]: k for k, v in processes_all.items()}
    partitions = get_partitions(workflow_all)
    external_connections = []
    pe_to_partition = {}
    partition_pes = []
    for i in range(len(partitions)):
        for pe in partitions[i]:
            pe_to_partition[pe.id] = i
    for index in range(len(partitions)):
        result_mappings = {}
        part = partitions[index]
        partition_id = index
        component_ids = [pe.id for pe in part]
        workflow = copy.deepcopy(workflow_all)
        graph = workflow.graph
        for node in graph.nodes():
            if node.getContainedObject().id not in component_ids:
                graph.remove_node(node)
        processes, inputmappings, outputmappings = \
            assign_and_connect(workflow, len(graph.nodes()))
        proc_to_pe = {}
        for node in graph.nodes():
            pe = node.getContainedObject()
            proc_to_pe[processes[pe.id][0]] = pe
        for node in graph.nodes():
            pe = node.getContainedObject()
            pe.rank = index
            proc_all = processes_all[pe.id][0]
            for output_name in outputmappings_all[proc_all]:
                for dest_input, comm_all in outputmappings_all[proc_all][
                        output_name]:
                    dest = proc_to_pe_all[comm_all.destinations[0]]
                    if dest not in processes:
                        # it's an external connection
                        external_connections.append(
                            (comm_all, partition_id, pe.id, output_name,
                             pe_to_partition[dest], dest, dest_input))
                        try:
                            result_mappings[pe.id].append(output_name)
                        except KeyError:
                            result_mappings[pe.id] = [output_name]
        partition_pe = SimpleProcessingPE(inputmappings, outputmappings,
                                          proc_to_pe)
        partition_pe.workflow = workflow
        partition_pe.partition_id = partition_id
        if result_mappings:
            partition_pe.result_mappings = result_mappings
        partition_pe.map_inputs = _map_inputs_to_pes
        partition_pe.map_outputs = _map_outputs_from_pes
        partition_pes.append(partition_pe)
    # print 'EXTERNAL CONNECTIONS : %s' % external_connections
    ubergraph = WorkflowGraph()
    ubergraph.pe_to_partition = pe_to_partition
    ubergraph.partition_pes = partition_pes
    # sort the external connections so that nodes are added in the same order
    # if doing this in multiple processes in parallel this is important
    for comm, source_partition, source_id, source_output, \
        dest_partition, dest_id, dest_input in sorted(external_connections):
        partition_pes[source_partition]._add_output((source_id, source_output))
        partition_pes[dest_partition]._add_input((dest_id, dest_input),
                                                 grouping=comm.name)
        ubergraph.connect(partition_pes[source_partition],
                          (source_id, source_output),
                          partition_pes[dest_partition], (dest_id, dest_input))
    return ubergraph
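A hypothetical driver sketch (not part of the original module), assuming a graph that carries a partitions attribute such as the one returned by testGrouping() in code example #36:

workflow = testGrouping()  # graph.partitions = [[words], [cons1, cons2, cons3], [count]]
ubergraph = create_partitioned(workflow)
# ubergraph now holds one SimpleProcessingPE per partition, wired together
# wherever a connection crosses a partition boundary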
Code example #60
def graph_avg():
    prod = NumberProducer(1000)
    a = parallelAvg()
    graph = WorkflowGraph()
    graph.connect(prod, 'output', a, 'input')
    return graph