Code Example #1
def testGrouping():
    '''
    Creates the test graph.
    '''
    words = t.RandomWordProducer()
    cons1 = t.TestOneInOneOut()
    cons2 = t.TestOneInOneOut()
    cons3 = t.TestOneInOneOut()
    count = t.WordCounter()
    graph = WorkflowGraph()
    graph.connect(words, 'output', cons1, 'input')
    graph.connect(cons1, 'output', cons2, 'input')
    graph.connect(cons2, 'output', cons3, 'input')
    graph.connect(cons3, 'output', count, 'input')

    # One partition per stage: the producer, the chain of consumer PEs, and the counter.
    graph.partitions = [[words], [cons1, cons2, cons3], [count]]
    return graph
Code Example #2
File: group_by.py  Project: Ravanon/dispel4py
def testGrouping():
    '''
    Creates the test graph.
    '''
    words = t.RandomWordProducer()
    cons1 = t.TestOneInOneOut()
    cons2 = t.TestOneInOneOut()
    cons3 = t.TestOneInOneOut()
    count = t.WordCounter()
    graph = WorkflowGraph()
    graph.connect(words, 'output', cons1, 'input')
    graph.connect(cons1, 'output', cons2, 'input')
    graph.connect(cons2, 'output', cons3, 'input')
    graph.connect(cons3, 'output', count, 'input')

    graph.partitions = [[words], [cons1, cons2, cons3], [count]]
    return graph
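The snippet below is a minimal sketch of how a partitioned graph such as the one above could be executed. The processor and multi_process modules, the call signatures, and the one-empty-dict input for the producer are all taken from Examples #6 and #7 further down; the choice of three processes (one per partition) is an assumption, not something the original example states.

import processor
import multi_process

graph = testGrouping()
ubergraph = processor.create_partitioned(graph)

# The producer is the only PE in the first partition; feed it a single empty
# input, mirroring the inputs = {prod: [{}]} pattern of Examples #6 and #7.
words = graph.partitions[0][0]
inputs = {words: [{}]}

mapped_inputs = processor.map_inputs_to_partitions(ubergraph, inputs)
multi_process.process(ubergraph, 3, inputs=mapped_inputs)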
Code Example #3
def testParallelPipeline():
    """
    Creates the parallel pipeline graph with partitioning information.

    :return: the created graph
    :rtype: WorkflowGraph
    """
    graph = WorkflowGraph()
    prod = t.TestProducer()
    cons1 = t.TestOneInOneOut()
    cons2 = t.TestOneInOneOut()
    cons3 = t.TestOneInOneOut()

    graph.connect(prod, "output", cons1, "input")
    graph.connect(cons1, "output", cons2, "input")
    graph.connect(cons1, "output", cons3, "input")

    graph.partitions = [[prod, cons1, cons2], [cons3]]

    return graph
Code Example #4
def testParallelPipeline():
    '''
    Creates the parallel pipeline graph with partitioning information.

    :return: the created graph
    :rtype: WorkflowGraph
    '''
    graph = WorkflowGraph()
    prod = t.TestProducer()
    cons1 = t.TestOneInOneOut()
    cons2 = t.TestOneInOneOut()
    cons3 = t.TestOneInOneOut()

    graph.connect(prod, 'output', cons1, 'input')
    graph.connect(cons1, 'output', cons2, 'input')
    graph.connect(cons1, 'output', cons3, 'input')

    graph.partitions = [[prod, cons1, cons2], [cons3]]

    return graph
Code Example #5
def testParallelPipeline():
    '''
    Creates the parallel pipeline graph with partitioning information.
    
    :return: the created graph
    :rtype: WorkflowGraph
    '''
    graph = WorkflowGraph()
    prod = t.TestProducer()
    cons1 = t.TestOneInOneOut()
    cons2 = t.TestOneInOneOut()
    cons3 = t.TestOneInOneOut()

    graph.connect(prod, 'output', cons1, 'input')
    graph.connect(cons1, 'output', cons2, 'input')
    graph.connect(cons1, 'output', cons3, 'input')

    graph.partitions = [[prod, cons1, cons2], [cons3]]

    return graph
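All three variants above declare the same partitioning: prod, cons1 and cons2 share one partition while cons3 gets a partition of its own. Because cons2 and cons3 both read cons1's 'output', the edge from cons1 to cons3 is the only one that crosses a partition boundary, so under the partitioned execution shown in Examples #6 and #7 it is presumably that connection which becomes communication between processes.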
Code Example #6
import processor
from dispel4py.workflow_graph import WorkflowGraph
from dispel4py.examples.graph_testing.testing_PEs import TestProducer, TestOneInOneOut

prod = TestProducer()
cons1 = TestOneInOneOut()
cons2 = TestOneInOneOut()

graph = WorkflowGraph()
graph.connect(prod, 'output', cons1, 'input')
graph.connect(cons1, 'output', cons2, 'input')

# Run the producer in one partition and both consumers together in another.
graph.partitions = [[prod], [cons1, cons2]]

# Build the partitioned ('uber') graph and compute the assignment of the
# partitions to two processes.
ubergraph = processor.create_partitioned(graph)
processes, inputmappings, outputmappings = processor.assign_and_connect(
    ubergraph, 2)
print(processes)
print(inputmappings)
print(outputmappings)

import multi_process

# One empty input for the producer.
inputs = {prod: [{}]}
mapped_inputs = processor.map_inputs_to_partitions(ubergraph, inputs)
print('MAPPED INPUTS: %s' % mapped_inputs)
multi_process.process(ubergraph, 2, inputs=mapped_inputs)
Code Example #7
File: partitioned_test.py  Project: KNMI/wps_workflow
import processor
from dispel4py.workflow_graph import WorkflowGraph
from dispel4py.examples.graph_testing.testing_PEs import TestProducer, TestOneInOneOut


prod = TestProducer()
cons1 = TestOneInOneOut()
cons2 = TestOneInOneOut()
    
graph = WorkflowGraph()
graph.connect(prod, 'output', cons1, 'input')
graph.connect(cons1, 'output', cons2, 'input')

graph.partitions = [[prod], [cons1, cons2]]

ubergraph = processor.create_partitioned(graph)
processes, inputmappings, outputmappings = processor.assign_and_connect(ubergraph, 2)
print(processes)
print(inputmappings)
print(outputmappings)

import multi_process

inputs = {prod: [{}]}
mapped_inputs = processor.map_inputs_to_partitions(ubergraph, inputs)
print('MAPPED INPUTS: %s' % mapped_inputs)
multi_process.process(ubergraph, 2, inputs=mapped_inputs)
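Reading Examples #6 and #7 together, the apparent flow is: create_partitioned wraps each entry of graph.partitions into a composite PE and returns the resulting 'uber' graph; assign_and_connect distributes those composite PEs over the requested number of processes and reports the input and output mappings; map_inputs_to_partitions rewrites an input dictionary keyed by the original PEs (here the producer) so that it targets the composite PE containing them; and multi_process.process finally runs the partitioned graph, with two processes to match the two partitions. This description is inferred from the function names and how the snippets use them rather than from the dispel4py documentation.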