def process(workflow, inputs, args):
    processes = {}
    inputmappings = {}
    outputmappings = {}
    success = True
    nodes = [node.getContainedObject() for node in workflow.graph.nodes()]
    if rank == 0 and not args.simple:
        try:
            processes, inputmappings, outputmappings = \
                processor.assign_and_connect(workflow, size)
        except:
            success = False
    success = comm.bcast(success, root=0)

    if args.simple or not success:
        ubergraph = processor.create_partitioned(workflow)
        nodes = [node.getContainedObject()
                 for node in ubergraph.graph.nodes()]
        if rank == 0:
            print 'Partitions: %s' % ', '.join(
                ('[%s]' % ', '.join((pe.id for pe in part))
                 for part in workflow.partitions))
            for node in ubergraph.graph.nodes():
                wrapperPE = node.getContainedObject()
                ns = [n.getContainedObject().id
                      for n in wrapperPE.workflow.graph.nodes()]
                print('%s contains %s' % (wrapperPE.id, ns))
            try:
                processes, inputmappings, outputmappings = \
                    processor.assign_and_connect(ubergraph, size)
                inputs = processor.map_inputs_to_partitions(ubergraph, inputs)
                success = True
            except:
                print 'dispel4py.mpi_process: ' \
                    'Not enough processes for execution of graph'
                success = False
        success = comm.bcast(success, root=0)

    if not success:
        return

    inputs = {pe.id: v for pe, v in inputs.iteritems()}
    processes = comm.bcast(processes, root=0)
    inputmappings = comm.bcast(inputmappings, root=0)
    outputmappings = comm.bcast(outputmappings, root=0)
    inputs = comm.bcast(inputs, root=0)

    if rank == 0:
        print 'Processes: %s' % processes
        # print 'Inputs: %s' % inputs

    for pe in nodes:
        if rank in processes[pe.id]:
            provided_inputs = processor.get_inputs(pe, inputs)
            wrapper = MPIWrapper(pe, provided_inputs)
            wrapper.targets = outputmappings[rank]
            wrapper.sources = inputmappings[rank]
            wrapper.process()
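# A minimal sketch of the module-level MPI setup assumed above (not part of
# this excerpt): process() reads comm, rank and size as globals, which is
# consistent with a standard mpi4py preamble such as:
from mpi4py import MPI
comm = MPI.COMM_WORLD   # global communicator used for the broadcasts
rank = comm.Get_rank()  # this process's rank; rank 0 plans the mapping
size = comm.Get_size()  # number of MPI processes the graph is mapped onto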
def process_and_return(workflow, inputs, resultmappings=None):
    numnodes = 0
    for node in workflow.graph.nodes():
        numnodes += 1
        node.getContainedObject().numprocesses = 1
    processes, inputmappings, outputmappings = \
        processor.assign_and_connect(workflow, numnodes)
    # print 'Processes: %s' % processes
    # print inputmappings
    # print outputmappings
    proc_to_pe = {}
    for node in workflow.graph.nodes():
        pe = node.getContainedObject()
        proc_to_pe[processes[pe.id][0]] = pe
    simple = SimpleProcessingPE(inputmappings, outputmappings, proc_to_pe)
    simple.id = 'SimplePE'
    simple.result_mappings = resultmappings
    wrapper = SimpleProcessingWrapper(simple, [inputs])
    wrapper.targets = {}
    wrapper.sources = {}
    wrapper.process()
    # now collect output data into a single list for each PE
    outputs = {}
    for (pe_id, output_name), data in wrapper.outputs.iteritems():
        if pe_id not in outputs:
            outputs[pe_id] = {}
        try:
            outputs[pe_id][output_name] += data
        except KeyError:
            outputs[pe_id][output_name] = data
    return outputs
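# Hypothetical usage sketch for process_and_return, reusing the test PEs
# from the script at the end of this section; the inputs convention
# (PE instance -> list of input data blocks) is assumed to match that script:
from dispel4py.workflow_graph import WorkflowGraph
from dispel4py.examples.graph_testing.testing_PEs \
    import TestProducer, TestOneInOneOut

prod = TestProducer()
cons = TestOneInOneOut()
graph = WorkflowGraph()
graph.connect(prod, 'output', cons, 'input')
# Returns {pe_id: {output_name: [data, ...]}} after sequential execution.
print process_and_return(graph, {prod: [{}]})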
# Variant of process_and_return that skips the aggregation step and returns
# wrapper.outputs directly, keyed by (pe_id, output_name) tuples.
def process_and_return(workflow, inputs, resultmappings=None):
    numnodes = 0
    for node in workflow.graph.nodes():
        numnodes += 1
        node.getContainedObject().numprocesses = 1
    processes, inputmappings, outputmappings = \
        processor.assign_and_connect(workflow, numnodes)
    proc_to_pe = {}
    for node in workflow.graph.nodes():
        pe = node.getContainedObject()
        proc_to_pe[processes[pe.id][0]] = pe
    simple = SimpleProcessingPE(inputmappings, outputmappings, proc_to_pe)
    simple.id = 'SimplePE'
    simple.result_mappings = resultmappings
    wrapper = SimpleProcessingWrapper(simple, [inputs])
    wrapper.targets = {}
    wrapper.sources = {}
    wrapper.process()
    return wrapper.outputs
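# Sketch of regrouping the flat mapping returned by this variant into the
# nested {pe_id: {output_name: [...]}} shape produced by the version above
# (assumes the same (pe_id, output_name) key convention):
def group_outputs(flat_outputs):
    grouped = {}
    for (pe_id, output_name), data in flat_outputs.iteritems():
        grouped.setdefault(pe_id, {}).setdefault(output_name, [])
        grouped[pe_id][output_name] += data
    return grouped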
def process(workflow, inputs, args):
    size = args.num
    if not size:
        return 'dispel4py.multi_process: ' \
               'error: missing required argument -n num_processes'
    success = True
    nodes = [node.getContainedObject() for node in workflow.graph.nodes()]
    if not args.simple:
        try:
            result = processor.assign_and_connect(workflow, size)
            processes, inputmappings, outputmappings = result
        except:
            success = False

    if args.simple or not success:
        ubergraph = processor.create_partitioned(workflow)
        print 'Partitions: %s' % ', '.join(
            ('[%s]' % ', '.join((pe.id for pe in part))
             for part in workflow.partitions))
        for node in ubergraph.graph.nodes():
            wrapperPE = node.getContainedObject()
            print('%s contains %s' % (
                wrapperPE.id,
                [n.getContainedObject().id
                 for n in wrapperPE.workflow.graph.nodes()]))
        try:
            processes, inputmappings, outputmappings = \
                processor.assign_and_connect(ubergraph, size)
            inputs = processor.map_inputs_to_partitions(ubergraph, inputs)
            success = True
            nodes = [node.getContainedObject()
                     for node in ubergraph.graph.nodes()]
        except:
            print traceback.format_exc()
            return 'dispel4py.multi_process: ' \
                   'Not enough processes for execution of graph'

    print 'Processes: %s' % processes
    # print 'Inputs: %s' % inputs

    # Create one wrapper, with its own input queue, per worker process.
    process_pes = {}
    queues = {}
    for pe in nodes:
        provided_inputs = processor.get_inputs(pe, inputs)
        for proc in processes[pe.id]:
            cp = copy.deepcopy(pe)
            cp.rank = proc
            cp.log = types.MethodType(simpleLogger, cp)
            wrapper = MultiProcessingWrapper(proc, cp, provided_inputs)
            process_pes[proc] = wrapper
            wrapper.input_queue = multiprocessing.Queue()
            wrapper.input_queue.name = 'Queue_%s_%s' % (cp.id, cp.rank)
            queues[proc] = wrapper.input_queue
            wrapper.targets = outputmappings[proc]
            wrapper.sources = inputmappings[proc]
    # Wire each wrapper to the input queues of its destination processes.
    for proc in process_pes:
        wrapper = process_pes[proc]
        wrapper.output_queues = {}
        for target in wrapper.targets.values():
            for inp, comm in target:
                for i in comm.destinations:
                    wrapper.output_queues[i] = queues[i]
    # Start one OS process per wrapper and wait for all of them to finish.
    jobs = []
    for wrapper in process_pes.values():
        p = multiprocessing.Process(target=_processWorker, args=(wrapper, ))
        jobs.append(p)
    for j in jobs:
        j.start()
    for j in jobs:
        j.join()
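# _processWorker and simpleLogger are referenced above but are not part of
# this excerpt. A minimal sketch of what the worker entry point needs to do
# (an assumption, not necessarily the library's actual implementation):
def _processWorker(wrapper):
    # Runs inside the child process: the wrapper consumes data from its
    # input_queue and forwards results via its output_queues.
    wrapper.process()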
import processor
import multi_process
from dispel4py.workflow_graph import WorkflowGraph
from dispel4py.examples.graph_testing.testing_PEs \
    import TestProducer, TestOneInOneOut

prod = TestProducer()
cons1 = TestOneInOneOut()
cons2 = TestOneInOneOut()
graph = WorkflowGraph()
graph.connect(prod, 'output', cons1, 'input')
graph.connect(cons1, 'output', cons2, 'input')
graph.partitions = [[prod], [cons1, cons2]]

ubergraph = processor.create_partitioned(graph)
processes, inputmappings, outputmappings = \
    processor.assign_and_connect(ubergraph, 2)
print processes
print inputmappings
print outputmappings

inputs = {prod: [{}]}
# map_inputs_to_partitions redirects the PE-keyed inputs to the wrapper PE
# that contains the original producer.
mapped_inputs = processor.map_inputs_to_partitions(ubergraph, inputs)
print 'MAPPED INPUTS: %s' % mapped_inputs
# multi_process.process expects (workflow, inputs, args), where args carries
# the number of processes and the 'simple' flag; a minimal stand-in:
import argparse
args = argparse.Namespace(num=2, simple=False)
multi_process.process(ubergraph, mapped_inputs, args)