# MPI mapping: rank 0 computes the PE-to-rank assignment and broadcasts it;
# every rank then runs the wrappers for the PE instances assigned to it.
# (MPIWrapper is defined elsewhere in this module.)
from mpi4py import MPI

from dispel4py.new import processor

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()


def process(workflow, inputs, args):
    processes = {}
    inputmappings = {}
    outputmappings = {}
    success = True
    nodes = [node.getContainedObject() for node in workflow.graph.nodes()]
    if rank == 0 and not args.simple:
        # Try the default mapping first: distribute the PEs over `size` ranks.
        try:
            processes, inputmappings, outputmappings = \
                processor.assign_and_connect(workflow, size)
        except Exception:
            success = False
    success = comm.bcast(success, root=0)

    if args.simple or not success:
        # Fall back to a partitioned graph: each partition of PEs is wrapped
        # into a single composite PE so the workflow fits the available ranks.
        ubergraph = processor.create_partitioned(workflow)
        nodes = [node.getContainedObject()
                 for node in ubergraph.graph.nodes()]
        if rank == 0:
            print('Partitions: %s' % ', '.join(
                '[%s]' % ', '.join(pe.id for pe in part)
                for part in workflow.partitions))
            for node in ubergraph.graph.nodes():
                wrapperPE = node.getContainedObject()
                ns = [n.getContainedObject().id
                      for n in wrapperPE.workflow.graph.nodes()]
                print('%s contains %s' % (wrapperPE.id, ns))
            try:
                processes, inputmappings, outputmappings = \
                    processor.assign_and_connect(ubergraph, size)
                inputs = processor.map_inputs_to_partitions(ubergraph, inputs)
                success = True
            except Exception:
                print('dispel4py.mpi_process: '
                      'Not enough processes for execution of graph')
                success = False
        success = comm.bcast(success, root=0)

    if not success:
        return

    try:
        # Re-key the inputs by PE id so they can be broadcast to all ranks.
        inputs = {pe.id: v for pe, v in inputs.items()}
    except AttributeError:
        # Inputs are already keyed by id.
        pass

    processes = comm.bcast(processes, root=0)
    inputmappings = comm.bcast(inputmappings, root=0)
    outputmappings = comm.bcast(outputmappings, root=0)
    inputs = comm.bcast(inputs, root=0)

    if rank == 0:
        print('Processes: %s' % processes)

    for pe in nodes:
        if rank in processes[pe.id]:
            provided_inputs = processor.get_inputs(pe, inputs)
            wrapper = MPIWrapper(pe, provided_inputs)
            wrapper.targets = outputmappings[rank]
            wrapper.sources = inputmappings[rank]
            wrapper.process()
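# --- Hypothetical driver (illustrative sketch, not part of the source) -----
# Assumes a user module `my_workflow` exposing a dispel4py WorkflowGraph
# named `graph` whose source PE has id 'producer'; both names are made up.
# Run under MPI so that comm/rank/size above are meaningful, e.g.:
#     mpiexec -n 4 python driver.py
def _example_mpi_driver():
    from argparse import Namespace

    from my_workflow import graph  # hypothetical user module

    # Only `args.simple` is read by the MPI process() above; mapping the
    # source PE id to 5 requests five iterations of that PE (assumed input
    # convention).
    process(graph, {'producer': 5}, Namespace(simple=False))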
# Multiprocessing mapping: one OS process per PE instance, connected by
# multiprocessing queues. (MultiProcessingWrapper and _processWorker are
# defined elsewhere in this module; simpleLogger and STATUS_TERMINATED come
# from the processor module.)
import copy
import multiprocessing
import traceback
import types

from dispel4py.new import processor
from dispel4py.new.processor import STATUS_TERMINATED, simpleLogger


def process(workflow, inputs, args):
    size = args.num
    success = True
    nodes = [node.getContainedObject() for node in workflow.graph.nodes()]
    if not args.simple:
        try:
            result = processor.assign_and_connect(workflow, size)
            processes, inputmappings, outputmappings = result
        except Exception:
            success = False
    if args.simple or not success:
        # Fall back to a partitioned graph, as in the MPI mapping above.
        ubergraph = processor.create_partitioned(workflow)
        print('Partitions: %s' % ', '.join(
            '[%s]' % ', '.join(pe.id for pe in part)
            for part in workflow.partitions))
        for node in ubergraph.graph.nodes():
            wrapperPE = node.getContainedObject()
            pes = [n.getContainedObject().id
                   for n in wrapperPE.workflow.graph.nodes()]
            print('%s contains %s' % (wrapperPE.id, pes))
        try:
            result = processor.assign_and_connect(ubergraph, size)
            if result is None:
                return ('dispel4py.multi_process: '
                        'Not enough processes for execution of graph')
            processes, inputmappings, outputmappings = result
            inputs = processor.map_inputs_to_partitions(ubergraph, inputs)
            success = True
            nodes = [node.getContainedObject()
                     for node in ubergraph.graph.nodes()]
        except Exception:
            print(traceback.format_exc())
            return ('dispel4py.multi_process: '
                    'Could not create mapping for execution of graph')
    print('Processes: %s' % processes)

    process_pes = {}
    queues = {}
    result_queue = None
    try:
        if args.results:
            result_queue = multiprocessing.Queue()
    except AttributeError:
        # Older argument namespaces may not define `results`.
        pass

    # Create one wrapper, with its own input queue, per process assigned to
    # each PE.
    for pe in nodes:
        provided_inputs = processor.get_inputs(pe, inputs)
        for proc in processes[pe.id]:
            cp = copy.deepcopy(pe)
            cp.rank = proc
            cp.log = types.MethodType(simpleLogger, cp)
            wrapper = MultiProcessingWrapper(proc, cp, provided_inputs)
            process_pes[proc] = wrapper
            wrapper.input_queue = multiprocessing.Queue()
            wrapper.input_queue.name = 'Queue_%s_%s' % (cp.id, cp.rank)
            wrapper.result_queue = result_queue
            queues[proc] = wrapper.input_queue
            wrapper.targets = outputmappings[proc]
            wrapper.sources = inputmappings[proc]

    # Wire each wrapper's outputs to the input queues of its destinations.
    for proc in process_pes:
        wrapper = process_pes[proc]
        wrapper.output_queues = {}
        for target in wrapper.targets.values():
            for inp, comm in target:
                for i in comm.destinations:
                    wrapper.output_queues[i] = queues[i]

    jobs = []
    for wrapper in process_pes.values():
        p = multiprocessing.Process(target=_processWorker, args=(wrapper,))
        jobs.append(p)
    for j in jobs:
        j.start()
    for j in jobs:
        j.join()
    if result_queue:
        result_queue.put(STATUS_TERMINATED)
    return result_queue
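# The jobs above target a module-level `_processWorker`. In dispel4py this
# worker is essentially a one-line delegation; a minimal sketch:
def _processWorker(wrapper):
    # Entry point of each child process: run the wrapped PE's
    # receive/process/send loop until the graph terminates.
    wrapper.process()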
# Multiprocessing mapping with optional profiling: identical to the variant
# above, but tags every run with a submission id and, when profiling is on,
# collects characterisation profiles in a shared dict. (Monitor is the
# profiling helper assumed to be defined or imported elsewhere in this
# variant.)
import copy
import multiprocessing
import time
import traceback
import types
import uuid

from dispel4py.new import processor
from dispel4py.new.processor import STATUS_TERMINATED, simpleLogger


def process(workflow, inputs, args):
    workflow_submission_id = uuid.uuid1().hex
    print(workflow_submission_id)
    # If profiling is switched on, share a dict across worker processes to
    # store the characterisation profiles.
    if args.profileOn:
        manager = multiprocessing.Manager()
        profiles = manager.dict()
        multi_monitor = Monitor(profiles, args, workflow)
        t1 = time.time()
    size = args.num
    success = True
    nodes = [node.getContainedObject() for node in workflow.graph.nodes()]
    if not args.simple:
        try:
            result = processor.assign_and_connect(workflow, size)
            processes, inputmappings, outputmappings = result
        except Exception:
            success = False
    if args.simple or not success:
        ubergraph = processor.create_partitioned(workflow)
        print('Partitions: %s' % ', '.join(
            '[%s]' % ', '.join(pe.id for pe in part)
            for part in workflow.partitions))
        for node in ubergraph.graph.nodes():
            wrapperPE = node.getContainedObject()
            pes = [n.getContainedObject().id
                   for n in wrapperPE.workflow.graph.nodes()]
            print('%s contains %s' % (wrapperPE.id, pes))
        try:
            result = processor.assign_and_connect(ubergraph, size)
            if result is None:
                return ('dispel4py.multi_process: '
                        'Not enough processes for execution of graph')
            processes, inputmappings, outputmappings = result
            inputs = processor.map_inputs_to_partitions(ubergraph, inputs)
            success = True
            nodes = [node.getContainedObject()
                     for node in ubergraph.graph.nodes()]
        except Exception:
            print(traceback.format_exc())
            return ('dispel4py.multi_process: '
                    'Could not create mapping for execution of graph')
    print('Processes: %s' % processes)

    process_pes = {}
    queues = {}
    result_queue = None
    try:
        if args.results:
            result_queue = multiprocessing.Queue()
    except AttributeError:
        pass

    for pe in nodes:
        provided_inputs = processor.get_inputs(pe, inputs)
        for proc in processes[pe.id]:
            cp = copy.deepcopy(pe)
            cp.rank = proc
            cp.log = types.MethodType(simpleLogger, cp)
            # Pass the submission id to every wrapper; add the shared
            # profiles dict only when profiling is enabled.
            if args.profileOn:
                wrapper = MultiProcessingWrapper(
                    proc, cp, provided_inputs,
                    workflow_submission_id=workflow_submission_id,
                    profiles=profiles)
            else:
                wrapper = MultiProcessingWrapper(
                    proc, cp, provided_inputs,
                    workflow_submission_id=workflow_submission_id)
            process_pes[proc] = wrapper
            wrapper.input_queue = multiprocessing.Queue()
            wrapper.input_queue.name = 'Queue_%s_%s' % (cp.id, cp.rank)
            wrapper.result_queue = result_queue
            queues[proc] = wrapper.input_queue
            wrapper.targets = outputmappings[proc]
            wrapper.sources = inputmappings[proc]

    for proc in process_pes:
        wrapper = process_pes[proc]
        wrapper.output_queues = {}
        for target in wrapper.targets.values():
            for inp, comm in target:
                for i in comm.destinations:
                    wrapper.output_queues[i] = queues[i]

    jobs = []
    for wrapper in process_pes.values():
        p = multiprocessing.Process(target=_processWorker, args=(wrapper,))
        jobs.append(p)
    for j in jobs:
        j.start()
    for j in jobs:
        j.join()
    if result_queue:
        result_queue.put(STATUS_TERMINATED)
    if args.profileOn:
        # Record total execution time and submission timestamp, then let the
        # monitor map PEs to processes and persist the analysis.
        t2 = time.time()
        t3 = t2 - t1
        profiles['exec_%s' % workflow_submission_id] = t3
        profiles['submitted_%s' % workflow_submission_id] = time.strftime(
            '%Y-%m-%d %H:%M:%S', time.localtime(t1))
        multi_monitor.get_pe_process_map(processes, workflow_submission_id)
        multi_monitor.analyse_and_record()
    return result_queue
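# Hypothetical consumer of the queue returned by the multiprocessing
# mappings: a minimal sketch assuming `args.results` was set so a result
# queue exists, and relying on the STATUS_TERMINATED sentinel that process()
# enqueues on completion (the layout of items before the sentinel is not
# specified here).
def drain_results(result_queue):
    if result_queue is None or isinstance(result_queue, str):
        # Either no result queue was requested, or process() returned an
        # error message instead of a queue.
        return
    while True:
        item = result_queue.get()
        if item == STATUS_TERMINATED:
            break
        print('result: %s' % (item,))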