def translate_all_records(input_file, begin_phase, end_phase, output_file):
    """Translate every trace record between begin_phase and end_phase from
    input_file and write the translated trace to output_file.

    Records caused in the main task are emitted immediately.  Records caused
    in worker tasks are buffered per MPI process and flushed — preceded by a
    fake barrier that joins all worker tasks in the main task — each time a
    new MPI process starts, and once more for the last process after the
    reader is exhausted.
    """
    mpi_rank = utils.STARTING_MPI_PROCESS

    for record, starts_new_process in input_trf.trf_file_reader(input_file, begin_phase, end_phase):
        if starts_new_process:
            # Close out the previous MPI process: fake one barrier so the
            # main task joins all its worker tasks, then organize and flush
            # the worker records buffered for that process.
            add_final_worker_tasks_join(mpi_rank, output_file)
            flush_finalized_worker_task_records(output_file)

            mpi_rank += 1
            start_new_MPI_process(output_file, mpi_rank)

            # Progress report on stdout.
            sys.stdout.write("\r")
            sys.stdout.write("translating MPI process number: %d\n" % mpi_rank)
            sys.stdout.flush()
            continue

        # Main-task records go straight into the output trace.
        flush_main_task_records(output_file, translate_main_task.get_caused_main_task_records(record))

        # Worker-task records are only buffered here; they are organized and
        # flushed once the whole MPI process has been read.
        store_worker_records(translate_worker_task.get_caused_worker_task_records(record))

    # Finalize the last MPI process the same way: join its workers in the
    # main task, then flush its buffered worker records.
    add_final_worker_tasks_join(mpi_rank, output_file)
    flush_finalized_worker_task_records(output_file)

    sys.stdout.write("\n")
    print("finished translation")
def add_final_worker_tasks_join(current_MPI, out_trace):
    """Emit into the main task a join of all worker tasks of *current_MPI*.

    A synthetic (fake) barrier event is fabricated for the given MPI process,
    translated through the main-task translator, and the resulting records
    are flushed to *out_trace*.
    """
    synthetic_barrier = utils.TraceRecord.create_css_barrier_event(current_MPI)
    caused_records = translate_main_task.get_caused_main_task_records(synthetic_barrier)
    flush_main_task_records(out_trace, caused_records)