Example #1
0
def generate_report(graph):
    """Run the trace-report op and save the IPU reports to disk.

    graph -- object exposing `session` (a TF session) and `report` (the
             event-trace op to run); presumably a namedtuple or similar
             container built elsewhere in this file — confirm against caller.

    Side effects: writes report.txt, compilation_report.json and
    execution_report.json to the current working directory.
    """
    print(f'Generating training report... {graph.report}')
    report = graph.session.run(graph.report)
    compilation_report = ipu_utils.extract_compile_reports(report)
    execution_report = ipu_utils.extract_execute_reports(report)

    with open("report.txt", "w") as f:
        f.write(ipu_utils.extract_all_strings_from_event_trace(report))
    with open("compilation_report.json", "w") as f:
        json.dump(compilation_report, f)
    with open("execution_report.json", "w") as f:
        json.dump(execution_report, f)
    # The original message read "Reports saved to ." — it named no files.
    # List the actual output files so the user knows where to look.
    print('Reports saved to report.txt, compilation_report.json, '
          'execution_report.json.')
Example #2
0
def _format_transfer_tensors(evt):
    """Format the tensor list of a host<->device transfer event for display.

    evt -- IpuTraceEvent whose `data_transfer.data_transfer` payload is
           UTF-8 JSON with "tensors" (list of {"name", "size"}) and
           "total_size" keys, as produced by the IPU trace.
    """
    extra_str = "\n  Tensors:"
    transferred_tensors = json.loads(
        evt.data_transfer.data_transfer.decode('utf-8'))
    for t in transferred_tensors["tensors"]:
        extra_str += "\n    handle: {:>6}, size: {}".format(
            t["name"], t["size"])
    extra_str += "\n  Total_size: {}".format(
        transferred_tensors["total_size"])
    return extra_str


def extract_runtimes_from_report(raw_report, opts, start_time=0, display=True):
    """Returns timing information from IpuTraceEvent

    raw_report -- Array of text encoded IpuTraceEvent
    opts -- options object; only `opts.tile_activity_report` is read here
    start_time -- timestamp of the last event from a previous call; 0 means
                  this is the first call, in which case the first event
                  defines the time origin and is dropped from the listing
    display -- when True, print a per-event timing summary to stdout

    Returns ({"graph": ..., "execution": ...}, last_timestamp), or None when
    raw_report is empty. The report strings are only populated on the first
    call; on later calls both dict values are None.
    """
    # Empty report: nothing to do. (The original wrote `len(raw_report) is 0`
    # — an identity check against an int literal that only works via CPython
    # small-int caching and raises a SyntaxWarning on modern Pythons.)
    if not raw_report:
        return

    # Timings from tf xla event timestamps
    from tensorflow.compiler.plugin.poplar.driver.trace_pb2 import IpuTraceEvent

    events = list(map(IpuTraceEvent.FromString, raw_report))
    first = start_time == 0
    if first:
        # First call: the first event's timestamp becomes the time origin.
        start_time = events[0].timestamp
        events = events[1:]

    # Retrieve IpuEvents, poplar report and cycles
    if display:
        evt_str = "\nIPU Timings\n"
        compile_started = False

        for evt in events:
            extra_str = ""
            if evt.type == IpuTraceEvent.COMPILE_BEGIN:
                compile_started = True
                evt_name = "Compile start"
            elif evt.type == IpuTraceEvent.COMPILE_END:
                if compile_started:
                    compile_started = False
                    evt_name = "Compile"
                else:
                    # COMPILE_END without a matching COMPILE_BEGIN: skip it.
                    continue
            elif evt.type == IpuTraceEvent.HOST_TO_DEVICE_TRANSFER:
                evt_name = "Host->Device"
                extra_str = _format_transfer_tensors(evt)
            elif evt.type == IpuTraceEvent.DEVICE_TO_HOST_TRANSFER:
                evt_name = "Device->Host"
                extra_str = _format_transfer_tensors(evt)
            elif evt.type == IpuTraceEvent.LOAD_ENGINE:
                evt_name = "Load engine"
            elif evt.type == IpuTraceEvent.EXECUTE:
                evt_name = "Execute"
            else:
                evt_name = "Unknown event"
            evt_str += "{:<15s}: {:<8.3g} s   {}\n".format(
                evt_name, (evt.timestamp - start_time), extra_str)
            # Each displayed duration is the delta from the previous event.
            start_time = evt.timestamp

        print(evt_str)

    # Write Report to file.
    # Initialise both to None so the return below cannot raise NameError:
    # in the original, these names were only bound when `first` was true,
    # so any second call crashed at the return statement.
    graph_report = execution_report = None
    if first:
        with open("graph.json", "w") as f:
            graph_report = utils.extract_compile_reports(raw_report)[0][1]
            f.write(graph_report)
        with open("execution.json", "w") as f:
            execution_report = utils.extract_execute_reports(raw_report)[0][1]
            f.write(execution_report)
        print("\nWritten to file: graph.json, execution.json")

    if opts.tile_activity_report:
        generate_tile_activity(raw_report)

    return {"graph": graph_report, "execution": execution_report}, start_time
Example #3
0
        # NOTE(review): this excerpt starts mid-scope (apparently inside a
        # session context) and is truncated at the end — `args`, `session`,
        # `trace`, `x`, `logits`, `utils` and NUM_UNITS_IN are defined
        # earlier in the file and are not visible here.
        # Therefore if the variable initializer graph runs on IPU, we can prevent it from being included
        # in the reports with a "session.run(trace)" after it has been run (line above).
        if args.no_var_init_profiling and not args.var_init_on_cpu:
            session.run(trace)

        # Create dummy data
        training_data = np.zeros([1, NUM_UNITS_IN])
        # Run the main graph
        session.run(logits, feed_dict={x: training_data})
        # Execute the event trace op: the result is a list of trace event serialized protobufs.
        raw_report = session.run(trace)
        # These objects can be converted to strings with utility functions, as shown below.
        ext = ".json" if args.json_report else ".txt"
        if args.split_reports:
            # One compile report per profiled graph; execution reports only
            # exist when execution profiling is enabled (see comment below).
            compile_reports = utils.extract_compile_reports(raw_report)
            execution_reports = utils.extract_execute_reports(raw_report)
            # These are lists, as long as the number of graphs profiled, except that the
            # execution_reports list will be empty if execution profiling is not enabled.
            # You could save only the last (i.e. relative to the main graph); in this case we save everything.
            with open("compile" + ext, "w", encoding="utf-8") as f:
                for report in compile_reports:
                    # Each element of the list is a tuple of 2 elements:
                    # the first is a string representing an auto-generated name of the xla graph
                    # the second is a string containing the actual report relative to the graph
                    xla_name, report_string = report
                    f.write(xla_name + "\n")
                    f.write(report_string + "\n")
            if len(execution_reports) > 0:
                with open("execution" + ext, "w", encoding="utf-8") as f:
                    # NOTE(review): the loop body is cut off by the excerpt;
                    # presumably it mirrors the compile-report writes above.
                    for report in execution_reports:
                        xla_name, report_string = report