import io  # needed for the BytesIO wrapper below


def deser_block(f):
    # Accept either an already-open io.BytesIO stream or raw bytes.
    if type(f) is not io.BytesIO:
        f = io.BytesIO(f)

    blk = models.Block()

    # A block is a header followed by a vector of transactions;
    # deser_block_header, deser_vector and models come from the surrounding module.
    blk.header = deser_block_header(f)
    blk.transactions = deser_vector(f, models.Transaction)
    return blk
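A minimal usage sketch, assuming raw_bytes already holds a block serialized by the matching serializer (the file name and variable are purely illustrative):

raw_bytes = open("block.bin", "rb").read()   # hypothetical source of serialized block bytes
block = deser_block(raw_bytes)               # bytes are wrapped in io.BytesIO internally
print(len(block.transactions))               # transactions come back as models.Transaction objects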
Example #2
def parse_block(block, chain_id):
    # Build the document id as "<chain_id>:<height>".
    out = {'chain_id': chain_id, 'height': block['height']}
    out['id'] = ':'.join([out['chain_id'], str(out['height'])])
    txs = block['txs_results']
    if txs:
        out['transactions'] = parse_txs(txs, out['id'])
    # Merge begin-block and end-block events into one list before parsing.
    events = block['begin_block_events'] or []
    end_events = block['end_block_events']
    if end_events:
        events.extend(end_events)
    out['events'] = parse_events(events, out['id'])
    return models.Block(**out)
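A rough usage sketch with a hypothetical Tendermint-style block record; the keys are the ones the function reads, the values are illustrative, and parse_txs / parse_events are assumed to live in the same module:

raw = {
    'height': 42,
    'txs_results': [],          # no transactions in this example
    'begin_block_events': [],   # event payloads in whatever shape parse_events expects
    'end_block_events': [],
}
block = parse_block(raw, 'my-chain')   # returns a models.Block with id 'my-chain:42'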
Example #3
def tokenize_events(benchObj):
    events = []
    # Create events in the order they appear in the JSON, then sort by timestamp at the end
    # Per-benchmark times
    benchmark = models.Benchmark(benchObj["benchmark_name"], benchObj["label"],
                                 benchObj["max_resident_threads"])
    events.append(
        BenchmarkEvent(EventType.BENCHMARK_RELEASE, benchObj["release_time"],
                       benchmark))
    # We can ignore "cpu_times" as that's just a duplicate of the start of copy_in and the end of copy_out
    events.append(
        BenchmarkEvent(EventType.BENCHMARK_COPY_IN_START,
                       benchObj["times"][1]["copy_in_times"][0], benchmark))
    events.append(
        BenchmarkEvent(EventType.BENCHMARK_COPY_IN_END,
                       benchObj["times"][1]["copy_in_times"][1], benchmark))
    events.append(
        BenchmarkEvent(EventType.BENCHMARK_COPY_OUT_START,
                       benchObj["times"][1]["copy_out_times"][0], benchmark))
    events.append(
        BenchmarkEvent(EventType.BENCHMARK_COPY_OUT_END,
                       benchObj["times"][1]["copy_out_times"][1], benchmark))
    events.append(
        BenchmarkEvent(EventType.BENCHMARK_EXE_START,
                       benchObj["times"][1]["execute_times"][0], benchmark))
    events.append(
        BenchmarkEvent(EventType.BENCHMARK_EXE_END,
                       benchObj["times"][1]["execute_times"][1], benchmark))
    # Per-kernel times
    for kernelObj in benchObj["times"][2:]:
        # *** HACK *** HACK *** MAGIC NUMBERS AND PLACEHOLDER DATA. FIXME with real stream and priority data
        kernel = models.Kernel(kernelObj["kernel_name"], benchObj["TID"], 0,
                               benchmark)
        benchmark.kernels.append(kernel)
        events.append(
            KernelEvent(EventType.KERNEL_LAUNCH_START,
                        kernelObj["cuda_launch_times"][0], kernel))
        events.append(
            KernelEvent(EventType.KERNEL_LAUNCH_END,
                        kernelObj["cuda_launch_times"][1], kernel))
        # Kernel end time is optional (depends on application of cudaStreamSynchronize)
        # Falls back to last block time plus one nanosecond if unavailable
        if kernelObj["cuda_launch_times"][2] == 0:
            # WARNING / ENDC are terminal color constants defined elsewhere in the module.
            print(
                WARNING +
                "WARNING: Using last block time + 1ns as a substitute for the missing cudaStreamSynchronize timestamp for kernel "
                + kernel.name + ENDC)
            kernel_end_time = max(kernelObj["block_times"]) + 1e-9
        else:
            kernel_end_time = kernelObj["cuda_launch_times"][2]
        events.append(
            KernelEvent(EventType.KERNEL_END, kernel_end_time, kernel))
        # Per-block times: block_times is a flat list of (start, end) pairs,
        # one pair per block, matched positionally with the SM ids in block_smids
        idx = 0
        while idx * 2 < len(kernelObj["block_times"]):
            block = models.Block(kernelObj["block_smids"][idx],
                                 kernelObj["thread_count"], kernel)
            kernel.blocks.append(block)
            events.append(
                BlockEvent(EventType.BLOCK_START,
                           kernelObj["block_times"][idx * 2], block))
            events.append(
                BlockEvent(EventType.BLOCK_END,
                           kernelObj["block_times"][idx * 2 + 1], block))
            idx += 1
    return sorted(events, key=lambda event: event.timestamp)
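For reference, a sketch of the benchObj layout implied by the field accesses above; the keys are the ones this function reads, all concrete values (and the bench_json name) are illustrative, and models, EventType, and the *Event classes are assumed importable from the surrounding module:

bench_json = {
    "benchmark_name": "vector_add",
    "label": "run-0",
    "max_resident_threads": 2048,
    "release_time": 0.0,
    "TID": 1234,
    "times": [
        {},                                    # times[0] is not read by this function
        {"copy_in_times": [0.001, 0.002],      # times[1]: per-benchmark phase intervals
         "execute_times": [0.002, 0.010],
         "copy_out_times": [0.010, 0.011]},
        {"kernel_name": "vector_add_kernel",   # times[2:]: one entry per kernel
         "cuda_launch_times": [0.002, 0.003, 0.010],
         "block_times": [0.003, 0.009],        # flat (start, end) pairs, one per block
         "block_smids": [0],
         "thread_count": 256},
    ],
}
events = tokenize_events(bench_json)           # returned events are sorted by timestamp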