Ejemplo n.º 1
0
def setup_graph(path: pathlib.Path, testfile):
	"""
	Parse the output files and create a combined bar/line graph.

	Plots DPU cycle counts (bars, left axis, in millions) and compression
	ratio (line, right axis) against the number of tasklets.

	:param path: Path holding output files
	:param testfile: Test file that graph is being created for
	"""
	cycles = []
	compr_ratio = []
	for tasks in num_tasks:
		amc = get_avg_max_cycles(path, testfile, dpus, tasks)
		# Fix: compare with ==, not 'is'. Identity checks against int
		# literals rely on CPython small-int caching and are unreliable.
		if amc == -1:
			print(f"ERROR: File not found: {testfile} with {dpus} dpus and {tasks} tasklets.",
				file=sys.stderr)
			return

		cr = get_compr_ratio(path, testfile, dpus, tasks)

		compr_ratio.append(cr)
		cycles.append(amc / 1000000)  # plot cycle counts in millions

	# Print for easy debugging
	print(cycles)
	print(compr_ratio)

	# Set up plot
	plt.rc('font', size=12)
	plt.rc('axes', titlesize=12)
	plt.rc('axes', labelsize=12)
	fig, ax1 = plt.subplots()

	# Set up axes labels and make duplicate axis
	xpos = np.arange(len(num_tasks))
	plt.xticks(xpos, labels=num_tasks)
	ax1.set_xlabel('Number of Tasklets')
	ax2 = ax1.twinx()

	# Set up bar graph
	ax1.bar(xpos, cycles, color='#4e6625', width=0.5)
	ax1.set_ylabel('Cycle Count (in Millions)')

	# Set up line graph
	ax2.plot(xpos, compr_ratio, color='#2e3d18', linewidth=2)
	ax2.set_ylabel('Compression Ratio')

	plt.show()
Ejemplo n.º 2
0
def run_dpu_test(files, min_dpu, max_dpu, incr):
    """
    Benchmark host and DPU (de)compression across a range of DPU counts
    and write speedup results to CSV files.

    For each test file the host binary is run once, then the DPU binary is
    rebuilt and run for every DPU count with its optimal tasklet count.
    Results are parsed from the output files and written to
    results/compression_speedup_dpu.csv and
    results/decompression_speedup_dpu.csv.

    :param files: Iterable of test-file base names (without extension)
    :param min_dpu: Smallest DPU count to test
    :param max_dpu: Largest DPU count to test
    :param incr: Step between tested DPU counts
    """
    # Test min_dpu itself, then continue on the incr-aligned grid up to
    # max_dpu. Computed once instead of three times as before.
    dpu_counts = [min_dpu] + list(range(min_dpu - 1 + incr, max_dpu + 1,
                                        incr))

    for testfile in files:
        os.system('make clean')
        os.system('make')
        os.system(
            f'./dpu_snappy -i ../test/{testfile}.snappy > results/decompression/{testfile}_host.txt'
        )
        os.system(
            f'./dpu_snappy -c -i ../test/{testfile}.txt > results/compression/{testfile}_host.txt'
        )

        for i in dpu_counts:
            tasklets = get_optimal_tasklets(f"../test/{testfile}.txt", 32768,
                                            i)

            os.system('make clean')
            os.system(f'make NR_DPUS={i} NR_TASKLETS={tasklets}')
            # Echo each benchmark command before running it, for the logs.
            print(
                f'./dpu_snappy -d -i ../test/{testfile}.snappy > results/decompression/{testfile}_dpus={i}_tasklets={tasklets}.txt'
            )
            os.system(
                f'./dpu_snappy -d -i ../test/{testfile}.snappy > results/decompression/{testfile}_dpus={i}_tasklets={tasklets}.txt'
            )
            print(
                f'./dpu_snappy -d -c -i ../test/{testfile}.txt > results/compression/{testfile}_dpus={i}_tasklets={tasklets}.txt'
            )
            os.system(
                f'./dpu_snappy -d -c -i ../test/{testfile}.txt > results/compression/{testfile}_dpus={i}_tasklets={tasklets}.txt'
            )

    # Write compression results csv
    with open('results/compression_speedup_dpu.csv', 'w',
              newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        writer.writerow(['version', 'time', 'dpus'])
        writer.writerow(['host', '1', '0'])

        for testfile in files:
            for i in dpu_counts:
                tasklets = get_optimal_tasklets(f"../test/{testfile}.txt",
                                                32768, i)

                host = get_avg_host_runtime(
                    pathlib.Path("results/compression"), testfile)
                # Cycle count divided by 266000000 — presumably a 266 MHz
                # DPU clock, converting cycles to seconds.
                dpu = float(
                    get_avg_max_cycles(pathlib.Path("results/compression"),
                                       testfile, i, tasklets)) / 266000000
                dpu_overhead = get_avg_overhead_time(
                    pathlib.Path("results/compression"), testfile, i, tasklets)

                # dpu <= 0 means the cycle count was missing (sentinel -1);
                # skip the row rather than writing a bogus speedup.
                if dpu > 0:
                    std_dpu = host / (dpu + sum(dpu_overhead))
                    writer.writerow([testfile, std_dpu, i])

    # Write decompression results csv
    with open('results/decompression_speedup_dpu.csv', 'w',
              newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        writer.writerow(['version', 'time', 'dpus'])
        writer.writerow(['host', '1', '0'])

        for testfile in files:
            for i in dpu_counts:
                tasklets = get_optimal_tasklets(f"../test/{testfile}.txt",
                                                32768, i)

                host = get_avg_host_runtime(
                    pathlib.Path("results/decompression"), testfile)
                dpu = float(
                    get_avg_max_cycles(pathlib.Path("results/decompression"),
                                       testfile, i, tasklets)) / 266000000
                # Fix: overhead was read from results/compression here,
                # unlike every other lookup in this decompression section.
                dpu_overhead = get_avg_overhead_time(
                    pathlib.Path("results/decompression"), testfile, i,
                    tasklets)

                if dpu > 0:
                    std_dpu = host / (dpu + sum(dpu_overhead))
                    writer.writerow([testfile, std_dpu, i])
Ejemplo n.º 3
0
def run_tasklet_test(files, min_tasklet, max_tasklet, incr, num_dpu):
    """
    Benchmark host and DPU (de)compression across a range of tasklet
    counts (at a fixed DPU count) and write speedup results to CSV files.

    For each test file the host binary is run once, then the DPU binary is
    rebuilt and run for every tasklet count. Results are written to
    results/compression_speedup_tasklet.csv and
    results/decompression_speedup_tasklet.csv.

    :param files: Iterable of test-file base names (without extension)
    :param min_tasklet: Smallest tasklet count to test
    :param max_tasklet: Largest tasklet count to test
    :param incr: Step between tested tasklet counts
    :param num_dpu: Fixed number of DPUs to build with
    """
    # Test min_tasklet itself, then step by incr from min_tasklet + 1.
    # Computed once instead of three times as before.
    tasklet_counts = [min_tasklet] + list(
        range(min_tasklet + 1, max_tasklet + 1, incr))

    for testfile in files:
        os.system('make clean')
        os.system('make')
        os.system(
            f'./dpu_snappy -i ../test/{testfile}.snappy > results/decompression/{testfile}_host.txt'
        )
        os.system(
            f'./dpu_snappy -c -i ../test/{testfile}.txt > results/compression/{testfile}_host.txt'
        )

        for i in tasklet_counts:
            os.system('make clean')
            os.system(f'make NR_DPUS={num_dpu} NR_TASKLETS={i}')
            # Echo each benchmark command before running it, for the logs.
            print(
                f'./dpu_snappy -d -i ../test/{testfile}.snappy > results/decompression/{testfile}_dpus={num_dpu}_tasklets={i}.txt'
            )
            os.system(
                f'./dpu_snappy -d -i ../test/{testfile}.snappy > results/decompression/{testfile}_dpus={num_dpu}_tasklets={i}.txt'
            )
            print(
                f'./dpu_snappy -d -c -i ../test/{testfile}.txt > results/compression/{testfile}_dpus={num_dpu}_tasklets={i}.txt'
            )
            os.system(
                f'./dpu_snappy -d -c -i ../test/{testfile}.txt > results/compression/{testfile}_dpus={num_dpu}_tasklets={i}.txt'
            )

    # Write compression results csv
    with open('results/compression_speedup_tasklet.csv', 'w',
              newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        writer.writerow(['version', 'time', 'tasklets'])
        writer.writerow(['host', '1', '0'])

        for testfile in files:
            for i in tasklet_counts:
                host = get_avg_host_runtime(
                    pathlib.Path("results/compression"), testfile)
                # Cycle count divided by 266000000 — presumably a 266 MHz
                # DPU clock, converting cycles to seconds.
                dpu = float(
                    get_avg_max_cycles(pathlib.Path("results/compression"),
                                       testfile, num_dpu, i)) / 266000000
                dpu_overhead = get_avg_overhead_time(
                    pathlib.Path("results/compression"), testfile, num_dpu, i)

                # dpu <= 0 means the cycle count was missing (sentinel -1).
                if dpu > 0:
                    # Fix: dividing by a generator expression raised
                    # TypeError; run_dpu_test shows the intended form.
                    std_dpu = host / (dpu + sum(dpu_overhead))
                    writer.writerow([testfile, std_dpu, i])

    # Write decompression results csv
    with open('results/decompression_speedup_tasklet.csv', 'w',
              newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        writer.writerow(['version', 'time', 'tasklets'])
        writer.writerow(['host', '1', '0'])

        for testfile in files:
            for i in tasklet_counts:
                host = get_avg_host_runtime(
                    pathlib.Path("results/decompression"), testfile)
                dpu = float(
                    get_avg_max_cycles(pathlib.Path("results/decompression"),
                                       testfile, num_dpu, i)) / 266000000
                # Fix: overhead was read from results/compression here,
                # unlike every other lookup in this decompression section.
                dpu_overhead = get_avg_overhead_time(
                    pathlib.Path("results/decompression"), testfile, num_dpu,
                    i)

                if dpu > 0:
                    # Fix: same generator-expression TypeError as above.
                    std_dpu = host / (dpu + sum(dpu_overhead))
                    writer.writerow([testfile, std_dpu, i])
Ejemplo n.º 4
0
def run_breakdown_test(testfile, min_dpu, max_dpu, incr, tasklets):
    """
    Run (de)compression for a single test file across a range of DPU
    counts and write a per-phase breakdown CSV for each direction.

    :param testfile: Test-file base name (without extension)
    :param min_dpu: Smallest DPU count to test
    :param max_dpu: Largest DPU count to test
    :param incr: Step between tested DPU counts
    :param tasklets: Fixed tasklet count to build with
    """
    # min_dpu itself, then the incr-aligned grid up to max_dpu.
    dpu_counts = [min_dpu] + list(range(min_dpu - 1 + incr, max_dpu + 1,
                                        incr))

    for num_dpus in dpu_counts:
        os.system('make clean')
        os.system(f'make NR_DPUS={num_dpus} NR_TASKLETS={tasklets}')

        # Build each command once; echo it before running for the logs.
        decomp_cmd = f'./dpu_snappy -d -i ../test/{testfile}.snappy > results/decompression/{testfile}_dpus={num_dpus}_tasklets={tasklets}.txt'
        print(decomp_cmd)
        os.system(decomp_cmd)

        comp_cmd = f'./dpu_snappy -d -c -i ../test/{testfile}.txt > results/compression/{testfile}_dpus={num_dpus}_tasklets={tasklets}.txt'
        print(comp_cmd)
        os.system(comp_cmd)

    header = [
        'prepare', 'alloc', 'load', 'copy_in', 'run', 'copy_out', 'free',
        'dpus'
    ]

    # Identical processing for both directions; compression first, matching
    # the original output order.
    for mode in ('compression', 'decompression'):
        with open(f'results/{testfile}_{mode}_breakdown.csv', 'w',
                  newline='') as csvfile:
            writer = csv.writer(csvfile, delimiter=',')
            writer.writerow(header)

            result_dir = pathlib.Path(f'results/{mode}')
            for num_dpus in dpu_counts:
                # Cycle count divided by 266000000 — presumably a 266 MHz
                # DPU clock, converting cycles to seconds.
                run_time = float(
                    get_avg_max_cycles(result_dir, testfile, num_dpus,
                                       tasklets)) / 266000000
                overhead = get_avg_overhead_time(result_dir, testfile,
                                                 num_dpus, tasklets)

                # run_time <= 0 means the cycle count was missing; skip.
                if run_time > 0:
                    writer.writerow([
                        overhead[0], overhead[1], overhead[2], overhead[3],
                        run_time, overhead[4], overhead[5], num_dpus
                    ])
Ejemplo n.º 5
0
def setup_graph(path: pathlib.Path):
    """
    Parse the output files and create a horizontal speedup bar graph.

    Reads per-file host runtimes and DPU cycle counts (using the
    dpu-count/tasklet parameters stored in the module-level ``files``
    mapping), computes a speedup percentage, and plots one bar per file
    (red for slowdown, green for speedup).

    :param path: Path holding output files
    """
    # Loop through directory for respective output files and parse them
    dpu_time = []
    host_time = []
    for filename in files:
        params = files[filename]

        ahr = get_avg_host_runtime(path, filename)
        adr = get_avg_max_cycles(path, filename, params[0], params[1])

        # Fix: compare with ==, not 'is' — identity checks against int
        # literals are unreliable. Also interpolate the filename, which the
        # f-strings previously never used ("(unknown)" placeholder).
        if ahr == -1:
            print(f"ERROR: File not found for host: {filename}.",
                  file=sys.stderr)
            return
        elif adr == -1:
            print(
                f"ERROR: File not found for DPU: {filename} with {params[0]} dpus and {params[1]} tasklets.",
                file=sys.stderr)
            return
        else:
            host_time.append(ahr)
            # NOTE(review): divisor is 267000000 here but 266000000 in the
            # other test functions — confirm which clock rate is intended.
            dpu_time.append(
                float(adr) / 267000000 +
                get_avg_prepostproc_time(path, filename, params[0], params[1]))

    # Calculate the speedup percentage. When the DPU is slower the ratio is
    # shifted down by 1 so the bar goes negative (plotted red below).
    speedup = []
    for i in range(0, len(files)):
        if host_time[i] < dpu_time[i]:
            speedup.append((host_time[i] / dpu_time[i] - 1) * 100)
        else:
            speedup.append((host_time[i] / dpu_time[i]) * 100)

    # Print for easy debugging
    print(host_time)
    print(dpu_time)
    print(speedup)

    # Set up plot
    plt.rc('font', size=12)
    plt.rc('axes', titlesize=12)
    plt.rc('axes', labelsize=12)
    fig, ax = plt.subplots()

    # y-axis labels
    yticks = np.arange(len(files))
    ax.set_yticks(yticks)
    ax.set_yticklabels(files)

    # x-axis labels
    xticks = np.arange(-100, 800, step=50)
    ax.set_xticks(xticks)
    ax.set_xlabel('Speedup Over Host Application (%)')
    ax.xaxis.grid(True, linestyle="dotted")

    # Red bars for negative speedup (slowdown), green otherwise.
    ax.barh(yticks,
            speedup,
            color=list(
                map(lambda x: '#d35e60' if (x < 0) else '#84ba5b', speedup)))

    plt.show()