def __init__(self, settings: Settings, record_count: int, operation_count: int) -> None:
    """Prepare storage/network helpers and remote commands for the benchmark.

    Args:
        settings: host/connection settings used for all remote commands.
        record_count: number of records the workload loads.
        operation_count: number of operations the workload executes.
    """
    # Bug fix: use the settings passed by the caller instead of building a
    # fresh one via create_settings(); Storage/Network below already used
    # the parameter, so the two objects could disagree.
    self.settings = settings
    self.storage = Storage(settings)
    self.network = Network(settings)
    self.nc_command = settings.remote_command(nix_build("netcat-native"))
    self.remote_ycsb = settings.remote_command(nix_build("ycsb-native"))
    self.record_count = record_count
    self.operation_count = operation_count
def __init__(
    self,
    settings: Settings,
    storage: Storage,
    record_count: int,
    operation_count: int,
) -> None:
    """Remember benchmark parameters and resolve remote helper commands.

    The netcat and YCSB closures are built once here so individual runs
    don't pay the nix_build cost repeatedly.
    """
    self.settings = settings
    self.storage = storage
    self.record_count = record_count
    self.operation_count = operation_count
    self.nc_command = settings.remote_command(nix_build("netcat"))
    self.remote_ycsb = settings.remote_command(nix_build("ycsb"))
def benchmark_dd(
    storage: Storage,
    system: str,
    attr: str,
    device: str,
    stats: Dict[str, List],
    extra_env: Optional[Dict[str, str]] = None,
) -> None:
    """Run the dd benchmark built from `attr` and collect its output.

    Parses comma-separated "latency,throughput" lines from the child's
    stdout into the column-oriented `stats` dict.
    """
    env = os.environ.copy()
    # Bug fix: `extra_env={}` was a shared mutable default; use None sentinel.
    env.update(extra_env or {})
    dd = nix_build(attr)
    print(f"###### {system} >> ######")
    proc = subprocess.Popen(
        [dd], env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
    )
    try:
        if proc.stdout is None:
            proc.wait()
        else:
            stats["system"].append(system)
            for line in proc.stdout:
                data = line.split(",")
                if len(data) == 2:
                    stats["latency"].append(data[0])
                    stats["Throughput"].append(data[1])
    finally:
        # Was a no-op `finally: pass`; reap the child to avoid a zombie.
        proc.wait()
    print(f"###### {system} << ######")
def benchmark_hdparm(
    storage: Storage,
    system: str,
    attr: str,
    device: str,
    stats: Dict[str, List],
    extra_env: Optional[Dict[str, str]] = None,
) -> None:
    """Measure raw device read speed with `hdparm -Tt` and record the results.

    hdparm prints lines like " Timing cached reads:  x = y MB/sec"; the
    label becomes the stats column and the value after '=' the entry.
    """
    env = os.environ.copy()
    env.update(flamegraph_env(f"hdparm-{system}-{NOW}"))
    # Bug fix: `extra_env={}` was a shared mutable default; use None sentinel.
    env.update(extra_env or {})
    hdparm = nix_build(attr)
    print(f"###### {system} >> ######")
    proc = subprocess.Popen(
        ["sudo", hdparm, "bin/hdparm", "-Tt", device],
        env=env,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )
    try:
        if proc.stdout is None:
            proc.wait()
        else:
            stats["system"].append(system)
            for line in proc.stdout:
                print(line)
                match = re.match(r"(.*):\s+(.*) = (.*)", line)
                if match:
                    stats[match.group(1)].append(match.group(3))
    finally:
        # Was a dead `finally` holding commented-out code; reap the child.
        proc.wait()
    print(f"###### {system} << ######")
def setup(self) -> Mount:
    """Flash the iotest image onto the SPDK device and return its Mount."""
    image = nix_build("iotest-image")
    if MOUNTPOINT.is_mount():
        run(["sudo", "umount", str(MOUNTPOINT)])
    spdk_device = self.settings.spdk_device()
    time.sleep(2)  # wait for device to appear
    dev = f"/dev/{spdk_device}"
    # TRIM for optimal performance
    run(["sudo", "blkdiscard", "-f", dev])
    dd_cmd = [
        "sudo",
        "dd",
        f"if={image}",
        f"of={dev}",
        "bs=128M",
        "conv=fdatasync",
        "oflag=direct",
        "status=progress",
    ]
    run(dd_cmd)
    # Grow the filesystem to fill the whole device.
    run(["sudo", "resize2fs", dev])
    return Mount(dev)
def run(
    self,
    system: str,
    mnt: str,
    stats: Dict[str, List],
    extra_env: Dict[str, str] = {},
    mode: Mode = Mode.NORMAL,
) -> None:
    """Start mysqld on the mounted image and drive it with sysbench.

    Args:
        system: label recorded into `stats` by run_sysbench.
        mnt: mount point containing var/lib/mysql.
        stats: column-oriented result sink.
        extra_env: extra environment variables (currently unused here).
        mode: NORMAL, PERF (wrap mysqld in `perf record` with Intel PT),
            or TRACE (attach PT tracing to the running process).
    """
    mysql = nix_build("mysql")
    command = []
    if mode == Mode.PERF:
        # Cycle-accurate Intel PT sampling in user space, wrapping mysqld.
        command = ["perf", "record", "-e", "intel_pt/cyc=1/u", "--"]
    command += [
        f"{mysql}/bin/mysqld",
        f"--datadir={mnt}/var/lib/mysql",
        "--socket=/tmp/mysql.sock",
    ]
    if os.geteuid() == 0:
        # mysqld refuses to run as root: drop privileges and fix ownership.
        command += ["--user=nobody"]
        subprocess.run(["chown", "-R", "nobody", f"{mnt}/var/lib/mysql"])
    with spawn(*command, cwd=mnt) as proc:
        if mode == Mode.TRACE:
            record = trace_with_pt(proc.pid, Path(mnt))
        try:
            self.run_sysbench(system, stats)
        finally:
            # Only finalize the trace if one was started above.
            if mode == Mode.TRACE:
                record.result()
def test_nginx(settings: Settings) -> None:
    """Smoke-test nginx by fetching a 3 MB file and verifying its sha256.

    Retries up to 10 times since the server may not accept connections
    immediately after spawning.
    """
    nginx = nix_build("nginx")
    remote_curl = settings.remote_command(nix_build("curl-remote"))
    with spawn(nginx.strip()):
        for _ in range(10):
            try:
                curl_args = ["curl", "-s", settings.local_dpdk_ip + "/test/file-3mb"]
                proc = remote_curl.run("bin/curl", curl_args)
                sha256 = hashlib.sha256(proc.stdout).hexdigest()
                expected = (
                    "259da4e49b1d0932c5a16a9809113cf3ea6c7292e827298827e020aa7361f98d"
                )
                # Bug fix: the assertion message referenced the builtin
                # `hash` function instead of the computed digest.
                assert sha256 == expected, f"{sha256} == {expected}"
                break
            except subprocess.CalledProcessError:
                pass
def benchmark_fio(
    system: str,
    attr: str,
    cores: int,
    directory: str,
    stats: Dict[str, List],
    extra_env: Optional[Dict[str, str]] = None,
) -> None:
    """Run the fio random-RW job for `cores` ethreads and collect JSON stats.

    Extracts fio's JSON report from the mixed stdout stream (between the
    lone "{" and "}" lines) and appends one stats row per job and per
    read/write/trim metric.
    """
    extra_env = extra_env or {}  # avoid shared mutable default
    env = os.environ.copy()
    # we don't need network for these benchmarks; tolerate the variable
    # being unset instead of raising KeyError
    env.pop("SGXLKL_TAP", None)
    env.update(dict(SGXLKL_CWD=directory))
    env.update(flamegraph_env(f"fio-{system}-{NOW}"))
    enable_sgxio = "1" if system == "sgx-io" else "0"
    env.update(SGXLKL_ENABLE_SGXIO=enable_sgxio)
    env.update(SGXLKL_ETHREADS=str(cores))
    # apply caller overrides once, last, so they win (the original applied
    # extra_env twice; the net effect is identical)
    env.update(extra_env)
    fio = nix_build(attr)
    stdout: Optional[int] = subprocess.PIPE
    if os.environ.get("SGXLKL_ENABLE_GDB", "0") == "1":
        stdout = None  # leave stdout attached for interactive gdb sessions
    cmd = [
        str(fio),
        "bin/fio",
        "--output-format=json",
        "--eta=always",
        f"fio-rand-RW-smp-{cores}.job",
    ]
    proc = subprocess.Popen(cmd, stdout=stdout, text=True, env=env)
    data = ""
    in_json = False
    print(f"[Benchmark]: {system}")
    try:
        if proc.stdout is None:
            proc.wait()
        else:
            for line in proc.stdout:
                print(line, end="")
                if line == "{\n":
                    in_json = True
                if in_json:
                    data += line
                if line == "}\n":
                    break
    finally:
        proc.send_signal(signal.SIGINT)
        proc.wait()
    if data == "":
        raise RuntimeError(f"Did not get a result when running benchmark for {system}")
    jsondata = json.loads(data)
    for jobnum, job in enumerate(jsondata["jobs"]):
        stats["system"].append(system)
        stats["job"].append(jobnum)
        stats["cores"].append(cores)
        for op in ["read", "write", "trim"]:
            metrics = job[op]
            for metric_name, metric in metrics.items():
                if isinstance(metric, dict):
                    # nested metrics (e.g. latency percentiles) get
                    # flattened into "op-metric-sub" columns
                    for name, submetric in metric.items():
                        stats[f"{op}-{metric_name}-{name}"].append(submetric)
                else:
                    stats[f"{op}-{metric_name}"].append(metric)
def benchmark_simpleio(
    storage: Storage,
    type: str,
    attr: str,
    directory: str,
    stats: Dict[str, List],
    extra_env: Optional[Dict[str, str]] = None,
    do_write: bool = True,
) -> None:
    """Run the simpleio benchmark and record bytes/time for the workload.

    Reads a single JSON report line between "<result>" and "</result>"
    markers from the child's stdout.
    """
    extra_env = extra_env or {}  # avoid shared mutable default
    sgx_env = dict(SGXLKL_CWD=directory)
    sgx_env.update(extra_env)
    sgx_env["SGXLKL_ENABLE_SGXIO"] = "1"
    sgx_env["SGXLKL_ETHREADS"] = "1"
    simpleio = nix_build(attr)
    stdout: Optional[int] = subprocess.PIPE
    if os.environ.get("SGXLKL_ENABLE_GDB", "0") == "1":
        stdout = None
    size = str(2 * 1024 * 1024 * 1024)  # 2 GiB
    env_string = [f"{k}={v}" for k, v in sgx_env.items()]
    report = ""
    in_results = False
    # Bug fix: previously the SGXLKL_* variables were printed but the child
    # received only os.environ + extra_env; merge the full set in.
    env = os.environ.copy()
    env.update(sgx_env)
    cmd = [
        simpleio,
        "bin/simpleio",
        f"{directory}/file",
        size,
        "0",
        "0" if do_write else "1",
        str(128 * 4096),
    ]
    print(f"$ {' '.join(env_string)} {' '.join(cmd)}")
    proc = subprocess.Popen(cmd, stdout=stdout, text=True, env=env)
    try:
        assert proc.stdout is not None
        for line in proc.stdout:
            print(f"stdout: {line}", end="")
            if line == "<result>\n":
                in_results = True
            elif in_results and line == "</result>\n":
                break
            elif in_results:
                report = line
    finally:
        proc.send_signal(signal.SIGINT)
    jsondata = json.loads(report)
    stats["type"].append(type)
    stats["bytes"].append(jsondata["bytes"])
    stats["time"].append(jsondata["time"])
    stats["workload"].append("write" if do_write else "read")
def benchmark_redis_sgx_io(benchmark: Benchmark, stats: DefaultDict[str, List[str]]) -> None:
    """Benchmark redis under sgx-io: DPDK networking with SPDK storage."""
    env = benchmark.network.setup(NetworkKind.DPDK)
    server = nix_build("redis-sgx-io")
    mount = benchmark.storage.setup(StorageKind.SPDK)
    env.update(mount.extra_env())
    with mount as mnt:
        benchmark.run("sgx-io", server, mnt, stats, extra_env=env)
def benchmark_redis_sgx_lkl(
    benchmark: Benchmark,
    stats: Dict[str, List],
) -> None:
    """Benchmark redis under sgx-lkl: TAP networking with LKL storage."""
    env = benchmark.network.setup(NetworkKind.TAP)
    server = nix_build("redis-sgx-lkl")
    mount = benchmark.storage.setup(StorageKind.LKL)
    env.update(mount.extra_env())
    with mount as mnt:
        benchmark.run("sgx-lkl", server, mnt, stats, extra_env=env)
def benchmark_redis_scone(benchmark: Benchmark, stats: DefaultDict[str, List[str]]) -> None:
    """Benchmark redis under SCONE with native networking.

    The SCONE environment depends on the mounted path, so it is assembled
    inside the mount context.
    """
    mount = benchmark.storage.setup(StorageKind.SCONE)
    server = nix_build("redis-scone")
    with mount as mnt:
        env = scone_env(mnt)
        env.update(benchmark.network.setup(NetworkKind.NATIVE))
        env.update(mount.extra_env())
        benchmark.run("scone", server, mnt, stats, extra_env=env)
def run(
    self,
    attr: str,
    system: str,
    stats: Dict[str, List[int]],
    extra_env: Optional[Dict[str, str]] = None,
) -> None:
    """Run the iperf benchmark in both directions for the given system.

    Args:
        attr: nix attribute of the iperf build to benchmark.
        system: label recorded into `stats`.
        stats: column-oriented result sink, filled by _run.
        extra_env: extra environment variables for the benchmark process.
    """
    # Bug fix: `extra_env={}` was a shared mutable default.
    extra_env = extra_env or {}
    local_iperf = nix_build(attr)
    self._run(local_iperf, "send", system, stats, extra_env)
    if system == "sgx-io":
        # give sgx-lkl-userpci time to shutdown before the reverse run
        import time

        time.sleep(5)
    self._run(local_iperf, "receive", system, stats, extra_env)
def benchmark_sqlite(
    storage: Storage,
    system: str,
    attr: str,
    directory: str,
    stats: Dict[str, List[Any]],
    extra_env: Optional[Dict[str, str]] = None,
) -> None:
    """Run the sqlite benchmark from `attr` and collect per-op timings.

    Parses timing lines of the form "  <op>.... <seconds>s" from stdout,
    skipping the TOTAL row and stopping after three result rows.
    """
    env = os.environ.copy()
    # Tolerate SGXLKL_TAP being unset instead of raising KeyError.
    env.pop("SGXLKL_TAP", None)
    env.update(dict(SGXLKL_CWD=directory))
    # Bug fix: `extra_env={}` was a shared mutable default and was applied
    # twice; apply the caller overrides once.
    env.update(extra_env or {})
    env.update(flamegraph_env(f"{os.getcwd()}/sqlite-{system}"))
    sqlite = nix_build(attr)
    stdout = subprocess.PIPE
    cmd = [str(sqlite)]
    proc = subprocess.Popen(cmd, stdout=stdout, text=True, env=env)
    print(f"[Benchmark]:{system}")
    n_rows = 0
    try:
        if proc.stdout is None:
            proc.wait()
        else:
            for line in proc.stdout:
                line = line.rstrip()
                print(line)
                match = re.match(r"(?: \d+ - |\s+)([^.]+)[.]+\s+([0-9.]+)s", line)
                if match:
                    if "TOTAL" in match.group(1):
                        continue
                    stats["system"].append(system)
                    stats["sqlite-op-type"].append(match.group(1))
                    stats["sqlite-time [s]"].append(match.group(2))
                    n_rows += 1
                    if n_rows == 3:
                        break
    finally:
        proc.send_signal(signal.SIGINT)
    expected = 3
    if n_rows < expected:
        raise RuntimeError(
            f"Expected {expected} rows, got: {n_rows} when running benchmark for {system}"
        )
def run(self, system: str, db_dir: str, stats: Dict[str, List], trace: bool = False) -> None:
    """Launch redis-server on db_dir and run the YCSB workload against it.

    When `trace` is set, Intel PT tracing is attached to the server
    process and finalized after the workload completes.
    """
    redis_server = nix_build("redis")
    server_args = ["--dir", db_dir, "--requirepass", "snakeoil"]
    with spawn(f"{redis_server}/bin/redis-server", *server_args) as proc:
        record = trace_with_pt(proc.pid, Path(db_dir)) if trace else None
        try:
            self.run_ycsb(proc, system, stats)
        finally:
            if record is not None:
                record.result()
def benchmark_redis_native(
    benchmark: Benchmark,
    stats: Dict[str, List],
) -> None:
    """Benchmark redis running natively (no enclave, native net/storage)."""
    env = benchmark.network.setup(NetworkKind.NATIVE)
    server = nix_build("redis-native")
    mount = benchmark.storage.setup(StorageKind.NATIVE)
    env.update(mount.extra_env())
    with mount as mnt:
        benchmark.run("native", server, mnt, stats, extra_env=env)
def run(
    self,
    attr: str,
    system: str,
    mnt: str,
    stats: Dict[str, List],
    extra_env: Dict[str, str] = {},
) -> None:
    """Start mysqld from `attr` and benchmark it with sysbench over DPDK.

    Waits until the server accepts TCP connections on port 3306 (probed
    via remote netcat), then runs the sysbench OLTP read/write workload
    (prepare -> run -> cleanup) and parses its output into `stats`.
    """
    env = dict(SGXLKL_CWD=mnt)
    env.update(flamegraph_env(f"{os.getcwd()}/mysql-{system}"))
    env.update(extra_env)
    mysql = nix_build(attr)
    sysbench = sysbench_command(self.storage.settings)
    with spawn(
        mysql,
        "bin/mysqld",
        f"--datadir={mnt}/var/lib/mysql",
        "--socket=/tmp/mysql.sock",
        extra_env=env,
    ):
        common_flags = [
            f"--mysql-host={self.settings.local_dpdk_ip}",
            "--mysql-db=root",
            "--mysql-user=root",
            "--mysql-password=root",
            "--mysql-ssl=on",
            "--table-size=500000",
            f"{sysbench.nix_path}/share/sysbench/oltp_read_write.lua",
        ]
        # Poll with netcat (-z: scan only) until mysqld is ready to accept
        # connections; a failed probe raises CalledProcessError.
        while True:
            try:
                proc = nc_command(self.settings).run(
                    "bin/nc", ["-z", "-v", self.settings.local_dpdk_ip, "3306"])
                break
            except subprocess.CalledProcessError:
                print(".")
                pass
        sysbench.run("bin/sysbench", common_flags + ["prepare"])
        proc = sysbench.run("bin/sysbench", common_flags + ["run"])
        process_sysbench(proc.stdout, system, stats)
        sysbench.run("bin/sysbench", common_flags + ["cleanup"])
def run(
    self,
    system: str,
    mnt: str,
    stats: Dict[str, List],
    trace: bool = False,
) -> None:
    """Start nginx on the mounted image and drive it with wrk.

    When `trace` is set, Intel PT tracing is attached to the nginx
    process and finalized after the workload completes.
    """
    nginx_server = nix_build("nginx")
    server_cmd = f"{nginx_server}/bin/nginx"
    with spawn(server_cmd, "-c", f"{mnt}/nginx/nginx.conf", cwd=mnt) as proc:
        record = trace_with_pt(proc.pid, Path(mnt)) if trace else None
        try:
            self.run_wrk(proc, system, stats)
        finally:
            if record is not None:
                record.result()
def benchmark_sqlite(
    system: str,
    directory: str,
    stats: Dict[str, List[Any]],
    extra_env: Optional[Dict[str, str]] = None,
    trace: bool = False,
) -> None:
    """Run sqlite's speedtest1 and collect per-operation timings.

    Parses timing lines of the form "  <op>.... <seconds>s", skipping the
    TOTAL row; raises if fewer than three result rows were found.
    """
    sqlite = nix_build("sqlite-speedtest")
    cmd = [f"{sqlite}/bin/speedtest1"]
    print(f"[Benchmark]:{system}")
    if trace:
        output = trace_run(cmd, cwd=directory)
    else:
        proc = subprocess.run(
            cmd, cwd=directory, stdout=subprocess.PIPE, check=True, text=True
        )
        assert proc.stdout
        output = proc.stdout
    n_rows = 0
    print(output)
    # Hoisted out of the loop; also fixes double-printing: each line was
    # printed again here although the whole output was printed above.
    pattern = re.compile(r"(?: \d+ - |\s+)([^.]+)[.]+\s+([0-9.]+)s")
    for line in output.split("\n"):
        line = line.rstrip()
        match = pattern.match(line)
        if match:
            if "TOTAL" in match.group(1):
                continue
            stats["system"].append(system)
            stats["sqlite-op-type"].append(match.group(1))
            stats["sqlite-time [s]"].append(match.group(2))
            n_rows += 1
    expected = 3
    if n_rows < expected:
        raise RuntimeError(
            f"Expected {expected} rows, got: {n_rows} when running benchmark for {system}"
        )
def bench_memcpy(kind: str, stats: Dict[str, List]) -> None:
    """Run the memcpy benchmark for `kind` and record size/time pairs.

    The child prints a mix of progress text and JSON lines; non-JSON
    lines are skipped deliberately.
    """
    memcpy = nix_build("memcpy-test-sgx-io")
    stdout: Optional[int] = subprocess.PIPE
    proc = subprocess.Popen(
        [memcpy, "bin/memcpy-test", KINDS[kind]], stdout=stdout, text=True
    )
    try:
        if proc.stdout is None:
            proc.wait()
        else:
            for line in proc.stdout:
                print(line)
                try:
                    data = json.loads(line)
                except json.JSONDecodeError:
                    # Narrowed from a blanket `except Exception`: only
                    # non-JSON progress output is expected here.
                    continue
                for i in data:
                    stats["memcpy-kind"].append(f"memcpy-test-{kind}")
                    stats["memcpy-size"].append(i)
                    stats["memcpy-time"].append(data[i])
    finally:
        # Was a no-op `finally: pass`; reap the child to avoid a zombie.
        proc.wait()
def run(
    self,
    attribute: str,
    system: str,
    stats: Dict[str, List],
    extra_env: Dict[str, str] = {},
) -> None:
    """Send UDP packets to the remote host and collect the emitted results.

    The benchmark prints JSON rows between "<results>" and "</results>"
    markers; each row becomes one entry per column in `stats`.
    """
    env = os.environ.copy()
    env.update(extra_env)
    env["SGXLKL_ETHREADS"] = "2" if system == "sync" else "1"
    simpleio = nix_build(attribute)
    cmd = [str(simpleio), "bin/udp-send", self.settings.remote_dpdk_ip, "2000000"]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True, env=env)
    assert proc.stdout is not None
    found_results = False
    for raw in proc.stdout:
        stripped = raw.rstrip()
        print(stripped)
        if not found_results:
            if stripped == "<results>":
                found_results = True
            continue
        if stripped == "</results>":
            break
        payload = json.loads(stripped)
        stats["system"].append(system)
        for key, value in payload.items():
            stats[key].append(value)
    proc.send_signal(signal.SIGINT)
    proc.wait()
    if not found_results:
        raise Exception("no time found in results")
def run(
    self,
    attr: str,
    system: str,
    mnt: str,
    stats: Dict[str, List],
    extra_env: Optional[Dict[str, str]] = None,
) -> None:
    """Serve nginx from the mounted image and benchmark it with wrk.

    Polls port 9000 via remote netcat until nginx accepts connections,
    then runs wrk for 30 s against a test file and parses its output.
    """
    # Bug fix: `extra_env={}` was a shared mutable default; copy instead.
    env = dict(extra_env or {})
    env.update(flamegraph_env(f"{os.getcwd()}/nginx-{system}"))
    env.update(dict(SGXLKL_CWD=mnt))
    nginx_server = nix_build(attr)
    host = self.settings.local_dpdk_ip
    with spawn(
        nginx_server, "bin/nginx", "-c", f"{mnt}/nginx/nginx.conf", extra_env=env
    ) as proc:
        while True:
            try:
                self.remote_nc.run(
                    "bin/nc", ["-z", self.settings.local_dpdk_ip, "9000"])
                break
            except subprocess.CalledProcessError:
                status = proc.poll()
                if status is not None:
                    # Typo fix: message previously read "exiteded".
                    raise OSError(f"nginx exited with {status}")
                time.sleep(1)
        wrk_connections = 100
        wrk_proc = self.remote_wrk.run("bin/wrk", [
            "-t", "16",
            "-c", f"{wrk_connections}",
            "-d", "30s",
            f"https://{host}:9000/test/file",
        ])
        process_wrk_output(wrk_proc.stdout, system, stats, wrk_connections)
def __init__(self, settings: Settings, storage: Storage) -> None:
    """Keep settings/storage and resolve remote netcat and wrk commands."""
    # Bug fix: the `settings` parameter was ignored in favour of
    # create_settings(), while remote_command below already used it.
    self.settings = settings
    self.storage = storage
    self.remote_nc = settings.remote_command(nix_build("netcat"))
    self.remote_wrk = settings.remote_command(nix_build("wrk"))
def setup(self, kind: StorageKind) -> Mount:
    """Prepare the SPDK-managed NVMe device for the given storage backend.

    Resets SPDK bindings to return the device to the kernel, discards all
    blocks, writes the iotest disk image (optionally onto a LUKS mapping),
    grows the filesystem, then applies backend-specific configuration.
    Returns a Mount describing the prepared device.
    """
    if kind == StorageKind.SCONE and self.settings.spdk_hd_key:
        # SCONE handles disk encryption itself, so it gets its own image.
        image = nix_build("iotest-image-scone")
    else:
        image = nix_build("iotest-image")
    if MOUNTPOINT.is_mount():
        run(["sudo", "umount", str(MOUNTPOINT)])
    try:
        spdk_device = self.settings.spdk_device()
        if os.path.exists(f"/dev/mapper/{spdk_device}"):
            # Tear down a stale LUKS mapping from a previous run.
            cryptsetup_luks_close(spdk_device, check=False)
    except Exception:
        # spdk device might be not mapped to operating system
        pass
    # Return the NVMe device from SPDK's userspace driver to the kernel.
    run([
        "sudo",
        str(ROOT.joinpath("..", "..", "spdk", "scripts", "setup.sh")),
        "reset",
    ])
    spdk_device = self.settings.spdk_device()
    time.sleep(2)  # wait for device to appear
    raw_dev = f"/dev/{spdk_device}"
    while not os.path.exists(raw_dev):
        print(".")
        time.sleep(1)
    # TRIM for optimal performance
    run(["sudo", "blkdiscard", "-f", raw_dev])
    if self.settings.spdk_hd_key and kind != StorageKind.SCONE:
        # Encrypt the raw device; dd below then writes to the mapping.
        dev = setup_luks(raw_dev, spdk_device, self.settings.spdk_hd_key)
    else:
        dev = raw_dev
    run([
        "sudo",
        "dd",
        f"if={image}",
        f"of={dev}",
        "bs=128M",
        "conv=fdatasync",
        "oflag=direct",
        "status=progress",
    ])
    # Grow the filesystem to fill the whole device.
    run(["sudo", "resize2fs", dev])
    if self.settings.spdk_hd_key and kind != StorageKind.SCONE:
        run(["sudo", "cryptsetup", "close", spdk_device])
    if kind == StorageKind.SPDK:
        # Hand the device back to SPDK's userspace driver.
        run([
            "sudo",
            str(ROOT.joinpath("..", "..", "spdk", "scripts", "setup.sh")),
            "config",
        ])
    elif kind == StorageKind.LKL:
        # LKL accesses the block device as an unprivileged user.
        run(["sudo", "chown", getpass.getuser(), raw_dev])
    setup_hugepages(kind)
    return Mount(kind, raw_dev, dev, self.settings.spdk_hd_key)
def __init__(self, settings: Settings) -> None:
    """Prepare storage/network helpers and remote netcat/wrk commands."""
    # Bug fix: the `settings` parameter was ignored in favour of
    # create_settings(); Storage/Network below already used the parameter.
    self.settings = settings
    self.storage = Storage(settings)
    self.network = Network(settings)
    self.remote_nc = settings.remote_command(nix_build("netcat-native"))
    self.remote_wrk = settings.remote_command(nix_build("wrk-bench"))
def __init__(self, settings: Settings):
    """Prepare the network helper and resolve remote iperf commands."""
    self.settings = settings
    self.network = Network(settings)
    remote = self.settings.remote_command
    self.parallel_iperf = remote(nix_build("parallel-iperf"))
    self.iperf_client = remote(nix_build("iperf-client"))
def __init__(self, settings: Settings) -> None:
    """Prepare storage/network helpers and the local netcat build."""
    # Bug fix: the `settings` parameter was ignored in favour of
    # create_settings(); Storage/Network below already used the parameter.
    self.settings = settings
    self.storage = Storage(settings)
    self.network = Network(settings)
    self.local_nc = nix_build("netcat-native")
def run(
    self,
    attr: str,
    system: str,
    stats: Dict[str, List],
    extra_env: Optional[Dict[str, str]] = None,
) -> None:
    """Measure write throughput to a remote netcat sink for several batch sizes.

    Copies the local netcat closure to the remote host, keeps a looping
    `nc -l 8888 > /dev/null` listener alive there, and for each batch size
    runs the network-test writer locally, parsing its JSON output rows
    into `stats`.
    """
    env = os.environ.copy()
    # Bug fix: `extra_env={}` was a shared mutable default.
    env.update(extra_env or {})
    network_test = nix_build(attr)
    server_ip = self.settings.remote_dpdk_ip
    num_bytes = str(1 * 1024 * 1024 * 1024)  # 1 GiB
    batch_sizes = [4, 8, 16, 32, 64, 128, 256, 512]  # in KiB
    # Make the local netcat closure available on the remote host.
    helper_run([
        "nix", "copy", self.local_nc, "--to",
        f"ssh://{self.settings.remote_ssh_host}",
    ])
    # The listener restarts after every completed transfer.
    nc_cmds = [
        ["while", "true"],
        ["do", f"{self.local_nc}/bin/nc", "-l", "8888", ">", "/dev/null", "2>&1"],
        ["done"],
    ]
    nc_command = "; ".join(" ".join(cmd) for cmd in nc_cmds)
    with spawn("ssh", self.settings.remote_ssh_host, "--", nc_command):
        for bs in batch_sizes:
            cmd = [
                network_test,
                "bin/network-test",
                "write",
                f"{server_ip}",
                num_bytes,
                str(bs),
            ]
            local_proc = subprocess.Popen(
                cmd, stdout=subprocess.PIPE, text=True, env=env
            )
            assert local_proc.stdout
            # Bug fix: the original called wait() before reading stdout,
            # which can deadlock once the pipe buffer fills; read first.
            for line in local_proc.stdout:
                try:
                    data = json.loads(line)
                except json.JSONDecodeError:
                    # Narrowed from a blanket except that mislabeled any
                    # failure as "not in json format".
                    print(f"ignoring non-JSON output: {line!r}")
                    continue
                stats["system"].append(system)
                stats["batch_size"].append(bs)
                for key in data:
                    stats[key].append(data[key])
            local_proc.wait()
    print(stats)
def sysbench_command(settings: Settings) -> RemoteCommand:
    """Build sysbench via nix and wrap it as a remote command."""
    return settings.remote_command(nix_build("sysbench"))
def nc_command(settings: Settings) -> RemoteCommand:
    """Build netcat via nix and wrap it as a remote command."""
    return settings.remote_command(nix_build("netcat"))