def _run(
    self,
    local_iperf: str,
    direction: str,
    system: str,
    stats: Dict[str, List[int]],
    extra_env: Dict[str, str] = {},
) -> None:
    env = extra_env.copy()
    env.update(flamegraph_env(f"iperf-{direction}-{system}-{NOW}"))
    iperf = f"{self.iperf_client.nix_path}/bin/iperf3"
    # Force-enable CPU features (e.g. AES-NI) in OpenSSL for consistent TLS throughput
    fast_ssl = dict(OPENSSL_ia32cap="0x5640020247880000:0x40128")
    env.update(fast_ssl)

    if check_port(nc_command, self.settings):
        print("There is already an iperf instance running", file=sys.stderr)
        sys.exit(1)

    with spawn(local_iperf, "bin/iperf3", "1", extra_env=env) as iperf_server:
        # Wait up to a minute for the server to accept connections.
        for _ in range(60):
            if check_port(nc_command, self.settings):
                break
            status = iperf_server.poll()
            if status is not None:
                raise OSError(f"iperf exited with {status}")
            time.sleep(1)
        else:
            stop_process(iperf_server)
            raise OSError("Could not connect to iperf after 1 min")

        iperf_args = ["client", "-c", self.settings.local_dpdk_ip, "--json", "-t", "10"]
        if direction == "send":
            # -R reverses the transfer so the server under test sends the data
            iperf_args += ["-R"]
        parallel_iperf = self.parallel_iperf.run(
            "bin/parallel-iperf", ["1", iperf] + iperf_args, extra_env=fast_ssl
        )
        _postprocess_iperf(json.loads(parallel_iperf.stdout), direction, system, stats)
        stop_process(iperf_server)
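
# _postprocess_iperf is defined elsewhere in this module. For reference, a
# minimal sketch of what such a step could look like, assuming iperf3's
# standard --json schema ("end" -> "sum_sent"/"sum_received"); the real helper
# may aggregate differently across the parallel-iperf instances:
def _postprocess_iperf_sketch(
    data: Dict[str, Any], direction: str, system: str, stats: Dict[str, List]
) -> None:
    end = data["end"]
    stats["system"].append(system)
    stats["direction"].append(direction)
    stats["sent_bytes"].append(end["sum_sent"]["bytes"])
    stats["received_bytes"].append(end["sum_received"]["bytes"])
    stats["throughput [bit/s]"].append(end["sum_received"]["bits_per_second"])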
def benchmark_hdparm(
    storage: Storage,
    system: str,
    attr: str,
    device: str,
    stats: Dict[str, List],
    extra_env: Dict[str, str] = {},
) -> None:
    env = os.environ.copy()
    env.update(flamegraph_env(f"hdparm-{system}-{NOW}"))
    env.update(extra_env)
    hdparm = nix_build(attr)

    print(f"###### {system} >> ######")
    proc = subprocess.Popen(
        ["sudo", hdparm, "bin/hdparm", "-Tt", device],
        env=env,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )
    try:
        if proc.stdout is None:
            proc.wait()
        else:
            stats["system"].append(system)
            # hdparm -Tt prints lines such as
            # " Timing cached reads:   X MB in  Y seconds = Z MB/sec"
            for line in proc.stdout:
                print(line, end="")
                match = re.match(r"(.*):\s+(.*) = (.*)", line)
                if match:
                    stats[match.group(1)].append(match.group(3))
    finally:
        # hdparm exits on its own after both timing runs; just reap it.
        proc.wait()
    print(f"###### {system} << ######")
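
# Usage sketch: callers collect results as a dict of columns, e.g. a
# defaultdict(list). The "hdparm" attribute name and the device path below are
# illustrative assumptions, not fixed values from this repository:
def _example_hdparm_usage(storage: Storage) -> None:
    from collections import defaultdict
    stats: Dict[str, List] = defaultdict(list)
    benchmark_hdparm(storage, "native", "hdparm", "/dev/nvme0n1", stats)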
def benchmark_fio(
    system: str,
    attr: str,
    cores: int,
    directory: str,
    stats: Dict[str, List],
    extra_env: Dict[str, str] = {},
) -> None:
    env = os.environ.copy()
    # we don't need network for these benchmarks
    env.pop("SGXLKL_TAP", None)
    env.update(dict(SGXLKL_CWD=directory))
    env.update(flamegraph_env(f"fio-{system}-{NOW}"))
    enable_sgxio = "1" if system == "sgx-io" else "0"
    env.update(SGXLKL_ENABLE_SGXIO=enable_sgxio)
    env.update(SGXLKL_ETHREADS=str(cores))
    env.update(extra_env)
    fio = nix_build(attr)

    stdout: Optional[int] = subprocess.PIPE
    if os.environ.get("SGXLKL_ENABLE_GDB", "0") == "1":
        # leave stdout attached to the terminal so gdb stays usable
        stdout = None

    cmd = [
        str(fio),
        "bin/fio",
        "--output-format=json",
        "--eta=always",
        f"fio-rand-RW-smp-{cores}.job",
    ]
    proc = subprocess.Popen(cmd, stdout=stdout, text=True, env=env)
    data = ""
    in_json = False
    print(f"[Benchmark]: {system}")
    try:
        if proc.stdout is None:
            proc.wait()
        else:
            # fio interleaves ETA lines with the JSON report; capture only the
            # JSON object, which starts with "{" and ends with "}" on its own line.
            for line in proc.stdout:
                print(line, end="")
                if line == "{\n":
                    in_json = True
                if in_json:
                    data += line
                if line == "}\n":
                    break
    finally:
        proc.send_signal(signal.SIGINT)
        proc.wait()

    if data == "":
        raise RuntimeError(f"Did not get a result when running benchmark for {system}")
    jsondata = json.loads(data)
    for jobnum, job in enumerate(jsondata["jobs"]):
        stats["system"].append(system)
        stats["job"].append(jobnum)
        stats["cores"].append(cores)
        # Flatten fio's per-operation metrics into stats columns.
        for op in ["read", "write", "trim"]:
            metrics = job[op]
            for metric_name, metric in metrics.items():
                if isinstance(metric, dict):
                    for name, submetric in metric.items():
                        stats[f"{op}-{metric_name}-{name}"].append(submetric)
                else:
                    stats[f"{op}-{metric_name}"].append(metric)
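
# For reference, with fio's JSON output the flattening above yields scalar
# columns such as "read-iops" and "read-bw", and nested columns such as
# "read-clat_ns-mean"; the exact key set depends on the fio version.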
def benchmark_sqlite(
    storage: Storage,
    system: str,
    attr: str,
    directory: str,
    stats: Dict[str, List[Any]],
    extra_env: Dict[str, str] = {},
) -> None:
    env = os.environ.copy()
    env.pop("SGXLKL_TAP", None)
    env.update(dict(SGXLKL_CWD=directory))
    env.update(flamegraph_env(f"{os.getcwd()}/sqlite-{system}"))
    env.update(extra_env)
    sqlite = nix_build(attr)

    proc = subprocess.Popen([str(sqlite)], stdout=subprocess.PIPE, text=True, env=env)
    print(f"[Benchmark]: {system}")
    n_rows = 0
    try:
        if proc.stdout is None:
            proc.wait()
        else:
            for line in proc.stdout:
                line = line.rstrip()
                print(line)
                match = re.match(r"(?: \d+ - |\s+)([^.]+)[.]+\s+([0-9.]+)s", line)
                if match:
                    if "TOTAL" in match.group(1):
                        continue
                    stats["system"].append(system)
                    stats["sqlite-op-type"].append(match.group(1))
                    stats["sqlite-time [s]"].append(match.group(2))
                    n_rows += 1
                # The benchmark reports three operations; stop once we have them.
                if n_rows == 3:
                    break
    finally:
        proc.send_signal(signal.SIGINT)

    expected = 3
    if n_rows < expected:
        raise RuntimeError(
            f"Expected {expected} rows, got: {n_rows} when running benchmark for {system}"
        )
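
# For reference, the regex above matches report lines of the form
# (illustrative values):
#    100 - INSERT...................................... 0.342s
# while a "TOTAL" summary line is matched but skipped explicitly.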
def run(
    self,
    attr: str,
    system: str,
    mnt: str,
    stats: Dict[str, List],
    extra_env: Dict[str, str] = {},
) -> None:
    env = dict(SGXLKL_CWD=mnt)
    env.update(flamegraph_env(f"{os.getcwd()}/mysql-{system}"))
    env.update(extra_env)
    mysql = nix_build(attr)
    sysbench = sysbench_command(self.storage.settings)

    with spawn(
        mysql,
        "bin/mysqld",
        f"--datadir={mnt}/var/lib/mysql",
        "--socket=/tmp/mysql.sock",
        extra_env=env,
    ):
        common_flags = [
            f"--mysql-host={self.settings.local_dpdk_ip}",
            "--mysql-db=root",
            "--mysql-user=root",
            "--mysql-password=root",
            "--mysql-ssl=on",
            "--table-size=500000",
            f"{sysbench.nix_path}/share/sysbench/oltp_read_write.lua",
        ]

        # Wait until mysqld accepts TCP connections on port 3306.
        while True:
            try:
                nc_command(self.settings).run(
                    "bin/nc", ["-z", "-v", self.settings.local_dpdk_ip, "3306"]
                )
                break
            except subprocess.CalledProcessError:
                print(".")
                time.sleep(1)

        sysbench.run("bin/sysbench", common_flags + ["prepare"])
        proc = sysbench.run("bin/sysbench", common_flags + ["run"])
        process_sysbench(proc.stdout, system, stats)
        sysbench.run("bin/sysbench", common_flags + ["cleanup"])
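
# process_sysbench is defined elsewhere; a minimal sketch of such a parser,
# assuming sysbench 1.x's plain-text report format (the real helper may record
# additional fields):
def _process_sysbench_sketch(output: str, system: str, stats: Dict[str, List]) -> None:
    tps = re.search(r"transactions:\s+\d+\s+\(([0-9.]+) per sec\.\)", output)
    lat = re.search(r"95th percentile:\s+([0-9.]+)", output)
    if tps is None or lat is None:
        raise RuntimeError(f"could not parse sysbench output for {system}")
    stats["system"].append(system)
    stats["transactions/s"].append(float(tps.group(1)))
    stats["latency-p95 [ms]"].append(float(lat.group(1)))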
def run(
    self,
    attr: str,
    system: str,
    mnt: str,
    stats: Dict[str, List],
    extra_env: Dict[str, str] = {},
) -> None:
    env = extra_env.copy()
    env.update(flamegraph_env(f"{os.getcwd()}/nginx-{system}"))
    env.update(dict(SGXLKL_CWD=mnt))
    nginx_server = nix_build(attr)
    host = self.settings.local_dpdk_ip

    with spawn(
        nginx_server, "bin/nginx", "-c", f"{mnt}/nginx/nginx.conf", extra_env=env
    ) as proc:
        # Wait until nginx accepts TCP connections on port 9000.
        while True:
            try:
                self.remote_nc.run("bin/nc", ["-z", host, "9000"])
                break
            except subprocess.CalledProcessError:
                status = proc.poll()
                if status is not None:
                    raise OSError(f"nginx exited with {status}")
                time.sleep(1)

        wrk_connections = 100
        wrk_proc = self.remote_wrk.run(
            "bin/wrk",
            [
                "-t", "16",
                "-c", f"{wrk_connections}",
                "-d", "30s",
                f"https://{host}:9000/test/file",
            ],
        )
        process_wrk_output(wrk_proc.stdout, system, stats, wrk_connections)
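
# process_wrk_output is defined elsewhere; a minimal sketch of such a parser,
# assuming wrk's default plain-text summary (the real helper may also record
# latency and transfer figures):
def _process_wrk_sketch(
    output: str, system: str, stats: Dict[str, List], connections: int
) -> None:
    req = re.search(r"Requests/sec:\s+([0-9.]+)", output)
    if req is None:
        raise RuntimeError(f"could not parse wrk output for {system}")
    stats["system"].append(system)
    stats["connections"].append(connections)
    stats["requests/s"].append(float(req.group(1)))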
def run(
    self,
    system: str,
    redis_server: str,
    db_dir: str,
    stats: Dict[str, List],
    extra_env: Dict[str, str],
) -> None:
    # Serve TLS only: the plaintext port is disabled with "--port 0".
    args = [
        "bin/redis-server",
        "--dir", db_dir,
        "--tls-port", "6379",
        "--port", "0",
        "--tls-cert-file", f"{db_dir}/server.cert",
        "--tls-key-file", f"{db_dir}/server.key",
        "--tls-ca-cert-file", f"{db_dir}/ca.crt",
        "--requirepass", "snakeoil",
        "--tls-auth-clients", "no",
    ]
    env = extra_env.copy()
    env.update(flamegraph_env(f"{os.getcwd()}/redis-{system}"))

    with spawn(redis_server, *args, extra_env=env) as proc:
        print(f"waiting for redis for {system} benchmark...", end="")
        while True:
            try:
                self.nc_command.run(
                    "bin/nc", ["-z", "-v", self.settings.local_dpdk_ip, "6379"]
                )
                break
            except subprocess.CalledProcessError:
                status = proc.poll()
                if status is not None:
                    raise OSError(f"redis-server exited with {status}")
                time.sleep(1)

        # The load and run phases share all connection settings.
        ycsb_flags = [
            "-s",
            "-P", f"{self.remote_ycsb.nix_path}/share/ycsb/workloads/workloada",
            "-p", f"redis.host={self.settings.local_dpdk_ip}",
            "-p", "redis.port=6379",
            "-p", "redis.timeout=600000",
            "-p", f"recordcount={self.record_count}",
            "-p", f"operationcount={self.operation_count}",
            "-p", "redis.password=snakeoil",
        ]
        self.remote_ycsb.run("bin/ycsb", ["load", "redis"] + ycsb_flags)
        run_proc = self.remote_ycsb.run(
            "bin/ycsb", ["run", "redis", "-threads", "16"] + ycsb_flags
        )
        process_ycsb_out(run_proc.stdout, system, stats)
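
# process_ycsb_out is defined elsewhere; a minimal sketch of such a parser,
# assuming YCSB's CSV-style status lines such as
# "[OVERALL], Throughput(ops/sec), 12345.6" (the real helper may also record
# per-operation latencies):
def _process_ycsb_sketch(output: str, system: str, stats: Dict[str, List]) -> None:
    tp = re.search(r"\[OVERALL\], Throughput\(ops/sec\), ([0-9.]+)", output)
    if tp is None:
        raise RuntimeError(f"could not parse ycsb output for {system}")
    stats["system"].append(system)
    stats["throughput [ops/s]"].append(float(tp.group(1)))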
def benchmark_simpleio(
    storage: Storage,
    system: str,
    attr: str,
    directory: str,
    stats: Dict[str, List],
    bs: int,
    extra_env: Dict[str, str] = {},
    do_write: bool = False,
) -> None:
    # Collect the benchmark-specific variables separately so the command line
    # printed below stays readable; the subprocess gets them on top of the
    # inherited environment.
    extra_vars = dict(SGXLKL_CWD=directory)
    extra_vars.update(flamegraph_env(f"simpleio-{system}-{NOW}"))
    enable_sgxio = "1" if system == "sgx-io" else "0"
    extra_vars.update(SGXLKL_ENABLE_SGXIO=enable_sgxio)
    threads = "1" if system == "sgx-io" else "8"
    extra_vars.update(SGXLKL_ETHREADS=threads)
    extra_vars.update(extra_env)
    simpleio = nix_build(attr)

    stdout: Optional[int] = subprocess.PIPE
    if os.environ.get("SGXLKL_ENABLE_GDB", "0") == "1":
        stdout = None

    size = str(10 * 1024 * 1024 * 1024)  # 10G
    cmd = [
        simpleio,
        "bin/simpleio",
        f"{directory}/file",
        size,
        "0" if do_write else "1",
        "1",
        str(bs * 1024),
    ]
    env_string = [f"{k}={v}" for k, v in extra_vars.items()]
    print(f"$ {' '.join(env_string)} {' '.join(cmd)}")

    env = os.environ.copy()
    env.update(extra_vars)
    report = ""
    in_results = False
    proc = subprocess.Popen(cmd, stdout=stdout, text=True, env=env)
    try:
        assert proc.stdout is not None
        # simpleio wraps its JSON summary in <result>/</result> marker lines.
        for line in proc.stdout:
            print(f"stdout: {line}", end="")
            if line == "<result>\n":
                in_results = True
            elif in_results and line == "</result>\n":
                break
            elif in_results:
                report = line
    finally:
        proc.send_signal(signal.SIGINT)

    jsondata = json.loads(report)
    stats["system"].append(system)
    stats["bytes"].append(jsondata["bytes"])
    stats["time"].append(jsondata["time"])
    stats["workload"].append("write" if do_write else "read")
    stats["batch-size"].append(bs)
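
# For reference, the parsing loop above expects simpleio to emit its summary
# wrapped in marker lines, e.g. (illustrative values):
#   <result>
#   {"bytes": 10737418240, "time": 4.2}
#   </result>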