def test_no_color(self):
    """Verify the NO_COLOR env var toggles the noColor flag reported by deno."""
    script = os.path.join(tests_path, "no_color.js")
    # With NO_COLOR set, the script should report colors disabled.
    output = run_output([self.deno_exe, "run", script],
                        merge_env={"NO_COLOR": "1"})
    assert output.strip() == "noColor true"
    # Without NO_COLOR, colors are enabled by default.
    output = run_output([self.deno_exe, "run", script])
    assert output.strip() == "noColor false"
def test_no_color(deno_exe):
    """Check NO_COLOR env var support of the given deno executable."""
    sys.stdout.write("no_color test...")
    sys.stdout.flush()
    script = os.path.join(tests_path, "no_color.js")
    # NO_COLOR=1 must disable colors.
    output = run_output([deno_exe, script], merge_env={"NO_COLOR": "1"})
    assert output.strip() == "noColor true"
    # Default environment keeps colors on.
    output = run_output([deno_exe, script])
    assert output.strip() == "noColor false"
    print(green_ok())
def test_no_color(deno_exe):
    """Exercise the NO_COLOR environment variable handling of deno.

    Runs no_color.js twice: once with NO_COLOR=1 (expecting colors off)
    and once in a clean environment (expecting colors on).
    """
    sys.stdout.write("no_color test...")
    sys.stdout.flush()
    test_file = os.path.join(tests_path, "no_color.js")
    for env, expected in ({"NO_COLOR": "1"}, "noColor true"), (None,
                                                               "noColor false"):
        if env is not None:
            output = run_output([deno_exe, test_file], merge_env=env)
        else:
            output = run_output([deno_exe, test_file])
        assert output.strip() == expected
    print(green_ok())
def main(argv):
    """Run the benchmark suite and append results to the gh-pages data files.

    argv: full argument vector; argv[1] may name the build directory.
    """
    if len(argv) == 2:
        # BUG FIX: previously read sys.argv[1]; honor the argv parameter so
        # callers that pass a custom argv are not silently ignored.
        build_dir = argv[1]
    elif len(argv) == 1:
        build_dir = build_path()
    else:
        print("Usage: tools/benchmark.py [build_dir]")
        sys.exit(1)
    http_server.spawn()
    deno_path = os.path.join(build_dir, "deno")
    benchmark_file = os.path.join(build_dir, "benchmark.json")
    os.chdir(root_path)
    import_data_from_gh_pages()
    # TODO: Use hyperfine in //third_party
    run([
        "hyperfine", "--ignore-failure", "--export-json", benchmark_file,
        "--warmup", "3"
    ] + [
        deno_path + " " + " ".join(args) for [_, args] in exec_time_benchmarks
    ])
    all_data = read_json(all_data_file)
    benchmark_data = read_json(benchmark_file)
    sha1 = run_output(["git", "rev-parse", "HEAD"]).strip()
    new_data = {
        "created_at": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "sha1": sha1,
        "binary_size": {},
        "thread_count": {},
        "syscall_count": {},
        "benchmark": {}
    }
    # hyperfine writes one result per command, in the same order we ran them.
    for [[name, _], data] in zip(exec_time_benchmarks,
                                 benchmark_data["results"]):
        new_data["benchmark"][name] = {
            "mean": data["mean"],
            "stddev": data["stddev"],
            "user": data["user"],
            "system": data["system"],
            "min": data["min"],
            "max": data["max"]
        }
    new_data["binary_size"] = get_binary_sizes(build_dir)
    # Cannot run throughput benchmark on windows because they don't have nc or
    # pipe.
    if os.name != 'nt':
        hyper_hello_path = os.path.join(build_dir, "hyper_hello")
        new_data["throughput"] = run_throughput(deno_path)
        new_data["req_per_sec"] = http_benchmark(deno_path, hyper_hello_path)
    if "linux" in sys.platform:
        # Thread count test, only on linux
        new_data["thread_count"] = run_thread_count_benchmark(deno_path)
        new_data["syscall_count"] = run_syscall_count_benchmark(deno_path)
    all_data.append(new_data)
    write_json(all_data_file, all_data)
    write_json(recent_data_file, all_data[-20:])
def test_fmt(self):
    """`deno fmt` should rewrite a badly formatted file to the fixed form."""
    temp_dir = mkdtemp()
    try:
        fixed_filename = os.path.join(tests_path, "badly_formatted_fixed.js")
        src = os.path.join(tests_path, "badly_formatted.js")
        dst = os.path.join(temp_dir, "badly_formatted.js")
        shutil.copyfile(src, dst)
        # Set DENO_DIR to the temp dir to test an initial fetch of prettier.
        # TODO(ry) This make the test depend on internet access which is not
        # ideal. We should have prettier in the repo already, and we could
        # fetch it instead through tools/http_server.py.
        deno_dir = temp_dir
        result = run_output(
            [os.path.join(root_path, self.deno_exe), "fmt", dst],
            cwd=temp_dir,
            merge_env={"DENO_DIR": deno_dir},
            exit_on_fail=True,
            quiet=True)
        self.assertEqual(result.code, 0)
        with open(fixed_filename) as f:
            expected = f.read()
        with open(dst) as f:
            actual = f.read()
        self.assertEqual(expected, actual)
    finally:
        shutil.rmtree(temp_dir)
def main():
    """Fail (exit 1) when running the formatter changes any tracked file."""
    util.run([sys.executable, "tools/format.py"])
    output = util.run_output(
        ["git", "status", "-uno", "--porcelain", "--ignore-submodules"])
    # A non-empty porcelain status means the formatter touched something.
    if output:
        print("Run tools/format.py ")
        print(output)
        sys.exit(1)
def test_ts_library_builder(self):
    """Run the ts_library_builder self-test under ts-node and check output."""
    result = run_output(
        [
            "node",
            "./node_modules/.bin/ts-node",
            "--project",
            "tools/ts_library_builder/tsconfig.json",
            "tools/ts_library_builder/test.ts",
        ],
        quiet=True)
    self.assertEqual(result.code, 0)
    assert "ts_library_builder ok" in result.out
def main():
    """Exit non-zero when tools/format.py leaves the working tree dirty."""
    util.run([sys.executable, "tools/format.py"])
    result = util.run_output(
        ["git", "status", "-uno", "--porcelain", "--ignore-submodules"],
        exit_on_fail=True)
    # Any porcelain output means at least one file was reformatted.
    if result.out:
        print("Run tools/format.py ")
        print(result.out)
        sys.exit(1)
def run_deno(self, deno_dir=None): cmd = [ self.deno_exe, "run", "http://localhost:4545/tests/subdir/print_hello.ts" ] deno_dir_env = {"DENO_DIR": deno_dir} if deno_dir is not None else None res = run_output(cmd, quiet=True, env=deno_dir_env) print res.code, res.out, res.err self.assertEqual(res.code, 0)
def test_exec_path(self): cmd = [ self.deno_exe, "run", "--allow-run", "--allow-env", "tests/exec_path.ts" ] result = run_output(cmd, quiet=True) print "exec_path", result.code print result.out print result.err assert self.deno_exe in result.out.strip() self.assertEqual(result.code, 0)
def test_fetch(self):
    """`deno fetch` should download imports into DENO_DIR without output."""
    deno_dir = mkdtemp()
    try:
        t = os.path.join(tests_path, "006_url_imports.ts")
        output = run_output([self.deno_exe, "fetch", t],
                            merge_env={"DENO_DIR": deno_dir})
        assert output == ""
        # Check that we actually did the prefetch.
        # BUG FIX: the exists() result was previously computed but never
        # asserted, so this check could not fail.
        assert os.path.exists(
            os.path.join(
                deno_dir,
                "deps/http/localhost_PORT4545/tests/subdir/mod2.ts"))
    finally:
        shutil.rmtree(deno_dir)
def main():
    """Format the tree with tools/format.ts; exit 1 if anything changed."""
    deno_path = lookup_deno_path()
    if not deno_path:
        print("No available deno executable.")
        sys.exit(1)
    util.run([deno_path, "--allow-run", "tools/format.ts"])
    output = util.run_output(
        ["git", "status", "-uno", "--porcelain", "--ignore-submodules"])
    # A non-empty porcelain status means the formatter touched something.
    if output:
        print("Run tools/format.ts ")
        print(output)
        sys.exit(1)
def main(argv):
    """Run all benchmarks and append the results to the gh-pages data files.

    argv: full argument vector; argv[1] may name the build directory.
    """
    if len(argv) == 2:
        # BUG FIX: previously read sys.argv[1]; honor the argv parameter so
        # callers that pass a custom argv are not silently ignored.
        build_dir = argv[1]
    elif len(argv) == 1:
        build_dir = build_path()
    else:
        print("Usage: tools/benchmark.py [build_dir]")
        sys.exit(1)
    sha1 = run_output(["git", "rev-parse", "HEAD"],
                      exit_on_fail=True).out.strip()
    http_server.spawn()
    deno_exe = os.path.join(build_dir, "deno")
    os.chdir(root_path)
    import_data_from_gh_pages()
    new_data = {
        "created_at": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "sha1": sha1,
    }
    # TODO(ry) The "benchmark" benchmark should actually be called "exec_time".
    # When this is changed, the historical data in gh-pages branch needs to be
    # changed too.
    new_data["benchmark"] = run_exec_time(deno_exe, build_dir)
    new_data["binary_size"] = get_binary_sizes(build_dir)
    new_data["bundle_size"] = bundle_benchmark(deno_exe)
    # Cannot run throughput benchmark on windows because they don't have nc or
    # pipe.
    if os.name != 'nt':
        new_data["throughput"] = run_throughput(deno_exe)
        run_http(build_dir, new_data)
    if "linux" in sys.platform:
        run_strace_benchmarks(deno_exe, new_data)
        new_data["max_memory"] = run_max_mem_benchmark(deno_exe)
    print("===== <BENCHMARK RESULTS>")
    print(json.dumps(new_data, indent=2))
    print("===== </BENCHMARK RESULTS>")
    all_data = read_json(all_data_file)
    all_data.append(new_data)
    write_json(all_data_file, all_data)
    write_json(recent_data_file, all_data[-20:])
def test_exec_path(self): cmd = [ self.deno_exe, "run", "--allow-run", "--allow-env", "tests/exec_path.ts" ] result = run_output(cmd, quiet=True) print "exec_path", result self.assertEqual(result.code, 0) if os.name == "nt": # When running in github actions, the windows drive letter of the # executable path reported by deno has a different case than the one # reported by python. assert self.deno_exe.upper() in result.out.strip().upper() assert self.deno_exe[1:] in result.out.strip() else: assert self.deno_exe in result.out.strip()
def test_fetch(self):
    """`deno fetch` downloads remote imports into DENO_DIR, printing nothing."""
    deno_dir = mkdtemp()
    try:
        t = os.path.join(tests_path, "006_url_imports.ts")
        result = run_output([self.deno_exe, "fetch", t],
                            quiet=True,
                            merge_env={"DENO_DIR": deno_dir})
        self.assertEqual(result.out, "")
        self.assertEqual(result.code, 0)
        # Check that we actually did the prefetch.
        # BUG FIX: the exists() result was previously computed but never
        # asserted, so this check could not fail.
        assert os.path.exists(
            os.path.join(
                deno_dir,
                "deps/http/localhost_PORT4545/cli/tests/subdir/mod2.ts"))
    finally:
        shutil.rmtree(deno_dir)
def maybe_add_default_target(args):
    """Append the ':all' target when args names no deno-owned target.

    Queries ninja for the full target list, derives the deno targets
    (labels starting with ':'), and returns args unchanged if one of them
    is already requested; otherwise returns args + [":all"].
    """
    lines = run_output(
        [third_party.ninja_path, "-t", "targets"],
        env=third_party.google_env(),
        quiet=True).split("\n")
    # Each line looks like "name: rule"; keep only the target name.
    targets = [line.rsplit(":", 1)[0] for line in lines]
    deno_targets = [t for t in targets if t.startswith(":")]
    # Accept both the ':name' and bare 'name' spellings.
    deno_targets += [t.lstrip(":") for t in deno_targets]
    if not any(a in deno_targets for a in args):
        args += [":all"]
    return args
def fetch_test(deno_exe):
    """Prefetch remote imports with `deno fetch` and verify they are cached."""
    sys.stdout.write("fetch_test...")
    sys.stdout.flush()
    deno_dir = mkdtemp()
    try:
        t = os.path.join(tests_path, "006_url_imports.ts")
        output = run_output([deno_exe, "fetch", t],
                            merge_env={"DENO_DIR": deno_dir})
        assert output == ""
        # Check that we actually did the prefetch.
        # BUG FIX: the exists() result was previously computed but never
        # asserted, so this check could not fail.
        assert os.path.exists(
            os.path.join(deno_dir,
                         "deps/http/localhost_PORT4545/tests/subdir/mod2.ts"))
    finally:
        shutil.rmtree(deno_dir)
    print(green_ok())
def prefetch_test(deno_exe):
    """Run deno --prefetch and verify imports are cached in DENO_DIR."""
    sys.stdout.write("prefetch_test...")
    sys.stdout.flush()
    deno_dir = mkdtemp()
    try:
        t = os.path.join(tests_path, "006_url_imports.ts")
        output = run_output([deno_exe, "--prefetch", t],
                            merge_env={"DENO_DIR": deno_dir})
        assert output == ""
        # Check that we actually did the prefetch.
        # BUG FIX: the exists() result was previously computed but never
        # asserted, so this check could not fail.
        assert os.path.exists(
            os.path.join(deno_dir,
                         "deps/http/localhost_PORT4545/tests/subdir/mod2.ts"))
    finally:
        shutil.rmtree(deno_dir)
    print(green_ok())
def prefetch_test(deno_exe):
    """Prefetch remote imports into a fresh DENO_DIR and verify the cache."""
    sys.stdout.write("prefetch_test...")
    sys.stdout.flush()
    # On Windows, set the base directory that mkdtemp() uses explicitly. If not,
    # it'll use the short (8.3) path to the temp dir, which triggers the error
    # 'TS5009: Cannot find the common subdirectory path for the input files.'
    temp_dir = os.environ["TEMP"] if os.name == 'nt' else None
    deno_dir = tempfile.mkdtemp(dir=temp_dir)
    try:
        t = os.path.join(tests_path, "006_url_imports.ts")
        output = run_output([deno_exe, "--prefetch", t],
                            merge_env={"DENO_DIR": deno_dir})
        assert output == ""
        # Check that we actually did the prefetch.
        # BUG FIX: the exists() result was previously computed but never
        # asserted, so this check could not fail.
        assert os.path.exists(
            os.path.join(deno_dir,
                         "deps/http/localhost_PORT4545/tests/subdir/mod2.ts"))
    finally:
        shutil.rmtree(deno_dir)
    print(green_ok())
def main():
    """Benchmark the current build and write bench.json into the build dir."""
    build_dir = build_path()
    sha1 = run_output(["git", "rev-parse", "HEAD"],
                      exit_on_fail=True).out.strip()
    deno_exe = os.path.join(build_dir, "deno")
    os.chdir(root_path)
    new_data = {
        "created_at": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "sha1": sha1,
    }
    # TODO(ry) The "benchmark" benchmark should actually be called "exec_time".
    # When this is changed, the historical data in gh-pages branch needs to be
    # changed too.
    new_data["benchmark"] = run_exec_time(deno_exe, build_dir)
    new_data["binary_size"] = get_binary_sizes(build_dir)
    new_data["bundle_size"] = bundle_benchmark(deno_exe)
    # Cannot run throughput benchmark on windows because they don't have nc or
    # pipe.
    if os.name != 'nt':
        new_data["throughput"] = run_throughput(deno_exe)
        run_http(build_dir, new_data)
    if "linux" in sys.platform:
        run_strace_benchmarks(deno_exe, new_data)
        new_data["max_memory"] = run_max_mem_benchmark(deno_exe)
    print("===== <BENCHMARK RESULTS>")
    print(json.dumps(new_data, indent=2))
    print("===== </BENCHMARK RESULTS>")
    write_json(os.path.join(build_dir, "bench.json"), new_data)
def main(argv):
    """Time the benchmark scripts with hyperfine and append to the data file.

    argv: full argument vector; argv[1] may name the build directory.
    """
    if len(argv) == 2:
        # BUG FIX: previously read sys.argv[1]; honor the argv parameter so
        # callers that pass a custom argv are not silently ignored.
        build_dir = argv[1]
    elif len(argv) == 1:
        build_dir = build_path()
    else:
        print("Usage: tools/benchmark.py [build_dir]")
        sys.exit(1)
    deno_path = os.path.join(build_dir, "deno")
    benchmark_file = os.path.join(build_dir, "benchmark.json")
    os.chdir(root_path)
    import_data_from_gh_pages()
    # TODO: Use hyperfine in //third_party
    run(["hyperfine", "--export-json", benchmark_file, "--warmup", "3"] +
        [deno_path + " " + " ".join(args) for [_, args] in benchmarks])
    all_data = read_json(data_file)
    benchmark_data = read_json(benchmark_file)
    sha1 = run_output(["git", "rev-parse", "HEAD"]).strip()
    new_data = {
        "created_at": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "sha1": sha1,
        "binary_size": os.path.getsize(deno_path),
        "benchmark": {}
    }
    # hyperfine writes one result per command, in the same order we ran them.
    for [[name, _], data] in zip(benchmarks, benchmark_data["results"]):
        new_data["benchmark"][name] = {
            "mean": data["mean"],
            "stddev": data["stddev"],
            "user": data["user"],
            "system": data["system"],
            "min": data["min"],
            "max": data["max"]
        }
    all_data.append(new_data)
    write_json(data_file, all_data)
def exec_path_test(deno_exe):
    """The exec_path script must print the path of the deno binary running it."""
    output = run_output([deno_exe, "tests/exec_path.ts"])
    assert deno_exe in output.strip()
def exec_path_test(deno_exe):
    """Check that Deno.execPath() reports the binary that was invoked."""
    command = [deno_exe, "tests/exec_path.ts"]
    reported = run_output(command).strip()
    assert deno_exe in reported
def test_exec_path(self):
    """Deno.execPath() should contain the invoked executable's path."""
    result = run_output([self.deno_exe, "run", "tests/exec_path.ts"],
                        quiet=True)
    assert self.deno_exe in result.out.strip()
    self.assertEqual(result.code, 0)
def test_exec_path(self):
    """Deno.execPath() should contain the invoked executable's path."""
    output = run_output([self.deno_exe, "run", "tests/exec_path.ts"])
    assert self.deno_exe in output.strip()
def main(argv):
    """Run the exec-time and system benchmarks and append to gh-pages data.

    argv: full argument vector; argv[1] may name the build directory.
    """
    if len(argv) == 2:
        # BUG FIX: previously read sys.argv[1]; honor the argv parameter so
        # callers that pass a custom argv are not silently ignored.
        build_dir = argv[1]
    elif len(argv) == 1:
        build_dir = build_path()
    else:
        print("Usage: tools/benchmark.py [build_dir]")
        sys.exit(1)
    http_server.spawn()
    deno_path = os.path.join(build_dir, "deno")
    benchmark_file = os.path.join(build_dir, "benchmark.json")
    os.chdir(root_path)
    import_data_from_gh_pages()
    hyperfine = prebuilt.load_hyperfine()
    run([
        hyperfine, "--ignore-failure", "--export-json", benchmark_file,
        "--warmup", "3"
    ] + [
        deno_path + " " + " ".join(args) for [_, args] in exec_time_benchmarks
    ])
    all_data = read_json(all_data_file)
    benchmark_data = read_json(benchmark_file)
    sha1 = run_output(["git", "rev-parse", "HEAD"]).strip()
    new_data = {
        "created_at": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "sha1": sha1,
        "binary_size": {},
        "thread_count": {},
        "syscall_count": {},
        "benchmark": {}
    }
    # hyperfine writes one result per command, in the same order we ran them.
    for [[name, _], data] in zip(exec_time_benchmarks,
                                 benchmark_data["results"]):
        new_data["benchmark"][name] = {
            "mean": data["mean"],
            "stddev": data["stddev"],
            "user": data["user"],
            "system": data["system"],
            "min": data["min"],
            "max": data["max"]
        }
    new_data["binary_size"] = get_binary_sizes(build_dir)
    # Cannot run throughput benchmark on windows because they don't have nc or
    # pipe.
    if os.name != 'nt':
        hyper_hello_path = os.path.join(build_dir, "hyper_hello")
        core_http_bench_exe = os.path.join(build_dir, "deno_core_http_bench")
        new_data["throughput"] = run_throughput(deno_path)
        new_data["req_per_sec"] = http_benchmark(deno_path, hyper_hello_path,
                                                 core_http_bench_exe)
    if "linux" in sys.platform:
        # Thread count test, only on linux
        new_data["thread_count"] = run_thread_count_benchmark(deno_path)
        new_data["syscall_count"] = run_syscall_count_benchmark(deno_path)
    all_data.append(new_data)
    write_json(all_data_file, all_data)
    write_json(recent_data_file, all_data[-20:])
# Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
import os
import sys

import third_party
from util import run_output, build_path

# Dump the overridden gn args of the current build to the file named by
# the first command-line argument.
out_filename = sys.argv[1]

result = run_output(
    [
        third_party.gn_path, "args",
        build_path(), "--list", "--short", "--overrides-only"
    ],
    quiet=True,
    env=third_party.google_env(),
    exit_on_fail=True)

with open(out_filename, "w") as f:
    f.write(result.out)
# Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
import os
import sys

import third_party
from util import run_output, build_path

# Write the overridden gn args of the current build into the file named by
# the first command-line argument.
out_filename = sys.argv[1]

args_list = run_output(
    [
        third_party.gn_path, "args",
        build_path(), "--list", "--short", "--overrides-only"
    ],
    env=third_party.google_env())

with open(out_filename, "w") as f:
    f.write(args_list)