class CaptureRunOutput(object):
    """Context to capture run cmd output and files."""

    def __init__(self):
        self.out = SuppressStd()
        self.output = None
        self.result_files = []

    def __enter__(self):
        self.output = None
        self.result_files = []

        # Redirect the stdout/stderr fd to a temp file
        self.out.__enter__()
        return self

    def __exit__(self, type, value, traceback):
        self.out.__exit__(type, value, traceback)
        self.output = self.out.output

        # Make sure to delete all the result files created by the run command.
        self.result_files = re.findall(
            r'Saving result in: (.*\.csv)', self.output
        )
        if len(self.result_files) >= 1:
            for result_file in self.result_files:
                Path(result_file).unlink()

    def check_output(self, pattern, repetition=None):
        matches = re.findall(pattern, self.output)
        if repetition is None:
            assert len(matches) > 0, self.output
        else:
            assert len(matches) == repetition, self.output
class CaptureRunOutput(object):
    """Context to capture run cmd output and files."""

    def __init__(self):
        self.out = SuppressStd()
        self.output = None
        self.result_files = []

    def __enter__(self):
        self.output = None
        self.result_files = []

        # To make it possible to capture stdout in the child workers, the
        # executor needs to be spawned inside this context, so shut down any
        # existing executor first.
        e = get_memmapping_executor(2)
        e.shutdown()

        # Redirect the stdout/stderr fd to a temp file
        self.out.__enter__()
        return self

    def __exit__(self, exc_class, value, traceback):
        self.out.__exit__(exc_class, value, traceback)
        self.output = self.out.output

        # Make sure to delete all the result files created by the run command.
        self.result_files = re.findall(
            r'Saving result in: (.*\.parquet)', self.output
        )
        if len(self.result_files) >= 1:
            for result_file in self.result_files:
                result_path = Path(result_file)
                result_path.unlink()  # remove result file

                # Remove the html files associated with this result
                result_dir = result_path.parents[0]
                stem = result_path.stem
                for html_file in result_dir.glob(f'*{stem}*.html'):
                    html_file.unlink()

        # If there was an exception, display the captured output
        if exc_class is not None:
            print(self.output)

    def check_output(self, pattern, repetition=None):
        output = self.output
        matches = re.findall(pattern, output)
        if repetition is None:
            assert len(matches) > 0, output
        else:
            assert len(matches) == repetition, output
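# Usage sketch (added for illustration, not part of the original tests): the
# context manager above is typically wrapped around a CLI call so the captured
# log can be checked and the result files are removed on exit. The `run`
# command and the SELECT_ONE_* constants are the same ones used in the
# setup methods below; the exact flags here are an assumption.
with CaptureRunOutput() as out:
    run([
        str(DUMMY_BENCHMARK_PATH), '-l', '-d', SELECT_ONE_SIMULATED,
        '-s', SELECT_ONE_PGD, '-n', '2', '-r', '1',
        '-o', SELECT_ONE_OBJECTIVE, '--no-plot'
    ], 'benchopt', standalone_mode=False)

# A single run is expected to report exactly one saved parquet file.
out.check_output(r'Saving result in: .*\.parquet', repetition=1)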
def test_call(self):

    with SuppressStd() as out:
        generate_results(
            ['--root', str(DUMMY_BENCHMARK_PATH.parent), '--no-display'],
            'benchopt', standalone_mode=False
        )

    html_results = re.findall(r'Writing results to (.*\.html)', out.output)
    html_benchmark = re.findall(
        rf'Writing {DUMMY_BENCHMARK.name} results to (.*\.html)', out.output
    )
    html_index = re.findall(r'Writing index to (.*\.html)', out.output)

    try:
        assert len(html_index) == 1, out.output
        assert len(html_benchmark) == 1, out.output
        assert len(html_results) == len(self.result_files), out.output
        print(out.output)
        for f in self.result_files:
            basename = Path(f).stem
            assert any(basename in res for res in html_results)
    finally:
        # Make sure to clean up all files even when the test fails
        for f in html_results + html_benchmark + html_index:
            Path(f).unlink()
def test_call_with_outputs(self):

    with SuppressStd() as out:
        archive([str(DUMMY_BENCHMARK_PATH), "--with-outputs"],
                'benchopt', standalone_mode=False)

    saved_files = re.findall(r'Results are in (.*\.tar.gz)', out.output)

    try:
        assert len(saved_files) == 1
        saved_file = saved_files[0]

        counts = {
            k: 0 for k in [
                "__pycache__", "outputs", "objective.py",
                "datasets", "solvers", "README"
            ]
        }

        with tarfile.open(saved_file, "r:gz") as tar:
            for elem in tar.getmembers():
                for k in counts:
                    counts[k] += k in elem.name
                assert elem.uname == "benchopt"

        assert counts["README"] == 1, counts
        assert counts["objective.py"] == 1, counts
        assert counts["datasets"] >= 1, counts
        assert counts["solvers"] >= 1, counts
        assert counts["outputs"] >= 1, counts
        assert counts["__pycache__"] == 0, counts
    finally:
        # Make sure to clean up all files even when the test fails
        for f in saved_files:
            Path(f).unlink()
def setup_class(cls):
    "Make sure at least one result file is available"

    with SuppressStd() as out:
        clean([str(DUMMY_BENCHMARK_PATH)],
              'benchopt', standalone_mode=False)
        clean([str(REQUIREMENT_BENCHMARK_PATH)],
              'benchopt', standalone_mode=False)
        run([
            str(DUMMY_BENCHMARK_PATH), '-l', '-d', SELECT_ONE_SIMULATED,
            '-s', SELECT_ONE_PGD, '-n', '2', '-r', '1',
            '-o', SELECT_ONE_OBJECTIVE, '--no-plot'
        ], 'benchopt', standalone_mode=False)
        time.sleep(1)  # Make sure there are 2 separate files
        run([
            str(DUMMY_BENCHMARK_PATH), '-l', '-d', SELECT_ONE_SIMULATED,
            '-s', SELECT_ONE_PGD, '-n', '2', '-r', '1',
            '-o', SELECT_ONE_OBJECTIVE, '--no-plot'
        ], 'benchopt', standalone_mode=False)

    result_files = re.findall(
        r'Saving result in: (.*\.parquet)', out.output
    )
    assert len(result_files) == 2, out.output
    cls.result_files = result_files
class CaptureRunOutput(object):
    """Context to capture run cmd output and files."""

    def __init__(self):
        self.out = SuppressStd()
        self.output = None
        self.result_files = []

    def __enter__(self):
        self.output = None
        self.result_files = []

        # Redirect the stdout/stderr fd to a temp file
        self.out.__enter__()
        return self

    def __exit__(self, exc_class, value, traceback):
        self.out.__exit__(exc_class, value, traceback)
        self.output = self.out.output

        # Make sure to delete all the result files created by the run command.
        self.result_files = re.findall(
            r'Saving result in: (.*\.csv)', self.output
        )
        if len(self.result_files) >= 1:
            for result_file in self.result_files:
                result_path = Path(result_file)
                result_path.unlink()  # remove csv file

                # Remove the html files associated with this result
                result_dir = result_path.parents[0]
                stem = result_path.stem
                for html_file in result_dir.glob(f'*{stem}*.html'):
                    html_file.unlink()

        # If there was an exception, display the captured output
        if exc_class is not None:
            print(self.output)

    def check_output(self, pattern, repetition=None):
        matches = re.findall(pattern, self.output)
        if repetition is None:
            assert len(matches) > 0, self.output
        else:
            assert len(matches) == repetition, self.output
def setup_class(cls):
    "Make sure at least one result file is available"

    with SuppressStd() as out:
        run([str(DUMMY_BENCHMARK_PATH), '-l', '-d', SELECT_ONE_SIMULATED,
             '-s', SELECT_ONE_PGD, '-n', '2', '-r', '1',
             '-o', SELECT_ONE_OBJECTIVE, '--no-plot'],
            'benchopt', standalone_mode=False)

    result_files = re.findall(r'Saving result in: (.*\.csv)', out.output)
    assert len(result_files) == 1, out.output
    result_file = result_files[0]
    cls.result_file = result_file
def test_valid_call(self, kind):

    with SuppressStd() as out:
        plot([str(DUMMY_BENCHMARK_PATH), '-f', self.result_file,
              '-k', kind, '--no-display', '--no-html'],
             'benchopt', standalone_mode=False)

    saved_files = re.findall(r'Save .* as: (.*\.pdf)', out.output)
    assert len(saved_files) == 1
    saved_file = saved_files[0]
    assert kind in saved_file

    Path(saved_file).unlink()
def run(self, n_iter):
    m, n = self.X.shape
    w = np.zeros((n,))
    zz = np.zeros((m,))
    index = np.zeros((n,), dtype=int)

    out = SuppressStd()
    try:
        with out:
            self.w, _, _ = nnls(
                self.X, m, n, self.y, w, zz, index, n_iter
            )
    except BaseException:
        print(out.output)
        raise
def setup_class(cls):
    "Make sure at least one result file is available"

    out = SuppressStd()
    with out:
        run([
            str(DUMMY_BENCHMARK), '-l', '-d', 'simulated*500',
            '-s', 'pgd*False', '-n', '1', '-r', '1', '-p', '0.1', '--no-plot'
        ], 'benchopt', standalone_mode=False)

    result_files = re.findall(r'Saving result in: (.*\.csv)', out.output)
    assert len(result_files) == 1, out.output
    result_file = result_files[0]
    cls.result_file = result_file
def test_valid_call_html(self):

    with SuppressStd() as out:
        plot([
            str(DUMMY_BENCHMARK_PATH), '-f', self.result_file,
            '--no-display', '--html'
        ], 'benchopt', standalone_mode=False)

    saved_files = re.findall(
        r'Writing.* results to (.*\.html)', out.output
    )
    try:
        assert len(saved_files) == 2
    finally:
        # Make sure to clean up all files even when the test fails
        for f in saved_files:
            Path(f).unlink()
def test_valid_call(self, kind):

    with SuppressStd() as out:
        plot([
            str(DUMMY_BENCHMARK_PATH), '-f', self.result_file,
            '-k', kind, '--no-display', '--no-html'
        ], 'benchopt', standalone_mode=False)

    saved_files = re.findall(r'Save .* as: (.*\.pdf)', out.output)
    try:
        assert len(saved_files) == 1
        assert kind in saved_files[0]
    finally:
        # Make sure to clean up all files even when the test fails
        for f in saved_files:
            Path(f).unlink()
def get_jl_interpreter():
    global jl_interpreter
    if jl_interpreter is None:
        # Only suppress std if not in debug mode.
        out = nullcontext() if DEBUG else SuppressStd()
        try:
            with out:
                import julia

                # configure the julia runtime
                runtime_config = {
                    'compiled_modules': False,
                    'debug': bool(DEBUG)
                }
                julia.install()
                jl_interpreter = julia.Julia(**runtime_config)
        except BaseException:
            if hasattr(out, 'output'):
                print(out.output)
            raise

    return jl_interpreter
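# Usage sketch (added for illustration, not in the original source): the
# helper lazily creates the PyJulia interpreter once and caches it in the
# module-level `jl_interpreter`, so repeated calls reuse the same object.
# `julia.Julia.eval` is PyJulia's API for evaluating Julia code; the snippet
# below only illustrates the caching behaviour.
jl = get_jl_interpreter()
assert jl is get_jl_interpreter()  # second call returns the cached instance
two = jl.eval("1 + 1")             # evaluate a trivial Julia expression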
def __init__(self):
    self.out = SuppressStd()
    self.output = None
    self.result_files = []