def test_func_dir(tmpdir):
    """Test the creation of the memory cache directory for the function."""
    memory = Memory(cachedir=tmpdir.strpath, verbose=0)
    memory.clear()
    # Expected on-disk location: <tmpdir>/joblib/<this module's path>/f
    path = __name__.split('.')
    path.append('f')
    path = tmpdir.join('joblib', *path).strpath
    g = memory.cache(f)

    # Test that the function directory is created on demand.
    assert g._get_func_dir() == path
    assert os.path.exists(path)

    # Test that the code is stored.
    # For the following test to be robust to previous execution, we clear
    # the in-memory store.
    _FUNCTION_HASHES.clear()
    assert not g._check_previous_func_code()
    assert os.path.exists(os.path.join(path, 'func_code.py'))
    assert g._check_previous_func_code()

    # Test the robustness to failure of loading previous results: deleting
    # the pickled output must trigger recomputation, not an error.
    # (local renamed from `dir` to avoid shadowing the `dir` builtin)
    output_dir, _ = g.get_output_dir(1)
    a = g(1)
    assert os.path.exists(output_dir)
    os.remove(os.path.join(output_dir, 'output.pkl'))
    assert a == g(1)
def test_func_dir(tmpdir):
    """Test the creation of the memory cache directory for the function."""
    memory = Memory(location=tmpdir.strpath, verbose=0)

    # Expected on-disk location: <tmpdir>/joblib/<this module's path>/f
    module_parts = __name__.split('.') + ['f']
    expected_path = tmpdir.join('joblib', *module_parts).strpath

    cached_f = memory.cache(f)

    # The function directory is created on demand at the expected location.
    func_id = _build_func_identifier(f)
    func_location = os.path.join(cached_f.store_backend.location, func_id)
    assert func_location == expected_path
    assert os.path.exists(expected_path)
    assert memory.location == os.path.dirname(cached_f.store_backend.location)

    # Accessing the deprecated `cachedir` attribute still works but warns.
    with warns(DeprecationWarning) as caught:
        assert memory.cachedir == cached_f.store_backend.location
    assert len(caught) == 1
    assert "The 'cachedir' attribute has been deprecated" \
        in str(caught[-1].message)

    # Test that the code is stored.
    # For the following test to be robust to previous execution, we clear
    # the in-memory store.
    _FUNCTION_HASHES.clear()
    assert not cached_f._check_previous_func_code()
    assert os.path.exists(os.path.join(expected_path, 'func_code.py'))
    assert cached_f._check_previous_func_code()

    # Robustness to failure of loading previous results: deleting the
    # pickled output must trigger recomputation, not an error.
    func_id, args_id = cached_f._get_output_identifiers(1)
    result_dir = os.path.join(cached_f.store_backend.location,
                              func_id, args_id)
    first_result = cached_f(1)
    assert os.path.exists(result_dir)
    os.remove(os.path.join(result_dir, 'output.pkl'))
    assert first_result == cached_f(1)
def test_func_dir(tmpdir):
    """Test the creation of the memory cache directory for the function."""
    memory = Memory(cachedir=tmpdir.strpath, verbose=0)
    # Expected on-disk location: <tmpdir>/joblib/<this module's path>/f
    path = __name__.split('.')
    path.append('f')
    path = tmpdir.join('joblib', *path).strpath
    g = memory.cache(f)

    # Test that the function directory is created on demand.
    assert g._get_func_dir() == path
    assert os.path.exists(path)

    # Test that the code is stored.
    # For the following test to be robust to previous execution, we clear
    # the in-memory store.
    _FUNCTION_HASHES.clear()
    assert not g._check_previous_func_code()
    assert os.path.exists(os.path.join(path, 'func_code.py'))
    assert g._check_previous_func_code()

    # Test the robustness to failure of loading previous results: deleting
    # the pickled output must trigger recomputation, not an error.
    # (local renamed from `dir` to avoid shadowing the `dir` builtin)
    output_dir, _ = g.get_output_dir(1)
    a = g(1)
    assert os.path.exists(output_dir)
    os.remove(os.path.join(output_dir, 'output.pkl'))
    assert a == g(1)
def test_func_dir():
    """Test the creation of the memory cache directory for the function."""
    memory = Memory(cachedir=env["dir"], verbose=0)
    memory.clear()
    # Expected on-disk location: <dir>/joblib/<this module's path>/f
    path = __name__.split(".")
    path.append("f")
    path = os.path.join(env["dir"], "joblib", *path)
    g = memory.cache(f)

    # Test that the function directory is created on demand.
    yield nose.tools.assert_equal, g._get_func_dir(), path
    yield nose.tools.assert_true, os.path.exists(path)

    # Test that the code is stored.
    # For the following test to be robust to previous execution, we clear
    # the in-memory store.
    _FUNCTION_HASHES.clear()
    yield nose.tools.assert_false, g._check_previous_func_code()
    yield nose.tools.assert_true, os.path.exists(
        os.path.join(path, "func_code.py"))
    yield nose.tools.assert_true, g._check_previous_func_code()

    # Test the robustness to failure of loading previous results: deleting
    # the pickled output must trigger recomputation, not an error.
    # (local renamed from `dir` to avoid shadowing the `dir` builtin)
    output_dir, _ = g.get_output_dir(1)
    a = g(1)
    yield nose.tools.assert_true, os.path.exists(output_dir)
    os.remove(os.path.join(output_dir, "output.pkl"))
    yield nose.tools.assert_equal, a, g(1)
def test_func_dir():
    """Test the creation of the memory cache directory for the function."""
    memory = Memory(cachedir=env['dir'], verbose=0)
    memory.clear()
    # Expected on-disk location: <dir>/joblib/<this module's path>/f
    path = __name__.split('.')
    path.append('f')
    path = os.path.join(env['dir'], 'joblib', *path)
    g = memory.cache(f)

    # Test that the function directory is created on demand.
    yield nose.tools.assert_equal, g._get_func_dir(), path
    yield nose.tools.assert_true, os.path.exists(path)

    # Test that the code is stored.
    # For the following test to be robust to previous execution, we clear
    # the in-memory store.
    _FUNCTION_HASHES.clear()
    yield nose.tools.assert_false, \
        g._check_previous_func_code()
    yield nose.tools.assert_true, \
        os.path.exists(os.path.join(path, 'func_code.py'))
    yield nose.tools.assert_true, \
        g._check_previous_func_code()

    # Test the robustness to failure of loading previous results: deleting
    # the pickled output must trigger recomputation, not an error.
    # (local renamed from `dir` to avoid shadowing the `dir` builtin)
    output_dir, _ = g.get_output_dir(1)
    a = g(1)
    yield nose.tools.assert_true, os.path.exists(output_dir)
    os.remove(os.path.join(output_dir, 'output.pkl'))
    yield nose.tools.assert_equal, a, g(1)
def test_benchopt_caching(self, n_rep):
    """Check that benchmark results are cached, reused on re-runs (also in
    parallel), and recomputed when forced with ``-f``."""
    # Start from a clean cache so the first run actually computes.
    clean([str(DUMMY_BENCHMARK_PATH)], 'benchopt', standalone_mode=False)
    # XXX - remove once this is fixed upstream with joblib/joblib#1289
    _FUNCTION_HASHES.clear()

    run_cmd = [
        str(DUMMY_BENCHMARK_PATH), '-l', '-d', SELECT_ONE_SIMULATED,
        '-s', SELECT_ONE_PGD, '-n', '1', '-r', str(n_rep),
        '-o', SELECT_ONE_OBJECTIVE, '--no-plot'
    ]
    solver_line = r'Python-PGD\[step_size=1\]:'
    uncached_repetitions = 5 * n_rep + 1

    # First run: everything is computed and stored in the cache.
    with CaptureRunOutput() as out:
        run(run_cmd, 'benchopt', standalone_mode=False)
    # If only one repetition shows up here, the clean command above did
    # not actually clear the cache.
    out.check_output(solver_line, repetition=uncached_repetitions)

    # Second identical run must hit the cache: the solver line appears once.
    with CaptureRunOutput() as out:
        run(run_cmd, 'benchopt', standalone_mode=False)
    out.check_output(solver_line, repetition=1)

    # The cache must also be hit when running with parallel workers.
    with CaptureRunOutput() as out:
        run(run_cmd + ['-j', 2], 'benchopt', standalone_mode=False)
    out.check_output(solver_line, repetition=1)

    # Swapping '-s' for '-f' forces re-running the selected solver.
    run_cmd[4] = '-f'
    with CaptureRunOutput() as out:
        run(run_cmd, 'benchopt', standalone_mode=False)
    out.check_output(solver_line, repetition=uncached_repetitions)