def test_split_merge_roundtrip(
        self, tmpdir, pspace_size, max_splits, min_items, n_splits):
    """Splitting a parameter space and merging the chunks recovers it."""
    splitter = Splitter(
        str(tmpdir), Param(x=range(pspace_size)), max_splits, min_items)
    splitter.split()

    # Stand in for the workers: copy each input chunk unchanged to outdir.
    for name in os.listdir(splitter.indir):
        src = os.path.join(splitter.indir, name)
        dest = os.path.join(splitter.outdir, name)
        save_dict_h5(dest, load_dict_h5(src))

    merged_file = os.path.join(str(tmpdir), 'result.h5')
    Splitter.merge(splitter.outdir, merged_file)
    merged = load_dict_h5(merged_file)
    assert sorted(merged['x']) == sorted(range(pspace_size))
def test_infile_with_none(tmpdir):
    """NaN fill values from merged Params survive a save/load round trip."""
    filename = str(tmpdir.join('infile.h5'))
    pspace = (Param(a=[1.], b=[1.]) + Param(a=[2.], c=[2.])).build()
    save_dict_h5(filename, pspace)
    loaded = load_dict_h5(filename)

    assert sorted(pspace.keys()) == sorted(loaded.keys())
    for key in pspace:
        for expected, actual in zip(pspace[key], loaded[key]):
            # NaN != NaN, so treat a matching pair of NaNs as equal.
            assert expected == actual or (
                np.isnan(expected) and np.isnan(actual))
def test_worker(mapper, tmpdir):
    """A Worker evaluates the function and stores inputs plus results."""
    infile = os.path.join(str(tmpdir), 'in.h5')
    outfile = os.path.join(str(tmpdir), 'out.h5')
    save_dict_h5(infile, Param(a=range(7)).build())

    Worker(mapper).start(square, infile, outfile)

    result = load_dict_h5(outfile)
    assert sorted(result['a']) == sorted(range(7))
    assert sorted(result['x']) == [i * i for i in range(7)]
def test_psydoit_file_dep(taskenv):
    """Rewriting a declared file dependency triggers recomputation."""
    in_path = os.path.join(taskenv.taskdir, 'in.txt')
    result_path = os.path.join(taskenv.workdir, 'file_dep', 'result.h5')

    with open(in_path, 'w') as f:
        f.write('2')
    psydoit(taskenv.taskdir, ['--db-file', taskenv.dbfile, 'file_dep'])
    assert sorted(load_dict_h5(result_path)['y']) == [4]

    # Ensure that modification time changes as some file systems only
    # support 1s resolution.
    time.sleep(1)

    with open(in_path, 'w') as f:
        f.write('3')
    psydoit(taskenv.taskdir, ['--db-file', taskenv.dbfile, 'file_dep'])
    assert sorted(load_dict_h5(result_path)['y']) == [8]
def test_merging_multidimensional_results(tmpdir):
    """Appending stacks multi-dimensional arrays along the first axis."""
    filename = str(tmpdir.join('r.h5'))
    first = {'a': np.zeros((2, 2, 2))}
    second = {'a': np.ones((3, 2, 2))}

    append_dict_h5(filename, first)
    append_dict_h5(filename, second)

    stored = load_dict_h5(filename)
    expected = np.concatenate([first['a'], second['a']])
    assert np.all(expected == stored['a'])
def start(self, fn, infile, outfile):
    """Start processing a parameter space.

    Loads the parameter space from *infile*, maps *fn* over it with this
    worker's mapper, and writes the resulting data to *outfile*.

    Parameters
    ----------
    fn : function
        Function to evaluate on the parameter space.
    infile : str
        Parameter space input filename.
    outfile : str
        Output filename for the results.
    """
    results = self.mapper(
        fn, Param(**load_dict_h5(infile)), **self.mapper_kwargs)
    save_dict_h5(outfile, results)
def test_merging_results_with_varying_dimensionality(tmpdir):
    """Appending arrays of differing shapes pads the result with NaNs."""
    filename = str(tmpdir.join('r.h5'))
    first = {'a': np.zeros((1, 1, 2))}
    second = {'a': np.ones((2, 2, 1))}
    expected = np.array([
        [[0., 0.], [np.nan, np.nan]],
        [[1., np.nan], [1., np.nan]],
        [[1., np.nan], [1., np.nan]]])

    append_dict_h5(filename, first)
    append_dict_h5(filename, second)

    stored = load_dict_h5(filename)
    assert expected.shape == stored['a'].shape
    for want, got in zip(expected.flat, stored['a'].flat):
        # NaN != NaN, so accept a matching pair of NaNs as equal.
        assert want == got or (np.isnan(want) and np.isnan(got))
def merge(cls, outdir, merged_filename, append=True):
    """Merge processed files together.

    Parameters
    ----------
    outdir : str
        Directory with the output files.
    merged_filename : str
        Filename of file to save with the merged results.
    append : bool
        If ``True`` the merged data will be appended, otherwise the file
        will be overwritten with the merged data.
    """
    if not append:
        # Truncate the target by writing an empty dict first.
        save_dict_h5(merged_filename, {})
    h5_names = (
        name for name in os.listdir(outdir)
        if os.path.splitext(name)[1] == '.h5')
    for name in h5_names:
        data = load_dict_h5(os.path.join(outdir, name))
        append_dict_h5(merged_filename, data)
def test_psydoit(taskenv):
    """Running the 'square' task yields the squared parameter values."""
    psydoit(taskenv.taskdir, ['--db-file', taskenv.dbfile, 'square'])
    result_path = os.path.join(taskenv.workdir, 'square', 'result.h5')
    assert sorted(load_dict_h5(result_path)['y']) == [0, 1, 4, 9]