def build_search_space(function):
    # abuse ConfigScope to parse search space definitions
    scope = ConfigScope(function)
    space_dict = dict(scope())
    # parse the generic dict into a search space
    space = SearchSpace(space_dict)
    return space
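# For context, a minimal sketch of what build_search_space relies on:
# ConfigScope(fn)() executes the function body and returns its local
# variables as a dict. The space() function and its values below are
# hypothetical; SearchSpace comes from the surrounding project, not Sacred.
from sacred.config import ConfigScope

def space():
    lr = [0.001, 0.01, 0.1]  # candidate learning rates
    batch_size = [32, 64]    # candidate batch sizes

space_dict = dict(ConfigScope(space)())
# space_dict == {'lr': [0.001, 0.01, 0.1], 'batch_size': [32, 64]}
# build_search_space(space) would wrap this dict in a SearchSpace.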
def named_config(self, func):
    """
    Decorator to turn a function into a named configuration.

    See :ref:`named_configurations`.
    """
    config_scope = ConfigScope(func)
    self._add_named_config(func.__name__, config_scope)
    return config_scope
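# A minimal usage sketch for the decorator above, assuming a Sacred
# Experiment; the experiment name, parameters, and values are illustrative.
from sacred import Experiment

ex = Experiment('demo')

@ex.config
def base():
    lr = 0.1  # default learning rate

@ex.named_config
def fast():
    lr = 1.0  # applied only when the 'fast' named config is selected

@ex.main
def run(lr):
    print('training with lr =', lr)

# ex.run()                        -> lr == 0.1
# ex.run(named_configs=['fast'])  -> lr == 1.0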
def dataset_default_config(config):
    if isinstance(config, list):
        configs = []
        for c in config:
            configs.append(dataset_default_config(c))
        return configs

    scope = ConfigScope(dataset_cfg_fn)
    config = scope(preset=config)
    config['transform'] = transform_default_config(
        config.get('transform', {}))
    return config
def config(self, function):
    """
    Decorator to add a function to the configuration of the Experiment.

    The decorated function is turned into a
    :class:`~sacred.config_scope.ConfigScope` and added to the
    Ingredient/Experiment.

    When the experiment is run, this function will also be executed and
    all json-serializable local variables inside it will end up as
    entries in the configuration of the experiment.
    """
    self.configurations.append(ConfigScope(function))
    return self.configurations[-1]
def test_searcher_bm25_grid(tmpdir_as_cache, tmpdir, dummy_index):
    searcher_config = ConfigScope(BM25Grid.config)()
    searcher_config["_name"] = BM25Grid.name
    searcher = BM25Grid(searcher_config)
    searcher.modules["index"] = dummy_index
    bs = np.around(np.arange(0.1, 1 + 0.1, 0.1), 1)
    k1s = np.around(np.arange(0.1, 1 + 0.1, 0.1), 1)
    topics_fn = DummyBenchmark.topic_file

    output_fn = searcher.query_from_file(
        topics_fn, os.path.join(searcher.get_cache_path(), DummyBenchmark.name))
    assert output_fn == os.path.join(searcher.get_cache_path(), DummyBenchmark.name)

    for k1 in k1s:
        for b in bs:
            assert os.path.exists(
                os.path.join(output_fn, "searcher_k1={0},b={1}".format(k1, b)))
    assert os.path.exists(os.path.join(output_fn, "done"))
def test_searcher_bm25(tmpdir_as_cache, tmpdir, dummy_index):
    searcher_config = ConfigScope(BM25.config)()
    searcher_config["_name"] = BM25.name
    searcher = BM25(searcher_config)
    searcher.modules["index"] = dummy_index
    topics_fn = DummyBenchmark.topic_file

    output_fn = searcher.query_from_file(
        topics_fn, os.path.join(searcher.get_cache_path(), DummyBenchmark.name))
    assert output_fn == os.path.join(searcher.get_cache_path(), DummyBenchmark.name)

    with open(os.path.join(output_fn, "searcher"), "r") as fp:
        file_contents = fp.readlines()
    assert file_contents == [
        "301 Q0 LA010189-0001 1 0.139500 Anserini\n",
        "301 Q0 LA010189-0002 2 0.097000 Anserini\n",
    ]
"""doc""" pass def _config_scope_with_multiline_doc(): """Multiline docstring! """ pass @pytest.mark.parametrize( 'indent, path, named_config, expected', [(0, 'a', None, 'a'), (1, 'b', None, ' b'), (4, 'a.b.c', None, ' a.b.c'), (0, 'c', ConfigScope(_config_scope_with_single_line_doc), 'c' + COLOR_DOC + ' # doc' + ENDC), (0, 'd', ConfigScope(_config_scope_with_multiline_doc), 'd' + COLOR_DOC + '\n """Multiline\n docstring!\n """' + ENDC)]) def test_format_named_config(indent, path, named_config, expected): assert _format_named_config(indent, path, named_config) == expected def test_format_named_configs(): ingred = Ingredient('ingred') ex = Experiment(name='experiment', ingredients=[ingred]) @ingred.named_config def named_config1(): pass
"""doc""" pass def _config_scope_with_multiline_doc(): """Multiline docstring! """ pass @pytest.mark.parametrize('indent, path, named_config, expected', [ (0, 'a', None, 'a'), (1, 'b', None, ' b'), (4, 'a.b.c', None, ' a.b.c'), (0, 'c', ConfigScope(_config_scope_with_single_line_doc), 'c' + COLOR_DOC + ' # doc' + ENDC), (0, 'd', ConfigScope(_config_scope_with_multiline_doc), 'd' + COLOR_DOC + '\n """Multiline\n docstring!\n """' + ENDC) ]) def test_format_named_config(indent, path, named_config, expected): assert _format_named_config(indent, path, named_config) == expected def test_format_named_configs(): ingred = Ingredient('ingred') ex = Experiment(name='experiment', ingredients=[ingred]) @ingred.named_config def named_config1(): pass
def build_default_config(name, config):
    fn = build_cfg_fn(name)
    scope = ConfigScope(fn)
    return scope(preset=config)
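# A hedged sketch of the preset mechanism used above. build_cfg_fn is
# project-specific, so the config function below is a stand-in; its entry
# names and values are assumptions for illustration only.
from sacred.config import ConfigScope

def transform_cfg_fn():
    resize = [256, 128]          # default image size
    interpolation = 'bilinear'   # default resampling mode

cfg = ConfigScope(transform_cfg_fn)(preset={'normalize': True})
# cfg combines the scope's defaults with preset entries the scope did not
# define, roughly: {'resize': [256, 128], 'interpolation': 'bilinear',
#                   'normalize': True}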
def build_config(config):
    def build_default_config(name, config):
        fn = build_cfg_fn(name)
        scope = ConfigScope(fn)
        return scope(preset=config)

    def model_default_config(config):
        name = config['name']
        return build_default_config(name, config)

    def transform_default_config(config):
        return build_default_config('transform', config)

    def dataset_default_config(config):
        if isinstance(config, list):
            configs = []
            for c in config:
                configs.append(dataset_default_config(c))
            return configs
        scope = ConfigScope(dataset_cfg_fn)
        config = scope(preset=config)
        config['transform'] = transform_default_config(
            config.get('transform', {}))
        return config

    def single_sampler_default_config(config):
        name = config['type']
        config['dataset'] = dataset_default_config(config['dataset'])
        return build_default_config(name, config)

    def sampler_default_config(config):
        if "samplers" in config:
            # multi sampler
            samplers = config['samplers']
            for name, c in samplers.items():
                samplers[name] = single_sampler_default_config(c)
            return config
        else:
            return single_sampler_default_config(config)

    def loss_default_config(config):
        if isinstance(config, list):
            configs = []
            for c in config:
                configs.append(loss_default_config(c))
            return configs
        name = config['name']
        cfg = build_default_config(name, config)
        return cfg

    def scheduler_default_config(config):
        if 'preset' in config:
            name = "{}_scheduler".format(config['preset'])
        else:
            name = "{}_scheduler".format(config['name'])
        return build_default_config(name, config)

    def optimizer_default_config(config):
        name = config['name']
        return build_default_config(name, config)

    def training_default_config(config):
        config = build_default_config('train', config)
        for key, values in config.items():
            if key == 'model':
                config['model'] = model_default_config(config['model'])
            elif key == 'dataloader':
                config['dataloader']['sampler'] = sampler_default_config(
                    config['dataloader']['sampler'])
            elif key == 'losses':
                config['losses'] = loss_default_config(config['losses'])
            elif key == 'scheduler':
                config['scheduler'] = scheduler_default_config(
                    config['scheduler'])
            elif key == 'optimizer':
                config['optimizer'] = optimizer_default_config(
                    config['optimizer'])
            elif key == 'checkpoint_frequency':
                pass
            elif key == 'restore_checkpoint':
                pass
            elif key == 'epochs':
                pass
            elif key == 'num_workers':
                pass
            else:
                raise ValueError(key)
        return config

    # TODO make central or get rid
    reid_datasets = ["market-1501", "duke"]
    reid_attribute_datasets = ["market-1501-attribute", "duke-attribute"]
    pose_datasets = ["mpii"]

    def evaluation_dataset_default_config(config):
        if isinstance(config, list):
            configs = []
            for c in config:
                configs.append(evaluation_dataset_default_config(c))
            return configs
        name = config['name'].lower()
        config = build_default_config('evaluation_dataset', config)
        if name in reid_datasets:
            return build_default_config('reid_evaluation', config)
        elif name in reid_attribute_datasets:
            return build_default_config('reid_attribute_evaluation', config)
        elif name in pose_datasets:
            return config
        else:
            raise ValueError(
                "Unknown evaluation dataset in config builder: {}.".format(
                    name))

    def evaluation_default_config(config):
        if 'experiment' in config:
            # TODO restore the config
            return config
        # config['sampler']['datasets'] = evaluation_dataset_default_config(config['sampler']['datasets'])
        config = build_default_config('evaluation', config)
        return config

    scope = ConfigScope(general_cfg_fn)
    config = scope(preset=config)
    for key, value in config.items():
        if key == 'training':
            config['training'] = training_default_config(config['training'])
        elif key == 'evaluation':
            config['evaluation'] = evaluation_default_config(
                config['evaluation'])
        elif key == 'validation':
            pass
        elif key == 'device_id':
            pass
        elif key == 'num_workers':
            pass
        elif key == 'restore_checkpoint':
            pass
        elif key == 'experiment':
            pass
        else:
            pass
    return config
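# Hypothetical input for the builder above. The key names follow the branches
# in build_config; whether a given sub-section is required depends on the
# project's cfg functions (general_cfg_fn, build_cfg_fn, ...), so this is an
# orientation sketch rather than a guaranteed-runnable call.
user_config = {
    'training': {
        'model': {'name': 'resnet50'},
        'optimizer': {'name': 'adam'},
        'losses': [{'name': 'triplet'}],
    },
    'device_id': 0,
}
# full_config = build_config(user_config)
# Each sub-section is routed through ConfigScope(...)(preset=...), so the
# project's default values are filled in alongside the user-supplied ones.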