def test_all_special_names_grabbed():
    base_names = ['a', 'b']
    special_names = ['a:one', 'a:two']
    gold_special_names = ['one', 'two']
    reporting = parse_extra_args(base_names, special_names)
    for gold in gold_special_names:
        assert gold in reporting['a']
def test_special_shared_across_base():
    base_names = ['a', 'b']
    special_names = ['--b:one', 'b', '--a:one', 'a']
    reporting = parse_extra_args(base_names, special_names)
    for name in base_names:
        assert 'one' in reporting[name]
        assert reporting[name]['one'] == name
def test_values_are_grabbed():
    base_names = ['a', 'b']
    special_names = ['--a:xxx', 'xxx', '--a:yyy', 'yyy', '--b:zzz', 'zzz']
    reporting = parse_extra_args(base_names, special_names)
    for name in base_names:
        for special, value in reporting[name].items():
            assert special == value
def test_special_names_multiple_bases():
    base_names = ['a', 'b']
    special_names = ['a:one', 'b:two']
    reporting = parse_extra_args(base_names, special_names)
    assert 'one' in reporting['a']
    assert 'two' not in reporting['a']
    assert 'two' in reporting['b']
    assert 'one' not in reporting['b']
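# The tests above pin down the contract of parse_extra_args. For illustration only,
# here is a minimal sketch consistent with those tests; it is an assumption about the
# shape of the real implementation, not a copy of it.
def parse_extra_args_sketch(base_names, extra_args):
    """Group leftover CLI args of the form `[--]<base>:<key> [<value>]` by base name.

    Every base name gets an entry; args whose base is not listed are ignored.
    """
    reporting = {name: {} for name in base_names}
    i = 0
    while i < len(extra_args):
        arg = extra_args[i].lstrip('-')
        if ':' in arg:
            base, key = arg.split(':', 1)
            value = None
            # The next token is this key's value unless it is itself a `base:key` arg
            if i + 1 < len(extra_args) and ':' not in extra_args[i + 1].lstrip('-'):
                value = extra_args[i + 1]
                i += 1
            if base in reporting:
                reporting[base][key] = value
        i += 1
    return reporting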
def main():
    parser = argparse.ArgumentParser(description='Train a text classifier')
    parser.add_argument('--config', help='JSON Configuration for an experiment', type=convert_path, default="$MEAD_CONFIG")
    parser.add_argument('--settings', help='JSON Configuration for mead', default='config/mead-settings.json', type=convert_path)
    parser.add_argument('--datasets', help='json library of dataset labels', default='config/datasets.json', type=convert_path)
    parser.add_argument('--embeddings', help='json library of embeddings', default='config/embeddings.json', type=convert_path)
    parser.add_argument('--logging', help='json file for logging', default='config/logging.json', type=convert_path)
    parser.add_argument('--task', help='task to run', choices=['classify', 'tagger', 'seq2seq', 'lm'])
    parser.add_argument('--gpus', help='Number of GPUs (defaults to 1)', type=int)
    parser.add_argument('--reporting', help='reporting hooks', nargs='+')
    args, reporting_args = parser.parse_known_args()

    config_params = read_config_stream(args.config)
    args.settings = read_config_stream(args.settings)
    args.datasets = read_config_stream(args.datasets)
    args.embeddings = read_config_stream(args.embeddings)
    args.logging = read_config_stream(args.logging)

    if args.gpus is not None:
        config_params['model']['gpus'] = args.gpus

    if args.reporting is not None:
        reporting = parse_extra_args(args.reporting, reporting_args)
        config_params['reporting'] = reporting

    task_name = config_params.get('task', 'classify') if args.task is None else args.task
    print('Task: [{}]'.format(task_name))
    task = mead.Task.get_task_specific(task_name, args.logging, args.settings)
    task.read_config(config_params, args.datasets, reporting_args=reporting_args, config_file=args.config)
    task.initialize(args.embeddings)
    task.train()
def get_ends(hpctl_settings, extra_args):
    ends = parse_extra_args(['frontend', 'backend'], extra_args)
    fe = ends['frontend']
    # Merge dicts
    for key, val in hpctl_settings.get('frontend', {'type': 'console'}).items():
        if key not in fe:
            fe[key] = val
    be = ends['backend']
    # Merge dicts
    for key, val in hpctl_settings.get('backend', {'type': 'mp'}).items():
        if key not in be:
            be[key] = val
    if isinstance(be.get('real_gpus'), str):
        be['real_gpus'] = be['real_gpus'].split(",")
    return fe, be
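# Hedged usage sketch for get_ends (argument values are illustrative): given
#   extra_args = ['--backend:type', 'remote', '--backend:real_gpus', '0,1']
# and empty hpctl settings, the frontend falls back to the {'type': 'console'}
# default, while the backend comes out as
#   {'type': 'remote', 'real_gpus': ['0', '1']}
# because a comma-separated real_gpus string is split into a list.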
def main():
    parser = argparse.ArgumentParser(description='Train a text classifier')
    parser.add_argument('--config', help='JSON Configuration for an experiment', type=convert_path, default="$MEAD_CONFIG")
    parser.add_argument('--settings', help='JSON Configuration for mead', default='config/mead-settings.json', type=convert_path)
    parser.add_argument('--datasets', help='json library of dataset labels', default='config/datasets.json', type=convert_path)
    parser.add_argument('--embeddings', help='json library of embeddings', default='config/embeddings.json', type=convert_path)
    parser.add_argument('--logging', help='json file for logging', default='config/logging.json', type=convert_path)
    parser.add_argument('--task', help='task to run', choices=['classify', 'tagger', 'seq2seq', 'lm'])
    parser.add_argument('--gpus', help='Number of GPUs (defaults to number available)', type=int, default=-1)
    parser.add_argument('--basedir', help='Override the base directory where models are stored', type=str)
    parser.add_argument('--reporting', help='reporting hooks', nargs='+')
    parser.add_argument('--backend', help='The deep learning backend to use')
    parser.add_argument('--checkpoint', help='Restart training from this checkpoint')
    args, reporting_args = parser.parse_known_args()

    args.logging = read_config_stream(args.logging)
    configure_logger(args.logging)

    config_params = read_config_stream(args.config)
    try:
        args.settings = read_config_stream(args.settings)
    except Exception:
        logger.warning('Warning: no mead-settings file was found at [{}]'.format(args.settings))
        args.settings = {}
    args.datasets = read_config_stream(args.datasets)
    args.embeddings = read_config_stream(args.embeddings)

    if args.gpus is not None:
        config_params['model']['gpus'] = args.gpus

    if args.basedir is not None:
        config_params['basedir'] = args.basedir

    if args.backend is not None:
        config_params['backend'] = normalize_backend(args.backend)

    cmd_hooks = args.reporting if args.reporting is not None else []
    config_hooks = config_params.get('reporting') if config_params.get('reporting') is not None else []
    reporting = parse_extra_args(set(chain(cmd_hooks, config_hooks)), reporting_args)
    config_params['reporting'] = reporting

    task_name = config_params.get('task', 'classify') if args.task is None else args.task
    logger.info('Task: [{}]'.format(task_name))
    task = mead.Task.get_task_specific(task_name, args.settings)
    task.read_config(config_params, args.datasets, reporting_args=reporting_args, config_file=deepcopy(config_params))
    task.initialize(args.embeddings)
    task.train(args.checkpoint)
def test_all_base_name_appear():
    base_names = ['a', 'b']
    reporting = parse_extra_args(base_names, [])
    for name in base_names:
        assert name in reporting
def main():
    parser = argparse.ArgumentParser(description='Export a model')
    parser.add_argument('--config', help='configuration for an experiment', required=True, type=convert_path)
    parser.add_argument('--settings', help='configuration for mead', required=False, default=DEFAULT_SETTINGS_LOC, type=convert_path)
    parser.add_argument('--modules', help='modules to load', default=[], nargs='+', required=False)
    parser.add_argument('--datasets', help='json library of dataset labels')
    parser.add_argument('--vecs', help='index of vectorizers: local file, remote URL or hub mead-ml/ref', default='config/vecs.json', type=convert_path)
    parser.add_argument('--logging', help='json file for logging', default='config/logging.json', type=convert_path)
    parser.add_argument('--task', help='task to run', choices=['classify', 'tagger', 'seq2seq', 'lm'])
    parser.add_argument('--exporter_type', help="exporter type (default 'default')", default=None)
    parser.add_argument('--return_labels', help='if true, the exported model returns actual labels else '
                                                'the indices for labels vocab (default False)', default=None)
    parser.add_argument('--model', help='model name', required=True, type=unzip_files)
    parser.add_argument('--model_version', help='model_version', default=None)
    parser.add_argument('--output_dir', help="output dir (default './models')", default=None)
    parser.add_argument('--project', help='Name of project, used in path first', default=None)
    parser.add_argument('--name', help='Name of the model, used second in the path', default=None)
    parser.add_argument('--beam', help='beam_width', default=30, type=int)
    parser.add_argument('--nbest_input', help='Is the input to this model N-best', default=False, type=str2bool)
    parser.add_argument('--is_remote', help='if True, separate items for remote server and client. '
                                            'If False bundle everything together (default True)', default=None)
    parser.add_argument('--backend', help='The deep learning backend to use')
    parser.add_argument('--reporting', help='reporting hooks', nargs='+')
    parser.add_argument('--use_version', help='Should we use the version?', type=str2bool, default=True)
    parser.add_argument('--use_all_features', help='If a feature is found via vectorizer and not in embeddings, should we include it?', type=str2bool, default=False)
    parser.add_argument('--zip', help='Should we zip the results?', type=str2bool, default=False)
    args, overrides = parser.parse_known_args()
    configure_logger(args.logging)
    config_params = read_config_stream(args.config)
    config_params = parse_and_merge_overrides(config_params, overrides, pre='x')
    try:
        args.settings = read_config_stream(args.settings)
    except Exception:
        logger.warning('Warning: no mead-settings file was found at [{}]'.format(args.settings))
        args.settings = {}
    task_name = config_params.get('task', 'classify') if args.task is None else args.task

    # Remove multigpu references
    os.environ['CUDA_VISIBLE_DEVICES'] = ""
    os.environ['NV_GPU'] = ""
    if 'gpus' in config_params.get('train', {}):
        del config_params['train']['gpus']

    if task_name == 'seq2seq' and 'beam' not in config_params:
        config_params['beam'] = args.beam

    config_params['modules'] = config_params.get('modules', []) + args.modules
    if args.backend is not None:
        config_params['backend'] = normalize_backend(args.backend)

    cmd_hooks = args.reporting if args.reporting is not None else []
    config_hooks = config_params.get('reporting') if config_params.get('reporting') is not None else []
    reporting = parse_extra_args(set(chain(cmd_hooks, config_hooks)), overrides)
    config_params['reporting'] = reporting

    args.vecs = read_config_stream(args.vecs)

    task = mead.Task.get_task_specific(task_name, args.settings)

    output_dir, project, name, model_version, exporter_type, return_labels, is_remote = get_export_params(
        config_params.get('export', {}),
        args.output_dir,
        args.project, args.name,
        args.model_version,
        args.exporter_type,
        args.return_labels,
        args.is_remote,
    )
    # Here we reuse code in `.read_config` which needs a dataset index (when used with mead-train)
    # but when used with mead-export it is not needed. This is a dummy dataset index that will work.
    # It means we don't need to pass it in.
    datasets = [{'label': config_params['dataset']}]
    task.read_config(config_params, datasets, args.vecs, exporter_type=exporter_type)
    feature_exporter_field_map = create_feature_exporter_field_map(config_params['features'])
    exporter = create_exporter(task, exporter_type, return_labels=return_labels,
                               feature_exporter_field_map=feature_exporter_field_map,
                               nbest_input=args.nbest_input)
    exporter.run(args.model, output_dir, project, name, model_version, remote=is_remote,
                 use_version=args.use_version, zip_results=args.zip, use_all_features=args.use_all_features)
def test_nothing_if_no_special():
    base_names = ['a', 'b']
    special_names = ['a:one', 'a:two']
    reporting = parse_extra_args(base_names, special_names)
    assert {} == reporting['b']
def test_extra_things_ignored():
    base_names = ['b']
    special_names = ['a:one']
    reporting = parse_extra_args(base_names, special_names)
    assert {} == reporting['b']
def main():
    parser = argparse.ArgumentParser(description='Train a text classifier')
    parser.add_argument('--config', help='JSON/YML Configuration for an experiment: local file or remote URL',
                        type=convert_path, default="$MEAD_CONFIG")
    parser.add_argument('--settings', help='JSON/YML Configuration for mead', default=DEFAULT_SETTINGS_LOC, type=convert_path)
    parser.add_argument('--task_modules', help='tasks to load, must be local', default=[], nargs='+', required=False)
    parser.add_argument('--datasets', help='index of dataset labels: local file, remote URL or mead-ml/hub ref', type=convert_path)
    parser.add_argument('--modules', help='modules to load: local files, remote URLs or mead-ml/hub refs', default=[], nargs='+', required=False)
    parser.add_argument('--mod_train_file', help='override the training set')
    parser.add_argument('--mod_valid_file', help='override the validation set')
    parser.add_argument('--mod_test_file', help='override the test set')
    parser.add_argument('--fit_func', help='override the fit function')
    parser.add_argument('--embeddings', help='index of embeddings: local file, remote URL or mead-ml/hub ref', type=convert_path)
    parser.add_argument('--vecs', help='index of vectorizers: local file, remote URL or hub mead-ml/ref', type=convert_path)
    parser.add_argument('--logging', help='json file for logging', default=DEFAULT_LOGGING_LOC, type=convert_path)
    parser.add_argument('--task', help='task to run', choices=['classify', 'tagger', 'seq2seq', 'lm'])
    parser.add_argument('--gpus', help='Number of GPUs (defaults to number available)', type=int, default=-1)
    parser.add_argument('--basedir', help='Override the base directory where models are stored', type=str)
    parser.add_argument('--reporting', help='reporting hooks', nargs='+')
    parser.add_argument('--backend', help='The deep learning backend to use')
    parser.add_argument('--checkpoint', help='Restart training from this checkpoint')
    parser.add_argument('--prefer_eager', help="If running in TensorFlow, should we prefer eager mode", type=str2bool)
    args, overrides = parser.parse_known_args()

    config_params = read_config_stream(args.config)
    config_params = parse_and_merge_overrides(config_params, overrides, pre='x')
    if args.basedir is not None:
        config_params['basedir'] = args.basedir

    # task_module overrides are not allowed via hub or HTTP, must be defined locally
    for task in args.task_modules:
        import_user_module(task)

    task_name = config_params.get('task', 'classify') if args.task is None else args.task
    args.logging = read_config_stream(args.logging)
    configure_logger(args.logging, config_params.get('basedir', './{}'.format(task_name)))

    try:
        args.settings = read_config_stream(args.settings)
    except Exception:
        logger.warning('Warning: no mead-settings file was found at [{}]'.format(args.settings))
        args.settings = {}

    args.datasets = args.settings.get('datasets', convert_path(DEFAULT_DATASETS_LOC)) if args.datasets is None else args.datasets
    args.datasets = read_config_stream(args.datasets)
    if args.mod_train_file or args.mod_valid_file or args.mod_test_file:
        logging.warning('Warning: overriding the training/valid/test data with user-specified files'
                        ' different from what was specified in the dataset index. Creating a new key for this entry')
        update_datasets(args.datasets, config_params, args.mod_train_file, args.mod_valid_file, args.mod_test_file)

    args.embeddings = args.settings.get('embeddings', convert_path(DEFAULT_EMBEDDINGS_LOC)) if args.embeddings is None else args.embeddings
    args.embeddings = read_config_stream(args.embeddings)
    args.vecs = args.settings.get('vecs', convert_path(DEFAULT_VECTORIZERS_LOC)) if args.vecs is None else args.vecs
    args.vecs = read_config_stream(args.vecs)

    if args.gpus:  # why does it go to model and not to train?
        config_params['train']['gpus'] = args.gpus
    if args.fit_func:
        config_params['train']['fit_func'] = args.fit_func
    if args.backend:
        config_params['backend'] = normalize_backend(args.backend)

    config_params['modules'] = list(set(chain(config_params.get('modules', []), args.modules)))

    cmd_hooks = args.reporting if args.reporting is not None else []
    config_hooks = config_params.get('reporting') if config_params.get('reporting') is not None else []
    reporting = parse_extra_args(set(chain(cmd_hooks, config_hooks)), overrides)
    config_params['reporting'] = reporting

    logger.info('Task: [{}]'.format(task_name))
    task = mead.Task.get_task_specific(task_name, args.settings)

    task.read_config(config_params, args.datasets, args.vecs, reporting_args=overrides, prefer_eager=args.prefer_eager)
    task.initialize(args.embeddings)
    task.train(args.checkpoint)
def main():
    parser = argparse.ArgumentParser(description='Evaluate on a dataset')
    parser.add_argument('--model', required=True)
    parser.add_argument('--dataset', required=True)
    parser.add_argument('--settings', default=DEFAULT_SETTINGS_LOC, type=convert_path)
    parser.add_argument('--modules', nargs="+", default=[])
    parser.add_argument('--reporting', nargs="+")
    parser.add_argument('--logging', default=DEFAULT_LOGGING_LOC, type=convert_path)
    parser.add_argument('--task', default='classify', choices={'classify', 'tagger', 'seq2seq', 'lm'})
    parser.add_argument('--backend', default='tf')
    parser.add_argument('--reader', default='default')
    parser.add_argument('--trim', default=True, type=str2bool)
    parser.add_argument('--batchsz', default=50, type=int)
    parser.add_argument('--trainer', default='default')
    parser.add_argument('--output', default=None)
    parser.add_argument('--remote')
    parser.add_argument('--features',
                        help='(optional) features in the format feature_name:index (column # in conll) or '
                             'just feature names (assumed sequential)',
                        default=[], nargs='+')
    parser.add_argument('--device', default='cpu')
    # our parse_extra_args doesn't handle lists :/
    parser.add_argument('--pair_suffix', nargs='+', default=[])
    args, extra_args = parser.parse_known_args()

    args.batchsz = args.batchsz if args.task != 'lm' else 1

    named_fields = {str(v): k for k, v in feature_index_mapping(args.features).items()}

    reader_options = parse_extra_args(['reader'], extra_args)['reader']
    reader_options = process_reader_options(reader_options)
    verbose_options = parse_extra_args(['verbose'], extra_args)['verbose']
    trainer_options = parse_extra_args(['trainer'], extra_args)['trainer']
    if 'span_type' not in trainer_options:
        trainer_options['span_type'] = 'iobes'
    model_options = parse_extra_args(['model'], extra_args)['model']

    args.logging = read_config_stream(args.logging)
    configure_logger(args.logging)

    try:
        args.settings = read_config_stream(args.settings)
    except Exception:
        logger.warning('Warning: no mead-settings file was found at [{}]'.format(args.settings))
        args.settings = {}

    backend = Backend(args.backend)
    backend.load(args.task)

    for module in args.modules:
        import_user_module(module)

    reporting = parse_extra_args(args.reporting if args.reporting is not None else [], extra_args)
    reporting_hooks, reporting = merge_reporting_with_settings(reporting, args.settings)
    reporting_fns = [x.step for x in create_reporting(reporting_hooks, reporting, {'task': args.task})]

    service = get_service(args.task)
    model = service.load(args.model, backend=args.backend, remote=args.remote, device=args.device, **model_options)

    vectorizers = get_vectorizers(args.task, model)
    reader = create_reader(args.task, vectorizers, args.trim, type=args.reader,
                           named_fields=named_fields, pair_suffix=args.pair_suffix, **reader_options)
    reader = patch_reader(args.task, model, reader)

    data, txts = load_data(args.task, reader, model, args.dataset, args.batchsz)

    if args.task == 'seq2seq':
        trainer_options['tgt_rlut'] = {v: k for k, v in model.tgt_vocab.items()}

    trainer = get_trainer(model, args.trainer, verbose_options, backend.name,
                          gpu=args.device != 'cpu', nogpu=args.device == 'cpu', **trainer_options)
    if args.task == 'classify':
        _ = trainer.test(data, reporting_fns=reporting_fns, phase='Test', verbose=verbose_options,
                         output=args.output, txts=txts, **model_options)
    elif args.task == 'tagger':
        _ = trainer.test(data, reporting_fns=reporting_fns, phase='Test', verbose=verbose_options,
                         conll_output=args.output, txts=txts, **model_options)
    else:
        _ = trainer.test(data, reporting_fns=reporting_fns, phase='Test', verbose=verbose_options, **model_options)
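# Hedged example: the grouped-option parsing above means an evaluation run can pass
# reader/trainer/model settings as prefixed extras (flag values illustrative), e.g.
#   --model tagger.zip --dataset test.conll --task tagger --trainer:span_type iob2
# so that parse_extra_args(['trainer'], extra_args)['trainer'] == {'span_type': 'iob2'}.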
def main():
    parser = argparse.ArgumentParser(description='Train a text classifier')
    parser.add_argument('--config', help='configuration for an experiment', type=convert_path, default="$MEAD_CONFIG")
    parser.add_argument('--settings', help='configuration for mead', default=DEFAULT_SETTINGS_LOC, type=convert_path)
    parser.add_argument('--datasets', help='index of dataset labels', type=convert_path)
    parser.add_argument('--modules', help='modules to load', default=[], nargs='+', required=False)
    parser.add_argument('--mod_train_file', help='override the training set')
    parser.add_argument('--mod_valid_file', help='override the validation set')
    parser.add_argument('--mod_test_file', help='override the test set')
    parser.add_argument('--embeddings', help='index of embeddings', type=convert_path)
    parser.add_argument('--logging', help='config file for logging', default=DEFAULT_LOGGING_LOC, type=convert_path)
    parser.add_argument('--task', help='task to run', choices=['classify', 'tagger', 'seq2seq', 'lm'])
    parser.add_argument('--gpus', help='Number of GPUs (defaults to number available)', type=int, default=-1)
    parser.add_argument('--basedir', help='Override the base directory where models are stored', type=str)
    parser.add_argument('--reporting', help='reporting hooks', nargs='+')
    parser.add_argument('--backend', help='The deep learning backend to use')
    parser.add_argument('--checkpoint', help='Restart training from this checkpoint')
    args, reporting_args = parser.parse_known_args()

    config_params = read_config_stream(args.config)
    if args.basedir is not None:
        config_params['basedir'] = args.basedir

    task_name = config_params.get('task', 'classify') if args.task is None else args.task
    args.logging = read_config_stream(args.logging)
    configure_logger(args.logging, config_params.get('basedir', './{}'.format(task_name)))

    try:
        args.settings = read_config_stream(args.settings)
    except Exception:
        logger.warning('Warning: no mead-settings file was found at [{}]'.format(args.settings))
        args.settings = {}

    args.datasets = args.datasets if args.datasets else args.settings.get('datasets', convert_path(DEFAULT_DATASETS_LOC))
    args.datasets = read_config_stream(args.datasets)
    if args.mod_train_file or args.mod_valid_file or args.mod_test_file:
        logging.warning('Warning: overriding the training/valid/test data with user-specified files'
                        ' different from what was specified in the dataset index. Creating a new key for this entry')
        update_datasets(args.datasets, config_params, args.mod_train_file, args.mod_valid_file, args.mod_test_file)

    args.embeddings = args.embeddings if args.embeddings else args.settings.get('embeddings', convert_path(DEFAULT_EMBEDDINGS_LOC))
    args.embeddings = read_config_stream(args.embeddings)

    if args.gpus is not None:
        config_params['model']['gpus'] = args.gpus

    if args.backend is None and 'backend' in args.settings:
        args.backend = args.settings['backend']
    if args.backend is not None:
        config_params['backend'] = normalize_backend(args.backend)

    config_params['modules'] = list(set(chain(config_params.get('modules', []), args.modules)))

    cmd_hooks = args.reporting if args.reporting is not None else []
    config_hooks = config_params.get('reporting') if config_params.get('reporting') is not None else []
    reporting = parse_extra_args(set(chain(cmd_hooks, config_hooks)), reporting_args)
    config_params['reporting'] = reporting

    logger.info('Task: [{}]'.format(task_name))
    task = mead.Task.get_task_specific(task_name, args.settings)
    task.read_config(config_params, args.datasets, reporting_args=reporting_args)
    task.initialize(args.embeddings)
    task.train(args.checkpoint)
def main():
    parser = argparse.ArgumentParser(description='Train a text classifier')
    parser.add_argument('--config', help='JSON Configuration for an experiment', type=convert_path, default="$MEAD_CONFIG")
    parser.add_argument('--settings', help='JSON Configuration for mead', default='config/mead-settings.json', type=convert_path)
    parser.add_argument('--datasets', help='json library of dataset labels', default='config/datasets.json', type=convert_path)
    parser.add_argument('--embeddings', help='json library of embeddings', default='config/embeddings.json', type=convert_path)
    parser.add_argument('--logging', help='json file for logging', default='config/logging.json', type=convert_path)
    parser.add_argument('--task', help='task to run', choices=['classify', 'tagger', 'seq2seq', 'lm'])
    parser.add_argument('--gpus', help='Number of GPUs (defaults to number available)', type=int, default=-1)
    parser.add_argument('--reporting', help='reporting hooks', nargs='+')
    parser.add_argument('--backend', help='The deep learning backend to use')
    args, reporting_args = parser.parse_known_args()

    config_params = read_config_stream(args.config)
    try:
        args.settings = read_config_stream(args.settings)
    except Exception:
        print('Warning: no mead-settings file was found at [{}]'.format(args.settings))
        args.settings = {}
    args.datasets = read_config_stream(args.datasets)
    args.embeddings = read_config_stream(args.embeddings)
    args.logging = read_config_stream(args.logging)

    if args.gpus is not None:
        config_params['model']['gpus'] = args.gpus

    if args.backend is not None:
        config_params['backend'] = normalize_backend(args.backend)

    cmd_hooks = args.reporting if args.reporting is not None else []
    config_hooks = config_params.get('reporting') if config_params.get('reporting') is not None else []
    reporting = parse_extra_args(set(chain(cmd_hooks, config_hooks)), reporting_args)
    config_params['reporting'] = reporting

    task_name = config_params.get('task', 'classify') if args.task is None else args.task
    print('Task: [{}]'.format(task_name))
    task = mead.Task.get_task_specific(task_name, args.logging, args.settings)
    task.read_config(config_params, args.datasets, reporting_args=reporting_args, config_file=deepcopy(config_params))
    task.initialize(args.embeddings)
    task.train()
def search(config, settings, logging, hpctl_logging, datasets, embeddings,
           reporting, unknown, task, num_iters, **kwargs):
    """Search for optimal hyperparameters."""
    mead_config = get_config(config, reporting, unknown)
    hp_settings, mead_settings = get_settings(settings)
    load_user_modules(mead_config, hp_settings)
    exp_hash = hash_config(mead_config)
    hp_logs, mead_logs = get_logs(hp_settings, logging, hpctl_logging)
    datasets = read_config_file_or_json(datasets)
    embeddings = read_config_file_or_json(embeddings)
    if task is None:
        task = mead_config.get('task', 'classify')
    frontend_config, backend_config = get_ends(hp_settings, unknown)

    # Figure out xpctl
    xpctl_config = None
    auto_xpctl = 'xpctl' in mead_config.get('reporting', [])
    if not auto_xpctl:
        # If the jobs aren't set up to use xpctl automatically, create our own
        xpctl_config = get_xpctl_settings(mead_settings)
        if xpctl_config is not None:
            xpctl_extra = parse_extra_args(['xpctl'], unknown)
            xpctl_config['label'] = xpctl_extra.get('xpctl', {}).get('label')

    results_config = {}

    # Set frontend defaults
    frontend_config['experiment_hash'] = exp_hash
    default = mead_config['train'].get('early_stopping_metric', 'avg_loss')
    frontend_config.setdefault('train', 'avg_loss')
    frontend_config.setdefault('dev', default)
    frontend_config.setdefault('test', default)

    # Negotiate remote status
    if backend_config['type'] != 'remote':
        set_root(hp_settings)
    _remote_monkey_patch(backend_config, hp_logs, results_config, xpctl_config)

    xpctl = get_xpctl(xpctl_config)

    results = get_results(results_config)
    results.add_experiment(mead_config)

    backend = get_backend(backend_config)
    config_sampler = get_config_sampler(mead_config, results)
    logs = get_log_server(hp_logs)
    frontend = get_frontend(frontend_config, results, xpctl)

    labels = run(num_iters, results, backend, frontend, config_sampler, logs,
                 mead_logs, hp_logs, mead_settings, datasets, embeddings, task)

    logs.stop()
    frontend.finalize()
    results.save()
    if auto_xpctl:
        for label in labels:
            results.set_xpctl(label, True)
    return labels, results
def test_no_base_names():
    base_names = []
    special_names = ["--visdom:name", "sst2"]
    reporting = parse_extra_args(base_names, special_names)
    assert {} == reporting
def get_config(config, reporting, extra_args):
    mead_config = read_config_file_or_json(config)
    if reporting is not None:
        mead_config['reporting'] = parse_extra_args(reporting, extra_args)
    return mead_config
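# Hedged example for get_config (file name illustrative): with
#   get_config('mead-config.yml', ['visdom'], ['--visdom:name', 'sst2'])
# the returned mead_config would carry reporting == {'visdom': {'name': 'sst2'}},
# matching the parse_extra_args behavior exercised by the tests.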