def test_add_to_parser():
    """Scalar args land in the namespace; list args become tunable opt_args."""
    parser = HyperOptArgumentParser(strategy='grid_search')
    for arg_name, default in [('test0', '4'), ('test1', '5'), ('test2', [1, 2, 3])]:
        utils.add_to_parser(parser, arg_name, default)

    namespace, _ = parser.parse_known_args(['--test0', '3'])

    # scalar args: a command-line value overrides the default...
    assert namespace.test0 == '3'
    # ...while untouched args keep their default
    assert namespace.test1 == '5'

    # list args are not parsed into the namespace; they are registered on the
    # parser as a tunable set of values instead
    assert namespace.test2 is None
    test2_opt = parser.opt_args['--test2']
    assert test2_opt.opt_values == [1, 2, 3]
    assert test2_opt.tunable
# --- Example #2 (separate snippet; its enclosing function definition is not shown) ---
    # optimize across all gpus
    print('submitting jobs...')
    cluster.optimize_parallel_cluster_gpu(main,
                                          nb_trials=hyperparams.nb_hopt_trials,
                                          job_name=job_display_name)


if __name__ == '__main__':
    # Project root: the parent of the directory containing the running script.
    root_dir = os.path.split(os.path.dirname(
        sys.modules['__main__'].__file__))[0]

    # Hyperparameter-aware parser; add_help=False avoids -h/--help clashes
    # when Trainer.add_argparse_args contributes its own options.
    parser = HyperOptArgumentParser(add_help=False, strategy='grid_search')
    parser = Trainer.add_argparse_args(parser)
    parser.add_argument('--model', type=str, default='galaxypred_model')

    # First pass: parse only what is known so far to learn which model was
    # requested, so the right class can extend the parser below.
    (temp_args, arr) = parser.parse_known_args()
    model_name = temp_args.model
    MODEL_CLASS = MODEL_NAMES[model_name]

    # give the module a chance to add own params
    # good practice to define LightningModule specific params in the module
    parser = MODEL_CLASS.add_model_specific_args(parser)
    # Cluster / bookkeeping options.
    parser.add_argument('--nodes', type=int, default=1)
    parser.add_argument('--conda_env', type=str, default='dm2gal')
    # NOTE(review): default=True combined with action='store_true' means this
    # flag is always True regardless of the command line — confirm whether a
    # way to disable it (e.g. store_false / default=False) was intended.
    parser.add_argument('--on_cluster', default=True, action='store_true')
    parser.add_argument('-n', '--tt_name', default='speckled')
    parser.add_argument('-d',
                        '--tt_description',
                        default='predict a central mass')
    parser.add_argument('--logs_save_path',
                        default='/projects/QUIJOTE/Noah/logs')
def test_add_dependent_params(tmpdir):
    """Check that add_dependent_params derives the right parser entries for
    ae/vae/neural-ae model configurations."""

    def _ae_parser(model_class, model_type):
        # shared autoencoder argument setup used by the ae/vae cases below
        p = HyperOptArgumentParser(strategy='grid_search')
        utils.add_to_parser(p, 'model_class', model_class)
        utils.add_to_parser(p, 'model_type', model_type)
        utils.add_to_parser(p, 'n_ae_latents', 32)
        utils.add_to_parser(p, 'n_input_channels', 2)
        utils.add_to_parser(p, 'y_pixels', 32)
        utils.add_to_parser(p, 'x_pixels', 32)
        utils.add_to_parser(p, 'ae_arch_json', None)
        utils.add_to_parser(p, 'approx_batch_size', 200)
        utils.add_to_parser(p, 'mem_limit_gb', 10)
        return p

    def _neural_parser(subsample_method, subsample_idxs_dataset=None,
                       with_paths=False):
        # shared neural-ae argument setup used by the subsampling cases below
        p = HyperOptArgumentParser(strategy='grid_search')
        if with_paths:
            utils.add_to_parser(p, 'data_dir', tmpdir)
            utils.add_to_parser(p, 'lab', '')
            utils.add_to_parser(p, 'expt', '')
            utils.add_to_parser(p, 'animal', '')
            utils.add_to_parser(p, 'session', '')
        utils.add_to_parser(p, 'model_class', 'neural-ae')
        utils.add_to_parser(p, 'subsample_method', subsample_method)
        if subsample_idxs_dataset is not None:
            utils.add_to_parser(p, 'subsample_idxs_dataset', subsample_idxs_dataset)
        return p

    # -----------------
    # ae
    # -----------------
    # conv architecture params get registered on the parser
    parser = _ae_parser('ae', 'conv')
    namespace, _ = parser.parse_known_args([])
    utils.add_dependent_params(parser, namespace)
    assert '--architecture_params' in parser.opt_args

    # linear autoencoder
    parser = _ae_parser('ae', 'linear')
    namespace, _ = parser.parse_known_args([])
    utils.add_dependent_params(parser, namespace)
    print(parser)
    assert namespace.model_type == 'linear'
    assert namespace.n_latents == '32'

    # raise exception when max latents exceeded
    # parser = HyperOptArgumentParser(strategy='grid_search')
    # utils.add_to_parser(parser, 'model_class', 'ae')
    # utils.add_to_parser(parser, 'n_ae_latents', 100000)
    # utils.add_to_parser(parser, 'n_input_channels', 2)
    # utils.add_to_parser(parser, 'y_pixels', 32)
    # utils.add_to_parser(parser, 'x_pixels', 32)
    # utils.add_to_parser(parser, 'ae_arch_json', None)
    # utils.add_to_parser(parser, 'approx_batch_size', 200)
    # utils.add_to_parser(parser, 'mem_limit_gb', 10)
    # namespace, _ = parser.parse_known_args([])
    # with pytest.raises(ValueError):
    #     utils.add_dependent_params(parser, namespace)

    # -----------------
    # vae
    # -----------------
    # conv architecture params get registered for the vae class as well
    parser = _ae_parser('vae', 'conv')
    namespace, _ = parser.parse_known_args([])
    utils.add_dependent_params(parser, namespace)
    assert '--architecture_params' in parser.opt_args

    # -----------------
    # neural
    # -----------------
    # build a temporary hdf5 file holding three region index datasets
    path = tmpdir.join('data.hdf5')
    idx_data = {
        'i0': np.array([0, 1, 2]),
        'i1': np.array([3, 4, 5]),
        'i2': np.array([6, 7, 8])
    }
    with h5py.File(path, 'w') as f:
        idx_group = f.create_group('regions').create_group('indxs')
        for name, vals in idx_data.items():
            idx_group.create_dataset(name, data=vals)

    # no subsample idxs are added when subsampling is not requested
    parser = _neural_parser('none')
    namespace, _ = parser.parse_known_args([])
    utils.add_dependent_params(parser, namespace)
    assert '--subsample_idxs_name' not in parser.opt_args

    # requesting 'all' datasets registers every index name from the file
    parser = _neural_parser('single', 'all', with_paths=True)
    namespace, _ = parser.parse_known_args([])
    utils.add_dependent_params(parser, namespace)
    assert '--subsample_idxs_name' in parser.opt_args
    parser_vals = parser.opt_args['--subsample_idxs_name'].opt_values.keys()
    assert sorted(['i0', 'i1', 'i2']) == sorted(parser_vals)

    # requesting a single named dataset registers only that name
    parser = _neural_parser('single', 'i0', with_paths=True)
    namespace, _ = parser.parse_known_args([])
    utils.add_dependent_params(parser, namespace)
    parser.parse_args([])
    assert parser.parsed_args['subsample_idxs_name'] == 'i0'

    # a non-string dataset spec is rejected
    parser = _neural_parser('single', ['i0', 'i1'])
    namespace, _ = parser.parse_known_args([])
    with pytest.raises(ValueError):
        utils.add_dependent_params(parser, namespace)