Example #1
def test_hyperopt_executor_get_metric_score():
    executor = EXECUTORS[0]
    output_feature = "of_name"
    split = "validation"

    train_stats = {
        "training": {
            output_feature: {
                "loss": [0.58760345, 1.5066891],
                "accuracy": [0.6666667, 0.33333334],
                "hits_at_k": [1.0, 1.0],
            },
            "combined": {
                "loss": [0.58760345, 1.5066891]
            },
        },
        "validation": {
            output_feature: {
                "loss": [0.30233705, 2.6505466],
                "accuracy": [1.0, 0.0],
                "hits_at_k": [1.0, 1.0]
            },
            "combined": {
                "loss": [0.30233705, 2.6505466]
            },
        },
        "test": {
            output_feature: {
                "loss": [1.0876318, 1.4353828],
                "accuracy": [0.7, 0.5],
                "hits_at_k": [1.0, 1.0]
            },
            "combined": {
                "loss": [1.0876318, 1.4353828]
            },
        },
    }

    metric = "loss"
    hyperopt_executor = get_build_hyperopt_executor(executor["type"])(
        None, output_feature, metric, split, **executor)
    score = hyperopt_executor.get_metric_score(train_stats)
    assert score == 0.30233705

    metric = "accuracy"
    hyperopt_executor = get_build_hyperopt_executor(executor["type"])(
        None, output_feature, metric, split, **executor)
    score = hyperopt_executor.get_metric_score(train_stats)
    assert score == 1.0
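
Note: `EXECUTORS` and `get_build_hyperopt_executor` come from Ludwig's hyperopt test module and are not shown here. The assertions above are consistent with the executor returning the best per-epoch value of the chosen metric for the configured split (minimum for loss, maximum for other metrics); a minimal sketch of that selection logic, purely as an illustration:

def get_metric_score_sketch(train_stats, split, output_feature, metric):
    # Best value the metric reached across epochs for the chosen split:
    # lower is better for loss, higher is better for everything else.
    epoch_values = train_stats[split][output_feature][metric]
    return min(epoch_values) if metric == "loss" else max(epoch_values)

stats = {"validation": {"of_name": {"loss": [0.30233705, 2.6505466],
                                    "accuracy": [1.0, 0.0]}}}
assert get_metric_score_sketch(stats, "validation", "of_name", "loss") == 0.30233705
assert get_metric_score_sketch(stats, "validation", "of_name", "accuracy") == 1.0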
Example #2
def test_hyperopt_executor(sampler,
                           executor,
                           csv_filename,
                           validate_output_feature=False,
                           validation_metric=None):
    if executor['type'] == 'fiber' and sampler['type'] == 'grid':
        # This test is very slow and doesn't give us additional coverage
        pytest.skip('Skipping Fiber grid search')

    input_features = [
        text_feature(name="utterance", cell_type="lstm", reduce_output="sum"),
        category_feature(vocab_size=2, reduce_input="sum")
    ]

    output_features = [category_feature(vocab_size=2, reduce_input="sum")]

    rel_path = generate_data(input_features, output_features, csv_filename)

    config = {
        "input_features": input_features,
        "output_features": output_features,
        "combiner": {
            "type": "concat",
            "num_fc_layers": 2
        },
        "training": {
            "epochs": 2,
            "learning_rate": 0.001
        }
    }

    config = merge_with_defaults(config)

    hyperopt_config = HYPEROPT_CONFIG.copy()

    if validate_output_feature:
        hyperopt_config['output_feature'] = output_features[0]['name']
    if validation_metric:
        hyperopt_config['validation_metric'] = validation_metric

    update_hyperopt_params_with_defaults(hyperopt_config)

    parameters = hyperopt_config["parameters"]
    split = hyperopt_config["split"]
    output_feature = hyperopt_config["output_feature"]
    metric = hyperopt_config["metric"]
    goal = hyperopt_config["goal"]

    hyperopt_sampler = get_build_hyperopt_sampler(sampler["type"])(goal,
                                                                   parameters,
                                                                   **sampler)

    hyperopt_executor = get_build_hyperopt_executor(executor["type"])(
        hyperopt_sampler, output_feature, metric, split, **executor)

    hyperopt_executor.execute(config,
                              dataset=rel_path,
                              gpus=get_available_gpus_cuda_string())
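
The module-level `HYPEROPT_CONFIG` fixture is never shown in these examples. Judging from the keys the tests read after `update_hyperopt_params_with_defaults` fills in defaults (parameters, split, output_feature, metric, goal), and from the later removal of the `utterance.cell_type` grid search, a plausible shape is sketched below. This is an assumption for illustration, not the real fixture:

HYPEROPT_CONFIG = {
    # Hypothetical search space; the real fixture lives in the test module.
    "parameters": {
        "training.learning_rate": {
            "type": "float", "low": 0.0001, "high": 0.1, "space": "log",
        },
        "utterance.cell_type": {
            "type": "category", "values": ["rnn", "gru", "lstm"],
        },
    },
    "goal": "minimize",
    # split, output_feature and metric are supplied by
    # update_hyperopt_params_with_defaults when absent.
}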
Example #3
def test_hyperopt_executor(sampler,
                           executor,
                           csv_filename,
                           validate_output_feature=False,
                           validation_metric=None):
    input_features = [
        text_feature(name="utterance", cell_type="lstm", reduce_output="sum"),
        category_feature(vocab_size=2, reduce_input="sum")
    ]

    output_features = [category_feature(vocab_size=2, reduce_input="sum")]

    rel_path = generate_data(input_features, output_features, csv_filename)

    model_definition = {
        "input_features": input_features,
        "output_features": output_features,
        "combiner": {
            "type": "concat",
            "num_fc_layers": 2
        },
        "training": {
            "epochs": 2,
            "learning_rate": 0.001
        }
    }

    model_definition = merge_with_defaults(model_definition)

    hyperopt_config = HYPEROPT_CONFIG.copy()

    if validate_output_feature:
        hyperopt_config['output_feature'] = output_features[0]['name']
    if validation_metric:
        hyperopt_config['validation_metric'] = validation_metric

    update_hyperopt_params_with_defaults(hyperopt_config)

    parameters = hyperopt_config["parameters"]
    split = hyperopt_config["split"]
    output_feature = hyperopt_config["output_feature"]
    metric = hyperopt_config["metric"]
    goal = hyperopt_config["goal"]

    hyperopt_sampler = get_build_hyperopt_sampler(sampler["type"])(goal,
                                                                   parameters,
                                                                   **sampler)

    hyperopt_executor = get_build_hyperopt_executor(executor["type"])(
        hyperopt_sampler, output_feature, metric, split, **executor)

    hyperopt_executor.execute(model_definition,
                              dataset=rel_path,
                              gpus=get_available_gpus_cuda_string())
Example #4
def test_hyperopt_executor(sampler,
                           executor,
                           csv_filename,
                           validate_output_feature=False,
                           validation_metric=None):
    input_features = [
        text_feature(name="utterance", cell_type="lstm", reduce_output="sum"),
        category_feature(vocab_size=2, reduce_input="sum"),
    ]

    output_features = [category_feature(vocab_size=2, reduce_input="sum")]

    rel_path = generate_data(input_features, output_features, csv_filename)

    config = {
        "input_features": input_features,
        "output_features": output_features,
        "combiner": {
            "type": "concat",
            "num_fc_layers": 2
        },
        TRAINER: {
            "epochs": 2,
            "learning_rate": 0.001
        },
    }

    config = merge_with_defaults(config)

    hyperopt_config = HYPEROPT_CONFIG.copy()

    if validate_output_feature:
        hyperopt_config["output_feature"] = output_features[0]["name"]
    if validation_metric:
        hyperopt_config["validation_metric"] = validation_metric

    update_hyperopt_params_with_defaults(hyperopt_config)

    parameters = hyperopt_config["parameters"]
    split = hyperopt_config["split"]
    output_feature = hyperopt_config["output_feature"]
    metric = hyperopt_config["metric"]
    goal = hyperopt_config["goal"]

    hyperopt_sampler = get_build_hyperopt_sampler(sampler["type"])(goal,
                                                                   parameters,
                                                                   **sampler)

    hyperopt_executor = get_build_hyperopt_executor(executor["type"])(
        hyperopt_sampler, output_feature, metric, split, **executor)

    gpus = list(range(torch.cuda.device_count()))
    hyperopt_executor.execute(config, dataset=rel_path, gpus=gpus)
Example #5
def run_hyperopt_executor(sampler, executor, csv_filename,
                          validate_output_feature=False,
                          validation_metric=None):
    input_features = [
        text_feature(name="utterance", cell_type="lstm", reduce_output="sum"),
        category_feature(vocab_size=2, reduce_input="sum")]

    output_features = [category_feature(vocab_size=2, reduce_input="sum")]

    rel_path = generate_data(input_features, output_features, csv_filename)

    config = {
        "input_features": input_features,
        "output_features": output_features,
        "combiner": {"type": "concat", "num_fc_layers": 2},
        "training": {"epochs": 2, "learning_rate": 0.001},
        "hyperopt": {
            **HYPEROPT_CONFIG,
            "executor": executor,
            "sampler": sampler,
        },
    }

    config = merge_with_defaults(config)

    hyperopt_config = config["hyperopt"]

    if validate_output_feature:
        hyperopt_config['output_feature'] = output_features[0]['name']
    if validation_metric:
        hyperopt_config['validation_metric'] = validation_metric

    update_hyperopt_params_with_defaults(hyperopt_config)

    parameters = hyperopt_config["parameters"]
    if sampler.get("search_alg", {}).get("type", "") == 'bohb':
        # bohb does not support grid_search search space
        del parameters['utterance.cell_type']

    split = hyperopt_config["split"]
    output_feature = hyperopt_config["output_feature"]
    metric = hyperopt_config["metric"]
    goal = hyperopt_config["goal"]

    hyperopt_sampler = get_build_hyperopt_sampler(
        sampler["type"])(goal, parameters, **sampler)

    hyperopt_executor = get_build_hyperopt_executor(executor["type"])(
        hyperopt_sampler, output_feature, metric, split, **executor)

    hyperopt_executor.execute(config, dataset=rel_path)
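
Helpers like `run_hyperopt_executor` are typically driven by parametrized tests. A hypothetical driver is sketched below; the `SAMPLERS` and `EXECUTORS` lists and the `csv_filename` fixture are assumptions, not shown in the source:

import pytest

SAMPLERS = [{"type": "grid"}, {"type": "random", "num_samples": 2}]
EXECUTORS = [{"type": "serial"}]

@pytest.mark.parametrize("executor", EXECUTORS)
@pytest.mark.parametrize("sampler", SAMPLERS)
def test_hyperopt_executor(sampler, executor, csv_filename):
    # csv_filename is assumed to be a fixture yielding a temporary CSV path
    run_hyperopt_executor(sampler, executor, csv_filename)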
Example #6
def run_hyperopt_executor(
    search_alg,
    executor,
    csv_filename,
    tmpdir,
    validate_output_feature=False,
    validation_metric=None,
    use_split=True,
):
    config = _get_config(search_alg, executor)
    rel_path = generate_data(config["input_features"], config["output_features"], csv_filename)

    if not use_split:
        df = pd.read_csv(rel_path)
        df["split"] = 0
        # index=False avoids writing a spurious index column back to the CSV
        df.to_csv(rel_path, index=False)

    config = merge_with_defaults(config)

    hyperopt_config = config["hyperopt"]

    if validate_output_feature:
        hyperopt_config["output_feature"] = config["output_features"][0]["name"]
    if validation_metric:
        hyperopt_config["validation_metric"] = validation_metric

    update_hyperopt_params_with_defaults(hyperopt_config)

    parameters = hyperopt_config["parameters"]
    if search_alg.get("type", "") == "bohb":
        # bohb does not support grid_search search space
        del parameters["utterance.cell_type"]
        hyperopt_config["parameters"] = parameters

    split = hyperopt_config["split"]
    output_feature = hyperopt_config["output_feature"]
    metric = hyperopt_config["metric"]
    goal = hyperopt_config["goal"]
    search_alg = hyperopt_config["search_alg"]

    hyperopt_executor = get_build_hyperopt_executor(executor["type"])(
        parameters, output_feature, metric, goal, split, search_alg=search_alg, **executor
    )

    hyperopt_executor.execute(
        config,
        dataset=rel_path,
        output_directory=tmpdir,
        backend="local",
    )
Example #7
def test_hyperopt_search_alg(search_alg,
                             csv_filename,
                             tmpdir,
                             ray_cluster,
                             validate_output_feature=False,
                             validation_metric=None):
    config, rel_path = _setup_ludwig_config(csv_filename)

    hyperopt_config = HYPEROPT_CONFIG.copy()

    # finalize hyperopt config settings
    if search_alg == "dragonfly":
        hyperopt_config["search_alg"] = {
            "type": search_alg,
            "domain": "euclidean",
            "optimizer": "random",
        }
    elif search_alg is None:
        hyperopt_config["search_alg"] = {}
    else:
        hyperopt_config["search_alg"] = {
            "type": search_alg,
        }

    if validate_output_feature:
        hyperopt_config["output_feature"] = config[OUTPUT_FEATURES][0][NAME]
    if validation_metric:
        hyperopt_config["validation_metric"] = validation_metric

    update_hyperopt_params_with_defaults(hyperopt_config)

    parameters = hyperopt_config["parameters"]
    split = hyperopt_config["split"]
    output_feature = hyperopt_config["output_feature"]
    metric = hyperopt_config["metric"]
    goal = hyperopt_config["goal"]
    executor = hyperopt_config["executor"]
    search_alg = hyperopt_config["search_alg"]

    hyperopt_executor = get_build_hyperopt_executor(RAY)(parameters,
                                                         output_feature,
                                                         metric,
                                                         goal,
                                                         split,
                                                         search_alg=search_alg,
                                                         **executor)
    raytune_results = hyperopt_executor.execute(config,
                                                dataset=rel_path,
                                                output_directory=tmpdir)
    assert isinstance(raytune_results, RayTuneResults)
Example #8
def run_hyperopt_executor(
    sampler,
    executor,
    csv_filename,
    validate_output_feature=False,
    validation_metric=None,
):
    config = _get_config(sampler, executor)
    rel_path = generate_data(config["input_features"],
                             config["output_features"], csv_filename)

    config = merge_with_defaults(config)

    hyperopt_config = config["hyperopt"]

    if validate_output_feature:
        hyperopt_config["output_feature"] = config["output_features"][0][
            "name"]
    if validation_metric:
        hyperopt_config["validation_metric"] = validation_metric

    update_hyperopt_params_with_defaults(hyperopt_config)

    parameters = hyperopt_config["parameters"]
    if sampler.get("search_alg", {}).get("type", "") == "bohb":
        # bohb does not support grid_search search space
        del parameters["utterance.cell_type"]

    split = hyperopt_config["split"]
    output_feature = hyperopt_config["output_feature"]
    metric = hyperopt_config["metric"]
    goal = hyperopt_config["goal"]

    hyperopt_sampler = get_build_hyperopt_sampler(sampler["type"])(goal,
                                                                   parameters,
                                                                   **sampler)

    hyperopt_executor = get_build_hyperopt_executor(executor["type"])(
        hyperopt_sampler, output_feature, metric, split, **executor)

    hyperopt_executor.execute(
        config,
        dataset=rel_path,
        backend="local",
    )
Example #9
def hyperopt(
    model_definition=None,
    model_definition_file=None,
    data_df=None,
    data_train_df=None,
    data_validation_df=None,
    data_test_df=None,
    data_csv=None,
    data_train_csv=None,
    data_validation_csv=None,
    data_test_csv=None,
    data_hdf5=None,
    data_train_hdf5=None,
    data_validation_hdf5=None,
    data_test_hdf5=None,
    train_set_metadata_json=None,
    experiment_name="hyperopt",
    model_name="run",
    # model_load_path=None,
    # model_resume_path=None,
    skip_save_training_description=True,
    skip_save_training_statistics=True,
    skip_save_model=True,
    skip_save_progress=True,
    skip_save_log=True,
    skip_save_processed_input=True,
    skip_save_unprocessed_output=True,
    skip_save_test_predictions=True,
    skip_save_test_statistics=True,
    skip_save_hyperopt_statistics=False,
    output_directory="results",
    gpus=None,
    gpu_memory_limit=None,
    allow_parallel_threads=True,
    use_horovod=False,
    random_seed=default_random_seed,
    debug=False,
    **kwargs,
):
    # check for model_definition and model_definition_file
    if model_definition is None and model_definition_file is None:
        raise ValueError(
            "Either model_definition or model_definition_file has to be "
            "not None to initialize a LudwigModel")
    if model_definition is not None and model_definition_file is not None:
        raise ValueError("Only one between model_definition and "
                         "model_definition_file can be provided")

    # merge with default model definition to set defaults
    if model_definition_file is not None:
        with open(model_definition_file, "r") as def_file:
            model_definition = yaml.safe_load(def_file)
    model_definition = merge_with_defaults(model_definition)

    if HYPEROPT not in model_definition:
        raise ValueError("Hyperopt Section not present in Model Definition")

    hyperopt_config = model_definition["hyperopt"]
    update_hyperopt_params_with_defaults(hyperopt_config)

    # print hyperopt config
    logger.info(pformat(hyperopt_config, indent=4))
    logger.info('\n')

    sampler = hyperopt_config["sampler"]
    executor = hyperopt_config["executor"]
    parameters = hyperopt_config["parameters"]
    split = hyperopt_config["split"]
    output_feature = hyperopt_config["output_feature"]
    metric = hyperopt_config["metric"]
    goal = hyperopt_config["goal"]

    ######################
    # check validity of output_feature / metric / split combination
    ######################
    if split == TRAINING:
        if not (data_train_df or data_train_csv or data_train_hdf5) and (
                model_definition['preprocessing']['split_probabilities'][0] <=
                0):
            raise ValueError(
                'The data for the specified split for hyperopt "{}" '
                'was not provided, '
                'or the split amount specified in the preprocessing section '
                'of the model definition is not greater than 0'.format(split))
    elif split == VALIDATION:
        if not (data_validation_df or data_validation_csv
                or data_validation_hdf5) and (
                    model_definition['preprocessing']['split_probabilities'][1]
                    <= 0):
            raise ValueError(
                'The data for the specified split for hyperopt "{}" '
                'was not provided, '
                'or the split amount specified in the preprocessing section '
                'of the model definition is not greater than 0'.format(split))
    elif split == TEST:
        if not (data_test_df or data_test_csv or data_test_hdf5) and (
                model_definition['preprocessing']['split_probabilities'][2] <=
                0):
            raise ValueError(
                'The data for the specified split for hyperopt "{}" '
                'was not provided, '
                'or the split amount specified in the preprocessing section '
                'of the model definition is not greater than 0'.format(split))
    else:
        raise ValueError('unrecognized hyperopt split "{}". '
                         'Please provide one of: {}'.format(
                             split, {TRAINING, VALIDATION, TEST}))
    if output_feature == COMBINED:
        if metric != LOSS:
            raise ValueError(
                'The only valid metric for "combined" output feature is "loss"'
            )
    else:
        output_feature_names = set(
            of['name'] for of in model_definition['output_features'])
        if output_feature not in output_feature_names:
            raise ValueError('The output feature specified for hyperopt "{}" '
                             'cannot be found in the model definition. '
                             'Available ones are: {} and "combined"'.format(
                                 output_feature, output_feature_names))

        output_feature_type = None
        for of in model_definition['output_features']:
            if of['name'] == output_feature:
                output_feature_type = of['type']
        feature_class = get_from_registry(output_feature_type,
                                          output_type_registry)
        if metric not in feature_class.metric_functions:
            # TODO allow users to also specify overall and per-class metrics
            #  from the training stats, and postprocessed metrics in general
            raise ValueError(
                'The specified metric for hyperopt "{}" is not a valid metric '
                'for the specified output feature "{}" of type "{}". '
                'Available metrics are: {}'.format(
                    metric, output_feature, output_feature_type,
                    feature_class.metric_functions.keys()))

    hyperopt_sampler = get_build_hyperopt_sampler(sampler["type"])(goal,
                                                                   parameters,
                                                                   **sampler)
    hyperopt_executor = get_build_hyperopt_executor(executor["type"])(
        hyperopt_sampler, output_feature, metric, split, **executor)

    hyperopt_results = hyperopt_executor.execute(
        model_definition,
        data_df=data_df,
        data_train_df=data_train_df,
        data_validation_df=data_validation_df,
        data_test_df=data_test_df,
        data_csv=data_csv,
        data_train_csv=data_train_csv,
        data_validation_csv=data_validation_csv,
        data_test_csv=data_test_csv,
        data_hdf5=data_hdf5,
        data_train_hdf5=data_train_hdf5,
        data_validation_hdf5=data_validation_hdf5,
        data_test_hdf5=data_test_hdf5,
        train_set_metadata_json=train_set_metadata_json,
        experiment_name=experiment_name,
        model_name=model_name,
        # model_load_path=None,
        # model_resume_path=None,
        skip_save_training_description=skip_save_training_description,
        skip_save_training_statistics=skip_save_training_statistics,
        skip_save_model=skip_save_model,
        skip_save_progress=skip_save_progress,
        skip_save_log=skip_save_log,
        skip_save_processed_input=skip_save_processed_input,
        skip_save_unprocessed_output=skip_save_unprocessed_output,
        skip_save_test_predictions=skip_save_test_predictions,
        skip_save_test_statistics=skip_save_test_statistics,
        output_directory=output_directory,
        gpus=gpus,
        gpu_memory_limit=gpu_memory_limit,
        allow_parallel_threads=allow_parallel_threads,
        use_horovod=use_horovod,
        random_seed=random_seed,
        debug=debug,
        **kwargs)

    if is_on_master():
        print_hyperopt_results(hyperopt_results)

        if not skip_save_hyperopt_statistics:
            if not os.path.exists(output_directory):
                os.makedirs(output_directory)

            hyperopt_stats = {
                'hyperopt_config': hyperopt_config,
                'hyperopt_results': hyperopt_results
            }

            save_hyperopt_stats(hyperopt_stats, output_directory)
            logger.info('Hyperopt stats saved to: {}'.format(output_directory))

    logger.info('Finished hyperopt')

    return hyperopt_results
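
A minimal, hypothetical invocation of this legacy API follows; the model definition must contain a "hyperopt" section or the function raises. Feature names, the CSV path, and the parameter-space schema are illustrative assumptions:

model_definition = {
    "input_features": [{"name": "utterance", "type": "text"}],
    "output_features": [{"name": "class", "type": "category"}],
    "hyperopt": {
        "parameters": {  # illustrative search-space schema
            "training.learning_rate": {
                "type": "float", "low": 0.0001, "high": 0.1, "space": "log",
            },
        },
        "goal": "minimize",
        "metric": "loss",
        "split": "validation",
        "output_feature": "class",
        "sampler": {"type": "random"},
        "executor": {"type": "serial"},
    },
}
results = hyperopt(model_definition=model_definition, data_csv="train.csv")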
Example #10
def hyperopt(
    config: Union[str, dict],
    dataset: Union[str, dict, pd.DataFrame] = None,
    training_set: Union[str, dict, pd.DataFrame] = None,
    validation_set: Union[str, dict, pd.DataFrame] = None,
    test_set: Union[str, dict, pd.DataFrame] = None,
    training_set_metadata: Union[str, dict] = None,
    data_format: str = None,
    experiment_name: str = 'hyperopt',
    model_name: str = 'run',
    skip_save_training_description: bool = False,
    skip_save_training_statistics: bool = False,
    skip_save_model: bool = False,
    skip_save_progress: bool = False,
    skip_save_log: bool = False,
    skip_save_processed_input: bool = True,
    skip_save_unprocessed_output: bool = False,
    skip_save_predictions: bool = False,
    skip_save_eval_stats: bool = False,
    skip_save_hyperopt_statistics: bool = False,
    output_directory: str = 'results',
    gpus: Union[str, int, List[int]] = None,
    gpu_memory_limit: int = None,
    allow_parallel_threads: bool = True,
    backend: Union[Backend, str] = None,
    random_seed: int = default_random_seed,
    debug: bool = False,
    **kwargs,
) -> List[dict]:
    """This method performs an hyperparameter optimization.

    # Inputs

    :param config: (Union[str, dict]) config which defines
        the different parameters of the model, features, preprocessing and
        training.  If `str`, filepath to yaml configuration file.
    :param dataset: (Union[str, dict, pandas.DataFrame], default: `None`)
        source containing the entire dataset to be used in the experiment.
        If it has a split column, it will be used for splitting (0 for train,
        1 for validation, 2 for test), otherwise the dataset will be
        randomly split.
    :param training_set: (Union[str, dict, pandas.DataFrame], default: `None`)
        source containing training data.
    :param validation_set: (Union[str, dict, pandas.DataFrame], default: `None`)
        source containing validation data.
    :param test_set: (Union[str, dict, pandas.DataFrame], default: `None`)
        source containing test data.
    :param training_set_metadata: (Union[str, dict], default: `None`)
        metadata JSON file or loaded metadata.  Intermediate preprocessed
        structure containing the mappings of the input
        dataset created the first time an input file is used in the same
        directory with the same name and a '.meta.json' extension.
    :param data_format: (str, default: `None`) format to interpret data
        sources. Will be inferred automatically if not specified.  Valid
        formats are `'auto'`, `'csv'`, `'df'`, `'dict'`, `'excel'`, `'feather'`,
        `'fwf'`, `'hdf5'` (cache file produced during previous training),
        `'html'` (file containing a single HTML `<table>`), `'json'`, `'jsonl'`,
        `'parquet'`, `'pickle'` (pickled Pandas DataFrame), `'sas'`, `'spss'`,
        `'stata'`, `'tsv'`.
    :param experiment_name: (str, default: `'hyperopt'`) name for
        the experiment.
    :param model_name: (str, default: `'run'`) name of the model that is
        being used.
    :param skip_save_training_description: (bool, default: `False`) disables
        saving the description JSON file.
    :param skip_save_training_statistics: (bool, default: `False`) disables
        saving training statistics JSON file.
    :param skip_save_model: (bool, default: `False`) disables
        saving model weights and hyperparameters each time the model
        improves. By default Ludwig saves model weights after each epoch
        the validation metric improves, but if the model is really big
        that can be time consuming. If you do not want to keep
        the weights and just find out what performance a model can get
        with a set of hyperparameters, use this parameter to skip it,
        but the model will not be loadable later on and the returned model
        will have the weights obtained at the end of training, instead of
        the weights of the epoch with the best validation performance.
    :param skip_save_progress: (bool, default: `False`) disables saving
        progress each epoch. By default Ludwig saves weights and stats
        after each epoch for enabling resuming of training, but if
        the model is really big that can be time consuming and will use
        twice as much space, use this parameter to skip it, but training
        cannot be resumed later on.
    :param skip_save_log: (bool, default: `False`) disables saving
        TensorBoard logs. By default Ludwig saves logs for the TensorBoard,
        but if it is not needed turning it off can slightly increase the
        overall speed.
    :param skip_save_processed_input: (bool, default: `True`) if input
        dataset is provided it is preprocessed and cached by saving HDF5
        and JSON files to avoid running the preprocessing again. If this
        parameter is `True`, the HDF5 and JSON files are not saved.
    :param skip_save_unprocessed_output: (bool, default: `False`) by default
        predictions and their probabilities are saved in both raw
        unprocessed numpy files containing tensors and as postprocessed
        CSV files (one for each output feature). If this parameter is True,
        only the CSV ones are saved and the numpy ones are skipped.
    :param skip_save_predictions: (bool, default: `False`) skips saving test
        predictions CSV files.
    :param skip_save_eval_stats: (bool, default: `False`) skips saving test
        statistics JSON file.
    :param skip_save_hyperopt_statistics: (bool, default: `False`) skips saving
        hyperopt stats file.
    :param output_directory: (str, default: `'results'`) the directory that
        will contain the training statistics, TensorBoard logs, the saved
        model and the training progress files.
    :param gpus: (list, default: `None`) list of GPUs that are available
        for training.
    :param gpu_memory_limit: (int, default: `None`) maximum memory in MB to
        allocate per GPU device.
    :param allow_parallel_threads: (bool, default: `True`) allow TensorFlow
        to use multithreading parallelism to improve performance at
        the cost of determinism.
    :param backend: (Union[Backend, str]) `Backend` or string name
        of backend to use to execute preprocessing / training steps.
    :param random_seed: (int, default: 42) random seed used for weights
        initialization, splits and any other random function.
    :param debug: (bool, default: `False`) if `True` turns on `tfdbg` with
        `inf_or_nan` checks.

    # Return

    :return: (List[dict]) The results for the hyperparameter optimization
    """
    backend = initialize_backend(backend)

    # check if config is a path or a dict
    if isinstance(config, str):  # assume path
        with open(config, 'r') as def_file:
            config_dict = yaml.safe_load(def_file)
    else:
        config_dict = config

    # merge config with defaults
    config = merge_with_defaults(config_dict)

    if HYPEROPT not in config:
        raise ValueError("Hyperopt Section not present in config")

    hyperopt_config = config["hyperopt"]

    update_hyperopt_params_with_defaults(hyperopt_config)

    # print hyperopt config
    logger.info(pformat(hyperopt_config, indent=4))
    logger.info('\n')

    sampler = hyperopt_config["sampler"]
    executor = hyperopt_config["executor"]
    parameters = hyperopt_config["parameters"]
    split = hyperopt_config["split"]
    output_feature = hyperopt_config["output_feature"]
    metric = hyperopt_config["metric"]
    goal = hyperopt_config["goal"]

    ######################
    # check validity of output_feature / metric / split combination
    ######################
    if split == TRAINING:
        if training_set is None and (
                config['preprocessing']['split_probabilities'][0] <= 0):
            raise ValueError(
                'The data for the specified split for hyperopt "{}" '
                'was not provided, '
                'or the split amount specified in the preprocessing section '
                'of the config is not greater than 0'.format(split))
    elif split == VALIDATION:
        if validation_set is None and (
                config['preprocessing']['split_probabilities'][1] <= 0):
            raise ValueError(
                'The data for the specified split for hyperopt "{}" '
                'was not provided, '
                'or the split amount specified in the preprocessing section '
                'of the config is not greater than 0'.format(split))
    elif split == TEST:
        if test_set is None and (
                config['preprocessing']['split_probabilities'][2] <= 0):
            raise ValueError(
                'The data for the specified split for hyperopt "{}" '
                'was not provided, '
                'or the split amount specified in the preprocessing section '
                'of the config is not greater than 0'.format(split))
    else:
        raise ValueError('unrecognized hyperopt split "{}". '
                         'Please provide one of: {}'.format(
                             split, {TRAINING, VALIDATION, TEST}))
    if output_feature == COMBINED:
        if metric != LOSS:
            raise ValueError(
                'The only valid metric for "combined" output feature is "loss"'
            )
    else:
        output_feature_names = set(of['name']
                                   for of in config['output_features'])
        if output_feature not in output_feature_names:
            raise ValueError('The output feature specified for hyperopt "{}" '
                             'cannot be found in the config. '
                             'Available ones are: {} and "combined"'.format(
                                 output_feature, output_feature_names))

        output_feature_type = None
        for of in config['output_features']:
            if of['name'] == output_feature:
                output_feature_type = of[TYPE]
        feature_class = get_from_registry(output_feature_type,
                                          output_type_registry)
        if metric not in feature_class.metric_functions:
            # todo v0.4: allow users to also specify overall and per-class
            #  metrics from the training stats, and postprocessed metrics
            #  in general
            raise ValueError(
                'The specified metric for hyperopt "{}" is not a valid metric '
                'for the specified output feature "{}" of type "{}". '
                'Available metrics are: {}'.format(
                    metric, output_feature, output_feature_type,
                    feature_class.metric_functions.keys()))

    hyperopt_sampler = get_build_hyperopt_sampler(sampler[TYPE])(goal,
                                                                 parameters,
                                                                 **sampler)

    hyperopt_executor = get_build_hyperopt_executor(executor[TYPE])(
        hyperopt_sampler, output_feature, metric, split, **executor)

    hyperopt_results = hyperopt_executor.execute(
        config,
        dataset=dataset,
        training_set=training_set,
        validation_set=validation_set,
        test_set=test_set,
        training_set_metadata=training_set_metadata,
        data_format=data_format,
        experiment_name=experiment_name,
        model_name=model_name,
        # model_load_path=None,
        # model_resume_path=None,
        skip_save_training_description=skip_save_training_description,
        skip_save_training_statistics=skip_save_training_statistics,
        skip_save_model=skip_save_model,
        skip_save_progress=skip_save_progress,
        skip_save_log=skip_save_log,
        skip_save_processed_input=skip_save_processed_input,
        skip_save_unprocessed_output=skip_save_unprocessed_output,
        skip_save_predictions=skip_save_predictions,
        skip_save_eval_stats=skip_save_eval_stats,
        output_directory=output_directory,
        gpus=gpus,
        gpu_memory_limit=gpu_memory_limit,
        allow_parallel_threads=allow_parallel_threads,
        backend=backend,
        random_seed=random_seed,
        debug=debug,
        **kwargs)

    if backend.is_coordinator():
        print_hyperopt_results(hyperopt_results)

        if not skip_save_hyperopt_statistics:
            if not os.path.exists(output_directory):
                os.makedirs(output_directory)

            hyperopt_stats = {
                'hyperopt_config': hyperopt_config,
                'hyperopt_results': hyperopt_results
            }

            save_hyperopt_stats(hyperopt_stats, output_directory)
            logger.info('Hyperopt stats saved to: {}'.format(output_directory))

    logger.info('Finished hyperopt')

    return hyperopt_results
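
For this config-based variant, a minimal illustrative call (file paths are placeholders; all keyword arguments appear in the signature above):

results = hyperopt(
    config="config.yaml",    # or a dict containing a "hyperopt" section
    dataset="dataset.csv",
    output_directory="results",
    skip_save_model=True,    # keep individual trials lightweight
)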
Example #11
def hyperopt(
    model_definition,
    dataset=None,
    training_set=None,
    validation_set=None,
    test_set=None,
    training_set_metadata=None,
    data_format=None,
    experiment_name="hyperopt",
    model_name="run",
    # model_load_path=None,
    # model_resume_path=None,
    skip_save_training_description=True,
    skip_save_training_statistics=True,
    skip_save_model=False,  # False because we want to use the best-validation model
    skip_save_progress=True,
    skip_save_log=True,
    skip_save_processed_input=True,
    skip_save_unprocessed_output=True,
    skip_save_predictions=True,
    skip_save_eval_stats=True,
    skip_save_hyperopt_statistics=False,
    output_directory="results",
    gpus=None,
    gpu_memory_limit=None,
    allow_parallel_threads=True,
    use_horovod=None,
    random_seed=default_random_seed,
    debug=False,
    **kwargs,
) -> dict:
    """This method performs an hyperparameter optimization.

    :param model_definition:
    :param dataset:
    :param training_set:
    :param validation_set:
    :param test_set:
    :param training_set_metadata:
    :param data_format:
    :param experiment_name:
    :param model_name:
    :param skip_save_training_description:
    :param skip_save_training_statistics:
    :param skip_save_model:
    :param skip_save_progress:
    :param skip_save_log:
    :param skip_save_processed_input:
    :param skip_save_unprocessed_output:
    :param skip_save_predictions:
    :param skip_save_eval_stats:
    :param skip_save_hyperopt_statistics:
    :param output_directory:
    :param gpus:
    :param gpu_memory_limit:
    :param allow_parallel_threads:
    :param use_horovod:
    :param random_seed:
    :param debug:
    :param kwargs:
    :return: (dict) The results of the hyperparameter optimization
    """
    # todo refactoring: complete docstrings
    # check if model definition is a path or a dict
    if isinstance(model_definition, str):  # assume path
        with open(model_definition, 'r') as def_file:
            model_definition_dict = yaml.safe_load(def_file)
    else:
        model_definition_dict = model_definition

    # merge model definition with defaults
    model_definition = merge_with_defaults(model_definition_dict)

    if HYPEROPT not in model_definition:
        raise ValueError("Hyperopt Section not present in Model Definition")

    hyperopt_config = model_definition["hyperopt"]
    update_hyperopt_params_with_defaults(hyperopt_config)

    # print hyperopt config
    logger.info(pformat(hyperopt_config, indent=4))
    logger.info('\n')

    sampler = hyperopt_config["sampler"]
    executor = hyperopt_config["executor"]
    parameters = hyperopt_config["parameters"]
    split = hyperopt_config["split"]
    output_feature = hyperopt_config["output_feature"]
    metric = hyperopt_config["metric"]
    goal = hyperopt_config["goal"]

    ######################
    # check validity of output_feature / metric / split combination
    ######################
    if split == TRAINING:
        if not training_set and (
                model_definition['preprocessing']['split_probabilities'][0] <=
                0):
            raise ValueError(
                'The data for the specified split for hyperopt "{}" '
                'was not provided, '
                'or the split amount specified in the preprocessing section '
                'of the model definition is not greater than 0'.format(split))
    elif split == VALIDATION:
        if not validation_set and (
                model_definition['preprocessing']['split_probabilities'][1] <=
                0):
            raise ValueError(
                'The data for the specified split for hyperopt "{}" '
                'was not provided, '
                'or the split amount specified in the preprocessing section '
                'of the model definition is not greater than 0'.format(split))
    elif split == TEST:
        if not test_set and (
                model_definition['preprocessing']['split_probabilities'][2] <=
                0):
            raise ValueError(
                'The data for the specified split for hyperopt "{}" '
                'was not provided, '
                'or the split amount specified in the preprocessing section '
                'of the model definition is not greater than 0'.format(split))
    else:
        raise ValueError('unrecognized hyperopt split "{}". '
                         'Please provide one of: {}'.format(
                             split, {TRAINING, VALIDATION, TEST}))
    if output_feature == COMBINED:
        if metric != LOSS:
            raise ValueError(
                'The only valid metric for "combined" output feature is "loss"'
            )
    else:
        output_feature_names = set(
            of['name'] for of in model_definition['output_features'])
        if output_feature not in output_feature_names:
            raise ValueError('The output feature specified for hyperopt "{}" '
                             'cannot be found in the model definition. '
                             'Available ones are: {} and "combined"'.format(
                                 output_feature, output_feature_names))

        output_feature_type = None
        for of in model_definition['output_features']:
            if of['name'] == output_feature:
                output_feature_type = of[TYPE]
        feature_class = get_from_registry(output_feature_type,
                                          output_type_registry)
        if metric not in feature_class.metric_functions:
            # todo v0.4: allow users to also specify overall and per-class
            #  metrics from the training stats, and postprocessed metrics
            #  in general
            raise ValueError(
                'The specified metric for hyperopt "{}" is not a valid metric '
                'for the specified output feature "{}" of type "{}". '
                'Available metrics are: {}'.format(
                    metric, output_feature, output_feature_type,
                    feature_class.metric_functions.keys()))

    hyperopt_sampler = get_build_hyperopt_sampler(sampler[TYPE])(goal,
                                                                 parameters,
                                                                 **sampler)
    hyperopt_executor = get_build_hyperopt_executor(executor[TYPE])(
        hyperopt_sampler, output_feature, metric, split, **executor)

    hyperopt_results = hyperopt_executor.execute(
        model_definition,
        dataset=dataset,
        training_set=training_set,
        validation_set=validation_set,
        test_set=test_set,
        training_set_metadata=training_set_metadata,
        data_format=data_format,
        experiment_name=experiment_name,
        model_name=model_name,
        # model_load_path=None,
        # model_resume_path=None,
        skip_save_training_description=skip_save_training_description,
        skip_save_training_statistics=skip_save_training_statistics,
        skip_save_model=skip_save_model,
        skip_save_progress=skip_save_progress,
        skip_save_log=skip_save_log,
        skip_save_processed_input=skip_save_processed_input,
        skip_save_unprocessed_output=skip_save_unprocessed_output,
        skip_save_predictions=skip_save_predictions,
        skip_save_eval_stats=skip_save_eval_stats,
        output_directory=output_directory,
        gpus=gpus,
        gpu_memory_limit=gpu_memory_limit,
        allow_parallel_threads=allow_parallel_threads,
        use_horovod=use_horovod,
        random_seed=random_seed,
        debug=debug,
        **kwargs)

    if is_on_master():
        print_hyperopt_results(hyperopt_results)

        if not skip_save_hyperopt_statistics:
            if not os.path.exists(output_directory):
                os.makedirs(output_directory)

            hyperopt_stats = {
                'hyperopt_config': hyperopt_config,
                'hyperopt_results': hyperopt_results
            }

            save_hyperopt_stats(hyperopt_stats, output_directory)
            logger.info('Hyperopt stats saved to: {}'.format(output_directory))

    logger.info('Finished hyperopt')

    return hyperopt_results
Example #12
def test_hyperopt_executor_get_metric_score():
    executor = EXECUTORS[0]
    output_feature = "of_name"
    split = 'test'

    train_stats = {
        'training': {
            output_feature: {
                'loss': [0.58760345, 1.5066891],
                'accuracy': [0.6666667, 0.33333334],
                'hits_at_k': [1.0, 1.0]
            },
            'combined': {
                'loss': [0.58760345, 1.5066891]
            }
        },
        'validation': {
            output_feature: {
                'loss': [0.30233705, 2.6505466],
                'accuracy': [1.0, 0.0],
                'hits_at_k': [1.0, 1.0]
            },
            'combined': {
                'loss': [0.30233705, 2.6505466]
            }
        },
        'test': {
            output_feature: {
                'loss': [1.0876318, 1.4353828],
                'accuracy': [0.7, 0.5],
                'hits_at_k': [1.0, 1.0]
            },
            'combined': {
                'loss': [1.0876318, 1.4353828]
            }
        }
    }

    eval_stats = {
        output_feature: {
            'loss': 1.4353828,
            'accuracy': 0.5,
            'hits_at_k': 1.0,
            'overall_stats': {
                'token_accuracy': 1.0,
                'avg_precision_macro': 1.0,
                'avg_recall_macro': 1.0,
                'avg_f1_score_macro': 1.0,
                'avg_precision_micro': 1.0,
                'avg_recall_micro': 1.0,
                'avg_f1_score_micro': 1.0,
                'avg_precision_weighted': 1.0,
                'avg_recall_weighted': 1.0,
                'avg_f1_score_weighted': 1.0,
                'kappa_score': 0.6
            },
            'combined': {
                'loss': 1.4353828
            }
        }
    }

    metric = 'loss'
    hyperopt_executor = get_build_hyperopt_executor(executor["type"])(
        None, output_feature, metric, split, **executor)
    score = hyperopt_executor.get_metric_score(train_stats, eval_stats)
    assert score == 1.0876318

    metric = 'accuracy'
    hyperopt_executor = get_build_hyperopt_executor(executor["type"])(
        None, output_feature, metric, split, **executor)
    score = hyperopt_executor.get_metric_score(train_stats, eval_stats)
    assert score == 0.7

    metric = 'overall_stats.kappa_score'
    hyperopt_executor = get_build_hyperopt_executor(executor["type"])(
        None, output_feature, metric, split, **executor)
    score = hyperopt_executor.get_metric_score(train_stats, eval_stats)
    assert score == 0.6
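
The last assertion shows `get_metric_score` resolving a dotted metric name against the evaluation stats rather than the per-epoch training stats. A sketch of such a nested lookup, assumed for illustration:

from functools import reduce

def resolve_metric_path(eval_stats, output_feature, metric_path):
    # Walk nested dicts: "overall_stats.kappa_score" resolves to
    # eval_stats[output_feature]["overall_stats"]["kappa_score"]
    keys = [output_feature] + metric_path.split(".")
    return reduce(lambda d, k: d[k], keys, eval_stats)

eval_stats = {"of_name": {"overall_stats": {"kappa_score": 0.6}}}
assert resolve_metric_path(
    eval_stats, "of_name", "overall_stats.kappa_score") == 0.6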
Example #13
def hyperopt(
    model_definition,
    dataset=None,
    training_set=None,
    validation_set=None,
    test_set=None,
    training_set_metadata=None,
    data_format=None,
    experiment_name="hyperopt",
    model_name="run",
    # model_load_path=None,
    # model_resume_path=None,
    skip_save_training_description=True,
    skip_save_training_statistics=True,
    skip_save_model=False,  # False because we want to use the best-validation model
    skip_save_progress=True,
    skip_save_log=True,
    skip_save_processed_input=True,
    skip_save_unprocessed_output=True,
    skip_save_predictions=True,
    skip_save_eval_stats=True,
    skip_save_hyperopt_statistics=False,
    output_directory="results",
    gpus=None,
    gpu_memory_limit=None,
    allow_parallel_threads=True,
    use_horovod=None,
    random_seed=default_random_seed,
    debug=False,
    **kwargs,
) -> dict:
    """This method performs an hyperparameter optimization.

    :param model_definition: Model definition which defines the different
           parameters of the model, features, preprocessing and training.
    :type model_definition: Dictionary
    :param dataset: Source containing the entire dataset.
           If it has a split column, it will be used for splitting (0: train,
           1: validation, 2: test), otherwise the dataset will be randomly split.
    :type dataset: Str, Dictionary, DataFrame
    :param training_set: Source containing training data.
    :type training_set: Str, Dictionary, DataFrame
    :param validation_set: Source containing validation data.
    :type validation_set: Str, Dictionary, DataFrame
    :param test_set: Source containing test data.
    :type test_set: Str, Dictionary, DataFrame
    :param training_set_metadata: Metadata JSON file or loaded metadata.
           Intermediate preprocessed structure containing the mappings of the input
           CSV created the first time a CSV file is used in the same
           directory with the same name and a '.json' extension.
    :type training_set_metadata: Str, Dictionary
    :param data_format: Format to interpret data sources. Will be inferred
           automatically if not specified.
    :type data_format: Str
    :param experiment_name: The name for the experiment.
    :type experiment_name: Str
    :param model_name: Name of the model that is being used.
    :type model_name: Str
    :param skip_save_training_description: Disables saving
           the description JSON file.
    :type skip_save_training_description: Boolean
    :param skip_save_training_statistics: Disables saving
           training statistics JSON file.
    :type skip_save_training_statistics: Boolean
    :param skip_save_model: Disables
           saving model weights and hyperparameters each time the model
           improves. By default Ludwig saves model weights after each epoch
           the validation metric improves, but if the model is really big
           that can be time consuming. If you do not want to keep
           the weights and just want to find out what performance a model
           can get with a set of hyperparameters, use this parameter to
           skip it, but the model will not be loadable later on.
    :type skip_save_model: Boolean
    :param skip_save_progress: Disables saving
           progress each epoch. By default Ludwig saves weights and stats
           after each epoch for enabling resuming of training, but if
           the model is really big that can be time consuming and will use
           twice as much space, use this parameter to skip it, but training
           cannot be resumed later on.
    :type skip_save_progress: Boolean
    :param skip_save_log: Disables saving TensorBoard
           logs. By default Ludwig saves logs for the TensorBoard, but if it
           is not needed turning it off can slightly increase the
           overall speed.
    :type skip_save_log: Boolean
    :param skip_save_processed_input: If a CSV dataset is provided it is
           preprocessed and then saved as an hdf5 and json to avoid running
           the preprocessing again. If this parameter is False,
           the hdf5 and json file are not saved.
    :type skip_save_processed_input: Boolean
    :param skip_save_unprocessed_output: By default predictions and
           their probabilities are saved in both raw unprocessed numpy files
           containing tensors and as postprocessed CSV files
           (one for each output feature). If this parameter is True,
           only the CSV ones are saved and the numpy ones are skipped.
    :type skip_save_unprocessed_output: Boolean
    :param skip_save_predictions: skips saving test predictions CSV files
    :type skip_save_predictions: Boolean
    :param skip_save_eval_stats: skips saving test statistics JSON file
    :type skip_save_eval_stats: Boolean
    :param skip_save_hyperopt_statistics: skips saving hyperopt stats file
    :type skip_save_hyperopt_statistics: Boolean
    :param output_directory: The directory that will contain the training
           statistics, the saved model and the training progress files.
    :type output_directory: filepath (str)
    :param gpus: List of GPUs that are available for training.
    :type gpus: List
    :param gpu_memory_limit: maximum memory in MB to allocate per GPU device.
    :type gpu_memory_limit: Integer
    :param allow_parallel_threads: allow TensorFlow to use multithreading parallelism
           to improve performance at the cost of determinism.
    :type allow_parallel_threads: Boolean
    :param use_horovod: Flag for using horovod
    :type use_horovod: Boolean
    :param random_seed: Random seed used for weights initialization,
           splits and any other random function.
    :type random_seed: Integer
    :param debug: If true turns on tfdbg with inf_or_nan checks.
    :type debug: Boolean

    :return: (dict) The results of the hyperparameter optimization
    """
    # check if model definition is a path or a dict
    if isinstance(model_definition, str):  # assume path
        with open(model_definition, 'r') as def_file:
            model_definition_dict = yaml.safe_load(def_file)
    else:
        model_definition_dict = model_definition

    # merge model definition with defaults
    model_definition = merge_with_defaults(model_definition_dict)

    if HYPEROPT not in model_definition:
        raise ValueError("Hyperopt Section not present in Model Definition")

    hyperopt_config = model_definition["hyperopt"]
    update_hyperopt_params_with_defaults(hyperopt_config)

    # print hyperopt config
    logger.info(pformat(hyperopt_config, indent=4))
    logger.info('\n')

    sampler = hyperopt_config["sampler"]
    executor = hyperopt_config["executor"]
    parameters = hyperopt_config["parameters"]
    split = hyperopt_config["split"]
    output_feature = hyperopt_config["output_feature"]
    metric = hyperopt_config["metric"]
    goal = hyperopt_config["goal"]

    ######################
    # check validity of output_feature / metric / split combination
    ######################
    if split == TRAINING:
        if not training_set and (
                model_definition['preprocessing']['split_probabilities'][0] <=
                0):
            raise ValueError(
                'The data for the specified split for hyperopt "{}" '
                'was not provided, '
                'or the split amount specified in the preprocessing section '
                'of the model definition is not greater than 0'.format(split))
    elif split == VALIDATION:
        if not validation_set and (
                model_definition['preprocessing']['split_probabilities'][1] <=
                0):
            raise ValueError(
                'The data for the specified split for hyperopt "{}" '
                'was not provided, '
                'or the split amount specified in the preprocessing section '
                'of the model definition is not greater than 0'.format(split))
    elif split == TEST:
        if not test_set and (
                model_definition['preprocessing']['split_probabilities'][2] <=
                0):
            raise ValueError(
                'The data for the specified split for hyperopt "{}" '
                'was not provided, '
                'or the split amount specified in the preprocessing section '
                'of the model definition is not greater than 0'.format(split))
    else:
        raise ValueError('unrecognized hyperopt split "{}". '
                         'Please provide one of: {}'.format(
                             split, {TRAINING, VALIDATION, TEST}))
    if output_feature == COMBINED:
        if metric != LOSS:
            raise ValueError(
                'The only valid metric for "combined" output feature is "loss"'
            )
    else:
        output_feature_names = set(
            of['name'] for of in model_definition['output_features'])
        if output_feature not in output_feature_names:
            raise ValueError('The output feature specified for hyperopt "{}" '
                             'cannot be found in the model definition. '
                             'Available ones are: {} and "combined"'.format(
                                 output_feature, output_feature_names))

        output_feature_type = None
        for of in model_definition['output_features']:
            if of['name'] == output_feature:
                output_feature_type = of[TYPE]
        feature_class = get_from_registry(output_feature_type,
                                          output_type_registry)
        if metric not in feature_class.metric_functions:
            # todo v0.4: allow users to also specify overall and per-class
            #  metrics from the training stats, and postprocessed metrics
            #  in general
            raise ValueError(
                'The specified metric for hyperopt "{}" is not a valid metric '
                'for the specified output feature "{}" of type "{}". '
                'Available metrics are: {}'.format(
                    metric, output_feature, output_feature_type,
                    feature_class.metric_functions.keys()))

    hyperopt_sampler = get_build_hyperopt_sampler(sampler[TYPE])(goal,
                                                                 parameters,
                                                                 **sampler)
    hyperopt_executor = get_build_hyperopt_executor(executor[TYPE])(
        hyperopt_sampler, output_feature, metric, split, **executor)

    hyperopt_results = hyperopt_executor.execute(
        model_definition,
        dataset=dataset,
        training_set=training_set,
        validation_set=validation_set,
        test_set=test_set,
        training_set_metadata=training_set_metadata,
        data_format=data_format,
        experiment_name=experiment_name,
        model_name=model_name,
        # model_load_path=None,
        # model_resume_path=None,
        skip_save_training_description=skip_save_training_description,
        skip_save_training_statistics=skip_save_training_statistics,
        skip_save_model=skip_save_model,
        skip_save_progress=skip_save_progress,
        skip_save_log=skip_save_log,
        skip_save_processed_input=skip_save_processed_input,
        skip_save_unprocessed_output=skip_save_unprocessed_output,
        skip_save_predictions=skip_save_predictions,
        skip_save_eval_stats=skip_save_eval_stats,
        output_directory=output_directory,
        gpus=gpus,
        gpu_memory_limit=gpu_memory_limit,
        allow_parallel_threads=allow_parallel_threads,
        use_horovod=use_horovod,
        random_seed=random_seed,
        debug=debug,
        **kwargs)

    if is_on_master():
        print_hyperopt_results(hyperopt_results)

        if not skip_save_hyperopt_statistics:
            if not os.path.exists(output_directory):
                os.makedirs(output_directory)

            hyperopt_stats = {
                'hyperopt_config': hyperopt_config,
                'hyperopt_results': hyperopt_results
            }

            save_hyperopt_stats(hyperopt_stats, output_directory)
            logger.info('Hyperopt stats saved to: {}'.format(output_directory))

    logger.info('Finished hyperopt')

    return hyperopt_results
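
A minimal, hedged usage sketch for the function above; the function name `hyperopt`, the `model_definition` layout, and the CSV path are illustrative assumptions, not part of the original example.

# Usage sketch only: names and values below are assumptions for illustration.
model_definition = {
    'input_features': [{'name': 'utterance', 'type': 'text'}],
    'output_features': [{'name': 'intent', 'type': 'category'}],
    'hyperopt': {
        'parameters': {
            'training.learning_rate': {
                'type': 'float',
                'low': 0.0001,
                'high': 0.1,
                'space': 'log',
            },
        },
        'goal': 'minimize',
        'output_feature': 'intent',
        'metric': 'loss',
        'split': 'validation',
    },
}
hyperopt_results = hyperopt(model_definition, dataset='train.csv')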
Example No. 14
def hyperopt(
    config: Union[str, dict],
    dataset: Union[str, dict, pd.DataFrame] = None,
    training_set: Union[str, dict, pd.DataFrame] = None,
    validation_set: Union[str, dict, pd.DataFrame] = None,
    test_set: Union[str, dict, pd.DataFrame] = None,
    training_set_metadata: Union[str, dict] = None,
    data_format: str = None,
    experiment_name: str = "hyperopt",
    model_name: str = "run",
    resume: Optional[bool] = None,
    skip_save_training_description: bool = False,
    skip_save_training_statistics: bool = False,
    skip_save_model: bool = False,
    skip_save_progress: bool = False,
    skip_save_log: bool = False,
    skip_save_processed_input: bool = True,
    skip_save_unprocessed_output: bool = False,
    skip_save_predictions: bool = False,
    skip_save_eval_stats: bool = False,
    skip_save_hyperopt_statistics: bool = False,
    output_directory: str = "results",
    gpus: Union[str, int, List[int]] = None,
    gpu_memory_limit: int = None,
    allow_parallel_threads: bool = True,
    callbacks: List[Callback] = None,
    backend: Union[Backend, str] = None,
    random_seed: int = default_random_seed,
    hyperopt_log_verbosity: int = 3,
    **kwargs,
) -> HyperoptResults:
    """This method performs an hyperparameter optimization.

    # Inputs

    :param config: (Union[str, dict]) config which defines
        the different parameters of the model, features, preprocessing and
        training.  If `str`, filepath to yaml configuration file.
    :param dataset: (Union[str, dict, pandas.DataFrame], default: `None`)
        source containing the entire dataset to be used in the experiment.
        If it has a split column, it will be used for splitting (0 for train,
        1 for validation, 2 for test), otherwise the dataset will be
        randomly split.
    :param training_set: (Union[str, dict, pandas.DataFrame], default: `None`)
        source containing training data.
    :param validation_set: (Union[str, dict, pandas.DataFrame], default: `None`)
        source containing validation data.
    :param test_set: (Union[str, dict, pandas.DataFrame], default: `None`)
        source containing test data.
    :param training_set_metadata: (Union[str, dict], default: `None`)
        metadata JSON file or loaded metadata: an intermediate preprocessed
        structure containing the mappings of the input dataset, created the
        first time an input file is used, in the same directory and with the
        same name plus a '.meta.json' extension.
    :param data_format: (str, default: `None`) format to interpret data
        sources. Will be inferred automatically if not specified.  Valid
        formats are `'auto'`, `'csv'`, `'df'`, `'dict'`, `'excel'`, `'feather'`,
        `'fwf'`, `'hdf5'` (cache file produced during previous training),
        `'html'` (file containing a single HTML `<table>`), `'json'`, `'jsonl'`,
        `'parquet'`, `'pickle'` (pickled Pandas DataFrame), `'sas'`, `'spss'`,
        `'stata'`, `'tsv'`.
    :param experiment_name: (str, default: `'hyperopt'`) name for
        the experiment.
    :param model_name: (str, default: `'run'`) name of the model that is
        being used.
    :param resume: (bool) If true, continue hyperopt from the state of the previous
        run in the output directory with the same experiment name. If false, will create
        new trials, ignoring any previous state, even if they exist in the output_directory.
        By default, will attempt to resume if there is already an existing experiment with
        the same name, and will create new trials if not.
    :param skip_save_training_description: (bool, default: `False`) disables
        saving the description JSON file.
    :param skip_save_training_statistics: (bool, default: `False`) disables
        saving training statistics JSON file.
    :param skip_save_model: (bool, default: `False`) disables
        saving model weights and hyperparameters each time the model
        improves. By default Ludwig saves model weights after each epoch
        the validation metric improves, but if the model is really big
        that can be time consuming. If you do not want to keep
        the weights and just want to find out what performance a model
        can get with a set of hyperparameters, use this parameter to skip
        it, but the model will not be loadable later on and the returned
        model will have the weights obtained at the end of training,
        instead of the weights of the epoch with the best validation
        performance.
    :param skip_save_progress: (bool, default: `False`) disables saving
        progress each epoch. By default Ludwig saves weights and stats
        after each epoch to enable resuming of training, but if
        the model is really big that can be time consuming and will use
        twice as much storage space; use this parameter to skip it, but
        training cannot be resumed later on.
    :param skip_save_log: (bool, default: `False`) disables saving
        TensorBoard logs. By default Ludwig saves logs for the TensorBoard,
        but if it is not needed turning it off can slightly increase the
        overall speed.
    :param skip_save_processed_input: (bool, default: `True`) if an input
        dataset is provided it is preprocessed and cached by saving HDF5
        and JSON files, to avoid running the preprocessing again. If this
        parameter is `True`, the HDF5 and JSON files are not saved.
    :param skip_save_unprocessed_output: (bool, default: `False`) by default
        predictions and their probabilities are saved both as raw
        unprocessed numpy files containing tensors and as postprocessed
        CSV files (one for each output feature). If this parameter is
        `True`, only the CSV ones are saved and the numpy ones are skipped.
    :param skip_save_predictions: (bool, default: `False`) skips saving test
        predictions CSV files.
    :param skip_save_eval_stats: (bool, default: `False`) skips saving test
        statistics JSON file.
    :param skip_save_hyperopt_statistics: (bool, default: `False`) skips saving
        hyperopt stats file.
    :param output_directory: (str, default: `'results'`) the directory that
        will contain the training statistics, TensorBoard logs, the saved
        model and the training progress files.
    :param gpus: (Union[str, int, List[int]], default: `None`) GPUs that
        are available for training.
    :param gpu_memory_limit: (int, default: `None`) maximum memory in MB to
        allocate per GPU device.
    :param allow_parallel_threads: (bool, default: `True`) allow TensorFlow
        to use multithreading parallelism to improve performance at
        the cost of determinism.
    :param callbacks: (list, default: `None`) a list of
        `ludwig.callbacks.Callback` objects that provide hooks into the
        Ludwig pipeline.
    :param backend: (Union[Backend, str]) `Backend` or string name
        of backend to use to execute preprocessing / training steps.
    :param random_seed: (int, default: `42`) random seed used for weights
        initialization, splits and any other random function.
    :param hyperopt_log_verbosity: (int, default: `3`) controls verbosity of
        Ray Tune log messages.  Valid values: 0 = silent, 1 = only status
        updates, 2 = status and brief trial results, 3 = status and detailed
        trial results.

    # Return

    :return: (HyperoptResults) The results of the hyperopt run, with trials
        ordered by descending performance on the target metric.
    """
    from ludwig.hyperopt.execution import get_build_hyperopt_executor, RayTuneExecutor

    # check if config is a path or a dict
    if isinstance(config, str):  # assume path
        with open_file(config, "r") as def_file:
            config_dict = yaml.safe_load(def_file)
    else:
        config_dict = config

    # Get mapping of input/output features that don't have an encoder for shared parameters
    features_eligible_for_shared_params = {
        INPUT_FEATURES:
        get_features_eligible_for_shared_params(config_dict, INPUT_FEATURES),
        OUTPUT_FEATURES:
        get_features_eligible_for_shared_params(config_dict, OUTPUT_FEATURES),
    }

    # merge config with defaults
    config = merge_with_defaults(config_dict)

    if HYPEROPT not in config:
        raise ValueError("Hyperopt Section not present in config")

    hyperopt_config = config[HYPEROPT]

    update_hyperopt_params_with_defaults(hyperopt_config)

    # print hyperopt config
    logging.info("Hyperopt config")
    logging.info(pformat(hyperopt_config, indent=4))
    logging.info("\n")

    logging.info(
        "Features that may be updated in hyperopt trials if default parameters are specified in the search space"
    )
    logging.info(pformat(dict(features_eligible_for_shared_params), indent=4))
    logging.info("\n")

    search_alg = hyperopt_config["search_alg"]
    executor = hyperopt_config[EXECUTOR]
    parameters = hyperopt_config["parameters"]
    split = hyperopt_config["split"]
    output_feature = hyperopt_config["output_feature"]
    metric = hyperopt_config["metric"]
    goal = hyperopt_config["goal"]

    ######################
    # check validity of output_feature / metric / split combination
    ######################
    splitter = get_splitter(**config[PREPROCESSING]["split"])
    if split == TRAINING:
        if training_set is None and not splitter.has_split(0):
            raise ValueError(
                'The data for the specified split for hyperopt "{}" '
                "was not provided, "
                "or the split amount specified in the preprocessing section "
                "of the config is not greater than 0".format(split))
    elif split == VALIDATION:
        if validation_set is None and not splitter.has_split(1):
            raise ValueError(
                'The data for the specified split for hyperopt "{}" '
                "was not provided, "
                "or the split amount specified in the preprocessing section "
                "of the config is not greater than 0".format(split))
    elif split == TEST:
        if test_set is None and not splitter.has_split(2):
            raise ValueError(
                'The data for the specified split for hyperopt "{}" '
                "was not provided, "
                "or the split amount specified in the preprocessing section "
                "of the config is not greater than 0".format(split))
    else:
        raise ValueError('unrecognized hyperopt split "{}". '
                         "Please provide one of: {}".format(
                             split, {TRAINING, VALIDATION, TEST}))
    if output_feature == COMBINED:
        if metric != LOSS:
            raise ValueError(
                'The only valid metric for "combined" output feature is "loss"'
            )
    else:
        output_feature_names = {of[NAME] for of in config[OUTPUT_FEATURES]}
        if output_feature not in output_feature_names:
            raise ValueError('The output feature specified for hyperopt "{}" '
                             "cannot be found in the config. "
                             'Available ones are: {} and "combined"'.format(
                                 output_feature, output_feature_names))

        output_feature_type = None
        for of in config[OUTPUT_FEATURES]:
            if of[NAME] == output_feature:
                output_feature_type = of[TYPE]
        feature_class = get_from_registry(output_feature_type,
                                          output_type_registry)
        if metric not in feature_class.metric_functions:
            # todo v0.4: also allow users to specify overall and per-class
            #  metrics from the training stats and, in general,
            #  postprocessed metrics
            raise ValueError(
                'The specified metric for hyperopt "{}" is not a valid metric '
                'for the specified output feature "{}" of type "{}". '
                "Available metrics are: {}".format(
                    metric, output_feature, output_feature_type,
                    feature_class.metric_functions.keys()))

    hyperopt_executor = get_build_hyperopt_executor(executor[TYPE])(
        parameters,
        output_feature,
        metric,
        goal,
        split,
        search_alg=search_alg,
        **executor)

    # Explicitly default to a local backend to avoid picking up Ray or Horovod
    # backend from the environment.
    backend = backend or config_dict.get("backend") or "local"
    backend = initialize_backend(backend)
    if not (isinstance(backend, LocalBackend) or
            (isinstance(hyperopt_executor, RayTuneExecutor)
             and isinstance(backend, RayBackend))):
        raise ValueError(
            "Hyperopt requires using a `local` backend at this time, or "
            "`ray` backend with `ray` executor.")

    for callback in callbacks or []:
        callback.on_hyperopt_init(experiment_name)

    if not should_tune_preprocessing(config):
        # preprocessing is not being tuned, so generate it once before starting trials
        for callback in callbacks or []:
            callback.on_hyperopt_preprocessing_start(experiment_name)

        model = LudwigModel(
            config=config,
            backend=backend,
            gpus=gpus,
            gpu_memory_limit=gpu_memory_limit,
            allow_parallel_threads=allow_parallel_threads,
            callbacks=callbacks,
        )

        training_set, validation_set, test_set, training_set_metadata = model.preprocess(
            dataset=dataset,
            training_set=training_set,
            validation_set=validation_set,
            test_set=test_set,
            training_set_metadata=training_set_metadata,
            data_format=data_format,
            skip_save_processed_input=skip_save_processed_input,
            random_seed=random_seed,
        )
        dataset = None

        for callback in callbacks or []:
            callback.on_hyperopt_preprocessing_end(experiment_name)

    for callback in callbacks or []:
        callback.on_hyperopt_start(experiment_name)

    hyperopt_results = hyperopt_executor.execute(
        config,
        dataset=dataset,
        training_set=training_set,
        validation_set=validation_set,
        test_set=test_set,
        training_set_metadata=training_set_metadata,
        data_format=data_format,
        experiment_name=experiment_name,
        model_name=model_name,
        resume=resume,
        skip_save_training_description=skip_save_training_description,
        skip_save_training_statistics=skip_save_training_statistics,
        skip_save_model=skip_save_model,
        skip_save_progress=skip_save_progress,
        skip_save_log=skip_save_log,
        skip_save_processed_input=skip_save_processed_input,
        skip_save_unprocessed_output=skip_save_unprocessed_output,
        skip_save_predictions=skip_save_predictions,
        skip_save_eval_stats=skip_save_eval_stats,
        output_directory=output_directory,
        gpus=gpus,
        gpu_memory_limit=gpu_memory_limit,
        allow_parallel_threads=allow_parallel_threads,
        callbacks=callbacks,
        backend=backend,
        random_seed=random_seed,
        hyperopt_log_verbosity=hyperopt_log_verbosity,
        features_eligible_for_shared_params=features_eligible_for_shared_params,
        **kwargs,
    )

    if backend.is_coordinator():
        print_hyperopt_results(hyperopt_results)

        if not skip_save_hyperopt_statistics:
            results_directory = os.path.join(output_directory, experiment_name)
            makedirs(results_directory, exist_ok=True)

            hyperopt_stats = {
                "hyperopt_config":
                hyperopt_config,
                "hyperopt_results":
                [t.to_dict() for t in hyperopt_results.ordered_trials],
            }

            save_hyperopt_stats(hyperopt_stats, results_directory)
            logging.info(f"Hyperopt stats saved to: {results_directory}")

    for callback in callbacks or []:
        callback.on_hyperopt_end(experiment_name)
        callback.on_hyperopt_finish(experiment_name)

    logging.info("Finished hyperopt")

    return hyperopt_results
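
A minimal usage sketch for `hyperopt` above. The config contents, feature names, and dataset path are illustrative assumptions; `ordered_trials` and `to_dict()` mirror their use in the function body.

# Usage sketch only: the search space and dataset path are assumptions.
config = {
    "input_features": [{"name": "utterance", "type": "text"}],
    "output_features": [{"name": "intent", "type": "category"}],
    "hyperopt": {
        "parameters": {
            "trainer.learning_rate": {
                "space": "loguniform",
                "lower": 0.0001,
                "upper": 0.1,
            },
        },
        "goal": "minimize",
        "output_feature": "intent",
        "metric": "loss",
        "split": "validation",
        "executor": {"type": "ray", "num_samples": 4},
    },
}
results = hyperopt(config, dataset="train.csv", output_directory="results")
for trial in results.ordered_trials:
    print(trial.to_dict())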
Example No. 15
def test_hyperopt_executor_get_metric_score():
    executor = EXECUTORS[0]
    output_feature = "of_name"
    split = "test"

    train_stats = {
        "training": {
            output_feature: {
                "loss": [0.58760345, 1.5066891],
                "accuracy": [0.6666667, 0.33333334],
                "hits_at_k": [1.0, 1.0],
            },
            "combined": {
                "loss": [0.58760345, 1.5066891]
            },
        },
        "validation": {
            output_feature: {
                "loss": [0.30233705, 2.6505466],
                "accuracy": [1.0, 0.0],
                "hits_at_k": [1.0, 1.0]
            },
            "combined": {
                "loss": [0.30233705, 2.6505466]
            },
        },
        "test": {
            output_feature: {
                "loss": [1.0876318, 1.4353828],
                "accuracy": [0.7, 0.5],
                "hits_at_k": [1.0, 1.0]
            },
            "combined": {
                "loss": [1.0876318, 1.4353828]
            },
        },
    }

    eval_stats = {
        output_feature: {
            "loss": 1.4353828,
            "accuracy": 0.5,
            "hits_at_k": 1.0,
            "overall_stats": {
                "token_accuracy": 1.0,
                "avg_precision_macro": 1.0,
                "avg_recall_macro": 1.0,
                "avg_f1_score_macro": 1.0,
                "avg_precision_micro": 1.0,
                "avg_recall_micro": 1.0,
                "avg_f1_score_micro": 1.0,
                "avg_precision_weighted": 1.0,
                "avg_recall_weighted": 1.0,
                "avg_f1_score_weighted": 1.0,
                "kappa_score": 0.6,
            },
            "combined": {
                "loss": 1.4353828
            },
        }
    }

    metric = "loss"
    hyperopt_executor = get_build_hyperopt_executor(executor["type"])(
        None, output_feature, metric, split, **executor)
    score = hyperopt_executor.get_metric_score(train_stats, eval_stats)
    assert score == 1.0876318

    metric = "accuracy"
    hyperopt_executor = get_build_hyperopt_executor(executor["type"])(
        None, output_feature, metric, split, **executor)
    score = hyperopt_executor.get_metric_score(train_stats, eval_stats)
    assert score == 0.7

    metric = "overall_stats.kappa_score"
    hyperopt_executor = get_build_hyperopt_executor(executor["type"])(
        None, output_feature, metric, split, **executor)
    score = hyperopt_executor.get_metric_score(train_stats, eval_stats)
    assert score == 0.6
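
A hedged sketch of the lookup behavior the assertions above imply: plain metrics resolve to the best per-epoch value in `train_stats` for the chosen split, while dotted paths such as `"overall_stats.kappa_score"` fall back to a nested lookup in `eval_stats`. `get_metric_score_sketch` is a hypothetical helper, not the executor's actual implementation, and the minimize/maximize rule is an assumption.

# Sketch only: reproduces the behavior asserted above, assuming "loss"
# minimizes and every other metric maximizes; the real executor may differ.
def get_metric_score_sketch(train_stats, eval_stats, output_feature, metric, split):
    if "." in metric:
        # Dotted path: walk nested keys inside the eval stats.
        value = eval_stats[output_feature]
        for key in metric.split("."):
            value = value[key]
        return value
    # Plain metric: pick the best per-epoch value for the chosen split.
    history = train_stats[split][output_feature][metric]
    return min(history) if metric == "loss" else max(history)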
Example No. 16
def test_hyperopt_scheduler(scheduler,
                            csv_filename,
                            tmpdir,
                            ray_cluster,
                            validate_output_feature=False,
                            validation_metric=None):
    config, rel_path = _setup_ludwig_config(csv_filename)

    hyperopt_config = HYPEROPT_CONFIG.copy()

    # finalize hyperopt config settings
    if scheduler == "pb2":
        # set up the scheduler's hyperparam_bounds parameter
        min_lr = hyperopt_config["parameters"]["trainer.learning_rate"]["lower"]
        max_lr = hyperopt_config["parameters"]["trainer.learning_rate"]["upper"]
        hyperparam_bounds = {
            "trainer.learning_rate": [min_lr, max_lr],
        }
        hyperopt_config["executor"]["scheduler"] = {
            "type": scheduler,
            "hyperparam_bounds": hyperparam_bounds,
        }
    else:
        hyperopt_config["executor"]["scheduler"] = {
            "type": scheduler,
        }

    if validate_output_feature:
        hyperopt_config["output_feature"] = config[OUTPUT_FEATURES][0][NAME]
    if validation_metric:
        hyperopt_config["validation_metric"] = validation_metric

    update_hyperopt_params_with_defaults(hyperopt_config)

    parameters = hyperopt_config["parameters"]
    split = hyperopt_config["split"]
    output_feature = hyperopt_config["output_feature"]
    metric = hyperopt_config["metric"]
    goal = hyperopt_config["goal"]
    executor = hyperopt_config["executor"]
    search_alg = hyperopt_config["search_alg"]

    # TODO: Determine if we still need this if-then-else construct
    if search_alg["type"] in {""}:
        with pytest.raises(ImportError):
            get_build_hyperopt_executor(RAY)(parameters,
                                             output_feature,
                                             metric,
                                             goal,
                                             split,
                                             search_alg=search_alg,
                                             **executor)
    else:
        hyperopt_executor = get_build_hyperopt_executor(RAY)(
            parameters,
            output_feature,
            metric,
            goal,
            split,
            search_alg=search_alg,
            **executor)
        raytune_results = hyperopt_executor.execute(config,
                                                    dataset=rel_path,
                                                    output_directory=tmpdir)
        assert isinstance(raytune_results, RayTuneResults)
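
For context, a plausible shape for the `HYPEROPT_CONFIG` template these tests copy, reconstructed only from the keys the tests actually access (`parameters` with `trainer.learning_rate` bounds, `executor`, `search_alg`); the concrete values and remaining keys are assumptions.

# Assumed fixture shape, inferred from the key accesses in the tests above.
HYPEROPT_CONFIG = {
    "parameters": {
        "trainer.learning_rate": {
            "space": "loguniform",
            "lower": 0.0001,
            "upper": 0.01,
        },
    },
    "goal": "minimize",          # assumption
    "metric": "loss",            # assumption
    "split": "validation",       # assumption
    "executor": {"type": "ray", "num_samples": 2},
    "search_alg": {"type": "variant_generator"},
}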