Code example #1
def test_model_comparison_give_non_null_performance_with_regression_and_categorical_feature(
):
    # Given
    cross_validation_n_folds = 2
    features = pd.DataFrame({
        # Categorical string feature with one explicit missing value (None)
        "string_feature":
        list(
            np.random.choice(["Paris", "London", "Madrid", "Roma"],
                             n_samples - 1)) + [None],
    })
    comparison_dataset = ComparisonDataset(TaskName.REGRESSION, features,
                                           numerical_target,
                                           cross_validation_n_folds)

    # When
    # Small tuning budget so the test stays fast
    model_comparison = TunedModelComparison(comparison_dataset,
                                            max_parameters_to_test_in_tuning=5,
                                            early_stopping_patience=1)
    comparison = model_comparison.get_models_scores_and_training_time()

    # Then
    for model_name, performance_and_training_time in comparison.items():
        performance = performance_and_training_time[MODEL_SCORE]
        assert_that(~np.isnan(performance),
                    reason=f"Null performance value for model {model_name}")


def test_model_comparison_give_non_null_performance_with_classification():
    # Given
    features = pd.DataFrame(
        {"numeric_feature": np.random.normal(size=n_samples)})
    comparison_dataset = ComparisonDataset(TaskName.CLASSIFICATION, features,
                                           categorical_target,
                                           cross_validation_n_folds)

    # When
    comparison = ModelComparison(
        comparison_dataset).get_models_scores_and_training_time()

    # Then
    for model_name, performance_and_training_time in comparison.items():
        performance = performance_and_training_time[MODEL_SCORE]
        assert_that(~np.isnan(performance),
                    reason=f"Null performance value for model {model_name}")
Code example #3
def test_model_comparison_give_non_null_performance_with_regression_and_numerical_feature(
):
    # Given
    cross_validation_n_folds = 2
    features = pd.DataFrame(
        {"numeric_feature": np.random.normal(size=n_samples)})
    comparison_dataset = ComparisonDataset(TaskName.REGRESSION, features,
                                           numerical_target,
                                           cross_validation_n_folds)

    # When
    model_comparison = TunedModelComparison(comparison_dataset,
                                            max_parameters_to_test_in_tuning=5,
                                            early_stopping_patience=1)
    comparison = model_comparison.get_models_scores_and_training_time()

    # Then
    for model_name, performance_and_training_time in comparison.items():
        performance = performance_and_training_time[MODEL_SCORE]
        assert_that(~np.isnan(performance),
                    reason=f"Null performance value for model {model_name}")


def test_model_comparison_give_non_null_performance_and_categorical_feature():
    # Given
    features = pd.DataFrame({
        "string_feature":
        np.random.choice(["Paris", "London", "Madrid", "Roma"], n_samples),
        "numeric_feature":
        np.random.normal(size=n_samples)
    })
    comparison_dataset = ComparisonDataset(TaskName.REGRESSION, features,
                                           categorical_target,
                                           cross_validation_n_folds)

    # When
    comparison = ModelComparison(
        comparison_dataset).get_models_scores_and_training_time()

    # Then
    for model_name, performance_and_training_time in comparison.items():
        performance = performance_and_training_time[MODEL_SCORE]
        assert_that(~np.isnan(performance),
                    reason=f"Null performance value for model {model_name}")