Example #1
0
def test_features_features_list_exclude_transformed(
        data_classification_balanced, feature_descriptor, transformed_features
):
    """Check that .features(exclude_transformed=True) drops the transformed columns.

    When ``transformed_features`` is empty/None the full column list must be
    returned unchanged.
    """
    all_columns = ["AgeGroup", "bool", "Height", "Price", "Product", "Sex", "Target"]
    X, y = data_classification_balanced

    f = Features(X, y, feature_descriptor, transformed_features)
    actual_result = f.features(exclude_transformed=True)

    # Falsy transformed_features (e.g. empty list) means nothing is excluded.
    expected_result = (
        [col for col in all_columns if col not in transformed_features]
        if transformed_features
        else all_columns
    )

    assert actual_result == expected_result
Example #2
0
def test_features_features_list_no_target(
        data_classification_balanced, feature_descriptor_type, feature_descriptor, feature_descriptor_forced_categories
):
    """Testing if .features() returns correct values when drop_target = True (without Target feature name)."""
    expected = ["AgeGroup", "bool", "Height", "Price", "Product", "Sex"]
    X, y = data_classification_balanced

    # couldn't find a way to incorporate fixtures into @pytest.mark.parametrize
    if feature_descriptor_type == "normal":
        fd = feature_descriptor
    elif feature_descriptor_type == "forced":
        fd = feature_descriptor_forced_categories
    else:
        # Bare `raise` outside an except block raises an opaque
        # "RuntimeError: No active exception to re-raise"; fail with a
        # descriptive error instead so a bad parametrization is obvious.
        raise ValueError(f"unexpected feature_descriptor_type: {feature_descriptor_type!r}")

    f = Features(X, y, fd)
    actual = f.features(drop_target=True)

    assert actual == expected