Code example #1
def test_TargetEncoderRegressor(cv, noise_level):
    df = get_sample_df(100)
    df["cat_col"] = df["text_col"].apply(lambda s: s[0:3])
    np.random.seed(123)
    y = np.random.randn(100)

    encoder = TargetEncoderRegressor(noise_level=noise_level, cv=cv)
    encoder.fit(df, y)
    res = encoder.transform(df)

    assert encoder.get_feature_names() == ["float_col", "int_col", "text_col", "cat_col__target_mean"]
    assert list(res.columns) == ["float_col", "int_col", "text_col", "cat_col__target_mean"]
    assert res["cat_col__target_mean"].isnull().sum() == 0
    assert (res.index == df.index).all()
    assert encoder._columns_informations["input_columns"] == ["cat_col"]

    temp = pd.DataFrame({"cat_col": df["cat_col"], "cat_col__target_mean": res["cat_col__target_mean"]})
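    # each category should always receive the same encoded value (zero within-group std)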
    assert temp.groupby("cat_col")["cat_col__target_mean"].std().max() == 0

    encoder = TargetEncoderRegressor(noise_level=noise_level, cv=cv)
    res = encoder.fit_transform(df, y)

    assert encoder.get_feature_names() == ["float_col", "int_col", "text_col", "cat_col__target_mean"]
    assert list(res.columns) == ["float_col", "int_col", "text_col", "cat_col__target_mean"]
    assert res["cat_col__target_mean"].isnull().sum() == 0
    assert (res.index == df.index).all()
    assert encoder._columns_informations["input_columns"] == ["cat_col"]
Code example #2
def test_NumImputer_output_type():
    df = get_sample_df(100, seed=123)

    # with type float64
    df["float_col"].astype("float64")
    imp = NumImputer()
    Xenc = imp.fit_transform(df)

    assert Xenc.dtypes["float_col"] == df.dtypes["float_col"]

    # with type float32
    df2 = df.copy()
    df2["float_col"] = df2["float_col"].astype("float32")
    imp = NumImputer()
    Xenc = imp.fit_transform(df2)

    assert Xenc.dtypes["float_col"] == df2.dtypes["float_col"]

    # with type float64 and a missing value
    df["float_col"] = df["float_col"].astype("float64")
    df.loc[0, "float_col"] = np.nan

    imp = NumImputer()
    Xenc = imp.fit_transform(df)

    assert Xenc.dtypes["float_col"] == df.dtypes["float_col"]

    # with type float32 and a missing value
    df2 = df.copy()
    df2["float_col"] = df2["float_col"].astype("float32")
    df2.loc[0, "float_col"] = np.nan
    imp = NumImputer()
    Xenc = imp.fit_transform(df2)

    assert Xenc.dtypes["float_col"] == df2.dtypes["float_col"]
Code example #3
File: test_categories.py Project: mabdelsayed/aikit
def test_NumericalEncoder_default_and_null_values():
    np.random.seed(123)
    df = get_sample_df(100, seed=123)
    df.index = np.arange(len(df))

    df["cat_col_1"] = df["text_col"].apply(lambda s: s[0:3])
    df.loc[0:10, "cat_col_1"] = None

    # __default__ (rare modalities) and __null__ (missing values) categories are created
    encoder = NumericalEncoder(encoding_type="num",
                               min_modalities_number=2,
                               max_cum_proba=0.8,
                               max_na_percentage=0)

    res = encoder.fit_transform(df)
    assert "__default__" in encoder.model.variable_modality_mapping[
        "cat_col_1"]
    assert "__null__" in encoder.model.variable_modality_mapping["cat_col_1"]

    df["cat_col_1"] = "zzz"  # Never seen value
    res = encoder.transform(df)
    assert res["cat_col_1"].unique(
    )[0] == encoder.model.variable_modality_mapping["cat_col_1"]["__default__"]

    df["cat_col_1"] = None
    res = encoder.transform(df)
    assert res["cat_col_1"].unique(
    )[0] == encoder.model.variable_modality_mapping["cat_col_1"]["__null__"]
Code example #4
File: test_categories.py Project: mabdelsayed/aikit
def test_NumericalEncoder_nothing_to_do():
    df = get_sample_df(100)[["float_col", "int_col"]]

    encoder = NumericalEncoder()
    df_transformed = encoder.fit_transform(df)

    assert (df.values == df_transformed.values).all().all()
    assert (df.dtypes == df_transformed.dtypes).all()
Code example #5
def test_NumImputer_mixtype():
    df = get_sample_df(100, seed=123)
    df.loc[[2, 10, 50], "float_col"] = ["string", "string", "string"]

    imp = _NumImputer()

    Xenc = imp.fit_transform(df)

    assert _index_with_number(Xenc["float_col"]).all()
    assert not (Xenc.dtypes == "O").any()
Code example #6
File: test_categories.py Project: mabdelsayed/aikit
def test_NumericalEncoder_int_as_cat():
    df = get_sample_df(100)[["float_col", "int_col"]]
    df["int_cat"] = np.random.choice((0, 1, 2), 100)
    df["int_cat"] = df["int_cat"].astype("category")

    encoder = NumericalEncoder()
    df_transformed = encoder.fit_transform(df)

    assert "int_cat" not in df_transformed.columns
    assert df["int_cat"].nunique() + 2 == df_transformed.shape[1]
    assert df.loc[df["int_cat"] == 1,
                  "int_cat"].shape[0] == (df["int_cat"] == 1).sum()
Code example #7
def test_NumericalEncoder_int_as_cat():
    df = get_sample_df(100)[['float_col', 'int_col']]
    df['int_cat'] = np.random.choice((0, 1, 2), 100)
    df['int_cat'] = df['int_cat'].astype('category')

    encoder = NumericalEncoder()
    df_transformed = encoder.fit_transform(df)

    assert 'int_cat' not in df_transformed.columns
    assert df['int_cat'].nunique() + 2 == df_transformed.shape[1]
    assert df.loc[df['int_cat'] == 1, 'int_cat'].shape[0] == (df['int_cat'] == 1).sum()
Code example #8
def test_Word2VecVectorizer():
    df = get_sample_df(size=200, seed=123)

    ### default mode : 'drop' ##"
    vect = Word2VecVectorizer(columns_to_use=["text_col"], window=100)
    vect.fit(df)

    Xres = vect.transform(df)

    assert Xres.shape == (200, 100)
    assert not pd.isnull(Xres).any().any()
    assert vect.get_feature_names() == ["text_col__EMB__%d" % i for i in range(100)]
    assert list(Xres.columns) == vect.get_feature_names()

    ### keep mode ###
    vect = Word2VecVectorizer(columns_to_use=["text_col"],
                              window=100,
                              drop_unused_columns=False,
                              drop_used_columns=False)
    vect.fit(df)

    Xres = vect.transform(df)

    assert Xres.shape == (200, 100 + df.shape[1])
    assert not pd.isnull(Xres).any().any()
    assert vect.get_feature_names() == list(df.columns) + ["text_col__EMB__%d" % i for i in range(100)]
    assert list(Xres.columns) == vect.get_feature_names()

    cols = [c for c in list(df.columns) if c in list(Xres.columns)]
    assert (Xres.loc[:, cols] == df.loc[:, cols]).all().all()

    ### delta mode ###
    vect = Word2VecVectorizer(columns_to_use=["text_col"],
                              window=100,
                              drop_unused_columns=False,
                              drop_used_columns=True)
    vect.fit(df)

    Xres = vect.transform(df)

    assert Xres.shape == (200, 100 + df.shape[1] - 1)
    assert not pd.isnull(Xres).any().any()
    assert vect.get_feature_names() == [c for c in df.columns if c != "text_col"] + ["text_col__EMB__%d" % i for i in range(100)]
    assert list(Xres.columns) == vect.get_feature_names()

    cols = [c for c in list(df.columns) if c in list(Xres.columns)]
    assert (Xres.loc[:, cols] == df.loc[:, cols]).all().all()
Code example #9
def test_guess_type_of_variable():
    df = get_sample_df(100)
    df["cat_col_1"] = df["text_col"].apply(lambda s: s[0:3])

    assert guess_type_of_variable(df["float_col"]) == "NUM"
    assert guess_type_of_variable(df["int_col"]) == "NUM"
    assert guess_type_of_variable(df["text_col"]) == "TEXT"
    assert guess_type_of_variable(df["cat_col_1"]) == "CAT"

    df_with_cat = df.copy()
    df_with_cat["cat_col_1"] = df_with_cat["cat_col_1"].astype("category")
    assert np.all([guess_type_of_variable(df[col]) == guess_type_of_variable(df_with_cat[col]) for col in df.columns])
    assert (df.values == df_with_cat.values).all()
Code example #10
def test_target_encoder_with_cat_dtypes():
    np.random.seed(123)
    X = get_sample_df(100)
    X["cat_col_1"] = X["text_col"].apply(lambda s: s[0:3])
    y = 1 * (np.random.randn(100) > 0)

    encoder = TargetEncoderClassifier()
    X_no_cat_dtype_encoded = encoder.fit_transform(X, y)
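    # re-encoding the same column declared with a "category" dtype should give an identical result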

    X_cat_dtype = X.copy()
    X_cat_dtype["cat_col_1"] = X_cat_dtype["cat_col_1"].astype("category")
    X_with_cat_dtype_encoded = encoder.fit_transform(X_cat_dtype, y)

    assert (X_with_cat_dtype_encoded == X_no_cat_dtype_encoded).all().all()
    assert (X_with_cat_dtype_encoded.dtypes == X_no_cat_dtype_encoded.dtypes).all()
Code example #11
File: test_categories.py Project: mabdelsayed/aikit
def test_NumericalEncoder_dummy_output_dtype():
    np.random.seed(123)
    df = get_sample_df(100, seed=123)
    ind = np.arange(len(df))
    df.index = ind

    df["cat_col_1"] = df["text_col"].apply(lambda s: s[0:3])
    df["cat_col_2"] = df["text_col"].apply(lambda s: s[3:6])

    encoder = NumericalEncoder(encoding_type="dummy")
    encoder.fit(df)
    res = encoder.transform(df)

    assert (res.dtypes[res.columns.str.startswith("cat_col_")] == "int32").all()  # check default encoding dtype = int32
Code example #12
File: test_categories.py Project: mabdelsayed/aikit
def test_NumericalEncoder_encode_int():
    df = get_sample_df(100)[["float_col"]]
    df["int_col"] = np.random.choice((0, 1, 2), 100)

    encoder = NumericalEncoder(columns_to_use=["int_col"])
    df_transformed = encoder.fit_transform(df)

    df_copy = df.copy()
    df_copy["int_col"] = df_copy["int_col"].astype("category")

    encoder_2 = NumericalEncoder()
    df_copy_transformed = encoder_2.fit_transform(df_copy)

    assert (df_transformed.values == df_copy_transformed.values).all().all()
    assert (df_transformed.dtypes == df_copy_transformed.dtypes).all()
    assert df_transformed.shape[1] == 1 + df["int_col"].nunique()
Code example #13
def test_NumericalEncoder_with_cat_dtypes():
    X = get_sample_df(100)
    X["cat_col_1"] = X["text_col"].apply(lambda s: s[0:3])
    np.random.seed(123)
    y = 1 * (np.random.randn(100) > 0)

    encoder = NumericalEncoder()
    X_no_cat_dtype_encoded = encoder.fit_transform(X)

    X_cat_dtype = X.copy()
    X_cat_dtype["cat_col_1"] = X_cat_dtype["cat_col_1"].astype("category")
    X_with_cat_dtype_encoded = encoder.fit_transform(X_cat_dtype)

    assert (X_with_cat_dtype_encoded == X_no_cat_dtype_encoded).all().all()
    assert (X_with_cat_dtype_encoded.dtypes == X_no_cat_dtype_encoded.dtypes).all()
Code example #14
File: test_categories.py Project: mabdelsayed/aikit
def test_NumericalEncoder_with_cat_dtypes():
    np.random.seed(123)
    X = get_sample_df(100)
    X["cat_col_1"] = X["text_col"].apply(lambda s: s[0:3])

    encoder = NumericalEncoder(columns_to_use=["cat_col_1"])
    X_no_cat_dtype_encoded = encoder.fit_transform(X)

    X_cat_dtype = X.copy()
    X_cat_dtype["cat_col_1"] = X_cat_dtype["cat_col_1"].astype("category")
    X_with_cat_dtype_encoded = encoder.fit_transform(X_cat_dtype)

    assert X_with_cat_dtype_encoded.shape == X_no_cat_dtype_encoded.shape
    assert (X_with_cat_dtype_encoded == X_no_cat_dtype_encoded).all().all()
    assert (X_with_cat_dtype_encoded.dtypes == X_no_cat_dtype_encoded.dtypes).all()
Code example #15
File: test_categories.py Project: mabdelsayed/aikit
def test_NumericalEncoder_num_output_dtype():
    np.random.seed(123)
    df = get_sample_df(100, seed=123)
    ind = np.arange(len(df))
    np.random.shuffle(ind)
    df.index = ind

    df["cat_col_1"] = df["text_col"].apply(lambda s: s[0:3])
    df["cat_col_2"] = df["text_col"].apply(lambda s: s[3:6])

    encoder = NumericalEncoder(encoding_type="num")
    encoder.fit(df)
    res = encoder.transform(df)

    assert res.dtypes["cat_col_1"] == "int32"
    assert res.dtypes["cat_col_2"] == "int32"
Code example #16
File: test_base.py Project: mabdelsayed/aikit
def test_NumImputer_is_picklable():
    df = get_sample_df(100, seed=123)
    df.loc[[2, 10, 50], "float_col"] = np.nan

    imputer = NumImputer()
    _ = imputer.fit_transform(df)

    pickled_imputer = pickle.dumps(imputer)

    unpickled_imputer = pickle.loads(pickled_imputer)

    assert type(unpickled_imputer) == type(imputer)
    X1 = imputer.transform(df)
    X2 = unpickled_imputer.transform(df)

    assert X1.shape == X2.shape
    assert (X1 == X2).all().all()
Code example #17
File: test_text.py Project: gheeraej/aikit
def test_CountVectorizerWrapper_on_Serie():

    df = get_sample_df(size=100, seed=123)

    X = df["text_col"]
    vect = CountVectorizerWrapper()

    Xres = vect.fit_transform(X)

    assert len(Xres.shape) == 2
    assert Xres.shape[0] == X.shape[0]
    assert Xres.shape[1] == len(vect.get_feature_names())

    Xres = vect.transform(X)
    assert len(Xres.shape) == 2
    assert Xres.shape[0] == X.shape[0]
    assert Xres.shape[1] == len(vect.get_feature_names())
Code example #18
def test_TargetEncoderClassifier_is_picklable():
    df = get_sample_df(100)
    df["cat_col"] = df["text_col"].apply(lambda s: s[0:3])
    np.random.seed(123)
    y = 1 * (np.random.randn(100) > 0)

    encoder = TargetEncoderClassifier(cv=2)
    encoder.fit(df, y)

    pickled_encoder = pickle.dumps(encoder)
    unpickled_encoder = pickle.loads(pickled_encoder)
    
    assert type(unpickled_encoder) == type(encoder)
    X1 = encoder.transform(df)
    X2 = unpickled_encoder.transform(df)
    
    assert X1.shape == X2.shape
    assert (X1 == X2).all().all()
Code example #19
def test_NumericalEncoder_num():

    ######################
    ### Numerical Mode ###
    ######################

    np.random.seed(123)
    df = get_sample_df(100, seed=123)
    ind = np.arange(len(df))
    np.random.shuffle(ind)
    df.index = ind

    df["cat_col_1"] = df["text_col"].apply(lambda s: s[0:3])
    df["cat_col_2"] = df["text_col"].apply(lambda s: s[3:6])

    encoder = NumericalEncoder(encoding_type="num")
    encoder.fit(df)
    res = encoder.transform(df)

    assert res.shape == df.shape
    assert (res.index == df.index).all()

    assert encoder.get_feature_names() == encoder.model._feature_names
    assert encoder.get_feature_names() == list(res.columns)

    df2 = df.copy()
    df2.loc[0, "cat_col_1"] = "something-new"
    df2.loc[1, "cat_col_2"] = None  # Something None

    res2 = encoder.transform(df2)
    assert res2.loc[0, "cat_col_1"] == -1
    assert res2.loc[1, "cat_col_2"] == -1

    df_with_none = df.copy()
    df_with_none["cat_col_3"] = df_with_none["cat_col_1"]
    df_with_none.loc[list(range(25)), "cat_col_3"] = None

    encoder2 = NumericalEncoder(encoding_type="num")
    res2 = encoder2.fit_transform(df_with_none)

    assert (df_with_none["cat_col_3"].isnull() == (
        res2["cat_col_3"] == 0)).all()
Code example #20
def test_FeaturesSelectorClassifier_get_feature_names():

    vect = CountVectorizer(analyzer="char", ngram_range=(1, 3))

    df = get_sample_df(100, seed=123)
    xx = vect.fit_transform(df["text_col"])
    y = 1 * (np.random.rand(xx.shape[0]) > 0.5)

    sel = FeaturesSelectorClassifier(n_components=10)
    sel.fit_transform(xx, y)

    ff0 = vect.get_feature_names()
    ff1 = sel.get_feature_names()

    assert len(diff(ff1, list(range(xx.shape[1])))) == 0

    ff2 = sel.get_feature_names(input_features=ff0)
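    # ff2 should be the selected features expressed with their original (vectorizer) names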

    assert len(ff1) == len(ff2)

    for f1, f2 in zip(ff1, ff2):
        assert ff0[f1] == f2
Code example #21
File: test_text.py Project: gheeraej/aikit
def test_CountVectorizerWrapper():

    df = get_sample_df(size=100, seed=123)

    vect = CountVectorizerWrapper(columns_to_use=["text_col"])
    vect.fit(df)

    cols = vect.get_feature_names()
    for c in cols:
        assert c.startswith("text_col__BAG")

    vect = CountVectorizerWrapper(columns_to_use=[2])
    vect.fit(df)

    cols = vect.get_feature_names()
    for c in cols:
        assert c.startswith("text_col__BAG")

    X = df.values
    vect = CountVectorizerWrapper(columns_to_use=[2])
    vect.fit(X)
    cols = vect.get_feature_names()
    for c in cols:
        assert c.startswith("2__BAG")
Code example #22
File: test_base.py Project: mabdelsayed/aikit
def test_TruncatedSVDWrapper():

    df = get_sample_df(100, seed=123)
    cols = []
    for j in range(10):
        cols.append("num_col_%d" % j)
        df["num_col_%d" % j] = np.random.randn(df.shape[0])

    # 1) regular case : drop other columns
    svd = TruncatedSVDWrapper(n_components=5, columns_to_use=cols)
    res1 = svd.fit_transform(df)

    assert res1.shape == (100, 5)
    assert get_type(res1) == DataTypes.DataFrame
    assert list(res1.columns) == ["SVD__%d" % j for j in range(5)]
    assert not res1.isnull().any().any()
    assert svd.get_feature_names() == list(res1.columns)

    # 2) we keep the original columns as well
    svd = TruncatedSVDWrapper(n_components=5,
                              columns_to_use=cols,
                              drop_used_columns=False,
                              drop_unused_columns=False)
    res2 = svd.fit_transform(df)

    assert res2.shape == (100, 5 + df.shape[1])

    assert get_type(res2) == DataTypes.DataFrame
    assert list(res2.columns) == list(df.columns) + ["SVD__%d" % j for j in range(5)]
    assert svd.get_feature_names() == list(df.columns) + ["SVD__%d" % j for j in range(5)]
    assert not res2.isnull().any().any()
    assert (res2.loc[:, list(df.columns)] == df).all().all()

    # 3) keep only the untouched columns
    svd = TruncatedSVDWrapper(n_components=5,
                              columns_to_use=cols,
                              drop_used_columns=True,
                              drop_unused_columns=False)
    res3 = svd.fit_transform(df)
    assert res3.shape == (100, 3 + 5)
    assert list(res3.columns) == ["float_col", "int_col", "text_col"
                                  ] + ["SVD__%d" % j for j in range(5)]
    assert svd.get_feature_names() == ["float_col", "int_col", "text_col"
                                       ] + ["SVD__%d" % j for j in range(5)]
    assert ((res3.loc[:, ["float_col", "int_col", "text_col"]] ==
             df.loc[:, ["float_col", "int_col", "text_col"]]).all().all())

    ###################################
    ###  same thing but with regex  ###
    ###################################

    # 1) Regular case : 'drop' other columns
    svd = TruncatedSVDWrapper(n_components=5,
                              columns_to_use=["num_col_"],
                              regex_match=True)
    res1 = svd.fit_transform(df)
    assert res1.shape == (100, 5)
    assert get_type(res1) == DataTypes.DataFrame
    assert list(res1.columns) == ["SVD__%d" % j for j in range(5)]
    assert not res1.isnull().any().any()
    assert svd.get_feature_names() == list(res1.columns)

    # 2) Keep original columns
    svd = TruncatedSVDWrapper(
        n_components=5,
        columns_to_use=["num_col_"],
        drop_used_columns=False,
        drop_unused_columns=False,
        regex_match=True,
    )
    res2 = svd.fit_transform(df)

    assert res2.shape == (100, 5 + df.shape[1])

    assert get_type(res2) == DataTypes.DataFrame
    assert list(res2.columns) == list(df.columns) + ["SVD__%d" % j for j in range(5)]
    assert svd.get_feature_names() == list(df.columns) + ["SVD__%d" % j for j in range(5)]
    assert not res2.isnull().any().any()
    assert (res2.loc[:, list(df.columns)] == df).all().all()

    # 3) Keep only the untouched columns
    svd = TruncatedSVDWrapper(n_components=5,
                              columns_to_use=["num_col_"],
                              drop_used_columns=True,
                              drop_unused_columns=False,
                              regex_match=True)
    res3 = svd.fit_transform(df)
    assert res3.shape == (100, 3 + 5)
    assert list(res3.columns) == ["float_col", "int_col", "text_col"
                                  ] + ["SVD__%d" % j for j in range(5)]
    assert svd.get_feature_names() == ["float_col", "int_col", "text_col"
                                       ] + ["SVD__%d" % j for j in range(5)]
    assert ((res3.loc[:, ["float_col", "int_col", "text_col"]] ==
             df.loc[:, ["float_col", "int_col", "text_col"]]).all().all())

    ### Delta mode with numpy input ###
    xx = df.values
    columns_to_use = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
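    # positional indices of the 10 generated num_col_* columns in the numpy array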
    svd = TruncatedSVDWrapper(n_components=5,
                              columns_to_use=columns_to_use,
                              drop_used_columns=True,
                              drop_unused_columns=False)
    res4 = svd.fit_transform(xx)
    assert list(res4.columns) == [0, 1, 2] + ["SVD__%d" % i for i in range(5)]
    assert svd.get_feature_names() == [0, 1, 2] + ["SVD__%d" % i for i in range(5)]

    input_features = ["COL_%d" % i for i in range(xx.shape[1])]
    assert svd.get_feature_names(input_features) == ["COL_0", "COL_1", "COL_2"] + ["SVD__%d" % i for i in range(5)]

    # Keep
    svd = TruncatedSVDWrapper(n_components=5,
                              columns_to_use=columns_to_use,
                              drop_used_columns=False,
                              drop_unused_columns=False)
    res2 = svd.fit_transform(xx)
    assert list(res2.columns) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] + ["SVD__%d" % i for i in range(5)]
    assert svd.get_feature_names() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] + ["SVD__%d" % i for i in range(5)]
    assert svd.get_feature_names(input_features) == input_features + ["SVD__%d" % i for i in range(5)]
Code example #23
def test_TargetEncoderClassifier(cv, noise_level):
    df = get_sample_df(100)
    df["cat_col"] = df["text_col"].apply(lambda s: s[0:3])

    np.random.seed(123)
    y = 1 * (np.random.randn(100) > 0)

    encoder = TargetEncoderClassifier(noise_level=noise_level, cv=cv)
    encoder.fit(df, y)
    res = encoder.transform(df)

    assert encoder.get_feature_names() == ["float_col", "int_col", "text_col", "cat_col__target_1"]
    assert list(res.columns) == ["float_col", "int_col", "text_col", "cat_col__target_1"]
    assert res["cat_col__target_1"].isnull().sum() == 0
    assert res["cat_col__target_1"].isnull().max() <= 1
    assert res["cat_col__target_1"].isnull().min() >= 0

    temp = pd.DataFrame({"cat_col": df["cat_col"], "cat_col__target_1": res["cat_col__target_1"]})
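    # each category should always receive the same encoded value (zero within-group std)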
    assert temp.groupby("cat_col")["cat_col__target_1"].std().max() == 0

    assert (res.index == df.index).all()
    assert encoder._columns_informations["input_columns"] == ["cat_col"]

    encoder = TargetEncoderClassifier(noise_level=noise_level, cv=cv)
    res = encoder.fit_transform(df, y)

    assert encoder.get_feature_names() == ["float_col", "int_col", "text_col", "cat_col__target_1"]
    assert list(res.columns) == ["float_col", "int_col", "text_col", "cat_col__target_1"]

    assert res["cat_col__target_1"].isnull().sum() == 0
    assert res["cat_col__target_1"].isnull().max() <= 1
    assert res["cat_col__target_1"].isnull().min() >= 0

    assert (res.index == df.index).all()
    assert encoder._columns_informations["input_columns"] == ["cat_col"]

    np.random.seed(123)
    y = np.array(["aa", "bb", "cc"])[np.random.randint(0, 3, size=100)]

    encoder = TargetEncoderClassifier(noise_level=noise_level, cv=cv)
    encoder.fit(df, y)
    res = encoder.transform(df)

    assert encoder.get_feature_names() == [
        "float_col",
        "int_col",
        "text_col",
        "cat_col__target_aa",
        "cat_col__target_bb",
        "cat_col__target_cc",
    ]
    assert list(res.columns) == [
        "float_col",
        "int_col",
        "text_col",
        "cat_col__target_aa",
        "cat_col__target_bb",
        "cat_col__target_cc",
    ]

    for col in ("cat_col__target_aa", "cat_col__target_bb", "cat_col__target_cc"):
        assert res[col].isnull().sum() == 0
        assert res[col].max() <= 1
        assert res[col].min() >= 0

        temp = pd.DataFrame({"cat_col": df["cat_col"], col: res[col]})
        assert temp.groupby("cat_col")[col].std().max() == 0

    assert encoder._columns_informations["input_columns"] == ["cat_col"]
    assert (res.index == df.index).all()

    encoder = TargetEncoderClassifier(noise_level=noise_level, cv=cv)
    res = encoder.fit_transform(df, y)

    assert encoder.get_feature_names() == [
        "float_col",
        "int_col",
        "text_col",
        "cat_col__target_aa",
        "cat_col__target_bb",
        "cat_col__target_cc",
    ]
    assert list(res.columns) == [
        "float_col",
        "int_col",
        "text_col",
        "cat_col__target_aa",
        "cat_col__target_bb",
        "cat_col__target_cc",
    ]
    for col in ("cat_col__target_aa", "cat_col__target_bb", "cat_col__target_cc"):
        assert res[col].isnull().sum() == 0
        assert res[col].max() <= 1
        assert res[col].min() >= 0
    assert (res.index == df.index).all()
    assert encoder._columns_informations["input_columns"] == ["cat_col"]