Example 1
def get_single_encoder(encoder_name: str, cat_cols: list):
    """
    Get an encoder by its name
    :param encoder_name: Name of the desired encoder
    :param cat_cols: Categorical columns to encode
    :return: Categorical encoder
    """
    if encoder_name == "FrequencyEncoder":
        encoder = FrequencyEncoder(cols=cat_cols)

    if encoder_name == "WOEEncoder":
        encoder = WOEEncoder(cols=cat_cols)

    if encoder_name == "TargetEncoder":
        encoder = TargetEncoder(cols=cat_cols)

    if encoder_name == "SumEncoder":
        encoder = SumEncoder(cols=cat_cols)

    if encoder_name == "MEstimateEncoder":
        encoder = MEstimateEncoder(cols=cat_cols)

    if encoder_name == "LeaveOneOutEncoder":
        encoder = LeaveOneOutEncoder(cols=cat_cols)

    if encoder_name == "HelmertEncoder":
        encoder = HelmertEncoder(cols=cat_cols)

    if encoder_name == "BackwardDifferenceEncoder":
        encoder = BackwardDifferenceEncoder(cols=cat_cols)

    if encoder_name == "JamesSteinEncoder":
        encoder = JamesSteinEncoder(cols=cat_cols)

    if encoder_name == "OrdinalEncoder":
        encoder = OrdinalEncoder(cols=cat_cols)

    if encoder_name == "CatBoostEncoder":
        encoder = CatBoostEncoder(cols=cat_cols)

    if encoder_name == "MEstimateEncoder":
        encoder = MEstimateEncoder(cols=cat_cols)
    if encoder_name == "OneHotEncoder":
        encoder = OneHotEncoder(cols=cat_cols)
    if encoder is None:
        raise NotImplementedError("To be implemented")
    return encoder
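
Since the if-chain above repeats the same pattern for every encoder, here is a compact registry-based sketch of the same lookup. It assumes the listed classes come from category_encoders; FrequencyEncoder is left out because it is not part of that package (it appears to be project-local).

from category_encoders import (
    BackwardDifferenceEncoder, CatBoostEncoder, HelmertEncoder,
    JamesSteinEncoder, LeaveOneOutEncoder, MEstimateEncoder,
    OneHotEncoder, OrdinalEncoder, SumEncoder, TargetEncoder, WOEEncoder,
)

# Name -> class registry; every entry shares the (cols=...) constructor.
ENCODERS = {
    cls.__name__: cls
    for cls in (
        BackwardDifferenceEncoder, CatBoostEncoder, HelmertEncoder,
        JamesSteinEncoder, LeaveOneOutEncoder, MEstimateEncoder,
        OneHotEncoder, OrdinalEncoder, SumEncoder, TargetEncoder, WOEEncoder,
    )
}

def get_single_encoder(encoder_name: str, cat_cols: list):
    """Instantiate the encoder registered under encoder_name."""
    try:
        return ENCODERS[encoder_name](cols=cat_cols)
    except KeyError:
        raise NotImplementedError(f"Encoder '{encoder_name}' is not supported")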
Example 2
    def mestimate_encoder(self, df, configger):
        """

        :param df: the train dataset.
        :param configger: the JSON string of configger settings; the parameters are:
            verbose: int
                integer indicating verbosity of the output. 0 for none.
            cols: list
                a list of columns to encode, if None, all string columns will be encoded.
            drop_invariant: bool
                boolean for whether or not to drop encoded columns with 0 variance.
            return_df: bool
                boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array).
            handle_missing: str
                options are 'return_nan', 'error' and 'value', defaults to 'value', which returns the prior probability.
            handle_unknown: str
                options are 'return_nan', 'error' and 'value', defaults to 'value', which returns the prior probability.
            random_state: int, RandomState instance or None
                random seed used when ``randomized`` is True.
            randomized: bool,
                adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched).
            sigma: float
                standard deviation (spread or "width") of the normal distribution.
            m: float
                this is the "m" in the m-probability estimate. Higher value of m results into stronger shrinking.
                M is non-negative.

        :return: the transform result
        """
        X, y, encode_col = self.get_Xy(df, configger)

        drop_invariant = set_default_vale("drop_invariant", configger, False, is_bool=True)
        handle_missing = set_default_vale("handle_missing", configger, "value")
        handle_unknown = set_default_vale("handle_unknown", configger, "value")
        random_state = set_default_vale("random_state", configger, None)
        randomized = set_default_vale("randomized", configger, False, is_bool=True)
        sigma = set_default_vale("sigma", configger, 0.05)
        m = set_default_vale("m", configger, 1.0)

        encoder = MEstimateEncoder(verbose=1, cols=encode_col, drop_invariant=drop_invariant, return_df=True,
                                   handle_unknown=handle_unknown, handle_missing=handle_missing,
                                   random_state=random_state, randomized=randomized, sigma=sigma, m=m)

        res = encoder.fit_transform(X, y)

        return res
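
For reference, a minimal sketch of the equivalent direct category_encoders call, without the configger indirection (the toy column and target are made up for illustration):

import pandas as pd
from category_encoders import MEstimateEncoder

X = pd.DataFrame({"city": ["a", "a", "b", "b", "c"]})
y = pd.Series([1, 0, 1, 1, 0])

encoder = MEstimateEncoder(
    verbose=1, cols=["city"], drop_invariant=False, return_df=True,
    handle_unknown="value", handle_missing="value",
    random_state=None, randomized=False, sigma=0.05, m=1.0,
)
res = encoder.fit_transform(X, y)  # "city" becomes a smoothed target mean
print(res)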
Example 3
def get_single_encoder(encoder_name: str, cat_cols: list):
    if encoder_name == "FrequencyEncoder":
        encoder = FrequencyEncoder(cols=cat_cols)

    if encoder_name == "WOEEncoder":
        encoder = WOEEncoder(cols=cat_cols)

    if encoder_name == "TargetEncoder":
        encoder = TargetEncoder(cols=cat_cols)

    if encoder_name == "SumEncoder":
        encoder = SumEncoder(cols=cat_cols)

    if encoder_name == "MEstimateEncoder":
        encoder = MEstimateEncoder(cols=cat_cols)

    if encoder_name == "LeaveOneOutEncoder":
        encoder = LeaveOneOutEncoder(cols=cat_cols)

    if encoder_name == "HelmertEncoder":
        encoder = HelmertEncoder(cols=cat_cols)

    if encoder_name == "BackwardDifferenceEncoder":
        encoder = BackwardDifferenceEncoder(cols=cat_cols)

    if encoder_name == "JamesSteinEncoder":
        encoder = JamesSteinEncoder(cols=cat_cols)

    if encoder_name == "OrdinalEncoder":
        encoder = OrdinalEncoder(cols=cat_cols)

    if encoder_name == "CatBoostEncoder":
        encoder = CatBoostEncoder(cols=cat_cols)

    if encoder_name == "MEstimateEncoder":
        encoder = MEstimateEncoder(cols=cat_cols)

    if encoder_name == 'OneHotEncoder':
        encoder = OneHotEncoder(cols=cat_cols)

    # assert encoder is not None
    return encoder
Example 4
    # Fillna
    df.fillna(0, inplace=True)

    print(df.shape)
    # Train-Test Split
    X_tr, X_te, y_tr, y_te = train_test_split(df.drop(columns=target[i]),
                                              df[target[i]])

    results_dict[data_i] = {}

    # Elastic Net + target encoding
    scaler = StandardScaler()
    lm = ElasticNet()
    lgbm = LGBMRegressor(verbose=-1)
    te = MEstimateEncoder(cols=cols_enc[i])
    pe = QuantileEncoder(cols=cols_enc[i], quantile=0.50)
    se = SummaryEncoder(cols=cols_enc[i], quantiles=[0.25, 0.50, 0.75], m=100)

    encoders = {"te": te, "pe": pe}
    learners = {"lm": lm}

    for learner_name, learner in learners.items():

        results_dict[data_i][learner_name] = {}

        for encoder_name, encoder in encoders.items():
            results_dict[data_i][learner_name][encoder_name] = {}

            pipe = Pipeline([
                ("enc", encoder),
                ("scaler", scaler),
                (learner_name, learner),
            ])
Example 5
class NestedTargetEncoder(BaseEstimator, util.TransformerWithTargetMixin):
    """Estimate of likelihood for nested data.

    This is a generalization of the m-probability estimate. The main difference
    is that, instead of a single global prior, it can use a more fine-grained
    prior. This only works for nested data. For instance, suppose we have
    individuals who live in counties, which in turn sit inside states. To
    estimate the likelihood encoding for a county, it is better to use the
    state's estimate as the prior rather than the global one.

    Parameters
    ----------

    verbose: int
        integer indicating verbosity of the output. 0 for none.
    cols: list
        a list of columns to encode, if None, all string columns will be encoded.
    drop_invariant: bool
        boolean for whether or not to drop encoded columns with 0 variance.
    feature_mapping: dict
        dictionary representing the child - parent relationship. keys are children.
    return_df: bool
        boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array).
    handle_missing: str
        options are 'return_nan', 'error' and 'value', defaults to 'value', which returns the prior probability.
    handle_unknown: str
        options are 'return_nan', 'error' and 'value', defaults to 'value', which returns the prior probability.
    randomized: bool,
        adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched).
    sigma: float
        standard deviation (spread or "width") of the normal distribution.
    m_prior: float
        this is the "m" in the m-probability estimate for the global mean. Higher value of m results into stronger shrinking.
        It is used whenever we estimate a likelihood using the global mean as a prior.
        M is non-negative.
    m_parent: float
        this is the "m" in the m-probability estimate. Higher value of m results into stronger shrinking.
        It is used whenever we estimate a likelihood using the parent mean as a prior.
        M is non-negative.

    Example
    -------
    >>> from sktools import NestedTargetEncoder
    >>> import pandas as pd
    >>> X = pd.DataFrame(
    ...     {
    ...         "child": ["a", "a", "b", "b", "b", "c", "c", "d", "d", "d"],
    ...         "parent": ["e", "e", "e", "e", "e", "f", "f", "f", "f", "f"],
    ...     }
    ... )
    >>> y = pd.Series([1, 2, 3, 1, 2, 4, 4, 5, 4, 4.5])
    >>> ne = NestedTargetEncoder(feature_mapping={"child": "parent"}, m_prior=0)
    >>> ne.fit_transform(X, y)
          child  parent
    0  2.016667     1.8
    1  2.016667     1.8
    2  2.262500     1.8
    3  2.262500     1.8
    4  2.262500     1.8
    5  3.683333     4.3
    6  3.683333     4.3
    7  4.137500     4.3
    8  4.137500     4.3
    9  4.137500     4.3

    References
    ----------

    .. [1] Additive smoothing, from https://en.wikipedia.org/wiki/Additive_smoothing#Generalized_to_the_case_of_known_incidence_rates

    """
    def __init__(
        self,
        verbose=0,
        cols=None,
        drop_invariant=False,
        feature_mapping={},
        return_df=True,
        handle_unknown="value",
        handle_missing="value",
        random_state=None,
        randomized=False,
        sigma=0.05,
        m_prior=1.0,
        m_parent=1.0,
    ):
        self.verbose = verbose
        self.return_df = return_df
        self.drop_invariant = drop_invariant
        self.drop_cols = []
        self.cols = cols
        self.feature_mapping = feature_mapping
        self.ordinal_encoder = None
        self._dim = None
        self.mapping = None
        self.handle_unknown = handle_unknown
        self.handle_missing = handle_missing
        self._sum = None
        self._count = None
        self.random_state = random_state
        self.randomized = randomized
        self.sigma = sigma
        self.m_prior = m_prior
        self.m_parent = m_parent
        self.feature_names = None
        self.parent_cols = None
        self.parent_encoder = None

    # noinspection PyUnusedLocal
    def fit(self, X, y, **kwargs):
        """Fit encoder according to X and binary y.

        Parameters
        ----------

        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values.

        Returns
        -------

        self : encoder
            Returns self.

        """

        # Create parent encoder and fit it
        self.parent_cols = list(self.feature_mapping.values())
        self.parent_encoder = MEstimateEncoder(
            verbose=self.verbose,
            cols=self.parent_cols,
            drop_invariant=self.drop_invariant,
            return_df=self.return_df,
            handle_unknown=self.handle_unknown,
            handle_missing=self.handle_missing,
            random_state=self.random_state,
            randomized=self.randomized,
            sigma=self.sigma,
            m=self.m_prior,
        )
        self.parent_encoder.fit(X, y)

        # Unite parameters into pandas types
        X = util.convert_input(X)
        y = util.convert_input_vector(y, X.index).astype(float)

        # The lengths must be equal
        if X.shape[0] != y.shape[0]:
            raise ValueError("The length of X is " + str(X.shape[0]) +
                             " but length of y is " + str(y.shape[0]) + ".")

        self._dim = X.shape[1]

        # If columns aren't passed, just use every string column
        if self.cols is None:
            self.cols = util.get_obj_cols(X)
        else:
            self.cols = util.convert_cols_to_list(self.cols)

        if self.handle_missing == "error":
            if X[self.cols].isnull().any().any():
                raise ValueError("Columns to be encoded can not contain null")

        # Check that children and parents are disjoint
        children = set(self.feature_mapping.keys())
        parents = set(self.feature_mapping.values())
        if len(children.intersection(parents)) > 0:
            raise ValueError("No column should be a child and a parent")

        self.ordinal_encoder = OrdinalEncoder(
            verbose=self.verbose,
            cols=self.cols,
            handle_unknown="value",
            handle_missing="value",
        )
        self.ordinal_encoder = self.ordinal_encoder.fit(X)
        X_ordinal = self.ordinal_encoder.transform(X)

        # Training
        self.mapping = self._train(X_ordinal, y)

        X_temp = self.transform(X, override_return_df=True)
        self.feature_names = X_temp.columns.tolist()

        # Store column names with approximately constant variance on the training data
        if self.drop_invariant:
            self.drop_cols = []
            generated_cols = util.get_generated_cols(X, X_temp, self.cols)
            self.drop_cols = [
                x for x in generated_cols if X_temp[x].var() <= 10e-5
            ]
            try:
                for x in self.drop_cols:
                    self.feature_names.remove(x)
            except ValueError as e:
                if self.verbose > 0:
                    print("Could not remove column from feature names. "
                          "Not found in generated cols.\n{}".format(e))
        return self

    def transform(self, X, y=None, override_return_df=False):
        """Perform the transformation to new categorical data.

        When the data are used for model training, it is important to also pass
        the target, so that training-only adjustments (such as the noise added
        when ``randomized=True``) can be applied.

        Parameters
        ----------

        X : array-like, shape = [n_samples, n_features]
        y : array-like, shape = [n_samples], optional
            Target values when transforming training data; None when transforming
            data without target information (such as a test set).


        Returns
        -------

        p : array, shape = [n_samples, n_numeric + N]
            Transformed values with encoding applied.

        """

        if self.handle_missing == "error":
            if X[self.cols].isnull().any().any():
                raise ValueError("Columns to be encoded can not contain null")

        if self._dim is None:
            raise ValueError(
                "Must train encoder before it can be used to transform data.")

        # Unite the input into pandas types
        X = util.convert_input(X)

        # Then make sure that it is the right size
        if X.shape[1] != self._dim:
            raise ValueError("Unexpected input dimension %d, expected %d" % (
                X.shape[1],
                self._dim,
            ))

        # If we are encoding the training data, we have to check the target
        if y is not None:
            y = util.convert_input_vector(y, X.index).astype(float)
            if X.shape[0] != y.shape[0]:
                raise ValueError("The length of X is " + str(X.shape[0]) +
                                 " but length of y is " + str(y.shape[0]) +
                                 ".")

        if not list(self.cols):
            return X

        # Do not modify the input argument
        X = X.copy(deep=True)

        X = self.ordinal_encoder.transform(X)

        if self.handle_unknown == "error":
            if X[self.cols].isin([-1]).any().any():
                raise ValueError("Unexpected categories found in dataframe")

        # Loop over the columns and replace the nominal values with the numbers
        X = self._score(X, y)

        # Postprocessing
        # Note: We should not even convert these columns.
        if self.drop_invariant:
            for col in self.drop_cols:
                X.drop(col, axis=1, inplace=True)

        if self.return_df or override_return_df:
            return X
        else:
            return X.values

    def _train(self, X, y):
        # Initialize the output
        mapping = {}

        # Calculate global statistics
        self._sum = y.sum()
        self._count = y.count()
        prior = self._sum / self._count

        for switch in self.ordinal_encoder.category_mapping:
            col = switch.get("col")
            values = switch.get("mapping")

            # Easy case, the child is not in the child - parent dictionary.
            # We just use the plain m-estimator with the global prior
            if col not in self.feature_mapping:
                stats = y.groupby(X[col]).agg(["sum", "count", "mean"])

                estimate = (stats["sum"] + prior * self.m_prior) / (
                    stats["count"] + self.m_prior)

            # Not so easy case, we have to deal with the parent
            else:
                parent_col = self.feature_mapping[col]

                # Check son-parent unique relation
                unique_parents = X.groupby([col]).agg({parent_col:
                                                       "nunique"})[parent_col]

                # Exclude the special ordinal codes (-1 unknown, -2 missing)
                # from the uniqueness check.
                more_1_parent = unique_parents[
                    (unique_parents > 1) & (unique_parents.index >= 0)
                ]

                if len(more_1_parent) > 0:
                    raise ValueError(
                        f"There are children with more than one parent, {more_1_parent}"
                    )

                # Get parent stats
                te_parent = self.parent_encoder.transform(X)[parent_col]
                parent_mapping = pd.DataFrame({
                    "te_parent": te_parent,
                    parent_col: X[parent_col]
                }).drop_duplicates()

                # Compute child statistics
                stats = y.groupby(X[col]).agg(["sum", "count", "mean"])

                # Relate parent and child stats
                groups = X.loc[:, [parent_col, col]].drop_duplicates()

                stats = stats.merge(groups, how="left",
                                    on=col).merge(parent_mapping,
                                                  how="left",
                                                  on=parent_col)

                # In case of numpy array
                stats = stats.rename(columns={"key_0": col})
                stats = stats.set_index(col)

                # Calculate the m-probability estimate using the parent prior
                estimate = (stats["sum"] + stats["te_parent"] *
                            self.m_parent) / (stats["count"] + self.m_parent)

            # Ignore unique columns. This helps to prevent overfitting on id-like columns
            if len(stats["count"]) == self._count:
                estimate[:] = prior

            # Column doesn't have parent - handle imputation as always
            if col not in self.feature_mapping:
                if self.handle_unknown == "return_nan":
                    estimate.loc[-1] = np.nan
                elif self.handle_unknown == "value":
                    estimate.loc[-1] = prior

                if self.handle_missing == "return_nan":
                    estimate.loc[values.loc[np.nan]] = np.nan
                elif self.handle_missing == "value":
                    estimate.loc[-2] = prior
            # With parents - we leave the imputation for afterwards
            else:
                # Unknown
                estimate.loc[-1] = np.nan
                # Missing
                estimate.loc[values.loc[np.nan]] = np.nan

            # Store the m-probability estimate for transform() function
            mapping[col] = estimate

        return mapping

    def _score(self, X, y):

        X_parents = self.parent_encoder.transform(X)

        for col in self.cols:

            # Easy case - not having parents (as m estimator)
            if col not in self.feature_mapping:
                # Score the column
                X[col] = X[col].map(self.mapping[col])

            # Harder case - having parents
            else:

                # Split missing and unknown values
                unknown = X[col] == -1
                missing = X[col] == -2

                # Apply regular transformation
                X[col] = X[col].map(self.mapping[col].drop_duplicates())

                # Impute unknown with parent
                parent_col = self.feature_mapping[col]
                if self.handle_unknown == "value":
                    X[col] = X[col].mask(unknown, X_parents[parent_col])

                # Impute missing with parent
                if self.handle_missing == "value":
                    X[col] = X[col].mask(missing, X_parents[parent_col])

            # Randomization is meaningful only for training data -> we do it only if y is present
            if self.randomized and y is not None:
                random_state_generator = check_random_state(self.random_state)
                X[col] = X[col] * random_state_generator.normal(
                    1.0, self.sigma, X[col].shape[0])

        return X

    def get_feature_names(self):
        """
        Returns the names of all transformed / added columns.

        Returns
        -------
        feature_names: list
            A list with all feature names transformed or added.
            Note: potentially dropped features are not included!

        """
        if not isinstance(self.feature_names, list):
            raise ValueError(
                "Estimator has to be fitted to return feature names.")
        else:
            return self.feature_names
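
To make the parent-fallback behaviour concrete, a minimal usage sketch (column names follow the docstring example; the unseen category "z" is made up):

import pandas as pd
from sktools import NestedTargetEncoder

X = pd.DataFrame({
    "child":  ["a", "a", "b", "b", "c", "c"],
    "parent": ["e", "e", "e", "e", "f", "f"],
})
y = pd.Series([1.0, 2.0, 3.0, 1.0, 4.0, 5.0])

ne = NestedTargetEncoder(feature_mapping={"child": "parent"})
ne.fit(X, y)

# A child unseen during fit ("z") under a known parent is imputed with
# that parent's m-estimate instead of the global mean (handle_unknown="value").
X_new = pd.DataFrame({"child": ["a", "z"], "parent": ["e", "f"]})
print(ne.transform(X_new))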
Example 6
    def fit(self, X, y, **kwargs):
        """Fit encoder according to X and binary y.

        Parameters
        ----------

        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values.

        Returns
        -------

        self : encoder
            Returns self.

        """

        # Create parent encoder and fit it
        self.parent_cols = list(self.feature_mapping.values())
        self.parent_encoder = MEstimateEncoder(
            verbose=self.verbose,
            cols=self.parent_cols,
            drop_invariant=self.drop_invariant,
            return_df=self.return_df,
            handle_unknown=self.handle_unknown,
            handle_missing=self.handle_missing,
            random_state=self.random_state,
            randomized=self.randomized,
            sigma=self.sigma,
            m=self.m_prior,
        )
        self.parent_encoder.fit(X, y)

        # Unite parameters into pandas types
        X = util.convert_input(X)
        y = util.convert_input_vector(y, X.index).astype(float)

        # The lengths must be equal
        if X.shape[0] != y.shape[0]:
            raise ValueError("The length of X is " + str(X.shape[0]) +
                             " but length of y is " + str(y.shape[0]) + ".")

        self._dim = X.shape[1]

        # If columns aren't passed, just use every string column
        if self.cols is None:
            self.cols = util.get_obj_cols(X)
        else:
            self.cols = util.convert_cols_to_list(self.cols)

        if self.handle_missing == "error":
            if X[self.cols].isnull().any().any():
                raise ValueError("Columns to be encoded can not contain null")

        # Check that children and parents are disjoint
        children = set(self.feature_mapping.keys())
        parents = set(self.feature_mapping.values())
        if len(children.intersection(parents)) > 0:
            raise ValueError("No column should be a child and a parent")

        self.ordinal_encoder = OrdinalEncoder(
            verbose=self.verbose,
            cols=self.cols,
            handle_unknown="value",
            handle_missing="value",
        )
        self.ordinal_encoder = self.ordinal_encoder.fit(X)
        X_ordinal = self.ordinal_encoder.transform(X)

        # Training
        self.mapping = self._train(X_ordinal, y)

        X_temp = self.transform(X, override_return_df=True)
        self.feature_names = X_temp.columns.tolist()

        # Store column names with approximately constant variance on the training data
        if self.drop_invariant:
            self.drop_cols = []
            generated_cols = util.get_generated_cols(X, X_temp, self.cols)
            self.drop_cols = [
                x for x in generated_cols if X_temp[x].var() <= 10e-5
            ]
            try:
                for x in self.drop_cols:
                    self.feature_names.remove(x)
            except ValueError as e:
                if self.verbose > 0:
                    print("Could not remove column from feature names. "
                          "Not found in generated cols.\n{}".format(e))
        return self
Example 7

#train_mee = MEE_encoder.fit_transform(train_set[feature_list], target)
#test_mee = MEE_encoder.transform(test_set[feature_list])
#print(train_mee.head())
X_train, X_val, y_train, y_val = train_test_split(train_set, target, test_size=0.2, random_state=97)
lr = LinearRegression()
rf = RandomForestRegressor()


# In[48]:


TE_encoder = TargetEncoder()
train_te = TE_encoder.fit_transform(train_set[feature_list], target)
test_te = TE_encoder.transform(test_set[feature_list])
#print(train_te.head())
encoder_list = [ TargetEncoder(), MEstimateEncoder()]
X_train, X_val, y_train, y_val = train_test_split(train_set, target, test_size=0.2, random_state=97)
#X_train, X_val, y_train, y_val = dataset_test()
lr = LinearRegression()

for encoder in encoder_list:
#    print("Test {} : ".format(str(encoder).split('(')[0]), end=" ")

    train_enc = encoder.fit_transform(X_train[feature_list], y_train)
      # test_enc = encoder.transform(test[feature_list])
    val_enc = encoder.transform(X_val[feature_list])
    
    lr.fit(train_enc, y_train)
#    print(lr.score(train_enc, y_train))
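
A sketch of the validation-side comparison the loop above seems to be building toward; the printout format is an assumption, not part of the original:

for encoder in encoder_list:
    train_enc = encoder.fit_transform(X_train[feature_list], y_train)
    val_enc = encoder.transform(X_val[feature_list])
    lr.fit(train_enc, y_train)
    # Compare on the held-out split, not just the training encoding.
    print("{}: train R2={:.3f}, val R2={:.3f}".format(
        type(encoder).__name__,
        lr.score(train_enc, y_train),
        lr.score(val_enc, y_val)))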
    
    
Example 8
# We'll start by creating a 25% split to train the target encoder.

import matplotlib.pyplot as plt
import seaborn as sns
from category_encoders import MEstimateEncoder

X = df.copy()  # df: ratings DataFrame loaded earlier, with a "Zipcode" column
y = X.pop('Rating')

X_encode = X.sample(frac=0.25)
y_encode = y[X_encode.index]
X_pretrain = X.drop(X_encode.index)
y_train = y[X_pretrain.index]

# The category_encoders package in scikit-learn-contrib implements an m-estimate encoder,
# which we'll use to encode our Zipcode feature.

# Create the encoder instance. Choose m to control noise.
encoder = MEstimateEncoder(cols=["Zipcode"], m=5.0)

# Fit the encoder on the encoding split.
encoder.fit(X_encode, y_encode)

# Encode the Zipcode column to create the final training data
X_train = encoder.transform(X_pretrain)

# Let's compare the encoded values to the target to see how informative our encoding might be.
plt.figure(dpi=90)
ax = sns.histplot(y, stat="density")  # sns.distplot is deprecated in newer seaborn
ax = sns.kdeplot(X_train.Zipcode, color='r', ax=ax)
ax.set_xlabel("Rating")
ax.legend(labels=['Zipcode', 'Rating'])
plt.show()
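
For intuition about the m chosen above: the m-estimate blends each category's target mean with the overall mean, weighting the prior as though it were backed by m extra observations. A small hand computation with made-up numbers:

# m-estimate: (category_sum + m * prior) / (category_count + m)
m = 5.0
prior = 3.5          # overall mean rating
n = 3                # a zipcode seen only 3 times
cat_sum = 3 * 4.8    # those 3 ratings average 4.8

encoding = (cat_sum + m * prior) / (n + m)
print(encoding)      # 3.9875 -> pulled strongly toward the prior of 3.5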