class AsTypeFeatureGenerator(AbstractFeatureGenerator):
    """
    Enforces type conversion on the data to match the types seen during fitting.
    If a feature cannot be converted to the correct type, an exception will be raised.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._feature_metadata_in_real: FeatureMetadata = None  # FeatureMetadata object based on the original input features real dtypes (will contain dtypes such as 'int16' and 'float32' instead of 'int' and 'float').
        # self.inplace = inplace  # TODO, also add check if dtypes are same as expected and skip .astype

    # TODO: consider returning self._transform(X) if we allow users to specify real dtypes as input
    def _fit_transform(self, X: DataFrame, **kwargs) -> (DataFrame, dict):
        return X, self.feature_metadata_in.type_group_map_special

    def _transform(self, X: DataFrame) -> DataFrame:
        int_features = self.feature_metadata_in.get_features(valid_raw_types=[R_INT])
        if int_features:
            null_count = X[int_features].isnull().sum()
            with_null = null_count[null_count != 0]
            # If an int feature contains nulls during inference but not during fit.
            if len(with_null) > 0:
                # TODO: Consider imputing to mode? This is tricky because training data had no missing values.
                # TODO: Add unit test for this situation, to confirm it is handled properly.
                with_null_features = list(with_null.index)
                logger.warning(f'WARNING: Int features without null values at train time contain null values at inference time! Imputing nulls to 0. To avoid this, pass the features as floats during fit!')
                logger.warning(f'WARNING: Int features with nulls: {with_null_features}')
                X[with_null_features] = X[with_null_features].fillna(0)
        if self._feature_metadata_in_real.type_map_raw:
            # TODO: Confirm this works with sparse and other feature types!
            X = X.astype(self._feature_metadata_in_real.type_map_raw)
        return X

    @staticmethod
    def get_default_infer_features_in_args() -> dict:
        return dict()

    def _infer_features_in_full(self, X: DataFrame, feature_metadata_in: FeatureMetadata = None):
        super()._infer_features_in_full(X=X, feature_metadata_in=feature_metadata_in)
        type_map_real = get_type_map_real(X[self.feature_metadata_in.get_features()])
        self._feature_metadata_in_real = FeatureMetadata(type_map_raw=type_map_real, type_group_map_special=self.feature_metadata_in.get_type_group_map_raw())

    def _remove_features_in(self, features):
        super()._remove_features_in(features)
        if features:
            self._feature_metadata_in_real = self._feature_metadata_in_real.remove_features(features=features)

    def print_feature_metadata_info(self, log_level=20):
        self._log(log_level, '\tOriginal Features (exact raw dtype, raw dtype):')
        self._feature_metadata_in_real.print_feature_metadata_full(self.log_prefix + '\t\t', print_only_one_special=True, log_level=log_level)
        super().print_feature_metadata_info(log_level=log_level)

    def _more_tags(self):
        return {'feature_interactions': False}
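A minimal usage sketch of AsTypeFeatureGenerator (the import path and column names below are illustrative assumptions, not part of the snippet above): dtypes recorded at fit time are enforced again at transform time.

import pandas as pd
from autogluon.features.generators import AsTypeFeatureGenerator  # assumed import path

train = pd.DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]})             # int64, float64
test = pd.DataFrame({'a': [4.0, 5.0, 6.0], 'b': ['0.4', '0.5', '0.6']})  # drifted dtypes

generator = AsTypeFeatureGenerator()
X_fit = generator.fit_transform(train)  # records the training dtypes
X_new = generator.transform(test)       # casts 'a' back to int64 and 'b' back to float64
print(X_new.dtypes)
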
Example #2 (File: astype.py, Project: sackoh/autogluon)
    def _infer_features_in_full(self, X: DataFrame, feature_metadata_in: FeatureMetadata = None):
        super()._infer_features_in_full(X=X, feature_metadata_in=feature_metadata_in)
        type_map_real = get_type_map_real(X[self.feature_metadata_in.get_features()])
        self._type_map_real_opt = X[self.feature_metadata_in.get_features()].dtypes.to_dict()
        self._feature_metadata_in_real = FeatureMetadata(
            type_map_raw=type_map_real,
            type_group_map_special=self.feature_metadata_in.get_type_group_map_raw())

    def _fit_transform(self, X: DataFrame, **kwargs) -> (DataFrame, dict):
        feature_metadata = self.feature_metadata_in
        for i in range(len(self.generators)):
            self._log(20, f'\tStage {i + 1} Generators:')
            feature_df_list = []
            generator_group_valid = []
            for generator in self.generators[i]:
                if generator.is_valid_metadata_in(feature_metadata):
                    if generator.verbosity > self.verbosity:
                        generator.verbosity = self.verbosity
                    generator.set_log_prefix(log_prefix=self.log_prefix + '\t\t', prepend=True)
                    feature_df_list.append(generator.fit_transform(X, feature_metadata_in=feature_metadata, **kwargs))
                    generator_group_valid.append(generator)
                else:
                    self._log(15, f'\t\tSkipping {generator.__class__.__name__}: No input feature with required dtypes.')

            self.generators[i] = generator_group_valid

            self.generators[i] = [generator for j, generator in enumerate(self.generators[i]) if feature_df_list[j] is not None and len(feature_df_list[j].columns) > 0]
            feature_df_list = [feature_df for feature_df in feature_df_list if feature_df is not None and len(feature_df.columns) > 0]

            if self.generators[i]:
                # Raise an exception if generators expect different raw input types for the same feature.
                FeatureMetadata.join_metadatas([generator.feature_metadata_in for generator in self.generators[i]], shared_raw_features='error_if_diff')

            if self.generators[i]:
                feature_metadata = FeatureMetadata.join_metadatas([generator.feature_metadata for generator in self.generators[i]], shared_raw_features='error')
            else:
                feature_metadata = FeatureMetadata(type_map_raw=dict())

            if not feature_df_list:
                X = DataFrame(index=X.index)
            elif len(feature_df_list) == 1:
                X = feature_df_list[0]
            else:
                X = pd.concat(feature_df_list, axis=1, ignore_index=False, copy=False)

        self._remove_features_out(features=[])
        # Remove useless generators
        # TODO: consider moving to self._remove_features_out
        for i in range(len(self.generators)):
            generator_group_valid = []
            for j in range(len(self.generators[i])):
                if self.generators[i][j].features_out:
                    generator_group_valid.append(self.generators[i][j])
            self.generators[i] = generator_group_valid

        return X, feature_metadata.type_group_map_special
Example #4
    def train(self,
              train_data,
              eval_metric=EVAL_METRIC,
              quality=QUALITY,
              time_limit=TIME_LIMIT,
              verbosity=VERBOSITY):
        """Train prospective models."""
        # predictor gives us default access to the *best* predictor that
        # was trained on the task (otherwise we're just wrapping AutoGluon)

        # create custom feature generator to force autogluon to use our features
        # as they are
        fg = AutoMLPipelineFeatureGenerator(enable_categorical_features=False,
                                            enable_datetime_features=False,
                                            enable_text_special_features=False,
                                            enable_text_ngram_features=False)
        # create our own feature metadata object since we know the type of every
        # feature we have; skip the label column in the training data when doing so
        fmd = FeatureMetadata(dict.fromkeys(train_data.columns[:-1], 'int'))

        task = TabularPredictor(
            label='label',
            eval_metric=eval_metric,
            path=self.outpath,
            verbosity=verbosity,
        )
        return task.fit(train_data=train_data,
                        time_limit=time_limit,
                        presets=self.QUALITY_PRESETS[quality],
                        feature_generator=fg,
                        feature_metadata=fmd)
Example #5
def spunge_augment(X,
                   feature_metadata: FeatureMetadata,
                   num_augmented_samples=10000,
                   frac_perturb=0.1,
                   continuous_feature_noise=0.1,
                   **kwargs):
    """ Generates synthetic datapoints for learning to mimic teacher model in distillation
        via simplified version of MUNGE strategy (that does not require near-neighbor search).

        Args:
            num_augmented_samples: number of additional augmented data points to return
            frac_perturb: fraction of features/examples that are perturbed during augmentation. Set near 0 to ensure augmented sample distribution remains closer to real data.
            continuous_feature_noise: we noise numeric features by this factor times their std-dev. Set near 0 to ensure augmented sample distribution remains closer to real data.
    """
    if frac_perturb > 1.0:
        raise ValueError("frac_perturb must be <= 1")
    logger.log(
        20,
        f"SPUNGE: Augmenting training data with {num_augmented_samples} synthetic samples for distillation..."
    )
    num_feature_perturb = max(1, int(frac_perturb * len(X.columns)))
    X_aug = pd.concat([X.iloc[[0]].copy()] * num_augmented_samples)
    X_aug.reset_index(drop=True, inplace=True)
    continuous_types = ['float', 'int']
    continuous_featnames = feature_metadata.get_features(
        valid_raw_types=continuous_types
    )  # these features will have shuffled values with added noise

    for i in range(num_augmented_samples):  # hot-deck sample some features per datapoint
        og_ind = i % len(X)
        augdata_i = X.iloc[og_ind].copy()
        # randomly sample the number of features to perturb
        num_feature_perturb_i = np.random.choice(range(1, num_feature_perturb + 1))
        cols_toperturb = np.random.choice(list(X.columns),
                                          size=num_feature_perturb_i,
                                          replace=False)
        for feature in cols_toperturb:
            feature_data = X[feature]
            augdata_i[feature] = feature_data.sample(n=1).values[0]
        X_aug.iloc[i] = augdata_i

    for feature in X.columns:
        if feature in continuous_featnames:
            feature_data = X[feature]
            aug_data = X_aug[feature]
            noise = np.random.normal(scale=np.nanstd(feature_data) *
                                     continuous_feature_noise,
                                     size=num_augmented_samples)
            mask = np.random.binomial(n=1,
                                      p=frac_perturb,
                                      size=num_augmented_samples)
            aug_data = aug_data + noise * mask
            X_aug[feature] = pd.Series(aug_data, index=X_aug.index)

    return X_aug
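A short usage sketch for spunge_augment (hypothetical DataFrame and feature names; assumes the function above is in scope and that FeatureMetadata is importable from the path given in the docstrings further below):

import pandas as pd
from autogluon.core.features.feature_metadata import FeatureMetadata  # assumed import path

X_train = pd.DataFrame({'age': [23, 35, 41, 58], 'income': [30.0, 52.5, 61.0, 80.2]})
fm = FeatureMetadata.from_df(X_train)

# Keep perturbation small so the augmented distribution stays close to the real data.
X_aug = spunge_augment(X_train, feature_metadata=fm,
                       num_augmented_samples=1000,
                       frac_perturb=0.1,
                       continuous_feature_noise=0.05)
print(len(X_aug))  # 1000 synthetic rows for distillation
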
    @staticmethod
    def _drop_unique_features(X: DataFrame, feature_metadata: FeatureMetadata, max_unique_ratio) -> list:
        features_to_drop = []
        X_len = len(X)
        max_unique_value_count = X_len * max_unique_ratio
        for column in X:
            unique_value_count = len(X[column].unique())
            # Drop features that are always the same
            if unique_value_count == 1:
                features_to_drop.append(column)
            elif feature_metadata.get_feature_type_raw(column) in [R_CATEGORY, R_OBJECT] \
                    and (unique_value_count > max_unique_value_count):
                if S_TEXT in feature_metadata.get_feature_types_special(column):
                    # We should not drop a text column
                    continue
                else:
                    features_to_drop.append(column)
        return features_to_drop
Example #7
def munge_augment(X, feature_metadata: FeatureMetadata, num_augmented_samples=10000, perturb_prob=0.5, s=1.0, **kwargs):
    """ Uses MUNGE algorithm to generate synthetic datapoints for learning to mimic teacher model in distillation: https://www.cs.cornell.edu/~caruana/compression.kdd06.pdf
        Args:
            num_augmented_samples: number of additional augmented data points to return
            perturb_prob: probability of perturbing each feature during augmentation. Set near 0 to ensure augmented sample distribution remains closer to real data.
            s: We noise numeric features by their std-dev divided by this factor (inverse of continuous_feature_noise). Set large to ensure augmented sample distribution remains closer to real data.
    """
    from ..models.tabular_nn.tabular_nn_model import TabularNeuralNetModel
    nn_dummy = TabularNeuralNetModel(path='nn_dummy', name='nn_dummy', problem_type=REGRESSION, eval_metric=mean_squared_error,
                                     hyperparameters={'num_dataloading_workers': 0, 'proc.embed_min_categories': np.inf},
                                     features=list(X.columns), feature_metadata=feature_metadata)
    processed_data = nn_dummy.process_train_data(df=nn_dummy.preprocess(X), labels=pd.Series([1]*len(X)), batch_size=nn_dummy.params['batch_size'],
                        num_dataloading_workers=0, impute_strategy=nn_dummy.params['proc.impute_strategy'],
                        max_category_levels=nn_dummy.params['proc.max_category_levels'], skew_threshold=nn_dummy.params['proc.skew_threshold'],
                        embed_min_categories=nn_dummy.params['proc.embed_min_categories'], use_ngram_features=nn_dummy.params['use_ngram_features'])
    X_vector = processed_data.dataset._data[processed_data.vectordata_index].asnumpy()
    processed_data = None
    nn_dummy = None
    gc.collect()

    neighbor_finder = NearestNeighbors(n_neighbors=2)
    neighbor_finder.fit(X_vector)
    neigh_dist, neigh_ind = neighbor_finder.kneighbors(X_vector)
    neigh_ind = neigh_ind[:,1]  # contains indices of nearest neighbors
    neigh_dist = None
    # neigh_dist = neigh_dist[:,1]  # contains distances to nearest neighbors
    neighbor_finder = None
    gc.collect()

    if perturb_prob > 1.0:
        raise ValueError("frac_perturb must be <= 1")
    logger.log(20, f"MUNGE: Augmenting training data with {num_augmented_samples} synthetic samples for distillation...")
    X = X.copy()
    X_aug = pd.concat([X.iloc[[0]].copy()]*num_augmented_samples)
    X_aug.reset_index(drop=True, inplace=True)
    continuous_types = ['float', 'int']
    continuous_featnames = feature_metadata.get_features(valid_raw_types=continuous_types)  # these features will have shuffled values with added noise
    for col in continuous_featnames:
        X_aug[col] = X_aug[col].astype(float)
        X[col] = X[col].astype(float)

    for i in range(num_augmented_samples):
        og_ind = i % len(X)
        augdata_i = X.iloc[og_ind].copy()
        neighbor_i = X.iloc[neigh_ind[og_ind]].copy()
        # dist_i = neigh_dist[og_ind]
        cols_toperturb = np.random.choice(list(X.columns), size=np.random.binomial(X.shape[1], p=perturb_prob, size=1)[0], replace=False)
        for col in cols_toperturb:
            new_val = neighbor_i[col]
            if col in continuous_featnames:
                new_val += np.random.normal(scale=np.abs(augdata_i[col]-new_val)/s)
            augdata_i[col] = new_val
        X_aug.iloc[i] = augdata_i

    return X_aug
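munge_augment follows the same call pattern as spunge_augment, but additionally builds a nearest-neighbor index over the neural-net-processed feature vectors, so it pulls in the tabular NN and scikit-learn dependencies. A sketch reusing X_train and fm from the spunge_augment example above (parameter values are illustrative):

# s acts as the inverse of continuous_feature_noise: larger s keeps augmented rows closer to the real data.
X_aug = munge_augment(X_train, feature_metadata=fm,
                      num_augmented_samples=1000,
                      perturb_prob=0.3,
                      s=2.0)
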
    def __init__(self, features_in='empty', feature_metadata_in='empty', **kwargs):
        if features_in == 'empty':
            features_in = []
        if feature_metadata_in == 'empty':
            feature_metadata_in = FeatureMetadata(type_map_raw={})
        super().__init__(features_in=features_in, feature_metadata_in=feature_metadata_in, **kwargs)

    def _get_unused_features(self, feature_links_chain):
        features_in_list = []
        for i in range(len(self.generators)):
            stage = i + 1
            if stage > 1:
                if self.generators[stage - 2]:
                    features_in = FeatureMetadata.join_metadatas(
                        [generator.feature_metadata for generator in self.generators[stage - 2]],
                        shared_raw_features='error').get_features()
                else:
                    features_in = []
            else:
                features_in = self.features_in
            features_in_list.append(features_in)
        return self._get_unused_features_generic(feature_links_chain=feature_links_chain, features_in_list=features_in_list)
Example #10
def test_image_predictor(fit_helper):
    train_data, _, test_data = ImageDataset.from_folders(
        'https://autogluon.s3.amazonaws.com/datasets/shopee-iet.zip')
    feature_metadata = FeatureMetadata.from_df(train_data).add_special_types(
        {'image': ['image_path']})
    predictor = TabularPredictor(label='label').fit(
        train_data=train_data,
        hyperparameters={'AG_IMAGE_NN': {
            'epochs': 2
        }},
        feature_metadata=feature_metadata)
    leaderboard = predictor.leaderboard(test_data)
    assert len(leaderboard) > 0

    def is_valid_metadata_in(self, feature_metadata_in: FeatureMetadata):
        """
        True if input data with feature metadata of feature_metadata_in could result in non-empty output.
            This is dictated by `feature_metadata_in.get_features(**self._infer_features_in_args)` not being empty.
        False if the features represented in feature_metadata_in do not contain any usable types for the generator.
            For example, if only numeric features are passed as input to TextSpecialFeatureGenerator which requires text input features, this will return False.
            However, if both numeric and text features are passed, this will return True since the text features would be valid input (the numeric features would simply be dropped).
        """
        features_in = feature_metadata_in.get_features(**self._infer_features_in_args)
        if features_in:
            return True
        else:
            return False
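The check above reduces to whether feature_metadata_in.get_features(**self._infer_features_in_args) returns any features. A small illustration of that filter (hypothetical feature names; import path taken from the docstrings further below):

from autogluon.core.features.feature_metadata import FeatureMetadata  # assumed import path

numeric_only = FeatureMetadata(type_map_raw={'age': 'int', 'income': 'float'})

# A generator whose infer args require object-typed (e.g. text) input would see no usable
# features here, so its is_valid_metadata_in would return False for this metadata.
print(numeric_only.get_features(valid_raw_types=['object']))  # []
# With no filter applied, every feature is usable input.
print(numeric_only.get_features())  # ['age', 'income']
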
Example #12 (File: astype.py, Project: sackoh/autogluon)
    def _fit_transform(self, X: DataFrame, **kwargs) -> (DataFrame, dict):
        feature_type_raw_cur_dict = get_type_map_raw(X)
        feature_map_to_update = dict()
        type_map_special = self.feature_metadata_in.get_type_map_special()
        for feature in self.features_in:
            feature_type_raw = self.feature_metadata_in.get_feature_type_raw(
                feature)
            feature_type_raw_cur = feature_type_raw_cur_dict[feature]
            if feature_type_raw != feature_type_raw_cur:
                self._log(
                    30,
                    f'\tWARNING: Actual dtype differs from dtype in FeatureMetadata for feature "{feature}". Actual dtype: {feature_type_raw_cur} | Expected dtype: {feature_type_raw}'
                )
                feature_map_to_update[feature] = feature_type_raw
        if feature_map_to_update:
            self._log(
                30,
                f'\tWARNING: Forcefully converting features to expected dtypes. Please manually align the input data with the expected dtypes if issues occur.'
            )
            X = X.astype(feature_map_to_update)

        self._bool_features = dict()
        if self._convert_bool:
            for feature in self.features_in:
                if S_BOOL not in type_map_special[feature]:
                    if len(X[feature].unique()) == 2:
                        feature_bool_val = get_bool_true_val(X[feature])
                        self._bool_features[feature] = feature_bool_val

        if self._bool_features:
            self._log(
                20,
                f'\tNote: Converting {len(self._bool_features)} features to boolean dtype as they only contain 2 unique values.'
            )
            for feature in self._bool_features:
                type_map_special[feature] = [S_BOOL]
                X[feature] = (
                    X[feature] == self._bool_features[feature]).astype(np.int8)
                self._type_map_real_opt[feature] = np.int8
            type_group_map_special = FeatureMetadata.get_type_group_map_special_from_type_map_special(
                type_map_special)
        else:
            type_group_map_special = self.feature_metadata_in.type_group_map_special
        self._int_features = np.array(
            self.feature_metadata_in.get_features(valid_raw_types=[R_INT]))
        return X, type_group_map_special

    @staticmethod
    def _infer_feature_metadata_in(X: DataFrame) -> FeatureMetadata:
        """
        Infers the feature_metadata_in of X.
        This is used if feature_metadata_in was not provided by the user prior to fit.
        This can be overwritten in a new generator to use new infer logic, but it is preferred to keep the default logic for consistency with other generators.

        Parameters
        ----------
        X : DataFrame
            Input data used to fit the generator.

        Returns
        -------
        feature_metadata_in : FeatureMetadata object inferred from X.
        """
        type_map_raw = get_type_map_raw(X)
        type_group_map_special = get_type_group_map_special(X)
        return FeatureMetadata(type_map_raw=type_map_raw, type_group_map_special=type_group_map_special)

def test_feature_metadata(data_helper):
    # Given
    input_data = data_helper.generate_multi_feature_full()

    expected_feature_metadata_full = {
        ('category', ()): ['cat'],
        ('datetime', ()): ['datetime'],
        ('float', ()): ['float'],
        ('int', ()): ['int'],
        ('object', ()): ['obj'],
        ('object', ('datetime_as_object', )): ['datetime_as_object'],
        ('object', ('text', )): ['text']
    }

    expected_feature_metadata_get_features = [
        'int', 'float', 'obj', 'cat', 'datetime', 'text', 'datetime_as_object'
    ]

    expected_type_map_raw = {
        'cat': 'category',
        'datetime': 'datetime',
        'datetime_as_object': 'object',
        'float': 'float',
        'int': 'int',
        'obj': 'object',
        'text': 'object'
    }

    expected_type_group_map_special = {
        'datetime_as_object': ['datetime_as_object'],
        'text': ['text']
    }

    expected_feature_metadata_renamed_full = {
        ('category', ()): ['cat'],
        ('datetime', ()): ['datetime'],
        ('float', ()): ['obj'],
        ('int', ()): ['int_renamed'],
        ('object', ()): ['float'],
        ('object', ('datetime_as_object', )): ['datetime_as_object'],
        ('object', ('text', )): ['text_renamed']
    }

    expected_feature_metadata_recombined_full_full = {
        ('category', ()): ['cat'],
        ('custom_raw_type', ('custom_special_type', )): ['new_feature'],
        ('datetime', ()): ['datetime'],
        ('float', ()): ['float'],
        ('int', ('custom_special_type', )): ['int'],
        ('object', ()): ['obj'],
        ('object', ('datetime_as_object', )): ['datetime_as_object'],
        ('object', ('text', )): ['text']
    }

    # When
    feature_metadata = FeatureMetadata.from_df(input_data)
    feature_metadata_renamed = feature_metadata.rename_features(
        rename_map={
            'text': 'text_renamed',
            'int': 'int_renamed',
            'obj': 'float',
            'float': 'obj'
        })
    feature_metadata_remove = feature_metadata.remove_features(
        features=['text', 'obj', 'float'])
    feature_metadata_keep = feature_metadata.keep_features(
        features=['text', 'obj', 'float'])
    feature_metadata_custom = FeatureMetadata(
        type_map_raw={
            'int': 'int',
            'new_feature': 'custom_raw_type'
        },
        type_group_map_special={'custom_special_type': ['int', 'new_feature']})
    feature_metadata_recombined = feature_metadata_keep.join_metadata(
        feature_metadata_remove)
    feature_metadata_recombined_alternate = FeatureMetadata.join_metadatas(
        metadata_list=[feature_metadata_keep, feature_metadata_remove])
    feature_metadata_recombined_full = FeatureMetadata.join_metadatas(
        metadata_list=[
            feature_metadata_keep, feature_metadata_remove,
            feature_metadata_custom
        ],
        shared_raw_features='error_if_diff')

    # Therefore
    with pytest.raises(AssertionError):
        # Error because special contains feature not in raw
        FeatureMetadata(type_map_raw={'int': 'int'},
                        type_group_map_special={
                            'custom_special_type': ['int', 'new_feature']
                        })
    with pytest.raises(AssertionError):
        # Error because renaming to another existing feature without also renaming that feature
        feature_metadata.rename_features(rename_map={'text': 'obj'})
    with pytest.raises(KeyError):
        # Error if removing unknown feature
        feature_metadata_remove.remove_features(features=['text'])
    with pytest.raises(KeyError):
        # Error if getting unknown feature type
        feature_metadata_remove.get_feature_type_raw('text')
    with pytest.raises(KeyError):
        # Error if getting unknown feature type
        feature_metadata_remove.get_feature_types_special('text')
    with pytest.raises(AssertionError):
        # Error because feature_metadata_remove and feature_metadata_custom share a raw feature
        FeatureMetadata.join_metadatas(metadata_list=[
            feature_metadata_keep, feature_metadata_remove,
            feature_metadata_custom
        ])

    assert feature_metadata.to_dict(
        inverse=True) == expected_feature_metadata_full
    assert feature_metadata.get_features(
    ) == expected_feature_metadata_get_features
    assert feature_metadata.type_map_raw == expected_type_map_raw
    assert dict(feature_metadata.type_group_map_special
                ) == expected_type_group_map_special

    assert feature_metadata.get_feature_type_raw('text') == 'object'
    assert feature_metadata.get_feature_types_special('text') == ['text']
    assert feature_metadata.get_feature_type_raw('int') == 'int'
    assert feature_metadata.get_feature_types_special('int') == []
    assert feature_metadata_recombined_full.get_feature_types_special(
        'int') == ['custom_special_type']
    assert feature_metadata_recombined_full.get_feature_type_raw(
        'new_feature') == 'custom_raw_type'

    assert feature_metadata_renamed.to_dict(
        inverse=True) == expected_feature_metadata_renamed_full
    assert feature_metadata_recombined.to_dict() == feature_metadata.to_dict()
    assert feature_metadata_recombined_alternate.to_dict(
    ) == feature_metadata.to_dict()
    assert feature_metadata_recombined_full.to_dict(
        inverse=True) == expected_feature_metadata_recombined_full_full
Example #15
class PipelineFeatureGenerator(BulkFeatureGenerator):
    """
    PipelineFeatureGenerator is an implementation of BulkFeatureGenerator with various smart defaults and edge case handling functionality to enable robust data handling.
    It is recommended that users base any custom feature generators meant for end-to-end data transformation from PipelineFeatureGenerator.
        Reference AutoMLPipelineFeatureGenerator for an example of extending PipelineFeatureGenerator.
    It is not recommended that PipelineFeatureGenerator be used as a generator within any other generator's pre or post generators.
    """
    def __init__(self, pre_generators=None, post_generators=None, pre_drop_useless=True, pre_enforce_types=True, reset_index=True, verbosity=3, **kwargs):
        if pre_generators is None:
            pre_generators = [FillNaFeatureGenerator(inplace=True)]
        if post_generators is None:
            post_generators = [DropUniqueFeatureGenerator()]

        super().__init__(pre_generators=pre_generators, post_generators=post_generators, pre_drop_useless=pre_drop_useless, pre_enforce_types=pre_enforce_types, reset_index=reset_index, verbosity=verbosity, **kwargs)

        self._feature_metadata_in_real: FeatureMetadata = None  # FeatureMetadata object based on the original input features real dtypes (will contain dtypes such as 'int16' and 'float32' instead of 'int' and 'float').

        self._is_dummy = False  # If True, returns a single dummy feature as output. Occurs if fit with no useful features.

        self.pre_memory_usage = None
        self.pre_memory_usage_per_row = None
        self.post_memory_usage = None
        self.post_memory_usage_per_row = None

    def fit_transform(self, X: DataFrame, y=None, feature_metadata_in: FeatureMetadata = None, **kwargs) -> DataFrame:
        X_out = super().fit_transform(X=X, y=y, feature_metadata_in=feature_metadata_in, **kwargs)
        self._compute_post_memory_usage(X_out)
        # TODO: Consider adding final check of validity/that features are reasonable.

        return X_out

    def _fit_transform(self, X: DataFrame, y=None, **kwargs):
        X_out, type_group_map_special = super()._fit_transform(X=X, y=y, **kwargs)
        X_out, type_group_map_special = self._fit_transform_custom(X_out=X_out, type_group_map_special=type_group_map_special, y=y)
        return X_out, type_group_map_special

    def _fit_transform_custom(self, X_out: DataFrame, type_group_map_special: dict, y=None) -> (DataFrame, dict):
        if len(list(X_out.columns)) == 0:
            self._is_dummy = True
            self._log(30, f'\tWARNING: No useful features were detected in the data! AutoGluon will train using 0 features, and will always predict the same value. Ensure that you are passing the correct data to AutoGluon!')
            dummy_generator = DummyFeatureGenerator()
            X_out = dummy_generator.fit_transform(X=X_out)
            type_group_map_special = copy.deepcopy(dummy_generator.feature_metadata.type_group_map_special)
            self.generators = [[dummy_generator]]
            self._remove_features_in(features=self.features_in)
        return X_out, type_group_map_special

    def _infer_features_in_full(self, X: DataFrame, feature_metadata_in: FeatureMetadata = None):
        super()._infer_features_in_full(X=X, feature_metadata_in=feature_metadata_in)
        type_map_real = get_type_map_real(X[self.feature_metadata_in.get_features()])
        self._feature_metadata_in_real = FeatureMetadata(type_map_raw=type_map_real, type_group_map_special=self.feature_metadata_in.get_type_group_map_raw())

    def _remove_features_in(self, features: list):
        super()._remove_features_in(features)
        if features:
            self._feature_metadata_in_real = self._feature_metadata_in_real.remove_features(features=features)

    def _pre_fit_validate(self, X: DataFrame, **kwargs):
        super()._pre_fit_validate(X=X, **kwargs)
        self._ensure_no_duplicate_column_names(X=X)  # TODO: Remove this, move pre_memory_usage and post_memory_usage into super().
        self._compute_pre_memory_usage(X)

    def _compute_pre_memory_usage(self, X: DataFrame):
        X_len = len(X)
        self.pre_memory_usage = get_approximate_df_mem_usage(X, sample_ratio=0.2).sum()
        self.pre_memory_usage_per_row = self.pre_memory_usage / X_len
        available_mem = psutil.virtual_memory().available
        pre_memory_usage_percent = self.pre_memory_usage / (available_mem + self.pre_memory_usage)
        self._log(20, f'\tAvailable Memory:                    {(round((self.pre_memory_usage + available_mem) / 1e6, 2))} MB')
        self._log(20, f'\tTrain Data (Original)  Memory Usage: {round(self.pre_memory_usage / 1e6, 2)} MB ({round(pre_memory_usage_percent * 100, 1)}% of available memory)')
        if pre_memory_usage_percent > 0.05:
            self._log(30, f'\tWarning: Data size prior to feature transformation consumes {round(pre_memory_usage_percent * 100, 1)}% of available memory. Consider increasing memory or subsampling the data to avoid instability.')

    def _compute_post_memory_usage(self, X: DataFrame):
        X_len = len(X)
        self.post_memory_usage = get_approximate_df_mem_usage(X, sample_ratio=0.2).sum()
        self.post_memory_usage_per_row = self.post_memory_usage / X_len

        available_mem = psutil.virtual_memory().available
        post_memory_usage_percent = self.post_memory_usage / (available_mem + self.post_memory_usage + self.pre_memory_usage)
        self._log(20, f'\tTrain Data (Processed) Memory Usage: {round(self.post_memory_usage / 1e6, 2)} MB ({round(post_memory_usage_percent * 100, 1)}% of available memory)')
        if post_memory_usage_percent > 0.15:
            self._log(30, f'\tWarning: Data size post feature transformation consumes {round(post_memory_usage_percent * 100, 1)}% of available memory. Consider increasing memory or subsampling the data to avoid instability.')

    def print_feature_metadata_info(self, log_level=20):
        if self._useless_features_in:
            self._log(log_level, f'\tUseless Original Features (Count: {len(self._useless_features_in)}): {list(self._useless_features_in)}')
            self._log(log_level, f'\t\tThese features carry no predictive signal and should be manually investigated.')  # TODO: What about features with 1 unique value but also np.nan?
            self._log(log_level, f'\t\tThis is typically a feature which has the same value for all rows.')
            self._log(log_level, f'\t\tThese features do not need to be present at inference time.')
        if self._feature_metadata_in_unused.get_features():
            # TODO: Consider highlighting why a feature was unused (complex to implement, can check if was valid input to any generator in a generator group through feature chaining)
            self._log(log_level, f'\tUnused Original Features (Count: {len(self._feature_metadata_in_unused.get_features())}): {self._feature_metadata_in_unused.get_features()}')
            self._log(log_level, f'\t\tThese features were not used to generate any of the output features. Add a feature generator compatible with these features to utilize them.')
            self._log(log_level, f'\t\tFeatures can also be unused if they carry very little information, such as being categorical but having almost entirely unique values or being duplicates of other features.')
            self._log(log_level, f'\t\tThese features do not need to be present at inference time.')
            self._feature_metadata_in_unused.print_feature_metadata_full(self.log_prefix + '\t\t', log_level=log_level)
        self._log(log_level-5, '\tTypes of features in original data (exact raw dtype, raw dtype):')
        self._feature_metadata_in_real.print_feature_metadata_full(self.log_prefix + '\t\t', print_only_one_special=True, log_level=log_level-5)
        super().print_feature_metadata_info(log_level=log_level)
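As the docstring above suggests, custom end-to-end pipelines are typically built by subclassing PipelineFeatureGenerator, in the same way AutoMLPipelineFeatureGenerator does. A minimal sketch (assumptions: the parent accepts a `generators` argument holding stages of generator groups, and IdentityFeatureGenerator is importable from the same package):

from autogluon.features.generators import IdentityFeatureGenerator  # assumed import path


class MinimalPipelineFeatureGenerator(PipelineFeatureGenerator):
    """Pass features through unchanged while keeping the pipeline's pre/post handling
    (fill-NA, dtype enforcement, dropping near-unique features)."""
    def __init__(self, **kwargs):
        generators = [[IdentityFeatureGenerator()]]  # single stage with a single generator
        super().__init__(generators=generators, **kwargs)
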
class AbstractFeatureGenerator:
    """
    Abstract feature generator implementation from which all AutoGluon feature generators inherit.
    The purpose of a feature generator is to transform data from one form to another in a stateful manner.
    First, the generator is initialized with various arguments that dictate the way features are generated.
    Then, the generator is fit through either the `.fit()` or `.fit_transform()` methods using training data typically in pandas DataFrame format.
    Finally, the generator can transform new data with the same initial format as the training data through the `.transform()` method.

    Parameters
    ----------
    features_in : list, default None
        List of feature names the generator will expect and use in the fit and transform methods.
        Any feature in an incoming DataFrame that is not present in features_in is dropped and will not influence the transformation logic.
        If None, infer during fit from the _infer_features_in method.
        Equivalent to feature_metadata_in.get_features() post-fit.
    feature_metadata_in : :class:`autogluon.core.features.feature_metadata.FeatureMetadata`, default None
        :class:`FeatureMetadata` object corresponding to the training data input features.
        If None, infer during fit from the _infer_feature_metadata_in method.
        Any features not present in features_in (if provided) will be removed from feature_metadata_in.
    post_generators : list of FeatureGenerators, default None
        FeatureGenerators which will fit and transform sequentially after this object's transformation logic, feeding their output into the next generator's input.
        The output of the final FeatureGenerator will be used as the transformed output.
    pre_enforce_types : bool, default False
        If True, the exact raw types (int64, float32, etc.) of the training data will be enforced on future data, either converting the types to the training types or raising an exception if unable.
        This is important to set to True on the outer feature generator in a feature generation pipeline to ensure incorrect dtypes are not passed downstream, but is often redundant when used on inner feature generators inside a pipeline.
    pre_drop_useless : bool, default False
        If True, features_in will be pruned at fit time of features containing only a single unique value across all rows.
    post_drop_duplicates : bool, default False
        If True, a :class:`DropDuplicatesFeatureGenerator` will be appended to post_generators.
        This feature generator will drop any duplicate features found in the data, keeping only one feature within any duplicate feature sets.
        Warning: For large datasets with many features, this may be very computationally expensive or even computationally infeasible.
    reset_index : bool, default False
        If True, for the duration of fit and transform, the input data's index is reset to be monotonically increasing from 0 to N-1 for a dataset of N rows.
        At the end of fit and transform, the original index is re-applied to the output data.
        This is important to set to True on the outer feature generator in a feature generation pipeline to ensure that a non-default index does not cause corruption of the inner feature generation if any inner feature generator does not properly handle non-default indices.
        This index reset is also applied to the y label data if provided during fit.
    column_names_as_str : bool, default True
        If True, the column names of the input data are converted to string if they were not already.
        This solves any issues related to downstream FeatureGenerators and models which cannot handle integer column names, and allows column name prefix and suffix operations to avoid errors.
        Note that for performance purposes, column names are only converted at transform time if they were not strings at fit time. Ensure consistent column names as input to avoid errors.
    name_prefix : str, default None
        Name prefix to add to all output feature names.
    name_suffix : str, default None
        Name suffix to add to all output feature names.
    infer_features_in_args : dict, default None
        Used as the kwargs input to FeatureMetadata.get_features(**kwargs) when inferring self.features_in.
        This is merged with the output dictionary of self.get_default_infer_features_in_args() depending on the value of infer_features_in_args_strategy.
        Only used when features_in is None.
        If None, then self.get_default_infer_features_in_args() is used directly.
        Refer to FeatureMetadata.get_features documentation for a full description of valid keys.
        Note: This is advanced functionality that is not necessary for most situations.
    infer_features_in_args_strategy : str, default 'overwrite'
        Determines how infer_features_in_args and self.get_default_infer_features_in_args() are combined to result in self._infer_features_in_args which dictates the features_in inference logic.
        If 'overwrite': infer_features_in_args is used exclusively and self.get_default_infer_features_in_args() is ignored.
        If 'update': self.get_default_infer_features_in_args() is dictionary updated by infer_features_in_args.
        If infer_features_in_args is None, this is ignored.
    log_prefix : str, default ''
        Prefix string added to all logging statements made by the generator.
    verbosity : int, default 2
        Controls the verbosity of logging.
        0 will silence logs, 1 will only log warnings, 2 will log info level information, and 3 will log info level information and provide detailed feature type input and output information.
        Logging is still controlled by the global logger configuration, and therefore a verbosity of 3 does not guarantee that logs will be output.

    Attributes
    ----------
    features_in : list of str
        List of feature names the generator will expect and use in the fit and transform methods.
        Equivalent to feature_metadata_in.get_features() post-fit.
    features_out : list of str
        List of feature names present in the output of fit_transform and transform methods.
        Equivalent to feature_metadata.get_features() post-fit.
    feature_metadata_in : FeatureMetadata
        The FeatureMetadata of data pre-transformation (data used as input to fit and transform methods).
    feature_metadata : FeatureMetadata
        The FeatureMetadata of data post-transformation (data outputted by fit_transform and transform methods).
    feature_metadata_real : FeatureMetadata
        The FeatureMetadata of data post-transformation consisting of the exact dtypes as opposed to the grouped raw dtypes found in feature_metadata, with grouped raw dtypes substituting for the special dtypes.
        This is only used in the print_feature_metadata_info method and is intended for introspection. It can be safely set to None to reduce memory and disk usage post-fit.
    """
    def __init__(
        self,
        features_in: list = None,
        feature_metadata_in: FeatureMetadata = None,
        post_generators: list = None,
        pre_enforce_types=False,
        pre_drop_useless=False,
        post_drop_duplicates=False,
        reset_index=False,
        column_names_as_str=True,
        name_prefix: str = None,
        name_suffix: str = None,
        infer_features_in_args: dict = None,
        infer_features_in_args_strategy='overwrite',
        log_prefix='',
        verbosity=2
    ):
        self._is_fit = False  # Whether the feature generator has been fit
        self.features_in = features_in  # Original features to use as input to feature generation
        self.features_out = None  # Final list of features after transformation
        self.feature_metadata_in: FeatureMetadata = feature_metadata_in  # FeatureMetadata object based on the original input features.
        self.feature_metadata: FeatureMetadata = None  # FeatureMetadata object based on the processed features. Pass to models to enable advanced functionality.
        # TODO: Consider merging feature_metadata and feature_metadata_real, have FeatureMetadata contain exact dtypes, grouped raw dtypes, and special dtypes all at once.
        self.feature_metadata_real: FeatureMetadata = None  # FeatureMetadata object based on the processed features, containing the true raw dtype information (such as int32, float64, etc.). Pass to models to enable advanced functionality.
        self._feature_metadata_before_post = None  # FeatureMetadata directly prior to applying self._post_generators.
        self._infer_features_in_args = self.get_default_infer_features_in_args()
        if infer_features_in_args is not None:
            if infer_features_in_args_strategy == 'overwrite':
                self._infer_features_in_args = copy.deepcopy(infer_features_in_args)
            elif infer_features_in_args_strategy == 'update':
                self._infer_features_in_args.update(infer_features_in_args)
            else:
                raise ValueError(f"infer_features_in_args_strategy must be one of: {['overwrite', 'update']}, but was: '{infer_features_in_args_strategy}'")

        if post_generators is None:
            post_generators = []
        elif not isinstance(post_generators, list):
            post_generators = [post_generators]
        self._post_generators: list = post_generators
        if post_drop_duplicates:
            from .drop_duplicates import DropDuplicatesFeatureGenerator
            self._post_generators.append(DropDuplicatesFeatureGenerator(post_drop_duplicates=False))
        if name_prefix or name_suffix:
            from .rename import RenameFeatureGenerator
            self._post_generators.append(RenameFeatureGenerator(name_prefix=name_prefix, name_suffix=name_suffix, inplace=True))

        if self._post_generators:
            if not self.get_tags().get('allow_post_generators', True):
                raise AssertionError(f'{self.__class__.__name__} is not allowed to have post_generators, but found: {[generator.__class__.__name__ for generator in self._post_generators]}')

        self.pre_enforce_types = pre_enforce_types
        self._pre_astype_generator = None
        self.pre_drop_useless = pre_drop_useless
        self.reset_index = reset_index
        self.column_names_as_str = column_names_as_str
        self._useless_features_in: list = None

        self._is_updated_name = False  # If feature names have been altered by name_prefix or name_suffix

        self.log_prefix = log_prefix
        self.verbosity = verbosity

        self.fit_time = None

    def fit(self, X: DataFrame, **kwargs):
        """
        Fit generator to the provided data.
        Because of how the generators track output features and types, it is generally required that the data be transformed during fit, so the fit function is rarely useful to implement beyond a simple call to fit_transform.

        Parameters
        ----------
        X : DataFrame
            Input data used to fit the generator.
        **kwargs
            Any additional arguments that a particular generator implementation could use.
            See fit_transform method for common kwargs values.
        """
        self.fit_transform(X, **kwargs)

    def fit_transform(self, X: DataFrame, y: Series = None, feature_metadata_in: FeatureMetadata = None, **kwargs) -> DataFrame:
        """
        Fit generator to the provided data and return the transformed version of the data as if fit and transform were called sequentially with the same data.
        This is generally more efficient than calling fit and transform separately and can be up to twice as fast if the fit process requires transformation of the data.
        This cannot be called after the generator has been fit, and will result in an AssertionError.

        Parameters
        ----------
        X : DataFrame
            Input data used to fit the generator.
        y : Series, optional
            Input data's labels used to fit the generator. Most generators do not utilize labels.
            y.index must be equal to X.index to avoid misalignment.
        feature_metadata_in : FeatureMetadata, optional
            Identical to providing feature_metadata_in during generator initialization. Ignored if self.feature_metadata_in is already specified.
            If neither are set, feature_metadata_in will be inferred from the _infer_feature_metadata_in method.
        **kwargs
            Any additional arguments that a particular generator implementation could use. Passed to _fit_transform and _fit_generators methods.

        Returns
        -------
        X_out : DataFrame object which is the transformed version of the input data X.

        """
        start_time = time.time()
        self._log(20, f'Fitting {self.__class__.__name__}...')
        if self._is_fit:
            raise AssertionError(f'{self.__class__.__name__} is already fit.')
        self._pre_fit_validate(X=X, y=y, feature_metadata_in=feature_metadata_in, **kwargs)

        if self.reset_index:
            X_index = copy.deepcopy(X.index)
            X = X.reset_index(drop=True)  # TODO: Theoretically inplace=True avoids data copy, but can lead to altering of original DataFrame outside of method context.
            if y is not None and isinstance(y, Series):
                y = y.reset_index(drop=True)  # TODO: this assumes y and X had matching indices prior
        else:
            X_index = None
        if self.column_names_as_str:
            columns_orig = list(X.columns)
            X.columns = X.columns.astype(str)  # Ensure all column names are strings
            columns_new = list(X.columns)
            if columns_orig != columns_new:
                rename_map = {orig: new for orig, new in zip(columns_orig, columns_new)}
                if feature_metadata_in is not None:
                    feature_metadata_in.rename_features(rename_map=rename_map)
                self._rename_features_in(rename_map)
            else:
                self.column_names_as_str = False  # Columns were already string, so don't do conversion. Better to error if they change types at inference.
        self._ensure_no_duplicate_column_names(X=X)
        self._infer_features_in_full(X=X, feature_metadata_in=feature_metadata_in)
        if self.pre_drop_useless:
            self._useless_features_in = self._get_useless_features(X)
            if self._useless_features_in:
                self._remove_features_in(self._useless_features_in)
        if self.pre_enforce_types:
            from .astype import AsTypeFeatureGenerator
            self._pre_astype_generator = AsTypeFeatureGenerator(features_in=self.features_in, feature_metadata_in=self.feature_metadata_in, log_prefix=self.log_prefix + '\t')
            self._pre_astype_generator.fit(X)

        # TODO: Add option to return feature_metadata instead to avoid data copy
        #  If so, consider adding validation step to check that X_out matches the feature metadata, error/warning if not
        X_out, type_family_groups_special = self._fit_transform(X[self.features_in], y=y, **kwargs)

        type_map_raw = get_type_map_raw(X_out)
        self._feature_metadata_before_post = FeatureMetadata(type_map_raw=type_map_raw, type_group_map_special=type_family_groups_special)
        if self._post_generators:
            X_out, self.feature_metadata, self._post_generators = self._fit_generators(X=X_out, y=y, feature_metadata=self._feature_metadata_before_post, generators=self._post_generators, **kwargs)
        else:
            self.feature_metadata = self._feature_metadata_before_post
        type_map_real = get_type_map_real(X_out)
        self.features_out = list(X_out.columns)
        self.feature_metadata_real = FeatureMetadata(type_map_raw=type_map_real, type_group_map_special=self.feature_metadata.get_type_group_map_raw())

        self._post_fit_cleanup()
        if self.reset_index:
            X_out.index = X_index
        self._is_fit = True
        end_time = time.time()
        self.fit_time = end_time - start_time
        if self.verbosity >= 3:
            self.print_feature_metadata_info(log_level=20)
            self.print_generator_info(log_level=20)
        elif self.verbosity == 2:
            self.print_feature_metadata_info(log_level=15)
            self.print_generator_info(log_level=15)
        return X_out

    def transform(self, X: DataFrame) -> DataFrame:
        """
        Transforms input data into the output data format.
        Will raise an AssertionError if called before the generator has been fit using fit or fit_transform methods.

        Parameters
        ----------
        X : DataFrame
            Input data to be transformed by the generator.
            Input data must contain all features in features_in, and should have the same dtypes as in the data provided to fit.
            Extra columns present in X that are not in features_in will be ignored and not affect the output.

        Returns
        -------
        X_out : DataFrame object which is the transformed version of the input data X.
        """
        if not self._is_fit:
            raise AssertionError(f'{self.__class__.__name__} is not fit.')
        if self.reset_index:
            X_index = copy.deepcopy(X.index)
            X = X.reset_index(drop=True)  # TODO: Theoretically inplace=True avoids data copy, but can lead to altering of original DataFrame outside of method context.
        else:
            X_index = None
        if self.column_names_as_str:
            X.columns = X.columns.astype(str)  # Ensure all column names are strings
        try:
            X = X[self.features_in]
        except KeyError:
            missing_cols = []
            for col in self.features_in:
                if col not in X.columns:
                    missing_cols.append(col)
            raise KeyError(f'{len(missing_cols)} required columns are missing from the provided dataset to transform using {self.__class__.__name__}. Missing columns: {missing_cols}')
        if self._pre_astype_generator:
            X = self._pre_astype_generator.transform(X)
        X_out = self._transform(X)
        if self._post_generators:
            X_out = self._transform_generators(X=X_out, generators=self._post_generators)
        if self.reset_index:
            X_out.index = X_index
        return X_out

    def _fit_transform(self, X: DataFrame, y: Series, **kwargs) -> (DataFrame, dict):
        """
        Performs the inner fit_transform logic that is non-generic (specific to the generator implementation).
        When creating a new generator class, this should be implemented.
        At the point this method is called, self.features_in and self.feature_metadata_in will be set, and can be accessed and altered freely.

        Parameters
        ----------
        X : DataFrame
            Input data used to fit the generator.
            This data will have already been limited to only the columns present in self.features_in.
            This data may have been altered by the fit_transform method prior to entering _fit_transform in a variety of ways, but self.features_in and self.feature_metadata_in will correctly correspond to X at this point in the generator's fit process.
        y : Series, optional
            Input data's labels used to fit the generator. Most generators do not utilize labels.
            y.index is always equal to X.index.
        **kwargs
            Any additional arguments that a particular generator implementation could use. Received from the fit_transform method.

        Returns
        -------
        (X_out : DataFrame, type_group_map_special : dict)
            X_out is the transformed version of the input data X
            type_group_map_special is the type_group_map_special value of X_out's intended FeatureMetadata object.
                If special types are not relevant to the generator, this can simply be dict()
                If the input and output features are identical in name and type, it may be valid to return self.feature_metadata_in.type_group_map_special to maintain any pre-existing special type information.
                Refer to existing generator implementations for guidance on setting the dict output of _fit_transform.

        """
        raise NotImplementedError

    def _transform(self, X: DataFrame) -> DataFrame:
        """
        Performs the inner transform logic that is non-generic (specific to the generator implementation).
        When creating a new generator class, this should be implemented.
        At the point this method is called, self.features_in and self.feature_metadata_in will be set, and can be accessed freely.

        Parameters
        ----------
        X : DataFrame
            Input data to be transformed by the generator.
            This data will have already been limited to only the columns present in self.features_in.
            This data may have been altered by the transform method prior to entering _transform in a variety of ways, but self.features_in and self.feature_metadata_in will correctly correspond to X at this point in the generator's transform process.

        Returns
        -------
        X_out : DataFrame object which is the transformed version of the input data X.
        """
        raise NotImplementedError

    def _infer_features_in_full(self, X: DataFrame, feature_metadata_in: FeatureMetadata = None):
        """
        Infers all input related feature information of X.
        This can be extended when additional input information is desired beyond feature_metadata_in and features_in.
            For example, AsTypeFeatureGenerator extends this method to also compute the exact raw feature types of the input for later use.
        After this method returns, self.features_in and self.feature_metadata_in will be set to proper values.
        This method is called by fit_transform prior to calling _fit_transform.

        Parameters
        ----------
        X : DataFrame
            Input data used to fit the generator.
        feature_metadata_in : FeatureMetadata, optional
            If passed, self.feature_metadata_in will be set to feature_metadata_in, assuming self.feature_metadata_in was not already set.
            If both are None, then self.feature_metadata_in is inferred through _infer_feature_metadata_in(X).
        """
        if self.feature_metadata_in is None:
            self.feature_metadata_in = feature_metadata_in
        elif feature_metadata_in is not None:
            self._log(30, '\tWarning: feature_metadata_in passed as input to fit_transform, but self.feature_metadata_in was already set. Ignoring feature_metadata_in.')
        if self.feature_metadata_in is None:
            self._log(20, f'\tInferring data type of each feature based on column values. Set feature_metadata_in to manually specify special dtypes of the features.')
            self.feature_metadata_in = self._infer_feature_metadata_in(X=X)
        if self.features_in is None:
            self.features_in = self._infer_features_in(X=X)
            self.features_in = [feature for feature in self.features_in if feature in X.columns]
        self.feature_metadata_in = self.feature_metadata_in.keep_features(features=self.features_in)

    # TODO: Find way to increase flexibility here, possibly through init args
    def _infer_features_in(self, X: DataFrame) -> list:
        """
        Infers the features_in of X.
        This is used if features_in was not provided by the user prior to fit.
        This can be overwritten in a new generator to use new infer logic.
        self.feature_metadata_in is available at the time this method is called.

        Parameters
        ----------
        X : DataFrame
            Input data used to fit the generator.

        Returns
        -------
        features_in : list of str feature names inferred from X.
        """
        return self.feature_metadata_in.get_features(**self._infer_features_in_args)

    # TODO: Use code from problem type detection for column types. Ints/Floats could be Categorical through this method. Maybe try both?
    @staticmethod
    def _infer_feature_metadata_in(X: DataFrame) -> FeatureMetadata:
        """
        Infers the feature_metadata_in of X.
        This is used if feature_metadata_in was not provided by the user prior to fit.
        This can be overwritten in a new generator to use new infer logic, but it is preferred to keep the default logic for consistency with other generators.

        Parameters
        ----------
        X : DataFrame
            Input data used to fit the generator.

        Returns
        -------
        feature_metadata_in : FeatureMetadata object inferred from X.
        """
        type_map_raw = get_type_map_raw(X)
        type_group_map_special = get_type_group_map_special(X)
        return FeatureMetadata(type_map_raw=type_map_raw, type_group_map_special=type_group_map_special)

    @staticmethod
    def get_default_infer_features_in_args() -> dict:
        raise NotImplementedError

    def _fit_generators(self, X, y, feature_metadata, generators: list, **kwargs) -> (DataFrame, FeatureMetadata, list):
        """
        Fit a list of AbstractFeatureGenerator objects in sequence, with the output of generators[i] fed as the input to generators[i+1].
        This is called to sequentially fit the generators in self._post_generators on the output of _fit_transform to obtain the final output of the generator.
        This should not be overwritten by implementations of AbstractFeatureGenerator.
        """
        for generator in generators:
            generator.verbosity = min(self.verbosity, generator.verbosity)
            generator.set_log_prefix(log_prefix=self.log_prefix + '\t', prepend=True)
            X = generator.fit_transform(X=X, y=y, feature_metadata_in=feature_metadata, **kwargs)
            feature_metadata = generator.feature_metadata
        return X, feature_metadata, generators

    @staticmethod
    def _transform_generators(X, generators: list) -> DataFrame:
        """
        Transforms X through a list of AbstractFeatureGenerator objects in sequence, with the output of generators[i] fed as the input to generators[i+1].
        This is called to sequentially apply the generators in self._post_generators to the output of _transform to obtain the final output of the generator.
        This should not be overwritten by implementations of AbstractFeatureGenerator.
        """
        for generator in generators:
            X = generator.transform(X=X)
        return X

    def _remove_features_in(self, features: list):
        """
        Removes features from all relevant objects which represent the content of the input data or how the input features are used.
        For example, DropDuplicatesFeatureGenerator calls this method during _fit_transform with the list of duplicate features.
            This allows DropDuplicatesFeatureGenerator's _transform method to simply return X, as the duplicate features are already dropped in the transform method due to not being in self.features_in.

        Parameters
        ----------
        features : list of str
            List of feature names to remove from the expected input.
        """
        if features:
            if self._feature_metadata_before_post:
                feature_links_chain = self.get_feature_links_chain()
                for feature in features:
                    feature_links_chain[0].pop(feature)
                features_to_keep = set()
                for features_out in feature_links_chain[0].values():
                    features_to_keep = features_to_keep.union(features_out)
                self._feature_metadata_before_post = self._feature_metadata_before_post.keep_features(features_to_keep)

            self.feature_metadata_in = self.feature_metadata_in.remove_features(features=features)
            self.features_in = self.feature_metadata_in.get_features()
            if self._pre_astype_generator:
                self._pre_astype_generator._remove_features_out(features)

    # TODO: Ensure arbitrary feature removal does not result in inconsistencies (add unit test)
    def _remove_features_out(self, features: list):
        """
        Removes features from the output data.
        This is used for cleaning complex pipelines of unnecessary operations after fitting a sequence of generators.
        Implementations of AbstractFeatureGenerator should not need to alter this method.

        Parameters
        ----------
        features : list of str
            List of feature names to remove from the output of self.transform().
        """
        feature_links_chain = self.get_feature_links_chain()
        if features:
            self.feature_metadata = self.feature_metadata.remove_features(features=features)
            self.feature_metadata_real = self.feature_metadata_real.remove_features(features=features)
            self.features_out = self.feature_metadata.get_features()
            feature_links_chain[-1] = {feature_in: [feature_out for feature_out in features_out if feature_out not in features] for feature_in, features_out in feature_links_chain[-1].items()}
        self._remove_unused_features(feature_links_chain=feature_links_chain)

    def _remove_unused_features(self, feature_links_chain):
        unused_features = self._get_unused_features(feature_links_chain=feature_links_chain)
        self._remove_features_in(features=unused_features[0])
        for i, generator in enumerate(self._post_generators):
            for feature in unused_features[i + 1]:
                if feature in feature_links_chain[i + 1]:
                    feature_links_chain[i + 1].pop(feature)
            generated_features = set()
            for feature_in in feature_links_chain[i + 1]:
                generated_features = generated_features.union(feature_links_chain[i + 1][feature_in])
            features_out_to_remove = [feature for feature in generator.features_out if feature not in generated_features]
            generator._remove_features_out(features_out_to_remove)

    def _rename_features_in(self, column_rename_map: dict):
        if self.feature_metadata_in is not None:
            self.feature_metadata_in = self.feature_metadata_in.rename_features(column_rename_map)
        if self.features_in is not None:
            self.features_in = [column_rename_map.get(col, col) for col in self.features_in]

    def _pre_fit_validate(self, X: DataFrame, y: Series, **kwargs):
        """
        Any data validation checks prior to fitting the data should be done here.
        """
        if y is not None and isinstance(y, Series):
            if list(y.index) != list(X.index):
                raise AssertionError(f'y.index and X.index must be equal when fitting {self.__class__.__name__}, but they differ.')

    def _post_fit_cleanup(self):
        """
        Any cleanup operations after all metadata objects have been constructed, but prior to feature renaming, should be done here.
        This includes removing keys from internal lists and dictionaries of features which have been removed, and deletion of any temp variables.
        """
        pass

    def _ensure_no_duplicate_column_names(self, X: DataFrame):
        if len(X.columns) != len(set(X.columns)):
            count_dict = defaultdict(int)
            invalid_columns = []
            for column in list(X.columns):
                count_dict[column] += 1
            for column in count_dict:
                if count_dict[column] > 1:
                    invalid_columns.append(column)
            raise AssertionError(f'Columns appear multiple times in X. Columns must be unique. Invalid columns: {invalid_columns}')

    # TODO: Move to a generator
    @staticmethod
    def _get_useless_features(X: DataFrame) -> list:
        useless_features = []
        for column in X:
            if is_useless_feature(X[column]):
                useless_features.append(column)
        return useless_features

    # TODO: Consider adding _log and verbosity methods to mixin
    def set_log_prefix(self, log_prefix, prepend=False):
        if prepend:
            self.log_prefix = log_prefix + self.log_prefix
        else:
            self.log_prefix = log_prefix

    def set_verbosity(self, verbosity: int):
        self.verbosity = verbosity

    def _log(self, level, msg, log_prefix=None, verb_min=None):
        if self.verbosity == 0:
            return
        if verb_min is None or self.verbosity >= verb_min:
            if log_prefix is None:
                log_prefix = self.log_prefix
            logger.log(level, f'{log_prefix}{msg}')

    def is_fit(self):
        return self._is_fit

    # TODO: Handle cases where self.features_in or self.feature_metadata_in was already set at init.
    def is_valid_metadata_in(self, feature_metadata_in: FeatureMetadata):
        """
        True if input data with feature metadata of feature_metadata_in could result in non-empty output.
            This is dictated by `feature_metadata_in.get_features(**self._infer_features_in_args)` not being empty.
        False if the features represented in feature_metadata_in do not contain any usable types for the generator.
            For example, if only numeric features are passed as input to TextSpecialFeatureGenerator which requires text input features, this will return False.
            However, if both numeric and text features are passed, this will return True since the text features would be valid input (the numeric features would simply be dropped).
        """
        features_in = feature_metadata_in.get_features(**self._infer_features_in_args)
        if features_in:
            return True
        else:
            return False

    def get_feature_links(self) -> Dict[str, List[str]]:
        """Returns feature links including all pre and post generators."""
        return self._get_feature_links_from_chain(self.get_feature_links_chain())

    def _get_feature_links(self, features_in: List[str], features_out: List[str]) -> Dict[str, List[str]]:
        """Returns feature links ignoring all pre and post generators."""
        feature_links = {}
        if self.get_tags().get('feature_interactions', True):
            for feature_in in features_in:
                feature_links[feature_in] = features_out
        else:
            for feat_old, feat_new in zip(features_in, features_out):
                feature_links[feat_old] = feature_links.get(feat_old, []) + [feat_new]
        return feature_links

    def get_feature_links_chain(self) -> List[Dict[str, List[str]]]:
        """Get the feature dependence chain between this generator and all of its post generators."""
        features_out_internal = self._feature_metadata_before_post.get_features()

        generators = [self] + self._post_generators
        features_in_list = [self.features_in] + [generator.features_in for generator in self._post_generators]
        features_out_list = [features_out_internal] + [generator.features_out for generator in self._post_generators]

        feature_links_chain = []
        for i in range(len(features_in_list)):
            generator = generators[i]
            features_in = features_in_list[i]
            features_out = features_out_list[i]
            feature_chain = generator._get_feature_links(features_in=features_in, features_out=features_out)
            feature_links_chain.append(feature_chain)
        return feature_links_chain

    @staticmethod
    def _get_feature_links_from_chain(feature_links_chain: List[Dict[str, List[str]]]) -> Dict[str, List[str]]:
        """Get the final input and output feature links by travelling the feature link chain"""
        features_out = []
        for val in feature_links_chain[-1].values():
            if val not in features_out:
                features_out.append(val)
        features_in = list(feature_links_chain[0].keys())
        feature_links = feature_links_chain[0]
        for i in range(1, len(feature_links_chain)):
            feature_links_new = {}
            for feature in features_in:
                feature_links_new[feature] = set()
                for feature_out in feature_links[feature]:
                    feature_links_new[feature] = feature_links_new[feature].union(feature_links_chain[i].get(feature_out, []))
                feature_links_new[feature] = list(feature_links_new[feature])
            feature_links = feature_links_new
        return feature_links

    def _get_unused_features(self, feature_links_chain: List[Dict[str, List[str]]]):
        features_in_list = [self.features_in]
        if self._post_generators:
            for i in range(len(self._post_generators)):
                if i == 0:
                    features_in = self._feature_metadata_before_post.get_features()
                else:
                    features_in = self._post_generators[i-1].features_out
                features_in_list.append(features_in)
        return self._get_unused_features_generic(feature_links_chain=feature_links_chain, features_in_list=features_in_list)

    # TODO: Unit test this
    @staticmethod
    def _get_unused_features_generic(feature_links_chain: List[Dict[str, List[str]]], features_in_list: List[List[str]]) -> List[List[str]]:
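        # Illustrative trace: with feature_links_chain=[{'a': ['a1'], 'b': ['b1']}, {'a1': ['a1x'], 'b1': []}]
        # and features_in_list=[['a', 'b'], ['a1', 'b1']], 'b1' yields no output in the last stage,
        # which in turn leaves 'b' unused in the first stage, so the result is [['b'], ['b1']].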
        unused_features = []
        unused_features_by_stage = []
        for i, chain in enumerate(reversed(feature_links_chain)):
            stage = len(feature_links_chain) - i
            used_features = set()
            for key in chain.keys():
                new_val = [val for val in chain[key] if val not in unused_features]
                if new_val:
                    used_features.add(key)
            features_in = features_in_list[stage - 1]
            unused_features = []
            for feature in features_in:
                if feature not in used_features:
                    unused_features.append(feature)
            unused_features_by_stage.append(unused_features)
        unused_features_by_stage = list(reversed(unused_features_by_stage))
        return unused_features_by_stage

    def print_generator_info(self, log_level: int = 20):
        """
        Outputs detailed logs of the generator, such as the fit runtime.

        Parameters
        ----------
        log_level : int, default 20
            Log level of the logging statements.
        """
        if self.fit_time:
            self._log(log_level, f'\t{round(self.fit_time, 1)}s = Fit runtime')
            self._log(log_level, f'\t{len(self.features_in)} features in original data used to generate {len(self.features_out)} features in processed data.')

    def print_feature_metadata_info(self, log_level: int = 20):
        """
        Outputs detailed logs of a fit feature generator including the input and output FeatureMetadata objects' feature types.

        Parameters
        ----------
        log_level : int, default 20
            Log level of the logging statements.
        """
        self._log(log_level, '\tTypes of features in original data (raw dtype, special dtypes):')
        self.feature_metadata_in.print_feature_metadata_full(self.log_prefix + '\t\t', log_level=log_level)
        if self.feature_metadata_real:
            self._log(log_level-5, '\tTypes of features in processed data (exact raw dtype, raw dtype):')
            self.feature_metadata_real.print_feature_metadata_full(self.log_prefix + '\t\t', print_only_one_special=True, log_level=log_level-5)
        self._log(log_level, '\tTypes of features in processed data (raw dtype, special dtypes):')
        self.feature_metadata.print_feature_metadata_full(self.log_prefix + '\t\t', log_level=log_level)

    def save(self, path: str):
        save_pkl.save(path=path, object=self)

    def _more_tags(self) -> dict:
        """
        Special values to enable advanced functionality.

        Tags
        ----
        feature_interactions : bool, default True
            If True, then treat all features_out as if they depend on all features_in.
            If False, then treat each features_out as if it was generated by a 1:1 mapping (no feature interactions).
                This enables advanced functionality regarding automated feature pruning, but is only valid for generators which only transform each feature and do not perform interactions.
        allow_post_generators : bool, default True
            If False, will raise an AssertionError if post_generators is specified during init.
                This is reserved for very simple generators where including post_generators would not be sensible, such as in RenameFeatureGenerator.
        """
        return {}

    def get_tags(self) -> dict:
        """Gets the tags for this generator."""
        collected_tags = {}
        for base_class in reversed(inspect.getmro(self.__class__)):
            if hasattr(base_class, '_more_tags'):
                # need the if because mixins might not have _more_tags
                # but might do redundant work in estimators
                # (i.e. calling more tags on BaseEstimator multiple times)
                more_tags = base_class._more_tags(self)
                collected_tags.update(more_tags)
        return collected_tags

    def fit_transform(self, X: DataFrame, y: Series = None, feature_metadata_in: FeatureMetadata = None, **kwargs) -> DataFrame:
        """
        Fit generator to the provided data and return the transformed version of the data as if fit and transform were called sequentially with the same data.
        This is generally more efficient than calling fit and transform separately and can be up to twice as fast if the fit process requires transformation of the data.
        Calling this method after the generator has already been fit will raise an AssertionError.

        Parameters
        ----------
        X : DataFrame
            Input data used to fit the generator.
        y : Series, optional
            Input data's labels used to fit the generator. Most generators do not utilize labels.
            y.index must be equal to X.index to avoid misalignment.
        feature_metadata_in : FeatureMetadata, optional
            Identical to providing feature_metadata_in during generator initialization. Ignored if self.feature_metadata_in is already specified.
            If neither is set, feature_metadata_in will be inferred via the _infer_feature_metadata_in method.
        **kwargs
            Any additional arguments that a particular generator implementation could use. Passed to _fit_transform and _fit_generators methods.

        Returns
        -------
        X_out : DataFrame object which is the transformed version of the input data X.

        """
        start_time = time.time()
        self._log(20, f'Fitting {self.__class__.__name__}...')
        if self._is_fit:
            raise AssertionError(f'{self.__class__.__name__} is already fit.')
        self._pre_fit_validate(X=X, y=y, feature_metadata_in=feature_metadata_in, **kwargs)

        if self.reset_index:
            X_index = copy.deepcopy(X.index)
            X = X.reset_index(drop=True)  # TODO: Theoretically inplace=True avoids data copy, but can lead to altering of original DataFrame outside of method context.
            if y is not None and isinstance(y, Series):
                y = y.reset_index(drop=True)  # TODO: this assumes y and X had matching indices prior
        else:
            X_index = None
        if self.column_names_as_str:
            columns_orig = list(X.columns)
            X.columns = X.columns.astype(str)  # Ensure all column names are strings
            columns_new = list(X.columns)
            if columns_orig != columns_new:
                rename_map = {orig: new for orig, new in zip(columns_orig, columns_new)}
                if feature_metadata_in is not None:
                    feature_metadata_in = feature_metadata_in.rename_features(rename_map=rename_map)  # rename_features returns a new FeatureMetadata object
                self._rename_features_in(rename_map)
            else:
                self.column_names_as_str = False  # Columns were already string, so don't do conversion. Better to error if they change types at inference.
        self._ensure_no_duplicate_column_names(X=X)
        self._infer_features_in_full(X=X, feature_metadata_in=feature_metadata_in)
        if self.pre_drop_useless:
            self._useless_features_in = self._get_useless_features(X)
            if self._useless_features_in:
                self._remove_features_in(self._useless_features_in)
        if self.pre_enforce_types:
            from .astype import AsTypeFeatureGenerator
            self._pre_astype_generator = AsTypeFeatureGenerator(features_in=self.features_in, feature_metadata_in=self.feature_metadata_in, log_prefix=self.log_prefix + '\t')
            self._pre_astype_generator.fit(X)

        # TODO: Add option to return feature_metadata instead to avoid data copy
        #  If so, consider adding validation step to check that X_out matches the feature metadata, error/warning if not
        X_out, type_family_groups_special = self._fit_transform(X[self.features_in], y=y, **kwargs)

        type_map_raw = get_type_map_raw(X_out)
        self._feature_metadata_before_post = FeatureMetadata(type_map_raw=type_map_raw, type_group_map_special=type_family_groups_special)
        if self._post_generators:
            X_out, self.feature_metadata, self._post_generators = self._fit_generators(X=X_out, y=y, feature_metadata=self._feature_metadata_before_post, generators=self._post_generators, **kwargs)
        else:
            self.feature_metadata = self._feature_metadata_before_post
        type_map_real = get_type_map_real(X_out)
        self.features_out = list(X_out.columns)
        self.feature_metadata_real = FeatureMetadata(type_map_raw=type_map_real, type_group_map_special=self.feature_metadata.get_type_group_map_raw())

        self._post_fit_cleanup()
        if self.reset_index:
            X_out.index = X_index
        self._is_fit = True
        end_time = time.time()
        self.fit_time = end_time - start_time
        if self.verbosity >= 3:
            self.print_feature_metadata_info(log_level=20)
            self.print_generator_info(log_level=20)
        elif self.verbosity == 2:
            self.print_feature_metadata_info(log_level=15)
            self.print_generator_info(log_level=15)
        return X_out
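

# Illustrative sketch (not part of the original source): a minimal custom generator that follows the
# _fit_transform / _transform contract documented above. The name IdentityFeatureGenerator and its
# behavior here are assumptions for demonstration; it simply passes all input features through unchanged.
class IdentityFeatureGenerator(AbstractFeatureGenerator):
    def _fit_transform(self, X: DataFrame, **kwargs) -> (DataFrame, dict):
        # Input and output features are identical, so pre-existing special type information is preserved.
        return X, self.feature_metadata_in.type_group_map_special

    def _transform(self, X: DataFrame) -> DataFrame:
        # Stateless pass-through: nothing to recompute at inference time.
        return X

    @staticmethod
    def get_default_infer_features_in_args() -> dict:
        # No restrictions: accept all input features.
        return dict()

    def _more_tags(self):
        # Each output feature depends only on its corresponding input feature.
        return {'feature_interactions': False}
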
class AsTypeFeatureGenerator(AbstractFeatureGenerator):
    """
    Enforces type conversion on the data to match the types seen during fitting.
    If a feature cannot be converted to the correct type, an exception will be raised.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._feature_metadata_in_real: FeatureMetadata = None  # FeatureMetadata object based on the original input features real dtypes (will contain dtypes such as 'int16' and 'float32' instead of 'int' and 'float').
        self._type_map_real_opt: dict = None  # Optimized representation of data types, saves a few milliseconds during comparisons in online inference
        # self.inplace = inplace  # TODO, also add check if dtypes are same as expected and skip .astype
        self._int_features = None

    # TODO: consider returning self._transform(X) if we allow users to specify real dtypes as input
    def _fit_transform(self, X: DataFrame, **kwargs) -> (DataFrame, dict):
        feature_type_raw_cur_dict = get_type_map_raw(X)
        feature_map_to_update = dict()
        for feature in self.features_in:
            feature_type_raw = self.feature_metadata_in.get_feature_type_raw(feature)
            feature_type_raw_cur = feature_type_raw_cur_dict[feature]
            if feature_type_raw != feature_type_raw_cur:
                self._log(30, f'\tWARNING: Actual dtype differs from dtype in FeatureMetadata for feature "{feature}". Actual dtype: {feature_type_raw_cur} | Expected dtype: {feature_type_raw}')
                feature_map_to_update[feature] = feature_type_raw
        if feature_map_to_update:
            self._log(30, f'\tWARNING: Forcefully converting features to expected dtypes. Please manually align the input data with the expected dtypes if issues occur.')
            X = X.astype(feature_map_to_update)
        self._int_features = np.array(self.feature_metadata_in.get_features(valid_raw_types=[R_INT]))
        return X, self.feature_metadata_in.type_group_map_special

    def _transform(self, X: DataFrame) -> DataFrame:
        # Only convert dtypes if the current dtypes differ from those expected at fit time.
        if self._type_map_real_opt != X.dtypes.to_dict():
            if self._int_features.size:
                null_count = X[self._int_features].isnull().any()
                # If int feature contains null during inference but not during fit.
                if null_count.any():
                    # TODO: Consider imputing to mode? This is tricky because training data had no missing values.
                    # TODO: Add unit test for this situation, to confirm it is handled properly.
                    with_null = null_count[null_count]
                    with_null_features = list(with_null.index)
                    logger.warning(f'WARNING: Int features without null values at train time contain null values at inference time! Imputing nulls to 0. To avoid this, pass the features as floats during fit!')
                    logger.warning(f'WARNING: Int features with nulls: {with_null_features}')
                    X[with_null_features] = X[with_null_features].fillna(0)

            if self._type_map_real_opt:
                # TODO: Confirm this works with sparse and other feature types!
                # FIXME: Address situation where test-time invalid type values cause crash:
                #  https://stackoverflow.com/questions/49256211/how-to-set-unexpected-data-type-to-na?noredirect=1&lq=1
                X = X.astype(self._type_map_real_opt)
        return X

    @staticmethod
    def get_default_infer_features_in_args() -> dict:
        return dict()

    def _infer_features_in_full(self, X: DataFrame, feature_metadata_in: FeatureMetadata = None):
        super()._infer_features_in_full(X=X, feature_metadata_in=feature_metadata_in)
        type_map_real = get_type_map_real(X[self.feature_metadata_in.get_features()])
        self._type_map_real_opt = X[self.feature_metadata_in.get_features()].dtypes.to_dict()
        self._feature_metadata_in_real = FeatureMetadata(type_map_raw=type_map_real, type_group_map_special=self.feature_metadata_in.get_type_group_map_raw())

    def _remove_features_in(self, features):
        super()._remove_features_in(features)
        if features:
            self._feature_metadata_in_real = self._feature_metadata_in_real.remove_features(features=features)
            for feature in features:
                self._type_map_real_opt.pop(feature, None)
            self._int_features = np.array(self.feature_metadata_in.get_features(valid_raw_types=[R_INT]))

    def print_feature_metadata_info(self, log_level=20):
        self._log(log_level, '\tOriginal Features (exact raw dtype, raw dtype):')
        self._feature_metadata_in_real.print_feature_metadata_full(self.log_prefix + '\t\t', print_only_one_special=True, log_level=log_level)
        super().print_feature_metadata_info(log_level=log_level)

    def _more_tags(self):
        return {'feature_interactions': False}
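

# Illustrative usage sketch (not part of the original source; data and init defaults are assumptions).
# Fits AsTypeFeatureGenerator on training data containing an int column, then transforms inference
# data in which that column contains a null value, exercising the fillna(0) warning path above.
def _example_astype_usage():
    import numpy as np
    import pandas as pd

    X_train = pd.DataFrame({'age': [25, 32, 47], 'income': [50.0, 61.5, 72.3]})
    X_infer = pd.DataFrame({'age': [29, np.nan, 51], 'income': [55.0, 48.2, 80.1]})

    generator = AsTypeFeatureGenerator()
    X_train_out = generator.fit_transform(X_train)  # 'age' is recorded as an int dtype
    X_infer_out = generator.transform(X_infer)  # null in 'age' is imputed to 0, then cast back to int
    return X_train_out, X_infer_out
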
def test_feature_metadata_get_features():
    type_map_raw = dict(
        a='1',
        b='2',
        c='3',
        d='1',
        e='1',
        f='4',
    )

    type_group_map_special = {
        's1': ['a', 'b', 'd'],
        's2': ['a', 'e'],
        's3': ['a', 'b'],
        's4': ['f']
    }

    expected_get_features = ['a', 'b', 'c', 'd', 'e', 'f']

    feature_metadata = FeatureMetadata(
        type_map_raw=type_map_raw,
        type_group_map_special=type_group_map_special)

    assert feature_metadata.get_features() == expected_get_features

    assert feature_metadata.get_features(valid_raw_types=['1']) == [
        'a', 'd', 'e'
    ]
    assert feature_metadata.get_features(valid_raw_types=['1', '3']) == [
        'a', 'c', 'd', 'e'
    ]
    assert feature_metadata.get_features(valid_raw_types=['UNKNOWN']) == []

    assert feature_metadata.get_features(valid_special_types=['s2', 's3']) == [
        'a', 'b', 'c', 'e'
    ]
    assert feature_metadata.get_features(valid_special_types=['s4']) == [
        'c', 'f'
    ]
    assert feature_metadata.get_features(valid_special_types=[]) == ['c']
    assert feature_metadata.get_features(valid_special_types=['UNKNOWN']) == [
        'c'
    ]

    assert feature_metadata.get_features(
        invalid_raw_types=[]) == expected_get_features
    assert feature_metadata.get_features(invalid_raw_types=['1', '3']) == [
        'b', 'f'
    ]
    assert feature_metadata.get_features(
        invalid_raw_types=['UNKNOWN']) == expected_get_features

    assert feature_metadata.get_features(
        invalid_special_types=['UNKNOWN']) == expected_get_features
    assert feature_metadata.get_features(
        invalid_special_types=[]) == expected_get_features
    assert feature_metadata.get_features(
        invalid_special_types=['s2', 's4']) == ['b', 'c', 'd']

    assert feature_metadata.get_features(required_special_types=['s2']) == [
        'a', 'e'
    ]
    assert feature_metadata.get_features(
        required_special_types=['s2', 's3']) == ['a']
    assert feature_metadata.get_features(
        required_special_types=['s2', 's4']) == []
    assert feature_metadata.get_features(
        required_special_types=['UNKNOWN']) == []

    assert feature_metadata.get_features(required_special_types=['s2'],
                                         required_exact=True) == ['e']
    assert feature_metadata.get_features(
        required_special_types=['s1', 's2',
                                's3'], required_exact=True) == ['a']

    assert feature_metadata.get_features(
        required_at_least_one_special=True) == ['a', 'b', 'd', 'e', 'f']

    assert feature_metadata.get_features(required_raw_special_pairs=[
        ('1', ['s2']),
    ]) == ['a', 'e']
    assert feature_metadata.get_features(required_raw_special_pairs=[
        ('1', None),
    ]) == ['a', 'd', 'e']
    assert feature_metadata.get_features(required_raw_special_pairs=[
        ('1', ['s2']),
        (None, ['s4']),
        ('3', None),
    ]) == ['a', 'c', 'e', 'f']
    assert feature_metadata.get_features(
        required_raw_special_pairs=[
            ('1', ['s2']),
            (None, ['s4']),
            ('3', None),
        ],
        required_exact=True) == ['c', 'e', 'f']

    # Assert that valid_raw_types is the opposite of invalid_raw_types through all combinations
    raw_types_to_check = ['1', '2', '3', '4', 'UNKNOWN']
    for L in range(0, len(raw_types_to_check) + 1):
        for subset in itertools.combinations(raw_types_to_check, L):
            valid_raw_types = list(subset)
            invalid_raw_types = [
                raw_type for raw_type in raw_types_to_check
                if raw_type not in valid_raw_types
            ]
            assert feature_metadata.get_features(
                valid_raw_types=valid_raw_types
            ) == feature_metadata.get_features(
                invalid_raw_types=invalid_raw_types)

    # Combined arguments
    assert feature_metadata.get_features(invalid_special_types=['s2', 's3'], required_special_types=['s1']) == ['d']
    assert feature_metadata.get_features(valid_raw_types=['2', '3'], valid_special_types=['s1']) == ['b', 'c']
    assert feature_metadata.get_features(valid_raw_types=['2', '3'], valid_special_types=['s1'], required_at_least_one_special=True) == ['b']
    assert feature_metadata.get_features(valid_raw_types=['2', '3'], required_special_types=['s1']) == ['b']
    assert feature_metadata.get_features(valid_raw_types=['2', '3'], required_special_types=['s1'], required_exact=True) == []
    assert feature_metadata.get_features(valid_raw_types=['2', '3'], required_special_types=['s1', 's3']) == ['b']
    assert feature_metadata.get_features(valid_raw_types=['2', '3'], required_special_types=['s1', 's3'], required_exact=True) == ['b']