示例#1
0
    def test_error_handling(self):
        """Every encoder must raise ValueError on misuse and pass data through when cols=[]."""
        for encoder_name in encoders.__all__:
            with self.subTest(encoder_name=encoder_name):
                # Build train/score frames with the problematic columns removed.
                excluded = ['unique_str', 'none']
                train = th.create_dataset(n_rows=100).drop(excluded, axis=1)
                score = th.create_dataset(n_rows=50, extras=True).drop(excluded, axis=1)

                encoder_cls = getattr(encoders, encoder_name)

                # Illegal state: transforming before the encoder was ever fitted.
                unfitted = encoder_cls()
                with self.assertRaises(ValueError):
                    unfitted.transform(train)

                # Scoring data with a different column count must be rejected.
                fitted = encoder_cls()
                fitted.fit(train, y)
                with self.assertRaises(ValueError):
                    fitted.transform(score.iloc[:, 0:3])

                # With no columns selected, transform acts as an identity operation.
                passthrough = encoder_cls(cols=[])
                passthrough.fit(train, y)
                self.assertTrue(passthrough.transform(score).equals(score))
示例#2
0
    def test_one_hot(self):
        """Exercise OneHotEncoder across the supported handle_unknown strategies."""
        # return_df=False: unseen category values must not change the column count.
        enc = encoders.OneHotEncoder(verbose=1, return_df=False)
        enc.fit(X)
        self.assertEqual(
            enc.transform(X_t).shape[1],
            enc.transform(X).shape[1],
            'We have to get the same count of columns despite the presence of a new value'
        )

        # handle_unknown='indicator' adds a dedicated indicator column for unseen values.
        enc = encoders.OneHotEncoder(verbose=1, return_df=True,
                                     handle_unknown='indicator')
        enc.fit(X)
        self.assertIn('extra_-1', enc.transform(X_t).columns.values)

        # handle_unknown='return_nan' keeps exactly the three fitted 'extra_' columns.
        enc = encoders.OneHotEncoder(verbose=1, return_df=True,
                                     handle_unknown='return_nan')
        enc.fit(X)
        transformed = enc.transform(X_t)
        extra_columns = [col for col in transformed.columns.values
                         if str(col).startswith('extra_')]
        self.assertEqual(len(extra_columns), 3)

        # handle_unknown='error' rejects unseen values.
        enc = encoders.OneHotEncoder(verbose=1, return_df=True,
                                     handle_unknown='error')
        # Note: fit() internally calls transform() to derive feature_names, so the
        # error can already surface during fit().
        enc.fit(X)
        with self.assertRaises(ValueError):
            enc.transform(X_t)

        # use_cat_names=True with 'return_nan': named category columns survive.
        enc = encoders.OneHotEncoder(verbose=1, return_df=True,
                                     handle_unknown='return_nan',
                                     use_cat_names=True)
        enc.fit(X)
        self.assertIn('extra_A', enc.transform(X_t).columns.values)

        # use_cat_names=True with 'indicator': the indicator column is still 'extra_-1'.
        enc = encoders.OneHotEncoder(verbose=1, return_df=True,
                                     use_cat_names=True,
                                     handle_unknown='indicator')
        enc.fit(X)
        self.assertIn('extra_-1', enc.transform(X_t).columns.values)

        # Round-trip check: inverse_transform(transform(x)) must reconstruct the input.
        complete = th.create_dataset(n_rows=100, has_missing=False)
        complete_t = th.create_dataset(n_rows=50, has_missing=False)
        target_cols = ['underscore', 'none', 'extra', 321, 'categorical']

        enc = encoders.OneHotEncoder(verbose=1, use_cat_names=True, cols=target_cols)
        enc.fit(complete)
        recovered = enc.inverse_transform(enc.transform(complete_t))
        th.verify_inverse_transform(complete_t, recovered)
    def test_inverse_transform(self):
        """Round-trip transform -> inverse_transform for the encoders that support it."""
        # None values are not allowed in this data (a "none" column without any
        # missing value is fine).
        data = th.create_dataset(n_rows=100, has_missing=False)
        data_t = th.create_dataset(n_rows=50, has_missing=False)
        target_cols = ['underscore', 'none', 321, 'categorical', 'categorical_int']

        invertible = ['BaseNEncoder', 'BinaryEncoder', 'OneHotEncoder', 'OrdinalEncoder']
        for encoder_name in invertible:
            with self.subTest(encoder_name=encoder_name):
                # Simple fit / transform / inverse_transform round trip.
                enc = getattr(encoders, encoder_name)(verbose=1, cols=target_cols)
                enc.fit(data)
                round_trip = enc.inverse_transform(enc.transform(data_t))
                th.verify_inverse_transform(data_t, round_trip)
    def test_handle_unknown_error(self):
        """handle_unknown='error' must raise ValueError when scoring unseen values."""
        # BaseN has problems with None -> ignore None
        train = th.create_dataset(n_rows=100, has_missing=False)
        score = th.create_dataset(n_rows=50, extras=True, has_missing=False)

        # HashingEncoder supports new values by design -> excluded
        tested = set(encoders.__all__) - {'HashingEncoder'}
        for encoder_name in tested:
            with self.subTest(encoder_name=encoder_name):
                enc = getattr(encoders, encoder_name)(handle_unknown='error')
                enc.fit(train, y)
                # A new value appears during scoring -> must be rejected.
                with self.assertRaises(ValueError):
                    _ = enc.transform(score)
示例#5
0
from unittest import TestCase
import numpy as np
import category_encoders as encoders
import tests.helpers as th

# data definitions
# Shared fixture: 100-row synthetic dataset built by the test helpers.
X = th.create_dataset(n_rows=100)
# Seeded boolean target (standard-normal draws > 0.5) so runs are reproducible.
np_y = np.random.default_rng(42).standard_normal(100) > 0.5


class TestGLMMEncoder(TestCase):
    """Construction smoke tests for GLMMEncoder (fitting is disabled pending a fix)."""

    def test_continuous(self):
        # Continuous (gaussian) target: binomial_target=False.
        columns = ['unique_str', 'underscore', 'extra', 'none', 'invariant',
                   321, 'categorical', 'na_categorical', 'categorical_int']
        enc = encoders.GLMMEncoder(cols=columns, binomial_target=False)
        # TODO: fix this test IRL
        # enc.fit(X, np_y)
        # th.verify_numeric(enc.transform(X))

    def test_binary(self):
        # Binary target: binomial_target=True.
        columns = ['unique_str', 'underscore', 'extra', 'none', 'invariant',
                   321, 'categorical', 'na_categorical', 'categorical_int']
        enc = encoders.GLMMEncoder(cols=columns, binomial_target=True)
        # TODO: fix this test IRL
        # enc.fit(X, np_y)
        # th.verify_numeric(enc.transform(X))
示例#6
0
import pandas as pd
from unittest import TestCase  # or `from unittest import ...` if on Python 3.4+
import tests.helpers as th
import numpy as np

import category_encoders as encoders

# Shared fixtures: numpy arrays and DataFrames for train (100 rows) and score
# (50 rows; extras=True adds values unseen during training).
np_X = th.create_array(n_rows=100)
np_X_t = th.create_array(n_rows=50, extras=True)
# NOTE(review): targets are unseeded, so values differ between runs.
np_y = np.random.randn(np_X.shape[0]) > 0.5
np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5
X = th.create_dataset(n_rows=100)
X_t = th.create_dataset(n_rows=50, extras=True)
# Same targets wrapped as single-column DataFrames.
y = pd.DataFrame(np_y)
y_t = pd.DataFrame(np_y_t)


class TestWeightOfEvidenceEncoder(TestCase):
    """Tests for the weight-of-evidence (WOE) encoder."""

    def test_woe(self):
        # NOTE(review): this column list is unused in the visible test body —
        # confirm whether it was meant to be passed to the encoder.
        cols = ['unique_str', 'underscore', 'extra', 'none', 'invariant',
                321, 'categorical', 'na_categorical', 'categorical_int']

        # Perfectly balanced label over perfectly balanced feature values.
        frame = pd.DataFrame(data=['1', '1', '1', '2', '2', '2'],
                             columns=['col1'])
        labels = [True, False, True, False, True, False]
        enc = encoders.WOEEncoder()
        enc.fit(frame, labels)
        X1 = enc.transform(frame)
示例#7
0
# sample num of data — row count of the benchmark dataset
data_lines = 10000

# benchmarking result format
result_cols = [
    'encoder', 'used_processes', 'X_shape', 'min_time(s)', 'average_time(s)',
    'max_cpu_utilization(%)', 'average_cpu_utilization(%)'
]
# One row per benchmark run, appended elsewhere in the script.
results = []
# Cross-process queue; presumably filled by the CPU-sampling process below —
# a Manager queue is shareable between processes.
cpu_utilization = multiprocessing.Manager().Queue()

# define data_set
np_X = th.create_array(n_rows=data_lines)
# NOTE(review): unseeded RNG — benchmark targets differ between runs.
np_y = np.random.randn(np_X.shape[0]) > 0.5
X = th.create_dataset(n_rows=data_lines)
# Scoring set is half the size; extras=True adds values unseen during training.
X_t = th.create_dataset(n_rows=int(data_lines / 2), extras=True)

cols = [
    'unique_str', 'underscore', 'extra', 'none', 'invariant', 321,
    'categorical', 'na_categorical'
]


def get_cpu_utilization():
    """
    new process for recording cpu utilization
    record cpu utilization every [cpu_sampling_rate] second & calculate its mean value
    the value is the cpu utilization during every encoding
    """
    global cpu_utilization