Example #1
    def test_error_handling(self):
        for encoder_name in encoders.__all__:
            with self.subTest(encoder_name=encoder_name):

                # we exclude some columns
                X = th.create_dataset(n_rows=100)
                X = X.drop(['unique_str', 'none'], axis=1)
                X_t = th.create_dataset(n_rows=50, extras=True)
                X_t = X_t.drop(['unique_str', 'none'], axis=1)

                # illegal state: the encoder must be fitted before transform() can be called
                enc = getattr(encoders, encoder_name)()
                with self.assertRaises(ValueError):
                    enc.transform(X)

                # wrong number of columns at transform time
                enc = getattr(encoders, encoder_name)()
                enc.fit(X, y)
                with self.assertRaises(ValueError):
                    enc.transform(X_t.iloc[:, 0:3])

                # no columns to encode -> the data should pass through unchanged
                enc = getattr(encoders, encoder_name)(cols=[])
                enc.fit(X, y)
                self.assertTrue(enc.transform(X_t).equals(X_t))

    def test_one_hot(self):
        enc = encoders.OneHotEncoder(verbose=1, return_df=False)
        enc.fit(X)
        self.assertEqual(
            enc.transform(X_t).shape[1],
            enc.transform(X).shape[1],
            'The number of columns must stay the same even when unseen values are present'
        )

        enc = encoders.OneHotEncoder(verbose=1,
                                     return_df=True,
                                     handle_unknown='indicator')
        enc.fit(X)
        out = enc.transform(X_t)
        self.assertIn('extra_-1', out.columns.values)

        enc = encoders.OneHotEncoder(verbose=1,
                                     return_df=True,
                                     handle_unknown='return_nan')
        enc.fit(X)
        out = enc.transform(X_t)
        self.assertEqual(
            len([x for x in out.columns.values
                 if str(x).startswith('extra_')]), 3)

        enc = encoders.OneHotEncoder(verbose=1,
                                     return_df=True,
                                     handle_unknown='error')
        # with handle_unknown='error', transform() must raise a ValueError when the
        # scoring data contain values that were not seen during fit()
        enc.fit(X)
        with self.assertRaises(ValueError):
            enc.transform(X_t)

        enc = encoders.OneHotEncoder(verbose=1,
                                     return_df=True,
                                     handle_unknown='return_nan',
                                     use_cat_names=True)
        enc.fit(X)
        out = enc.transform(X_t)
        self.assertIn('extra_A', out.columns.values)

        enc = encoders.OneHotEncoder(verbose=1,
                                     return_df=True,
                                     use_cat_names=True,
                                     handle_unknown='indicator')
        enc.fit(X)
        out = enc.transform(X_t)
        self.assertIn('extra_-1', out.columns.values)

        # test inverse_transform
        X_i = th.create_dataset(n_rows=100, has_missing=False)
        X_i_t = th.create_dataset(n_rows=50, has_missing=False)
        cols = ['underscore', 'none', 'extra', 321, 'categorical']

        enc = encoders.OneHotEncoder(verbose=1, use_cat_names=True, cols=cols)
        enc.fit(X_i)
        obtained = enc.inverse_transform(enc.transform(X_i_t))
        th.verify_inverse_transform(X_i_t, obtained)

    def test_handle_unknown_error(self):
        # BaseN has problems with None -> generate the data without None values
        X = th.create_dataset(n_rows=100, has_none=False)
        X_t = th.create_dataset(n_rows=50, extras=True, has_none=False)

        for encoder_name in (set(encoders.__all__) - {'HashingEncoder'}):  # HashingEncoder supports new values by design -> excluded
            with self.subTest(encoder_name=encoder_name):

                # new value during scoring
                enc = getattr(encoders, encoder_name)(handle_unknown='error')
                enc.fit(X, y)
                with self.assertRaises(ValueError):
                    _ = enc.transform(X_t)

    def test_inverse_transform(self):
        # None values are not allowed in this data (a 'none' column that contains no actual None is fine)
        X = th.create_dataset(n_rows=100, has_none=False)
        X_t = th.create_dataset(n_rows=50, has_none=False)
        X_t_extra = th.create_dataset(n_rows=50, extras=True, has_none=False)
        cols = ['underscore', 'none', 'extra', 321, 'categorical']

        for encoder_name in ['BaseNEncoder', 'BinaryEncoder', 'OneHotEncoder', 'OrdinalEncoder']:
            with self.subTest(encoder_name=encoder_name):

                # simple run
                enc = getattr(encoders, encoder_name)(verbose=1, cols=cols)
                enc.fit(X)
                th.verify_inverse_transform(X_t, enc.inverse_transform(enc.transform(X_t)))
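
# --- A minimal standalone sketch (not part of the original test suite) of the
# --- fit/transform/inverse_transform round trip exercised above; the DataFrame
# --- and column name below are made up purely for illustration.
import pandas as pd
import category_encoders as encoders

df = pd.DataFrame({'categorical': ['A', 'B', 'A', 'C']})
enc = encoders.OrdinalEncoder(cols=['categorical'], return_df=True)
enc.fit(df)
encoded = enc.transform(df)                 # categories replaced by ordinal codes
restored = enc.inverse_transform(encoded)   # original categories recovered
print((restored['categorical'] == df['categorical']).all())  # expected: True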
Example #5
import pandas as pd
from unittest2 import TestCase  # or `from unittest import ...` if on Python 3.4+
import category_encoders.tests.helpers as th
import numpy as np
import warnings
import category_encoders as encoders

np_X = th.create_array(n_rows=100)
np_X_t = th.create_array(n_rows=50, extras=True)
np_y = np.random.randn(np_X.shape[0]) > 0.5
np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5
X = th.create_dataset(n_rows=100)
X_t = th.create_dataset(n_rows=50, extras=True)
y = pd.DataFrame(np_y)
y_t = pd.DataFrame(np_y_t)


class TestOrdinalEncoder(TestCase):
    def test_ordinal(self):

        enc = encoders.OrdinalEncoder(verbose=1, return_df=True)
        enc.fit(X)
        out = enc.transform(X_t)
        self.assertEqual(len(set(out['extra'].values)), 4)
        self.assertIn(-1, set(out['extra'].values))
        self.assertFalse(enc.mapping is None)
        self.assertTrue(len(enc.mapping) > 0)

        enc = encoders.OrdinalEncoder(verbose=1,
                                      mapping=enc.mapping,
                                      return_df=True)
import multiprocessing

import numpy as np
import psutil

import category_encoders.tests.helpers as th

# number of repetitions for each encoding benchmark; a larger value gives a more
# accurate but longer-running benchmark
benchmark_repeat = 3

# number of sample rows in the data set
data_lines = 10000

# benchmarking result format
result_cols = ['encoder', 'used_processes', 'X_shape', 'min_time(s)', 'average_time(s)', 'max_cpu_utilization(%)', 'average_cpu_utilization(%)']
results = []
cpu_utilization = multiprocessing.Manager().Queue()

# define data_set
np_X = th.create_array(n_rows=data_lines)
np_y = np.random.randn(np_X.shape[0]) > 0.5
X = th.create_dataset(n_rows=data_lines)
X_t = th.create_dataset(n_rows=int(data_lines / 2), extras=True)

cols = ['unique_str', 'underscore', 'extra', 'none', 'invariant', 321, 'categorical', 'na_categorical']
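
# --- Sketch (not part of the original script): one way the timing columns of a
# --- `results` row could be filled for a single encoder. The encoder choice and
# --- the use of time.perf_counter() here are illustrative assumptions.
import time

import category_encoders as encoders


def time_one_encoder(encoder_name='OrdinalEncoder'):
    """Return (min_time, average_time) over `benchmark_repeat` fit/transform runs."""
    timings = []
    for _ in range(benchmark_repeat):
        enc = getattr(encoders, encoder_name)(cols=cols)
        start = time.perf_counter()
        enc.fit(X, np_y)
        enc.transform(X_t)
        timings.append(time.perf_counter() - start)
    return min(timings), sum(timings) / len(timings)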


def get_cpu_utilization():
    """
    new process for recording cpu utilization
    record cpu utilization every [cpu_sampling_rate] second & calculate its mean value
    the value is the cpu utilization during every encoding
    """
    global cpu_utilization
    psutil.cpu_percent(None)  # prime the counter; the first call returns a meaningless 0.0
    while True:
        cpu_utilization.put(psutil.cpu_percent(None))  # utilization since the previous call
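
# --- Sketch (not part of the original script): how the sampler above might be
# --- driven. `run_encoding` is a hypothetical stand-in for one timed fit/transform
# --- run; the monitor process feeds `cpu_utilization` and is terminated afterwards,
# --- and the drained samples give the max/average CPU columns of `result_cols`.
def drain_cpu_samples(queue):
    """Empty the shared queue and return (max, average) CPU utilization."""
    samples = []
    while not queue.empty():
        samples.append(queue.get())
    if not samples:
        return 0.0, 0.0
    return max(samples), sum(samples) / len(samples)


def measure_with_cpu(run_encoding):
    """Run `run_encoding()` while get_cpu_utilization() samples in a child process."""
    monitor = multiprocessing.Process(target=get_cpu_utilization)
    monitor.start()
    try:
        run_encoding()  # e.g. a closure around time_one_encoder(...)
    finally:
        monitor.terminate()
        monitor.join()
    return drain_cpu_samples(cpu_utilization)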