def test_transform_empty(self):
    X = list(self._get_random_tuples())
    for sparse in [True, False]:
        V = FeatureMappingFlattener(sparse=sparse)
        V.fit(X)
        Z = V.transform([])
        self.assertEqual(Z.shape[0], 0)
def test_transform_produce_the_expected_values_on_the_result(self):
    random.seed("lady smith")
    X = self._get_random_tuples()
    random.seed("black mambazo")
    Y = list(self._get_random_tuples())
    V = FeatureMappingFlattener(sparse=False)
    V.fit(X)
    Z = V.transform(Y)
    for y, z in zip(Y, Z):
        for i, v in enumerate(y):
            if isinstance(v, (int, float)):
                vector_idx = V.indexes[(i, None)]
                self.assertEqual(v, z[vector_idx])
            elif isinstance(v, str):
                # we know that there's only ENUM type, with DRINKS
                vector_idx = V.indexes[(i, v)]
                self.assertEqual(1.0, z[vector_idx])
                for other_value in self.DRINKS:
                    if other_value != v:
                        vector_idx = V.indexes[(i, other_value)]
                        self.assertEqual(0.0, z[vector_idx])
            else:
                # It's an array
                for j, v_j in enumerate(v):
                    vector_idx = V.indexes[(i, j)]
                    self.assertEqual(v_j, z[vector_idx])
def test_fit_transform_consumes_data_only_once(self):
    random.seed("a kiss to build a dream on")
    X = list(self._get_random_tuples())
    X_consumable = (x for x in X)
    V1 = FeatureMappingFlattener(sparse=False)
    V1.fit(X)
    Z1 = V1.transform(X)
    Z2 = V1.fit_transform(X_consumable)
    self.assertTrue(numpy.array_equal(Z1, Z2))
def test_sparse_single_zero(self):
    random.seed("something about us")
    V = FeatureMappingFlattener(sparse=True)
    abc = [chr(i) for i in range(65, 123)]
    X = [(set(random.choice(abc) for _ in range(20)), ) for _ in range(7)]
    element = chr(32)  # Clearly outside what was seen at training
    V.fit(X)
    X = V.transform([(set(element), )])
    self.assertEqual(X.shape[0], 1)
def test_sparse_is_equivalent(self):
    random.seed("the man who sold the world")
    X = list(self._get_random_tuples())
    # sparse result, densified for comparison
    A = FeatureMappingFlattener(sparse=True)
    YA = A.fit_transform(X).todense()
    # dense result
    B = FeatureMappingFlattener(sparse=False)
    YB = B.fit_transform(X)
    self.assertTrue(numpy.array_equal(YA, YB))
def test_fit_transform_ok(self):
    random.seed("a kiss to build a dream on")
    X = list(self._get_random_tuples())
    for sparse in [True, False]:
        V = FeatureMappingFlattener(sparse=sparse)
        Z = V.fit_transform(X)
        n = 100
        m = 4 + 3 + 5  # 4 floats, 1 enum (3 values), 1 list (5 elements)
        self.assertEqual(Z.shape, (n, m))
        d = next(self._get_random_tuples())
        Z = V.transform([d])  # Check that it works for a single tuple too
        self.assertEqual(Z.shape, (1, m))
def test_transform_bad_values(self):
    random.seed("king of the streets")
    X = list(self._get_random_tuples())
    d = X.pop()
    for sparse in [True, False]:
        V = FeatureMappingFlattener(sparse=sparse)
        V.fit(X)
        dd = tuple(list(d)[:-1])  # Missing value
        self.assertRaises(ValueError, V.transform, [dd])
        dd = d + (10, )  # Extra value
        self.assertRaises(ValueError, V.transform, [dd])
        dd = tuple([u"a string"] + list(d)[1:])  # Changed type
        self.assertRaises(ValueError, V.transform, [dd])
def test_transform_returns_a_matrix(self):
    random.seed("lady smith")
    X = list(self._get_random_tuples())
    random.seed("black mambazo")
    Y = list(self._get_random_tuples())
    for sparse in [True, False]:
        V = FeatureMappingFlattener(sparse=sparse)
        V.fit(X)
        Z = V.transform(Y)
        if sparse:
            self.assertIsInstance(Z, scipy.sparse.csr_matrix)
        else:
            self.assertIsInstance(Z, numpy.ndarray)
def test_transforming_non_fitted_word_is_ignored(self):
    # never fitted with self.COLORS[-1]
    X = [(self.COLORS[:-2], ), (self.COLORS[:-1], )]
    known_colors = len(self.COLORS) - 1
    V = FeatureMappingFlattener(sparse=False)
    V.fit(X)
    Y = [
        (self.COLORS[-1:], ),  # the unknown color only
        (self.COLORS[:], ),
    ]
    Z = V.transform(Y)
    self.assertTrue(numpy.array_equal(Z[0], [0.0] * known_colors))
    self.assertTrue(numpy.array_equal(Z[1], [1.0] * known_colors))
def test_transform_ok(self):
    random.seed("i am the program")
    X = list(self._get_random_tuples())
    random.seed("dream on")
    Y = list(self._get_random_tuples())
    for sparse in [True, False]:
        V = FeatureMappingFlattener(sparse=sparse)
        V.fit(X)
        Z = V.transform(Y)
        n = 100
        m = 4 + 3 + 5  # 4 floats, 1 enum (3 values), 1 list (5 elements)
        self.assertEqual(Z.shape, (n, m))
        d = next(self._get_random_tuples())
        Z = V.transform([d])  # Check that it works for a single tuple too
        self.assertEqual(Z.shape, (1, m))
def test_fit_ok(self):
    random.seed("sofi needs a ladder")
    X = list(self._get_random_tuples())
    V = FeatureMappingFlattener()
    V.fit(X)
    # Check that it works for a single tuple too
    V = FeatureMappingFlattener()
    V.fit([next(self._get_random_tuples())])
def check_fit_fails(self, X):
    V = FeatureMappingFlattener()
    self.assertRaises(ValueError, V.fit, X)
    self.assertRaises(ValueError, V.fit, list(self.make_every_list_(X, set)))
    self.assertRaises(ValueError, V.fit, list(self.make_every_list_(X, tuple)))
def test_transform_produce_expected_values_on_the_result(self):
    random.seed("Lady smith")
    X = list(self._get_random_tuples())
    random.seed("black mambazo")
    Y = list(self._get_random_tuples())
    V = FeatureMappingFlattener(sparse=False)
    V.fit(X)
    Z = V.transform(Y)
    for y, z in zip(Y, Z):
        for i, v_seq in enumerate(y):
            assert isinstance(v_seq, (list, set, tuple))
            # we know that there's only the Bag-of-strings type, with COLORS,
            # and a Bag of Persons
            counter = Counter(v_seq)
            for v, v_count in counter.items():
                vector_idx = V.indexes[(i, v)]
                self.assertEqual(v_count, z[vector_idx])
def test_fit_bad_values(self):
    V = FeatureMappingFlattener()
    self.assertRaises(ValueError, V.fit, [tuple()])
    self.assertRaises(ValueError, V.fit, [({},)])
    self.assertRaises(ValueError, V.fit, [([1], u"a"), ([], u"a")])
    self.assertRaises(ValueError, V.fit, [(random,)])
    self.assertRaises(ValueError, V.fit, [([1, u"a"],)])
    self.assertRaises(ValueError, V.fit, [(u"a",), (1,)])
def test_fit_transform_equivalent(self):
    random.seed("j0hny guitar")
    X = list(self._get_random_tuples())
    for sparse in [True, False]:
        # fit + transform
        A = FeatureMappingFlattener(sparse=sparse)
        A.fit(X)
        YA = A.transform(X)
        # fit_transform
        B = FeatureMappingFlattener(sparse=sparse)
        YB = B.fit_transform(X)
        if sparse:
            self.assertTrue(numpy.array_equal(YA.todense(), YB.todense()))
        else:
            self.assertTrue(numpy.array_equal(YA, YB))
        self.assertEqual(A.indexes, B.indexes)
        self.assertEqual(A.reverse, B.reverse)
def test_fit_transform_bad_values(self):
    random.seed("king of the streets")
    X = list(self._get_random_tuples())
    d = X.pop()
    for sparse in [True, False]:
        V = FeatureMappingFlattener(sparse=sparse)
        # Typical fit failures
        self.assertRaises(ValueError, V.fit_transform, [tuple()])
        self.assertRaises(ValueError, V.fit_transform, [({},)])
        self.assertRaises(ValueError, V.fit_transform, [([1], u"a"), ([], u"a")])
        self.assertRaises(ValueError, V.fit_transform, [(random,)])
        self.assertRaises(ValueError, V.fit_transform, [([1, u"a"],)])
        self.assertRaises(ValueError, V.fit_transform, [("a",), (1,)])
        # Typical transform failures
        bad = X + [tuple(list(d)[:-1])]  # Missing value
        self.assertRaises(ValueError, V.fit_transform, bad)
        bad = X + [d + (10, )]  # Extra value
        self.assertRaises(ValueError, V.fit_transform, bad)
        bad = X + [tuple([u"a string"] + list(d)[1:])]  # Changed type
        self.assertRaises(ValueError, V.fit_transform, bad)
def test_fit_transform_empty(self):
    for sparse in [True, False]:
        V = FeatureMappingFlattener(sparse=sparse)
        self.assertRaises(ValueError, V.fit_transform, [])
def check_fit_ok(self, X):
    V = FeatureMappingFlattener()
    V.fit(X)
    V.fit(list(self.make_every_list_(X, set)))
    V.fit(list(self.make_every_list_(X, tuple)))
class Vectorizer(object):
    """
    Vectorizer(features) provides a scikit-learn compatible component that,
    given a collection of data points, turns it into a matrix of vectors,
    where each vector contains the evaluation of every given feature for a
    single data point.

    Numerical features are mapped to a single column of the resulting matrix.
    Enumerated features are mapped to multiple columns (one for each possible
    enumerated value), using 0 or 1 to indicate the presence of the enumerated
    value. Vectorial features are mapped to multiple columns.

    The API of this class follows scikit-learn conventions, see
    http://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html

    Vectorizer(features, tolerant=True) changes the feature evaluation
    strategy to one that is more tolerant to failures when evaluating
    features. It is useful for testing and for running rough experiments when
    you still aren't sure whether your data is clean or your features are
    correct. See the documentation for
    featureforge.evaluator.TolerantFeatureEvaluator.

    Vectorizer(features, sparse=True) changes the result data type, returning
    a sparse scipy matrix instead of a dense numpy matrix. See the
    documentation for featureforge.flattener.FeatureMappingFlattener.
    """

    def __init__(self, features, tolerant=False, sparse=True):
        # Upgrade `features` to `Feature` instances.
        features = list(map(make_feature, features))
        if tolerant:
            self.evaluator = TolerantFeatureEvaluator(features)
        else:
            self.evaluator = FeatureEvaluator(features)
        self.flattener = FeatureMappingFlattener(sparse=sparse)

    def fit(self, X, y=None):
        Xt = self.evaluator.fit_transform(X, y)
        self.flattener.fit(Xt, y)
        return self

    def fit_transform(self, X, y=None):
        Xt = self.evaluator.fit_transform(X, y)
        return self.flattener.fit_transform(Xt, y)

    def transform(self, X):
        Xt = self.evaluator.transform(X)
        return self.flattener.transform(Xt)

    def column_to_feature(self, i):
        """
        Given a column index in the vectorizer's output matrix, returns the
        feature that originates that column.

        The return value is a tuple (feature, value). `feature` is the
        feature given in the initialization and `value` depends on the kind
        of feature that the column represents:
          - If the feature spawns numbers then `value` is `None` and should
            be ignored.
          - If the feature spawns strings then `value` is the string that
            corresponds to the one-hot encoding of that column.
          - If the feature spawns an array then `value` is the index within
            the spawned array that corresponds to that column.
        """
        j, value = self.flattener.reverse[i]
        feature = self.evaluator.alive_features[j]
        return feature, value
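# Minimal usage sketch of the Vectorizer API documented above. The feature
# callables (`age`, `favourite_drink`) and the toy data points below are
# hypothetical examples, not part of featureforge; they only illustrate how
# plain callables get upgraded by `make_feature` and how numerical and
# enumerated features end up as columns.

def age(person):
    # Numerical feature: contributes a single column.
    return person["age"]


def favourite_drink(person):
    # Enumerated (string) feature: one-hot encoded, one column per value seen.
    return person["drink"]


people = [{"age": 30, "drink": "tea"}, {"age": 25, "drink": "coffee"}]
vectorizer = Vectorizer([age, favourite_drink], sparse=False)
matrix = vectorizer.fit_transform(people)  # dense matrix of shape (2, 1 + 2)
feature, value = vectorizer.column_to_feature(2)  # maps a column back to (feature, value)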
def test_fit_empty(self):
    V = FeatureMappingFlattener()
    self.assertRaises(ValueError, V.fit, [])