def test_pca_array():
    """PCA on a plain numpy array yields the expected two-component projection."""
    X = np.array([[3, 0, 1, 6],
                  [1, 2, 1, 6],
                  [10, 2, 1, 6],
                  [10, 2, 2, 5],
                  [6, 2, 2, 5]])
    # Values below were computed from a reference run of the PCA component.
    expected_X_t = pd.DataFrame(
        [[3.176246, 1.282616],
         [4.969987, -0.702976],
         [-3.954182, 0.429071],
         [-4.079174, -0.252790],
         [-0.112877, -0.755922]],
        columns=[f"component_{i}" for i in range(2)])
    pca = PCA()
    pca.fit(X)
    X_t = pca.transform(X)
    # transform returns a DataTable; compare via its pandas representation
    assert_frame_equal(expected_X_t, X_t.to_dataframe())
def test_pca_woodwork_custom_overrides_returned_by_components(X_df):
    """Whatever numeric logical type column 0 is overridden to, PCA output is Double."""
    y = pd.Series([1, 2, 1])
    for logical_type in (Integer, Double):
        X = ww.DataTable(X_df, logical_types={0: logical_type})
        pca = PCA(n_components=1)
        pca.fit(X)
        transformed = pca.transform(X, y)
        assert isinstance(transformed, ww.DataTable)
        # PCA components are always continuous, so the output type is Double
        assert transformed.logical_types == {'component_0': ww.logical_types.Double}
def test_pca_numeric(data_type, make_data_type):
    """PCA produces the same projection for every supported numeric container type."""
    frame = pd.DataFrame([[3, 0, 1, 6],
                          [1, 2, 1, 6],
                          [10, 2, 1, 6],
                          [10, 2, 2, 5],
                          [6, 2, 2, 5]])
    X = make_data_type(data_type, frame)
    pca = PCA()
    # Reference projection; matches test_pca_array for the same input matrix.
    expected_X_t = pd.DataFrame(
        [[3.176246, 1.282616],
         [4.969987, -0.702976],
         [-3.954182, 0.429071],
         [-4.079174, -0.252790],
         [-0.112877, -0.755922]],
        columns=[f"component_{i}" for i in range(2)])
    X_t = pd.DataFrame(pca.fit_transform(X))
    assert_frame_equal(X_t, expected_X_t)
def test_n_components():
    """Explicit n_components controls the width of the transformed output."""
    X = pd.DataFrame([[3, 0, 1, 6, 5, 10],
                      [1, 2, 1, 3, 11, 4],
                      [10, 2, 1, 12, 5, 6],
                      [10, 6, 4, 4, 0, 1],
                      [6, 8, 9, 3, 1, 5]])
    for n_components in (5, 3, 1):
        transformed = PCA(n_components=n_components).fit_transform(X)
        assert transformed.shape[1] == n_components
def test_variance():
    """Lowering the variance threshold keeps strictly fewer components."""
    X = pd.DataFrame([[3, 0, 1, 6, 5, 10],
                      [1, 2, 1, 3, 11, 4],
                      [10, 2, 1, 12, 5, 6],
                      [10, 6, 4, 4, 0, 1],
                      [6, 8, 9, 3, 1, 5]])
    # At variance=0.97 four components are needed; values from a reference run.
    expected_X_t = pd.DataFrame(
        [[-5.581732, 0.469307, 3.985657, 1.760273],
         [-6.961064, -5.026062, -3.170519, -0.624576],
         [-1.352624, 7.778657, -0.778879, -1.554429],
         [7.067179, 0.645894, -2.633617, 2.159135],
         [6.828241, -3.867796, 2.597358, -1.740404]],
        columns=[f"component_{i}" for i in range(4)])
    X_t_90 = PCA(variance=0.97).fit_transform(X)
    assert_frame_equal(expected_X_t, X_t_90.to_dataframe())
    # Each looser threshold must strictly shrink the component count.
    X_t_75 = PCA(variance=0.75).fit_transform(X)
    assert X_t_75.shape[1] < X_t_90.shape[1]
    X_t_50 = PCA(variance=0.50).fit_transform(X)
    assert X_t_50.shape[1] < X_t_75.shape[1]
def test_describe_component():
    """describe(return_dict=True) reports the canonical name and default/overridden
    parameters for every transformer and estimator component."""
    # --- transformers -------------------------------------------------------
    enc = OneHotEncoder()
    imputer = Imputer()
    simple_imputer = SimpleImputer("mean")
    column_imputer = PerColumnImputer({"a": "mean", "b": ("constant", 100)})
    scaler = StandardScaler()
    feature_selection_clf = RFClassifierSelectFromModel(n_estimators=10, number_features=5, percent_features=0.3, threshold=-np.inf)
    feature_selection_reg = RFRegressorSelectFromModel(n_estimators=10, number_features=5, percent_features=0.3, threshold=-np.inf)
    drop_col_transformer = DropColumns(columns=['col_one', 'col_two'])
    drop_null_transformer = DropNullColumns()
    # NOTE: local deliberately renamed from `datetime` to avoid shadowing the stdlib module
    dt_featurizer = DateTimeFeaturizer()
    text_featurizer = TextFeaturizer()
    lsa = LSA()
    pca = PCA()
    lda = LinearDiscriminantAnalysis()
    ft = DFSTransformer()
    us = Undersampler()
    assert enc.describe(return_dict=True) == {'name': 'One Hot Encoder', 'parameters': {'top_n': 10, 'features_to_encode': None, 'categories': None, 'drop': 'if_binary', 'handle_unknown': 'ignore', 'handle_missing': 'error'}}
    assert imputer.describe(return_dict=True) == {'name': 'Imputer', 'parameters': {'categorical_impute_strategy': "most_frequent", 'categorical_fill_value': None, 'numeric_impute_strategy': "mean", 'numeric_fill_value': None}}
    assert simple_imputer.describe(return_dict=True) == {'name': 'Simple Imputer', 'parameters': {'impute_strategy': 'mean', 'fill_value': None}}
    assert column_imputer.describe(return_dict=True) == {'name': 'Per Column Imputer', 'parameters': {'impute_strategies': {'a': 'mean', 'b': ('constant', 100)}, 'default_impute_strategy': 'most_frequent'}}
    assert scaler.describe(return_dict=True) == {'name': 'Standard Scaler', 'parameters': {}}
    assert feature_selection_clf.describe(return_dict=True) == {'name': 'RF Classifier Select From Model', 'parameters': {'number_features': 5, 'n_estimators': 10, 'max_depth': None, 'percent_features': 0.3, 'threshold': -np.inf, 'n_jobs': -1}}
    assert feature_selection_reg.describe(return_dict=True) == {'name': 'RF Regressor Select From Model', 'parameters': {'number_features': 5, 'n_estimators': 10, 'max_depth': None, 'percent_features': 0.3, 'threshold': -np.inf, 'n_jobs': -1}}
    assert drop_col_transformer.describe(return_dict=True) == {'name': 'Drop Columns Transformer', 'parameters': {'columns': ['col_one', 'col_two']}}
    assert drop_null_transformer.describe(return_dict=True) == {'name': 'Drop Null Columns Transformer', 'parameters': {'pct_null_threshold': 1.0}}
    assert dt_featurizer.describe(return_dict=True) == {'name': 'DateTime Featurization Component', 'parameters': {'features_to_extract': ['year', 'month', 'day_of_week', 'hour'], 'encode_as_categories': False}}
    assert text_featurizer.describe(return_dict=True) == {'name': 'Text Featurization Component', 'parameters': {}}
    assert lsa.describe(return_dict=True) == {'name': 'LSA Transformer', 'parameters': {}}
    assert pca.describe(return_dict=True) == {'name': 'PCA Transformer', 'parameters': {'n_components': None, 'variance': 0.95}}
    assert lda.describe(return_dict=True) == {'name': 'Linear Discriminant Analysis Transformer', 'parameters': {'n_components': None}}
    assert ft.describe(return_dict=True) == {'name': 'DFS Transformer', 'parameters': {"index": "index"}}
    assert us.describe(return_dict=True) == {'name': 'Undersampler', 'parameters': {"balanced_ratio": 4, "min_samples": 100, "min_percentage": 0.1}}
    # --- estimators ---------------------------------------------------------
    base_classifier = BaselineClassifier()
    base_regressor = BaselineRegressor()
    lr_classifier = LogisticRegressionClassifier()
    en_classifier = ElasticNetClassifier()
    en_regressor = ElasticNetRegressor()
    et_classifier = ExtraTreesClassifier(n_estimators=10, max_features="auto")
    et_regressor = ExtraTreesRegressor(n_estimators=10, max_features="auto")
    rf_classifier = RandomForestClassifier(n_estimators=10, max_depth=3)
    rf_regressor = RandomForestRegressor(n_estimators=10, max_depth=3)
    linear_regressor = LinearRegressor()
    svm_classifier = SVMClassifier()
    svm_regressor = SVMRegressor()
    assert base_classifier.describe(return_dict=True) == {'name': 'Baseline Classifier', 'parameters': {'strategy': 'mode'}}
    assert base_regressor.describe(return_dict=True) == {'name': 'Baseline Regressor', 'parameters': {'strategy': 'mean'}}
    assert lr_classifier.describe(return_dict=True) == {'name': 'Logistic Regression Classifier', 'parameters': {'penalty': 'l2', 'C': 1.0, 'n_jobs': -1, 'multi_class': 'auto', 'solver': 'lbfgs'}}
    assert en_classifier.describe(return_dict=True) == {'name': 'Elastic Net Classifier', 'parameters': {'alpha': 0.5, 'l1_ratio': 0.5, 'n_jobs': -1, 'max_iter': 1000, "loss": 'log', 'penalty': 'elasticnet'}}
    assert en_regressor.describe(return_dict=True) == {'name': 'Elastic Net Regressor', 'parameters': {'alpha': 0.5, 'l1_ratio': 0.5, 'max_iter': 1000, 'normalize': False}}
    assert et_classifier.describe(return_dict=True) == {'name': 'Extra Trees Classifier', 'parameters': {'n_estimators': 10, 'max_features': 'auto', 'max_depth': 6, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_jobs': -1}}
    assert et_regressor.describe(return_dict=True) == {'name': 'Extra Trees Regressor', 'parameters': {'n_estimators': 10, 'max_features': 'auto', 'max_depth': 6, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_jobs': -1}}
    assert rf_classifier.describe(return_dict=True) == {'name': 'Random Forest Classifier', 'parameters': {'n_estimators': 10, 'max_depth': 3, 'n_jobs': -1}}
    assert rf_regressor.describe(return_dict=True) == {'name': 'Random Forest Regressor', 'parameters': {'n_estimators': 10, 'max_depth': 3, 'n_jobs': -1}}
    assert linear_regressor.describe(return_dict=True) == {'name': 'Linear Regressor', 'parameters': {'fit_intercept': True, 'normalize': False, 'n_jobs': -1}}
    assert svm_classifier.describe(return_dict=True) == {'name': 'SVM Classifier', 'parameters': {'C': 1.0, 'kernel': 'rbf', 'gamma': 'scale', 'probability': True}}
    assert svm_regressor.describe(return_dict=True) == {'name': 'SVM Regressor', 'parameters': {'C': 1.0, 'kernel': 'rbf', 'gamma': 'scale'}}
    # --- optional dependencies: skip silently when the extra isn't installed
    try:
        xgb_classifier = XGBoostClassifier(eta=0.1, min_child_weight=1, max_depth=3, n_estimators=75)
        xgb_regressor = XGBoostRegressor(eta=0.1, min_child_weight=1, max_depth=3, n_estimators=75)
        assert xgb_classifier.describe(return_dict=True) == {'name': 'XGBoost Classifier', 'parameters': {'eta': 0.1, 'max_depth': 3, 'min_child_weight': 1, 'n_estimators': 75}}
        assert xgb_regressor.describe(return_dict=True) == {'name': 'XGBoost Regressor', 'parameters': {'eta': 0.1, 'max_depth': 3, 'min_child_weight': 1, 'n_estimators': 75}}
    except ImportError:
        pass
    try:
        cb_classifier = CatBoostClassifier()
        cb_regressor = CatBoostRegressor()
        assert cb_classifier.describe(return_dict=True) == {'name': 'CatBoost Classifier', 'parameters': {'allow_writing_files': False, 'n_estimators': 10, 'eta': 0.03, 'max_depth': 6, 'bootstrap_type': None, 'silent': True}}
        assert cb_regressor.describe(return_dict=True) == {'name': 'CatBoost Regressor', 'parameters': {'allow_writing_files': False, 'n_estimators': 10, 'eta': 0.03, 'max_depth': 6, 'bootstrap_type': None, 'silent': False}}
    except ImportError:
        pass
    try:
        lg_classifier = LightGBMClassifier()
        lg_regressor = LightGBMRegressor()
        assert lg_classifier.describe(return_dict=True) == {'name': 'LightGBM Classifier', 'parameters': {'boosting_type': 'gbdt', 'learning_rate': 0.1, 'n_estimators': 100, 'max_depth': 0, 'num_leaves': 31, 'min_child_samples': 20, 'n_jobs': -1, 'bagging_fraction': 0.9, 'bagging_freq': 0}}
        assert lg_regressor.describe(return_dict=True) == {'name': 'LightGBM Regressor', 'parameters': {'boosting_type': 'gbdt', 'learning_rate': 0.1, 'n_estimators': 20, 'max_depth': 0, 'num_leaves': 31, 'min_child_samples': 20, 'n_jobs': -1, 'bagging_fraction': 0.9, 'bagging_freq': 0}}
    except ImportError:
        pass
def test_pca_invalid():
    """fit, fit_transform, and transform all reject NaN or non-numeric input."""
    X_with_nans = pd.DataFrame([[3, 0, 1, 6],
                                [1, None, 1, 6],
                                [10, 2, 1, 6],
                                [10, 2, 2, np.nan],
                                [None, 2, 2, 5]])
    with pytest.raises(ValueError, match="must be all numeric"):
        PCA().fit(X_with_nans)
    X = pd.DataFrame([[3, 0, 1, 6],
                      ['a', 'b', 'a', 'b'],
                      [10, 2, 1, 6],
                      [10, 2, 2, 23],
                      [0, 2, 2, 5]])
    with pytest.raises(ValueError, match="must be all numeric"):
        PCA().fit_transform(X)
    # Fit on clean data, then check transform still validates its own input.
    X_ok = pd.DataFrame([[3, 0, 1, 6],
                         [1, 2, 1, 6],
                         [10, 2, 1, 6],
                         [10, 2, 2, 5],
                         [6, 2, 2, 5]])
    pca = PCA()
    pca.fit(X_ok)
    with pytest.raises(ValueError, match="must be all numeric"):
        pca.transform(X)