def test_scope_model():
    """Formula terms can call a function defined in the enclosing scope."""
    # NOTE(review): a second ``test_scope_model`` later in this file shadows
    # this one under pytest collection — consider renaming one of them.
    data = patsy.demo_data("x1", "x2", "x3", "y")

    def myfunc(x):
        # Constant transform: every entry of the result is 42.
        out = np.ones_like(x)
        out.fill(42)
        return out

    def check_X(X):
        # The second design-matrix column must hold myfunc's output.
        return np.all(X[:, 1] == 42)

    # CheckingClassifier raises if check_X returns False, so a successful
    # fit proves that myfunc was looked up in this scope and applied.
    model = PatsyModel(CheckingClassifier(check_X=check_X),
                       "y ~ x1 + myfunc(x2)")
    model.fit(data)
def test_stateful_model():
    """Stateful transforms (``center``) must use fit-time statistics."""
    train = patsy.demo_data("x1", "x2", "y")
    train['x1'][:] = 1  # training mean of x1 is exactly 1
    test = patsy.demo_data("x1", "x2", "y")
    test['x1'][:] = 0

    # center x1
    model = PatsyModel(CheckingClassifier(), "y ~ center(x1) + x2")
    model.fit(train)

    def check_centering(X):
        # 0 - 1 (the *training* mean) == -1 everywhere iff centering
        # reuses the statistics memorized during fit.
        return np.all(X[:, 0] == -1)

    model.estimator_.check_X = check_centering
    # Would fail if the test-set mean (0) were subtracted instead.
    model.predict(test)
def test_scope_model():
    """Scope lookup of user functions in formulas, plus feature-name capture."""
    # NOTE(review): this duplicates the name of an earlier ``test_scope_model``
    # in this file; under pytest only this later definition is collected.
    data = patsy.demo_data("x1", "x2", "x3", "y")

    def myfunc(x):
        # Constant transform: every entry of the result is 42.
        filled = np.ones_like(x)
        filled.fill(42)
        return filled

    def check_X(X):
        # The second design-matrix column must hold myfunc's output.
        return np.all(X[:, 1] == 42)

    # CheckingClassifier raises if check_X returns False, so a successful
    # fit proves that myfunc was looked up in this scope and applied.
    model = PatsyModel(CheckingClassifier(check_X=check_X),
                       "y ~ x1 + myfunc(x2)")
    model.fit(data)

    # The recorded feature names should mirror the formula's RHS terms.
    assert_equal(model.feature_names_, ["x1", "myfunc(x2)"])
def test_intercept_model():
    """``add_intercept`` controls whether a constant column is prepended."""
    data = patsy.demo_data("x1", "x2", "x3", "y")

    def check_X_no_intercept(X):
        # Default: only the two requested features, no constant column.
        return X.shape[1] == 2

    model = PatsyModel(CheckingClassifier(check_X=check_X_no_intercept),
                       "y ~ x1 + x2")
    model.fit(data)
    model.predict(data)  # predict re-applies the transform to fresh data

    def check_X_intercept(X):
        # With add_intercept=True: three columns, the first all ones.
        has_three_columns = X.shape[1] == 3
        leading_column_is_ones = np.all(X[:, 0] == 1)
        return has_three_columns and leading_column_is_ones

    model = PatsyModel(CheckingClassifier(check_X=check_X_intercept),
                       "y ~ x1 + x2", add_intercept=True)
    model.fit(data)
    model.predict(data)