def test_experiment_iris(self):
    """Run the Experiment notebook on the iris dataset, then the Deployment
    notebook, and verify the shape of the deployed server's response."""
    experiment_params = {
        "dataset": "/tmp/data/iris.csv",
        "target": "Species",
        "filter_type": "remover",
        "model_features": "",
        "one_hot_features": "",
        "n_estimators": 10,
        "criterion": "gini",
        "max_depth": None,
        "max_features": "auto",
        "class_weight": None,
        "method": "predict_proba",
    }
    papermill.execute_notebook(
        "Experiment.ipynb",
        "/dev/null",
        parameters=experiment_params,
    )
    papermill.execute_notebook(
        "Deployment.ipynb",
        "/dev/null",
    )
    data = datasets.iris_testdata()
    with server.Server() as s:
        response = s.test(data=data)
        names = response["names"]
        ndarray = response["ndarray"]
        # 4 features + 1 class + 3 probas
        self.assertEqual(len(ndarray[0]), 8)
        self.assertEqual(len(names), 8)
def test_experiment_iris(self):
    """Execute the Experiment notebook on iris with time-budgeted training
    parameters, deploy it, and check the prediction payload shape."""
    experiment_params = {
        "dataset": "/tmp/data/iris.csv",
        "target": "Species",
        "filter_type": "remover",
        "model_features": "",
        "one_hot_features": "",
        "time_left_for_this_task": 30,
        "per_run_time_limit": 30,
        "ensemble_size": 5,
        "method": "predict_proba",
    }
    papermill.execute_notebook(
        "Experiment.ipynb",
        "/dev/null",
        parameters=experiment_params,
    )
    papermill.execute_notebook(
        "Deployment.ipynb",
        "/dev/null",
    )
    data = datasets.iris_testdata()
    with server.Server() as s:
        response = s.test(data=data)
        names = response["names"]
        ndarray = response["ndarray"]
        # 4 features + 1 class + 3 probas
        self.assertEqual(len(ndarray[0]), 8)
        self.assertEqual(len(names), 8)
def test_experiment_iris(self):
    """Execute the Experiment notebook on iris with linear-model style
    parameters, deploy it, and check the prediction payload shape."""
    experiment_params = {
        "dataset": "/tmp/data/iris.csv",
        "target": "Species",
        "filter_type": "remover",
        "model_features": "",
        "ordinal_features": "",
        "penalty": "l2",
        "C": 1.0,
        "fit_intercept": True,
        "class_weight": None,
        "solver": "liblinear",
        "max_iter": 100,
        "multi_class": "auto",
        "method": "predict_proba",
    }
    papermill.execute_notebook(
        "Experiment.ipynb",
        "/dev/null",
        parameters=experiment_params,
    )
    papermill.execute_notebook(
        "Deployment.ipynb",
        "/dev/null",
    )
    data = datasets.iris_testdata()
    with server.Server() as s:
        response = s.test(data=data)
        names = response["names"]
        ndarray = response["ndarray"]
        # 4 features + 1 class + 3 probas
        self.assertEqual(len(ndarray[0]), 8)
        self.assertEqual(len(names), 8)
def test_experiment_iris(self):
    """Execute the Experiment notebook on iris with kernel-model style
    parameters, deploy it, and check the prediction payload shape."""
    experiment_params = {
        "dataset": "/tmp/data/iris.csv",
        "target": "Species",
        "filter_type": "remover",
        "model_features": "",
        "one_hot_features": "",
        "C": 1.0,
        "kernel": "rbf",
        "degree": 3,
        "gamma": "auto",
        "probability": True,
        "max_iter": -1,
        "method": "predict_proba",
    }
    papermill.execute_notebook(
        "Experiment.ipynb",
        "/dev/null",
        parameters=experiment_params,
    )
    papermill.execute_notebook(
        "Deployment.ipynb",
        "/dev/null",
    )
    data = datasets.iris_testdata()
    with server.Server() as s:
        response = s.test(data=data)
        names = response["names"]
        ndarray = response["ndarray"]
        # 4 features + 1 class + 3 probas
        self.assertEqual(len(ndarray[0]), 8)
        self.assertEqual(len(names), 8)
def test_experiment_iris(self):
    """Execute the Experiment notebook on iris with grouping/budget
    parameters, deploy it, and verify the number of output columns.

    Fix: removed a leftover debug ``print(names)`` that polluted the
    test runner's output; the assertions already cover ``names``.
    """
    experiment_params = {
        "dataset": "/tmp/data/iris.csv",
        "target": "Species",
        "date": None,
        "group": ["SepalLengthCm"],
        "budget": 20,
    }
    papermill.execute_notebook(
        "Experiment.ipynb",
        "/dev/null",
        parameters=experiment_params,
    )
    papermill.execute_notebook(
        "Deployment.ipynb",
        "/dev/null",
    )
    data = datasets.iris_testdata()
    with server.Server() as s:
        response = s.test(data=data)
        names = response["names"]
        ndarray = response["ndarray"]
        # 4 original features + 8 new features
        self.assertEqual(len(ndarray[0]), 12)
        self.assertEqual(len(names), 12)
def test_experiment_iris(self):
    """Execute the Experiment notebook on iris with neural-network style
    parameters, deploy it, and check the prediction payload shape."""
    experiment_params = {
        "dataset": "/tmp/data/iris.csv",
        "target": "Species",
        "filter_type": "remover",
        "model_features": "",
        "one_hot_features": "",
        "hidden_layer_sizes": 100,
        "activation": "relu",
        "solver": "adam",
        "learning_rate": "constant",
        "max_iter": 200,
        "shuffle": True,
        "method": "predict_proba",
    }
    papermill.execute_notebook(
        "Experiment.ipynb",
        "/dev/null",
        parameters=experiment_params,
    )
    papermill.execute_notebook(
        "Deployment.ipynb",
        "/dev/null",
    )
    data = datasets.iris_testdata()
    with server.Server() as s:
        response = s.test(data=data)
        names = response["names"]
        ndarray = response["ndarray"]
        # 4 features + 1 class + 3 probas
        self.assertEqual(len(ndarray[0]), 8)
        self.assertEqual(len(names), 8)
def test_experiment_iris(self):
    """Execute the Experiment notebook on iris with anomaly-score style
    parameters, deploy it, and check the scored payload shape.

    Note: this variant passes no ``target`` parameter.
    """
    experiment_params = {
        "dataset": "/tmp/data/iris.csv",
        "filter_type": "remover",
        "model_features": "Species",
        "max_samples": "auto",
        "contamination": 0.1,
        "max_features": 1.0,
    }
    papermill.execute_notebook(
        "Experiment.ipynb",
        "/dev/null",
        parameters=experiment_params,
    )
    papermill.execute_notebook(
        "Deployment.ipynb",
        "/dev/null",
    )
    data = datasets.iris_testdata()
    with server.Server() as s:
        response = s.test(data=data)
        names = response["names"]
        ndarray = response["ndarray"]
        # 4 features + 1 anomaly score
        self.assertEqual(len(ndarray[0]), 5)
        self.assertEqual(len(names), 5)
def test_experiment_iris(self):
    """Execute the Experiment notebook on iris with imputation-strategy
    parameters, deploy it, and check the transformed payload shape."""
    experiment_params = {
        "dataset": "/tmp/data/iris.csv",
        "target": "Species",
        "strategy_num": "mean",
        "strategy_cat": "most_frequent",
        "fillvalue_num": 0,
        "fillvalue_cat": "",
    }
    papermill.execute_notebook(
        "Experiment.ipynb",
        "/dev/null",
        parameters=experiment_params,
    )
    papermill.execute_notebook(
        "Deployment.ipynb",
        "/dev/null",
    )
    data = datasets.iris_testdata()
    with server.Server() as s:
        response = s.test(data=data)
        names = response["names"]
        ndarray = response["ndarray"]
        # 4 features
        self.assertEqual(len(ndarray[0]), 4)
        self.assertEqual(len(names), 4)
def test_experiment_iris(self):
    """Execute the Experiment notebook on iris with clustering-style
    parameters, deploy it, and check the clustered payload shape.

    Note: this variant passes no ``target`` parameter.
    """
    experiment_params = {
        "dataset": "/tmp/data/iris.csv",
        "filter_type": "remover",
        "model_features": "Species",
        "n_clusters": 3,
        "n_init": 10,
        "max_iter": 300,
        "algorithm": "auto",
    }
    papermill.execute_notebook(
        "Experiment.ipynb",
        "/dev/null",
        parameters=experiment_params,
    )
    papermill.execute_notebook(
        "Deployment.ipynb",
        "/dev/null",
    )
    data = datasets.iris_testdata()
    with server.Server() as s:
        response = s.test(data=data)
        names = response["names"]
        ndarray = response["ndarray"]
        # 4 features + 1 cluster + 3 distance to clusters
        self.assertEqual(len(ndarray[0]), 8)
        self.assertEqual(len(names), 8)
def test_experiment_iris(self):
    """Execute the Experiment notebook on iris with feature-filtering
    cutoff/threshold parameters, deploy it, and check that one column
    was dropped from the payload."""
    experiment_params = {
        "dataset": "/tmp/data/iris.csv",
        "target": "Species",
        "cutoff": 0.9,
        "threshold": 0.0,
    }
    papermill.execute_notebook(
        "Experiment.ipynb",
        "/dev/null",
        parameters=experiment_params,
    )
    papermill.execute_notebook(
        "Deployment.ipynb",
        "/dev/null",
    )
    data = datasets.iris_testdata()
    with server.Server() as s:
        response = s.test(data=data)
        names = response["names"]
        ndarray = response["ndarray"]
        # 4 features - 1 removed
        self.assertEqual(len(ndarray[0]), 3)
        self.assertEqual(len(names), 3)