class TestReportGeneration(TestCase):
    """Smoke tests: ClassifierEvaluator.make_report() runs without raising."""

    def setUp(self):
        """Fit a small classifier on iris and wrap it in a ClassifierEvaluator."""
        iris = load_iris()
        X_train, X_test, y_train, y_test = train_test_split(
            iris.data, iris.target, test_size=0.30, random_state=0
        )
        # Seed the forest so the fitted model — and therefore the report
        # contents — are reproducible across test runs (the split above is
        # already seeded; the estimator was not).
        model = RandomForestClassifier(random_state=0)
        model.fit(X_train, y_train)
        y_pred = model.predict(X_test)
        y_score = model.predict_proba(X_test)
        target_names = ['setosa', 'versicolor', 'virginica']
        feature_names = range(4)  # iris has 4 features
        model_name = 'a model'
        self.results = ClassifierEvaluator(
            estimator=model,
            y_true=y_test,
            y_pred=y_pred,
            y_score=y_score,
            feature_names=feature_names,
            target_names=target_names,
            estimator_name=model_name,
        )

    def test_can_create_report(self):
        """Report generation should complete without raising."""
        self.results.make_report()
# Shuffle and split training and test sets.
# NOTE(review): X and y are assumed to be defined earlier in this script —
# confirm against the surrounding context.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.5, random_state=0
)

# Learn to predict each class against the other.
classifier = RandomForestClassifier()
classifier = classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
y_score = classifier.predict_proba(X_test)

feature_list = range(4)
target_names = ['setosa', 'versicolor', 'virginica']

# Create a trained model instance. Keyword arguments keep the call
# self-documenting and order-independent (matches the style used in the
# test suite for ClassifierEvaluator).
ce = ClassifierEvaluator(
    estimator=classifier,
    y_true=y_test,
    y_pred=y_pred,
    y_score=y_score,
    feature_names=feature_list,
    target_names=target_names,
    # The model is a RandomForestClassifier, so label it as such —
    # the previous 'super awesome SVC' label was misleading.
    estimator_name='super awesome RF',
)
report = ce.make_report()

# This will automatically render in Jupyter, or you can do report.save('/path').
report