Пример #1
0
    def setUp(self):
        """Build evaluator fixtures from a RandomForest trained on iris.

        Creates ``self.results`` (fully populated evaluator) and
        ``self.empty`` (all-None evaluator) for the tests in this class.

        NOTE(review): this fragment is truncated — the ``self.template``
        assignment on the last line opens a triple-quoted string that is
        not closed within this snippet.
        """
        # Hold out 30% of iris; fixed seed keeps runs deterministic.
        iris = load_iris()
        X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                            iris.target,
                                                            test_size=0.30,
                                                            random_state=0)

        model = RandomForestClassifier()
        model.fit(X_train, y_train)

        # Hard predictions and per-class probabilities for the held-out split.
        y_pred = model.predict(X_test)
        y_score = model.predict_proba(X_test)
        target_names = ['setosa', 'versicolor', 'virginica']
        feature_names = range(4)  # iris has 4 features
        model_name = 'a model'

        # Evaluator with every argument populated ("happy path" fixture).
        self.results = ClassifierEvaluator(estimator=model,
                                           y_true=y_test,
                                           y_pred=y_pred,
                                           y_score=y_score,
                                           feature_names=feature_names,
                                           target_names=target_names,
                                           estimator_name=model_name)

        # Evaluator with every argument None (degenerate-input fixture).
        self.empty = ClassifierEvaluator(estimator=None,
                                         y_true=None,
                                         y_pred=None,
                                         y_score=None,
                                         feature_names=None,
                                         target_names=None,
                                         estimator_name=None)

        self.template = '''
Пример #2
0
class TestReportGeneration(TestCase):
    """Smoke test: a fully populated evaluator can build a report."""

    def setUp(self):
        # Fit a forest on a seeded 70/30 iris split.
        iris = load_iris()
        split = train_test_split(iris.data,
                                 iris.target,
                                 test_size=0.30,
                                 random_state=0)
        X_train, X_test, y_train, y_test = split

        clf = RandomForestClassifier()
        clf.fit(X_train, y_train)

        # Evaluator fixture with predictions, probabilities and metadata.
        self.results = ClassifierEvaluator(
            estimator=clf,
            y_true=y_test,
            y_pred=clf.predict(X_test),
            y_score=clf.predict_proba(X_test),
            feature_names=range(4),
            target_names=['setosa', 'versicolor', 'virginica'],
            estimator_name='a model')

    def test_can_create_report(self):
        # Must run end-to-end without raising.
        self.results.make_report()
    def setUp(self):
        """Build ``self.results`` and ``self.empty`` evaluator fixtures.

        Same shape as the iris-based fixture used elsewhere in this file,
        with the keyword arguments written in a condensed layout.

        NOTE(review): this fragment is truncated — the ``self.template``
        assignment on the last line opens a triple-quoted string that is
        not closed within this snippet.
        """
        # Seeded 70/30 iris split for reproducible fixtures.
        iris = load_iris()
        X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                            iris.target,
                                                            test_size=0.30,
                                                            random_state=0)

        model = RandomForestClassifier()
        model.fit(X_train, y_train)

        # Class predictions and probability scores for the held-out split.
        y_pred = model.predict(X_test)
        y_score = model.predict_proba(X_test)
        target_names = ['setosa', 'versicolor', 'virginica']
        feature_names = range(4)  # iris has 4 features
        model_name = 'a model'

        # Fully populated evaluator.
        self.results = ClassifierEvaluator(estimator=model, y_true=y_test,
                                           y_pred=y_pred, y_score=y_score,
                                           feature_names=feature_names,
                                           target_names=target_names,
                                           estimator_name=model_name)

        # All-None evaluator for degenerate-input tests.
        self.empty = ClassifierEvaluator(estimator=None, y_true=None,
                                         y_pred=None, y_score=None,
                                         feature_names=None,
                                         target_names=None,
                                         estimator_name=None)

        self.template = '''
def test_can_plot():
    """Smoke test: confusion-matrix plotting runs without raising."""
    # Synthetic 200x10 problem, 5 informative features, mild class overlap.
    X, y = datasets.make_classification(200, 10, 5, class_sep=0.65)

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

    model = RandomForestClassifier()
    model.fit(X_train, y_train)

    ClassifierEvaluator(model, y_true=y_test, X=X_test).confusion_matrix()
Пример #5
0
def visualize(X, y, estimator, test_size=0.33, path=None, html=True):
    """Evaluate *estimator* on a split of (X, y) and visualize the result.

    Trains/evaluates via ``split_train_test`` and then either generates an
    HTML report (when both ``html`` is true and ``path`` is given) or falls
    back to accessing the evaluator's confusion matrix.

    :param numpy matrix X: dataset.
    :param numpy matrix y: label matrix.
    :param estimator: scikit-learn style classifier to evaluate.
    :param float test_size: fraction of the dataset held out for testing.
    :param str path: output path for the HTML report; required (together
        with ``html=True``) for report generation.
    :param bool html: when True and ``path`` is set, write an HTML report.
    """
    result = split_train_test(X, y, estimator, test_size)
    # Fixed label set for the news-topic classification task.
    target_names = [
        'business', 'entertainment', 'health', 'politics', 'sports',
        'technology'
    ]
    ce = ClassifierEvaluator(estimator,
                             result['y_test'],
                             result['y_pred'],
                             result['y_score'],
                             target_names=target_names)
    if html and path:
        template = '''
            # Report

            ###1. Estimator Name.

                `{estimator_type}`

            ###2. Confusion Matrix.

                {confusion_matrix}

            ###3. ROC Graph.

                {roc}

            ###4. Precision Recall Graph.

                {precision_recall}
            '''

        ce.generate_report(template, path=path)
    else:
        # NOTE(review): bare attribute access — presumably a plot-producing
        # property on ClassifierEvaluator; confirm it has the intended
        # side effect (elsewhere in this file it is called with parens).
        ce.confusion_matrix
class TestReportGeneration(TestCase):
    """Exercise generate_report with every path/style combination."""

    def setUp(self):
        # Seeded 70/30 iris split so every test run is reproducible.
        iris = load_iris()
        X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                            iris.target,
                                                            test_size=0.30,
                                                            random_state=0)

        forest = RandomForestClassifier()
        forest.fit(X_train, y_train)

        # Evaluator fixture built from the fitted forest.
        self.results = ClassifierEvaluator(
            estimator=forest,
            y_true=y_test,
            y_pred=forest.predict(X_test),
            y_score=forest.predict_proba(X_test),
            feature_names=range(4),
            target_names=['setosa', 'versicolor', 'virginica'],
            estimator_name='a model')

        # Markdown template exercising every report placeholder.
        self.template = '''
                            # Report
                            {estimator_type}
                            {date}
                            {confusion_matrix}
                            {roc}
                            {precision_recall}

                            {feature_importances_table}

                            {feature_importances}
                        '''

    def test_report_creation_no_style(self):
        self.results.generate_report(self.template)

    def test_report_creation_no_style_save_file(self):
        self.results.generate_report(self.template, 'report.html')

    def test_report_creation_style_save_file(self):
        self.results.generate_report(self.template,
                                     style='static/simple.css')

    def test_report_creation_style(self):
        self.results.generate_report(self.template, 'report.html',
                                     style='static/simple.css')
Пример #7
0
def visualize(X, y, estimator, test_size=0.4, html=False):
    """Evaluate *estimator* on a split of (X, y); report or show the matrix.

    When ``html`` is true an HTML report is written to ``report.html``;
    otherwise the evaluator's confusion matrix is accessed.
    """
    split = split_train_test(X, y, estimator, test_size)

    # Fixed label set for the news-topic classification task.
    labels = [
        'business', 'entertainment', 'health', 'politics', 'sports',
        'technology'
    ]
    evaluator = ClassifierEvaluator(estimator, split['y_test'],
                                    split['y_pred'], split['y_score'],
                                    target_names=labels)

    if not html:
        # NOTE(review): bare attribute access — presumably renders the
        # plot as a side effect; confirm against ClassifierEvaluator.
        evaluator.confusion_matrix
        return

    template = '''
            # Report
            {estimator_type}
            {date}
            {confusion_matrix}
            {roc}
            {precision_recall}
            '''

    evaluator.generate_report(template, path='report.html')
Пример #8
0
# Fit the classifier and score the held-out split.
classifier = RandomForestClassifier().fit(X_train, y_train)

y_pred = classifier.predict(X_test)
y_score = classifier.predict_proba(X_test)

feature_list = range(4)  # iris has 4 features
target_names = ['setosa', 'versicolor', 'virginica']

# Bundle the trained model and its outputs into an evaluator.
ce = ClassifierEvaluator(classifier, y_test, y_pred, y_score,
                         feature_list, target_names,
                         estimator_name='super awesome SVC')

# Markdown template with one placeholder per report section.
template = '''
           # Report
           {estimator_type}
           {date}
           {confusion_matrix}
           {roc}
           {precision_recall}
           '''

ce.generate_report(template, 'report.html')
Пример #9
0
# Shuffle and split into equal train/test halves (seeded for determinism).
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=.5,
                                                    random_state=0)

# Fit the classifier and score the held-out half.
classifier = RandomForestClassifier().fit(X_train, y_train)

y_pred = classifier.predict(X_test)
y_score = classifier.predict_proba(X_test)

feature_list = range(4)  # iris has 4 features
target_names = ['setosa', 'versicolor', 'virginica']

# Build an evaluator from the trained model and its predictions.
ce = ClassifierEvaluator(classifier, y_test, y_pred, y_score,
                         feature_list, target_names,
                         estimator_name='super awesome SVC')

report = ce.make_report()

# Renders automatically in Jupyter; alternatively report.save('/path').
report
Пример #10
0
class TestReportGeneration(TestCase):
    """Report generation for both populated and all-None evaluators."""

    def setUp(self):
        # Seeded 70/30 iris split keeps every test run deterministic.
        iris = load_iris()
        X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                            iris.target,
                                                            test_size=0.30,
                                                            random_state=0)

        forest = RandomForestClassifier()
        forest.fit(X_train, y_train)

        # Evaluator with every field populated.
        self.results = ClassifierEvaluator(
            estimator=forest,
            y_true=y_test,
            y_pred=forest.predict(X_test),
            y_score=forest.predict_proba(X_test),
            feature_names=range(4),
            target_names=['setosa', 'versicolor', 'virginica'],
            estimator_name='a model')

        # Evaluator with every field None — degenerate-input coverage.
        self.empty = ClassifierEvaluator(estimator=None,
                                         y_true=None,
                                         y_pred=None,
                                         y_score=None,
                                         feature_names=None,
                                         target_names=None,
                                         estimator_name=None)

        # Template exercising every report placeholder.
        self.template = '''
                            # Report
                            {estimator_type}
                            {date}
                            {confusion_matrix}
                            {roc}
                            {precision_recall}

                            {feature_importances_table}

                            {feature_importances}
                        '''

    def test_no_style(self):
        self.results.generate_report(template=self.template)

    def test_no_style_save(self):
        self.results.generate_report(template=self.template,
                                     path='tmp_report.html')

    def test_generate_with_all_attr_none(self):
        self.empty.generate_report(template='# Title')

    def test_apply_custom_css(self):
        # Compare against a checked-in baseline rendering.
        with open('baseline_html/empty.html') as f:
            expected = f.read()

        result = self.empty.generate_report(template='# Title\n ## Section',
                                            style='static/simple.css')

        assert expected == result

    def test_returning_and_saving_are_the_same(self):
        # Saving to disk and returning must produce identical HTML.
        self.empty.generate_report(template='# Title\n ## Section',
                                   path='tmp_report.html')

        with open('tmp_report.html') as f:
            saved = f.read()

        returned = self.empty.generate_report(template='# Title\n ## Section')

        assert saved == returned
# Shuffle and split the data into equal train/test halves (seeded).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                    random_state=0)

# Linear SVC wrapped one-vs-rest; probability=True enables probability
# estimates on the underlying SVC.
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
                                 random_state=random_state))
classifier = classifier.fit(X_train, y_train)

y_pred = classifier.predict(X_test)
y_score = classifier.decision_function(X_test)

target_names = ['setosa', 'versicolor', 'virginica']
feature_list = range(4)  # iris has 4 features

# Wrap the trained model plus its outputs in an evaluator.
ce = ClassifierEvaluator(classifier, y_test, y_pred, y_score,
                         feature_list, target_names,
                         estimator_name='super awesome SVC')

# Markdown template with one placeholder per report section.
template = '''
           # Report
           {estimator_type}
           {date}
           {confusion_matrix}
           {roc}
           {precision_recall}
           '''

ce.generate_report(template, 'report.html')
class TestReportGeneration(TestCase):
    """generate_report behaviour: styling, saving, and all-None inputs."""

    def setUp(self):
        iris = load_iris()
        # Reproducible 70/30 split of the iris data.
        splits = train_test_split(iris.data, iris.target,
                                  test_size=0.30, random_state=0)
        X_train, X_test, y_train, y_test = splits

        clf = RandomForestClassifier()
        clf.fit(X_train, y_train)

        predictions = clf.predict(X_test)
        scores = clf.predict_proba(X_test)

        # Fully populated evaluator fixture.
        self.results = ClassifierEvaluator(estimator=clf, y_true=y_test,
                                           y_pred=predictions,
                                           y_score=scores,
                                           feature_names=range(4),
                                           target_names=['setosa',
                                                         'versicolor',
                                                         'virginica'],
                                           estimator_name='a model')

        # All-None evaluator for degenerate-input tests.
        self.empty = ClassifierEvaluator(estimator=None, y_true=None,
                                         y_pred=None, y_score=None,
                                         feature_names=None,
                                         target_names=None,
                                         estimator_name=None)

        # Template exercising every report placeholder.
        self.template = '''
                            # Report
                            {estimator_type}
                            {date}
                            {confusion_matrix}
                            {roc}
                            {precision_recall}

                            {feature_importances_table}

                            {feature_importances}
                        '''

    def test_no_style(self):
        self.results.generate_report(template=self.template)

    def test_no_style_save(self):
        self.results.generate_report(template=self.template,
                                     path='tmp_report.html')

    def test_generate_with_all_attr_none(self):
        self.empty.generate_report(template='# Title')

    def test_apply_custom_css(self):
        # Compare against a checked-in baseline rendering.
        with open('baseline_html/empty.html') as f:
            expected = f.read()

        result = self.empty.generate_report(template='# Title\n ## Section',
                                            style='static/simple.css')

        assert expected == result

    def test_returning_and_saving_are_the_same(self):
        # Saving to disk and returning must produce identical HTML.
        self.empty.generate_report(template='# Title\n ## Section',
                                   path='tmp_report.html')

        with open('tmp_report.html') as f:
            saved = f.read()

        returned = self.empty.generate_report(template='# Title\n ## Section')

        assert saved == returned