Example #1
# SKLModel is assumed to be scikit-learn's LogisticRegressionCV, whose
# constructor signature this wrapper mirrors.
from sklearn.linear_model import LogisticRegressionCV as SKLModel


class LogisticRegressionCVImpl:
    def __init__(self,
                 Cs=10,
                 fit_intercept=True,
                 cv=3,
                 dual=False,
                 penalty='l2',
                 scoring=None,
                 solver='lbfgs',
                 tol=0.0001,
                 max_iter=100,
                 class_weight='balanced',
                 n_jobs=None,
                 verbose=0,
                 refit=True,
                 intercept_scaling=1.0,
                 multi_class='ovr',
                 random_state=None):
        self._hyperparams = {
            'Cs': Cs,
            'fit_intercept': fit_intercept,
            'cv': cv,
            'dual': dual,
            'penalty': penalty,
            'scoring': scoring,
            'solver': solver,
            'tol': tol,
            'max_iter': max_iter,
            'class_weight': class_weight,
            'n_jobs': n_jobs,
            'verbose': verbose,
            'refit': refit,
            'intercept_scaling': intercept_scaling,
            'multi_class': multi_class,
            'random_state': random_state
        }
        self._wrapped_model = SKLModel(**self._hyperparams)

    def fit(self, X, y=None):
        # Delegate fitting to the wrapped estimator; y is optional only to
        # satisfy a generic fit(X, y=None) interface.
        if y is not None:
            self._wrapped_model.fit(X, y)
        else:
            self._wrapped_model.fit(X)
        return self

    def predict(self, X):
        return self._wrapped_model.predict(X)

    def predict_proba(self, X):
        return self._wrapped_model.predict_proba(X)

    def decision_function(self, X):
        return self._wrapped_model.decision_function(X)
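
A minimal usage sketch (not part of the original example), assuming SKLModel is scikit-learn's LogisticRegressionCV; the synthetic dataset and split below are illustrative:

from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

# Illustrative data; any binary-classification dataset would do.
X, y = make_classification(n_samples=200, n_features=10, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

model = LogisticRegressionCVImpl(Cs=5, cv=3).fit(X_train, y_train)
print(model.predict(X_test)[:5])
print(model.predict_proba(X_test)[:5])
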
Example #2
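Example #2 assumes an existing train/test split and a fitted logistic-regression model named lr_classifier, none of which appears in the snippet. A minimal sketch of that assumed setup (dataset and names are illustrative, not from the original):

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

# Illustrative stand-in for the original data and model.
X, y = make_classification(n_samples=500, n_features=10, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
lr_classifier = LogisticRegression(max_iter=1000).fit(X_train, y_train)
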
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics

# Test-set predictions and confusion matrix for the fitted model.
lr_predictions = lr_classifier.predict(X_test)
cnf_matrix = metrics.confusion_matrix(y_test, lr_predictions)
class_names = [0, 1]

fig, ax = plt.subplots()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names)
plt.yticks(tick_marks, class_names)
# create heatmap
sns.heatmap(pd.DataFrame(cnf_matrix), annot=True, cmap="YlGnBu", fmt='g')
ax.xaxis.set_label_position("top")
plt.tight_layout()
plt.title('Confusion matrix', y=1.1)
plt.ylabel('Actual label')
plt.xlabel('Predicted label')
plt.show()

print("Accuracy:", metrics.accuracy_score(y_test, lt_predictions))
print("Precision:", metrics.precision_score(y_test, lt_predictions))
print("Recall:", metrics.recall_score(y_test, lt_predictions))

# ROC curve and AUC for the positive class.
y_pred_proba = lr_classifier.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
auc = metrics.roc_auc_score(y_test, y_pred_proba)
plt.plot(fpr, tpr, label="data 1, auc=" + str(auc))
plt.legend(loc=4)
plt.show()
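
For context, a random classifier traces the diagonal of the ROC plot (AUC = 0.5); a purely illustrative variant that redraws the curve with that baseline:

# Illustrative only: the dashed diagonal marks chance-level performance.
plt.plot(fpr, tpr, label="auc=" + str(auc))
plt.plot([0, 1], [0, 1], linestyle="--", color="gray", label="chance (auc=0.5)")
plt.legend(loc=4)
plt.show()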

# Perceptron
from sklearn.linear_model import Perceptron
from sklearn.metrics import accuracy_score, f1_score

perceptron_classifier = Perceptron(random_state=11)
perceptron_classifier.fit(X_train, y_train)
perc_predictions = perceptron_classifier.predict(X_test)
score = accuracy_score(y_test, perc_predictions)
f_score = f1_score(y_test, perc_predictions, average='micro')
print("The accuracy score (Perceptron) is:", score)
print("The micro-averaged F1 score (Perceptron) is:", f_score)