# Manual grid search over regularization strength (C) and penalty type for
# logistic regression, evaluated with walk-forward time-series CV (tscv).
# Each (C, penalty) pair is refit on every split and its per-split scores
# are plotted.
from sklearn.metrics import accuracy_score

for c_value in C:
    for penalty_type in penalty:
        # BUG FIX: the original chained .fit(X_train, y_train) onto the
        # constructor, fitting on stale data from a previous cell before the
        # CV loop below (re)defined X_train. The model is fit per split
        # inside the loop, so the premature fit is dropped.
        clf = LogisticRegression(C=c_value, penalty=penalty_type,
                                 solver='saga', max_iter=100)
        scoring = []
        accuracyScore = []
        for train_index, test_index in tscv.split(X):
            print("TRAIN:", train_index, "TEST:", test_index)
            X_train, X_test = X[train_index], X[test_index]
            y_train, y_test = y[train_index], y[test_index]
            clf.fit(X_train, y_train)

            # Mean accuracy on the held-out fold (classifier .score).
            test_score = clf.score(X_test, y_test)
            # BUG FIX: sklearn estimators have no .accuracy_score method
            # (the original raised AttributeError); use
            # sklearn.metrics.accuracy_score on the predicted labels.
            testAcScore = accuracy_score(y_test, clf.predict(X_test))
            scoring.append(test_score)
            accuracyScore.append(testAcScore)

            print(test_score)
            print('i: ', c_value)
            print('j: ', penalty_type)

        # Per-split score curves for this hyperparameter combination.
        plt.plot(scoring)
        plt.plot(accuracyScore)
        plt.show()
'''
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
Example #2
0
plot_feature_importance(forest.feature_importances_, X.columns,
                        'RANDOM FOREST CLASSIFIER')

# In[12]:

#logistic Regression
from sklearn.linear_model import LogisticRegression

# Fit a baseline logistic regression on the training split and report
# its test-set accuracy as a percentage.
from sklearn.metrics import accuracy_score

logistic_regression = LogisticRegression()
logistic_regression.fit(X_train, y_train)

# In[14]:

y_pred = logistic_regression.predict(X_test)
# BUG FIX: LogisticRegression has no .accuracy_score method (the original
# line raised AttributeError); use sklearn.metrics.accuracy_score instead.
accuracy = accuracy_score(y_test, y_pred)
accuracy_percentage = 100 * accuracy
accuracy_percentage

# In[ ]:

from sklearn.linear_model import LogisticRegressionCV
import numpy as np
import matplotlib.pyplot as plt

# Use the fitted model's first-class coefficients as a proxy for feature
# importance. `importance` is kept at module level for the plot below.
importance = logistic_regression.coef_[0]
# Print one line per feature: its index and coefficient value.
for feature_idx, weight in enumerate(importance):
    print('Feature: %0d, Score: %.5f' % (feature_idx, weight))
# plot feature importance