# Example 1
# Report training-set accuracy (computed earlier in the file).
# print(...) with a single pre-formatted string runs identically on Py2 and Py3,
# unlike the original Python 2-only `print "...", accuracy` statement.
print("Accuracy on the training set = {}".format(accuracy))

# Compare with the model learned by sklearn's logistic regression, where the
# regularization strength is reg = 1/C. The values below span a logarithmic scale.

# Hoisted out of the loop: importing once is sufficient (the original re-ran
# this import on every iteration).
from sklearn import linear_model

regs = [0.01, 0.05, 0.1, 0.5, 1., 5., 10., 50., 100., 500.]
for reg in regs:
    # --- L2 regularization with sklearn's LogisticRegression ---
    # sklearn's C is the *inverse* regularization strength, hence C = 1/reg.
    sk_logreg_l2 = linear_model.LogisticRegression(C=1.0 / reg, solver='lbfgs',
                                                   fit_intercept=False)
    sk_logreg_l2.fit(XX, y)
    print("Theta found by sklearn with L2 reg: {}".format(sk_logreg_l2.coef_[0]))
    # Loss evaluated with lambda = 0.0 so the raw (unregularized) loss of
    # sklearn's theta is comparable across reg values.
    print("Loss with sklearn theta: {}".format(
        reg_lr1.loss(sk_logreg_l2.coef_[0], XX, y, 0.0)))

    # NOTE(review): the same filename is reused on every iteration, so only the
    # figure for the LAST reg value survives — confirm whether per-reg filenames
    # (e.g. 'fig4_sk_%g.pdf' % reg) were intended.
    plot_utils.plot_decision_boundary_sklearn_poly(
        X, y, sk_logreg_l2, reg, p,
        'Exam 1 score', 'Exam 2 score', ['Not Admitted', 'Admitted'])
    plt.savefig('fig4_sk.pdf')

    # --- L1 regularization with sklearn's LogisticRegression ---
    # liblinear is used here since it supports penalty='l1' (lbfgs does not).
    sk_logreg_l1 = linear_model.LogisticRegression(C=1.0 / reg, solver='liblinear',
                                                   fit_intercept=False, penalty='l1')
    sk_logreg_l1.fit(XX, y)
    print("Theta found by sklearn with L1 reg: {}".format(sk_logreg_l1.coef_[0]))
    print("Loss with sklearn theta: {}".format(
        reg_lr1.loss(sk_logreg_l1.coef_[0], XX, y, 0.0)))

# Plot the regularization path for L1 regression. This call does not depend on
# reg, so it is hoisted out of the loop — the original redrew the same figure
# and overwrote fig5.pdf on all 10 iterations; the final file is identical.
plot_utils.plot_regularization_path(XX, y)
plt.savefig('fig5.pdf')