# Example no. 1
reg_lr1 = RegLogisticRegressor()

# run fmin on the loss function and gradient implemented in logistic_regressor.py

reg = 1.0
theta_opt = reg_lr1.train(XX,y,reg=reg,num_iters=1000,norm=False)

# print the theta found and the final loss

print 'Theta found by fmin_bfgs: ',theta_opt
print "Final loss = ", reg_lr1.loss(theta_opt,XX,y,0.0)

# plot the decision boundary

plot_utils.plot_decision_boundary_poly(X,y,theta_opt,reg,p,'Chip Test 1', 'Chip Test 2',['y = 0','y = 1'])
plt.savefig('fig4.pdf')

# compute accuracy on training set

reg_lr1.theta = theta_opt
predy = reg_lr1.predict(XX)

# TODO: fill in the expression for accuracy of prediction
accuracy = 1. * sum([predy[i] == y[i] for i in xrange(len(y))]) / len(y)
print "Accuracy on the training set = ", accuracy

# Compare with model learned by sklearn's logistic regression with reg = 1/C
# the regularization parameter set below can be varied (on a logarithmic scale)
# Candidate regularization strengths: 1x and 5x steps per decade, 1e-2..5e2.
regs = [
    0.01, 0.05,
    0.1, 0.5,
    1., 5.,
    10., 50.,
    100., 500.,
]