import numpy as np
import matplotlib.pyplot as plt

# sig, poly_2D_design_matrix and logreg are assumed to come from the
# exercise's own helper modules


def plot_logreg(data, degree, theta, E_list):
    """
    Plot the results of the logistic regression exercise.

    :param data: dict holding the training and testing sets
                 ('x1_train', 'x2_train', 'y_train', 'x1_test', 'x2_test', 'y_test')
    :param degree: degree of the 2D polynomial feature expansion
    :param theta: fitted parameter vector
    :param E_list: list of error values recorded during training
    :return: None
    """

    # Plot the list of errors
    if len(E_list) > 0:
        fig, ax = plt.subplots(1)
        ax.plot(E_list, linewidth=2)
        ax.set_xlabel('Iteration number')
        ax.set_ylabel('Error')
        ax.set_title('Error monitoring')

    # Plot the solution together with the training and testing sets
    K = 100
    x_min = -1
    x_max = 1

    [xx1, xx2] = np.meshgrid(np.linspace(x_min, x_max, K),
                             np.linspace(x_min, x_max, K))

    XX = poly_2D_design_matrix(xx1.reshape(K**2, 1), xx2.reshape(K**2, 1),
                               degree)
    hh = sig(XX.dot(theta)).reshape(K, K)

    fig, ax = plt.subplots(1, 2)

    for k, st in zip([0, 1], ['train', 'test']):
        x1 = data['x1_' + st]
        x2 = data['x2_' + st]
        y = data['y_' + st]

        XX = poly_2D_design_matrix(x1, x2, degree)
        J = logreg.cost(theta, XX, y)

        ax[k].pcolor(xx1, xx2, hh, cmap='cool')

        # Boolean masks for positive and negative examples (indexing with the
        # raw label array would be integer indexing, not a mask)
        pos = (y != 0).ravel()
        neg = ~pos
        ax[k].scatter(x1[pos], x2[pos], marker='o', color='black', s=20)
        ax[k].scatter(x1[neg], x2[neg], marker='x', color='white', s=40)

        ax[k].contour(xx1, xx2, hh, levels=[.5], linewidths=[3])

        ax[k].set_xlabel('x1')
        if k == 0:
            ax[k].set_ylabel('x2')

        ax[k].set_title('{} set J: {:.3f}'.format(st, J))

        # cbar = plt.colorbar()
        # cbar.ax.set_ylabel('h(x)')
        ax[k].set_xlim([x_min, x_max])
        ax[k].set_ylim([x_min, x_max])
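The helpers used by plot_logreg are not shown here. Below is a minimal sketch of what they plausibly look like, assuming sig is the logistic sigmoid and poly_2D_design_matrix stacks one column per monomial x1**i * x2**j with i + j <= degree; this reconstruction is an assumption, not the exercise's actual code.

def sig(z):
    # logistic sigmoid, applied element-wise (assumed definition)
    return 1.0 / (1.0 + np.exp(-z))

def poly_2D_design_matrix(x1, x2, degree):
    # one column per monomial x1**i * x2**j with i + j <= degree,
    # starting with the constant (bias) column; assumed reconstruction
    x1 = np.asarray(x1).reshape(-1, 1)
    x2 = np.asarray(x2).reshape(-1, 1)
    cols = [(x1 ** i) * (x2 ** j)
            for i in range(degree + 1)
            for j in range(degree + 1 - i)]
    return np.hstack(cols)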
Example #2
    def f(theta): return lr.cost(theta, X_train, data['y_train'])

    def df(theta): return lr.grad(theta, X_train, data['y_train'])
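These wrappers already have the shape scipy's minimizers expect (a scalar cost and a gradient vector of the same length as theta). A hedged usage sketch, assuming lr.cost and lr.grad follow that convention; theta0 and the method choice are illustrative:

import numpy as np
import scipy.optimize as opt

theta0 = np.zeros(X_train.shape[1])
# jac=df supplies the analytic gradient, avoiding numerical differentiation
res = opt.minimize(f, theta0, jac=df, method='BFGS')
theta_opt = res.x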
Example #3
def f(theta):
    return lr.cost(theta, X_train, data['y_train'])
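With only the cost wrapper available, a derivative-free routine is the natural fit. A minimal sketch under the same assumptions as above (theta0 is illustrative):

import numpy as np
import scipy.optimize as opt

theta0 = np.zeros(X_train.shape[1])
# Nelder-Mead simplex search: uses cost values only, no gradient required
theta_opt = opt.fmin(f, theta0, maxiter=400)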
Example #4
def cost_cb(th):
    costs.append(logreg.cost(th, Xfm, y, lda))
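cost_cb matches the callback hook of scipy.optimize, which is invoked with the current parameter vector after every iteration. A hedged sketch of how it would typically be wired up, assuming Xfm, y, lda and a start vector theta0 exist as in the surrounding example:

import scipy.optimize as opt

# costs is the list that cost_cb appends to; the optimizer calls the
# callback with the current theta once per iteration, so costs ends up
# holding the training-cost trajectory
res = opt.minimize(lambda th: logreg.cost(th, Xfm, y, lda),
                   theta0, callback=cost_cb)
print('cost per iteration:', costs)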
Example #5
# skip gradient descent or fminunc
#newths = scopt.optimize.fmin(logreg.cost,th,args=(X,y,0))

lda = 0
epsilon = 0.0001
niter = 10000

alphas = (0.1, 0.5, 1, 2)
pltnum = (221, 222, 223, 224)
alpha_costs = {}
for alpha in alphas:
    costs = []
    newths = th
    prev_cost = float("inf")
    for i in range(niter):
        new_cost = logreg.cost(newths,X,y,lda)
        if prev_cost - new_cost <= epsilon:
            break
        costs.append(new_cost)
        prev_cost = new_cost

        newths = logreg.bgd(newths,X,y,alpha,lda)
    costs = np.array(costs)

    alpha_costs[alpha] = costs

numpts = 64
x_min, x_max = x1.min(), x1.max()
y_min, y_max = x2.min(), x2.max()
hx = (x_max - x_min) / numpts
hy = (y_max - y_min) / numpts
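Two things are left implicit in the snippet above. First, logreg.bgd is not shown; presumably it performs one batch gradient-descent step with learning rate alpha and regularisation strength lda. A minimal sketch under that assumption (not the repository's actual implementation; numpy assumed imported as np):

def bgd(theta, X, y, alpha, lda):
    # one batch gradient-descent step on the regularised logistic cost;
    # the bias term theta[0] is conventionally left unregularised
    m = y.size
    h = 1.0 / (1.0 + np.exp(-X.dot(theta)))
    grad = X.T.dot(h - y) / m
    grad[1:] += (lda / m) * theta[1:]
    return theta - alpha * grad

Second, pltnum is defined but never used in the snippet; a plausible continuation is one convergence subplot per learning rate (assuming matplotlib.pyplot is imported as plt):

fig = plt.figure()
for alpha, num in zip(alphas, pltnum):
    ax = fig.add_subplot(num)  # 221, 222, 223, 224 -> 2x2 grid
    ax.plot(alpha_costs[alpha], linewidth=2)
    ax.set_title('alpha = {}'.format(alpha))
    ax.set_xlabel('iteration')
    ax.set_ylabel('cost')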
Example #6

input('\nProgram paused. Press enter to continue.')

# =================== Part 2: Cost and Gradient descent ===================

(m, n) = X.shape

# Add the intercept column of ones to X
ones = np.ones((m, n + 1))
ones[:, 1:] = X
X = ones

# Initialize fitting parameters
initial_theta = np.zeros(n + 1)

# Compute and display initial cost and gradient
(J, grad) = cost(initial_theta, X, y)

print('Cost at initial theta (zeros): %f' % J)
print('Expected cost (approx): 0.693')
print('Gradient at initial theta (zeros): ')
print(grad)
print('Expected gradients (approx):\n -0.1000\n -12.0092\n -11.2628')

# Compute and display cost and gradient with non-zero theta
test_theta = np.array([-24, 0.2, 0.2])
(J, grad) = cost(test_theta, X, y)

print('\nCost at test theta: %f' % J)
print('Expected cost (approx): 0.218')
print('Gradient at test theta: ')
print(grad)
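Here cost returns both the objective and its gradient in one call. A minimal sketch consistent with the expected values printed above (J = 0.693 at theta = 0 follows from -log(0.5)), assuming the standard unregularised logistic regression cost; this is an illustration, not necessarily the file's own definition:

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def cost(theta, X, y):
    # average cross-entropy and its gradient for logistic regression
    m = y.size
    h = sigmoid(X.dot(theta))
    J = -(y.dot(np.log(h)) + (1 - y).dot(np.log(1 - h))) / m
    grad = X.T.dot(h - y) / m
    return J, grad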