Example #1
def plot_boundary(theta, grau):
    x = np.linspace(-1,1.5,50)
    y = np.linspace(-0.8,1.2,50)
    
    xx, yy = np.meshgrid(x, y)

    theta = np.asarray(theta).ravel()

    X_poly = mapFeature(xx.ravel(), yy.ravel(), grau)

    Z = sigmoide(X_poly.dot(theta))
    Z = Z.reshape(xx.shape)
    
    plt.title('lambda = 1')
    plt.contour(x, y, Z, [0.5], linewidths=1, colors='green')
    
    legendas = [Line2D([0], [0], marker='+', color='k', lw=0, label='Accepted (y = 1)'),
                Line2D([0], [0], marker='o', color='y', lw=0, label='Rejected (y = 0)'),
                Line2D([0], [0], color='g', lw=2, label='Decision Boundary')]
    
    plt.legend(handles=legendas)
    
    dirname = os.path.dirname(__file__)
    plt.savefig(os.path.join(dirname, 'plot4.2.png'))
    plt.show()
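Every example below leans on a mapFeature helper (and Example #1 on a sigmoide helper) that the listing never shows. A minimal sketch, assuming the usual polynomial feature mapping from the ex2 exercise (all terms x1**(i-j) * x2**j up to the given degree, preceded by a column of ones); the exact signature varies across the snippets, some passing a degree and some passing column vectors:

import numpy as np

def mapFeature(x1, x2, degree=6):
    # Map two feature vectors to every polynomial term of x1 and x2
    # up to 'degree', with a leading bias column of ones.
    x1 = np.asarray(x1, dtype=float).ravel()
    x2 = np.asarray(x2, dtype=float).ravel()
    cols = [np.ones_like(x1)]
    for i in range(1, degree + 1):
        for j in range(i + 1):
            cols.append(x1 ** (i - j) * x2 ** j)
    return np.column_stack(cols)

def sigmoide(z):
    # Logistic sigmoid, under the Portuguese name Example #1 uses.
    return 1.0 / (1.0 + np.exp(-z))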
Example #2
def plotReg(x, y, theta, degree):
    pos = np.where(y == 1)
    neg = np.where(y == 0)
    p.scatter(x[0, pos], x[1, pos], marker='o')
    p.scatter(x[0, neg], x[1, neg], marker='x')
    nx = 100
    rangex = np.array([np.amin(x[0, :]), np.amax(x[0, :])])
    dx = (rangex[1] - rangex[0]) / nx
    ny = 100
    rangey = np.array([np.amin(x[1, :]), np.amax(x[1, :])])
    dy = (rangey[1] - rangey[0]) / ny
    xplot = np.empty([2, 1])
    xcontour = np.empty(nx)
    ycontour = np.empty(ny)
    zcontour = np.empty([nx, ny])
    for ii in range(nx):
        for jj in range(ny):
            xplot[0, 0] = rangex[0] + ii * dx
            xcontour[ii] = xplot[0, 0]
            xplot[1, 0] = rangey[0] + jj * dy
            ycontour[jj] = xplot[1, 0]
            (zplot, nfeatures) = mapFeature(xplot, degree)
            zcontour[ii, jj] = np.dot(zplot[:, 0], theta)
    p.contour(xcontour, ycontour, zcontour, levels=[0])
    p.show()
def plotDecisionBoundary(theta, u, v):
    z = np.zeros((len(u), len(v)))

    for i in range(len(u)):
        for j in range(len(v)):
            z[i, j] = np.dot(mapFeature(u[i], v[j]), theta)
    return z
def plotDecisionBoundary(theta, X, y):
    """
    Plots the data points X and y into a new figure with the decision boundary defined by theta.
    Parameters
    ----------
    theta : ndarray, shape (n_features,)
        Logistic regression parameters.
    X : ndarray, shape (n_samples, n_features)
        Training data, where n_samples is the number of samples and n_features is the number of features.
    y : ndarray, shape (n_samples,)
        Labels.
    """
    if X.shape[1] <= 3:
        plot_X = np.array([np.amin(X[:, 1]) - 2, np.amax(X[:, 1]) + 2])
        plot_y = -1.0 / theta[2] * (theta[1] * plot_X + theta[0])
        plt.plot(plot_X, plot_y)
    else:
        u = np.linspace(-1, 1.5, 50)
        # u.resize((len(u), 1))
        v = np.linspace(-1, 1.5, 50)
        # v.resize((len(v), 1))
        z = np.zeros((len(u), len(v)))
        for i in range(len(u)):
            for j in range(len(v)):
                z[i, j] = mapFeature(u[i:i + 1], v[j:j + 1]).dot(theta)
        z = z.T
        u, v = np.meshgrid(u, v)
        cs = plt.contour(u, v, z, levels=[0])
        fmt = {}
        strs = ['Decision boundary']
        for l, s in zip(cs.levels, strs):
            fmt[l] = s

        plt.clabel(cs, cs.levels[::2], inline=True, fmt=fmt, fontsize=10)
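The two-endpoint line in the Mx3 branch comes from the boundary condition itself: the logistic hypothesis crosses 0.5 exactly where $\theta^T x = 0$, so with an intercept column

$$\theta_0 + \theta_1 x_1 + \theta_2 x_2 = 0 \quad\Longrightarrow\quad x_2 = -\frac{1}{\theta_2}\left(\theta_1 x_1 + \theta_0\right),$$

which is the recurring line plot_y = (-1.0 / theta[2]) * (theta[1] * plot_x + theta[0]) in these examples.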
Example #6
def plotDecisionBoundary(theta, X, y):
    # Plot Data
    f,j1,a,b=plotData.plotData(X[:,1:3], y)

    if X.shape[1] <= 3:
        # Only need 2 points to define a line, so choose two endpoints
        plot_x = [np.amin(X[:, 1:2], axis=0) - 2, np.amax(X[:, 1:2], axis=0) + 2]

        # Calculate the decision boundary line
        list1 = [i * theta[1, 0] for i in plot_x]
        list1 = [j + theta[0, 0] - 0.5 for j in list1]
        x = -(1 / theta[2, 0])
        plot_y = [k * x for k in list1]
        plot_y = np.matrix(plot_y)

        # Plot, and adjust axes for better viewing
        c, = j1.plot(plot_x, plot_y)
        c.set_label('Decision Boundary')
        plt.axis([30, 100, 30, 100])
    else:
        # Here is the grid range
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)
        z = np.zeros((len(u), len(v)))
        # Evaluate z = theta*x over the grid
        for i in range(len(u)):
            for j in range(len(v)):
                qa = mapFeature.mapFeature(u[i], v[j])
                z[i, j] = qa @ theta
        z = z.T  # important to transpose z before calling contour
        c = j1.contour(u, v, z, levels=[0])
        c.collections[0].set_label('Decision Boundary')
    return f, j1, a, b
Example #7
def plotDecisionBoundary(theta, X, y, xlabel='', ylabel='', legends=[]):
    pos = y[:, 0] == 1
    neg = y[:, 0] == 0
    plt.scatter(X[pos, 0], X[pos, 1], c='k', marker='+', label=legends[0])
    plt.scatter(X[neg, 0],
                X[neg, 1],
                c='y',
                marker='o',
                label=legends[1],
                alpha=0.5)

    if X.shape[1] <= 2:
        # Only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([min(X[:, 0]) - 2, max(X[:, 0]) + 2])
        plot_y = (-1 / theta[2]) * (theta[1] * plot_x + theta[0])
        plt.plot(plot_x, plot_y, label=legends[2])
    else:
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)
        z = np.zeros((u.size, v.size))
        for i in range(u.size):
            for j in range(v.size):
                z[i, j] = mapFeature(
                    np.array(u[i]).reshape((1, 1)),
                    np.array(v[j]).reshape((1, 1))).dot(theta)
        cs = plt.contour(u, v, z.T, levels=[0.0])
        cs.collections[0].set_label(legends[2])

    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.legend()
    plt.show()
def plotDecisionBoundary(theta, X, y):
    plt.figure()
    pos = np.where(y == 1)
    neg = np.where(y == 0)
    plt.scatter(X[pos[0], 1], X[pos[0], 2], color='red', marker='o')
    plt.scatter(X[neg[0], 1], X[neg[0], 2], color='blue', marker='x')
    # plt.xlim(30, 100)
    # plt.ylim(30, 100)
    # plt.xlim(-1, 1.5)
    # plt.ylim(-1, 1.5)
    plt.xlabel('Exam 1 score')
    plt.ylabel('Exam 2 score')
    plt.legend(['Admitted', 'Not admitted'])

    if len(theta) <= 3:
        plot_x = np.array([np.min(X[:, 1]) - 2, np.max(X[:, 1]) + 2])
        plot_y = (-1. / theta[2]) * (theta[1] * plot_x + theta[0])
        plt.plot(plot_x, plot_y)
    else:
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)
        z = np.zeros((len(u), len(v)))
        for i in range(len(u)):
            for j in range(len(v)):
                z[i, j] = mapFeature(u[i], v[j]).dot(theta)
        z = z.T
        plt.contour(u, v, z, levels=[0])

    plt.show()
def plotDecisionBoundary(theta, X, y):
    
    # Create New Figure
    plotData(X[:,1:3],y.T[0])

    if X.shape[1] <= 3:
        # Only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([np.min(X[:,1]), np.max(X[:,1])])

        # Calculate the decision boundary line
        plot_y = (-1/theta[2])*(theta[1]*plot_x + theta[0])

        # Plot, and adjust axes for better viewing
        plt.plot(plot_x,plot_y)

        # Legend, specific for the exercise
        plt.legend(['Admitted', 'Not admitted', 'Decision Boundary'])
        plt.axis([30, 100, 30, 100])
    else:
        # Here is the grid range
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)

        z = np.zeros((u.shape[0], v.shape[0]))
        # Evaluate z = theta*x over the grid
        for i in range(0,u.shape[0]):
            for j in range(0,v.shape[0]):
                z[i,j] = np.dot(theta.T, mapFeature(u[i],v[j]))

        # !!! important for plot
        u, v = np.meshgrid(u, v)

        # Plot z = 0
        # Notice you need to specify the range [0, 0]
        plt.contour(u, v, z.T, (0,), colors='g', linewidths=2)
Example #10
def plotDecisionBoundary(theta, X, y):
    plt, pos, neg = plotData(X[:, 1:3], y)
    r, c = X.shape
    if c <= 3:
        plot_x = np.array([min(X[:, 1]) - 2, max(X[:, 1]) + 2])
        plot_y = (-1. / theta[2]) * (theta[1] * plot_x + theta[0])
        db = plt.plot(plot_x, plot_y)[0]
        plt.legend((pos, neg, db),
                   ('Admitted', 'Not Admitted', 'Decision Boundary'),
                   loc='lower left')
        plt.axis([30, 100, 30, 100])
    else:
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)
        z = np.zeros((len(u), len(v)))
        for i in range(len(u)):
            for j in range(len(v)):
                z[i, j] = np.dot(
                    mapFeature(np.asarray([[u[i]]]), np.asarray([[v[j]]])),
                    theta)
        z = z.T
        #print(z)
        CS = plt.contour(u, v, z, levels=[0])
        db = CS.collections[0]
        plt.clabel(CS, fmt='%2.1d', colors='g', fontsize=14)
        plt.legend((pos, neg, db), ('y = 1', 'y = 0', 'Decision Boundary'),
                   loc='upper right')

    return plt
Example #11
def plotDecisionBoundary(theta, X, y):

    # Create New Figure
    plotData(X[:, 1:3], y.T[0])

    if X.shape[1] <= 3:
        # Only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([np.min(X[:, 1]), np.max(X[:, 1])])

        # Calculate the decision boundary line
        plot_y = (-1 / theta[2]) * (theta[1] * plot_x + theta[0])

        # Plot, and adjust axes for better viewing
        plt.plot(plot_x, plot_y)

        # Legend, specific for the exercise
        plt.legend(['Admitted', 'Not admitted', 'Decision Boundary'])
        plt.axis([30, 100, 30, 100])
    else:
        # Here is the grid range
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)

        z = np.zeros((u.shape[0], v.shape[0]))
        # Evaluate z = theta*x over the grid
        for i in range(0, u.shape[0]):
            for j in range(0, v.shape[0]):
                z[i, j] = np.dot(theta.T, mapFeature(u[i], v[j]))

        # !!! important for plot
        u, v = np.meshgrid(u, v)

        # Plot z = 0
        # Notice you need to specify the range [0, 0]
        plt.contour(u, v, z.T, (0, ), colors='g', linewidths=2)
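The repeated "important to transpose z before calling contour" note (and the meshgrid call above, which serves the same purpose) exists because the loops fill z[i, j] with i indexing u and j indexing v, while plt.contour expects Z[row, col] to run over v down the rows. A vectorized sketch that avoids both the loops and the transpose, assuming the hypothetical mapFeature sketch given after Example #1 (the theta values here are made up so the level set is the unit circle):

import numpy as np
import matplotlib.pyplot as plt

u = np.linspace(-1, 1.5, 50)
v = np.linspace(-1, 1.5, 50)
U, V = np.meshgrid(u, v)              # both shaped (len(v), len(u))

theta = np.zeros(28)                  # 28 terms for the degree-6 mapping
theta[[0, 3, 5]] = [-1.0, 1.0, 1.0]   # boundary: x1**2 + x2**2 = 1

Z = mapFeature(U.ravel(), V.ravel()).dot(theta).reshape(U.shape)
plt.contour(U, V, Z, levels=[0])      # orientation already matches U, V
plt.show()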
def plotDecisionBoundary(theta, X, y):

    import matplotlib.pyplot as plt
    import numpy as np
    import mapFeature as mf
    import plotData as pd

    # Plot Data
    fig = plt.figure()

    plt, p1, p2 = pd.plotData(X[:, 1:3], y)


    if X.shape[1] <= 3:
        # Only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([min(X[:, 1]), max(X[:, 1])])

        # Calculate the decision boundary line
        plot_y = (-1. / theta[2]) * (theta[1] * plot_x + theta[0])

        # Plot, and adjust axes for better viewing
        p3 = plt.plot(plot_x, plot_y)

        # Legend, specific for the exercise
        plt.legend((p1, p2, p3[0]),
                   ('Admitted', 'Not Admitted', 'Decision Boundary'),
                   numpoints=1,
                   handlelength=0.5)

        plt.axis([30, 100, 30, 100])

        plt.show()
    else:
        # Here is the grid range
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)

        z = np.zeros((len(u), len(v)))
        # Evaluate z = theta*x over the grid
        for i in range(len(u)):
            for j in range(len(v)):
                z[i, j] = np.dot(
                    mf.mapFeature(np.array([u[i]]), np.array([v[j]])), theta)
        z = np.transpose(z)  # important to transpose z before calling contour

        # Plot z = 0
        # Notice you need to specify the level 0
        # we get collections[0] so that we can display a legend properly
        p3 = plt.contour(u, v, z, levels=[0], linewidths=2).collections[0]

        # Legend, specific for the exercise
        plt.legend((p1, p2, p3), ('y = 1', 'y = 0', 'Decision Boundary'),
                   numpoints=1,
                   handlelength=0)

        plt.show()

Example #14
def plot_data(X, y, theta=np.array([])):
    """ Plot student admission data on a graph """

    # Set y and x axis labels for scatter plot
    plt.ylabel('Exam 2 score')
    plt.xlabel('Exam 1 score')

    admitted = np.where(y == 1)[0]
    not_admitted = np.where(y == 0)[0]

    # Plot all admitted students
    plt.scatter(X[admitted, :1],
                X[admitted, 1:],
                marker='+',
                label='Admitted',
                c='black')

    # Plot all non-admitted students
    plt.scatter(X[not_admitted, :1],
                X[not_admitted, 1:],
                marker='o',
                label='Not admitted',
                c='yellow',
                edgecolors='black')

    # Set legend for scatter plot
    plt.legend(loc='upper right', fontsize=8)

    # Show best fit line
    if theta.size != 0:
        if theta.size <= 3:
            x_coords = np.array([np.min(X[:, 1]), np.max(X[:, 1])])
            y_coords = (-1 / theta[2]) * (theta[0] + theta[1] * x_coords)
            plt.plot(x_coords, y_coords, 'b-', label='Decision boundary')
        else:
            # Here is the grid range
            u = np.linspace(-1, 1.5, 50)
            v = np.linspace(-1, 1.5, 50)

            z = np.zeros((u.size, v.size))
            # Evaluate z = theta*x over the grid
            for i, ui in enumerate(u):
                for j, vj in enumerate(v):
                    z[i, j] = np.dot(mapFeature(ui, vj), theta)

            z = z.T  # important to transpose z before calling contour
            # print(z)

            # Plot z = 0
            plt.contour(u, v, z, levels=[0], linewidths=2, colors='g')
            plt.contourf(u,
                         v,
                         z,
                         levels=[np.min(z), 0, np.max(z)],
                         cmap='Greens',
                         alpha=0.4)

    plt.show()
Example #15
def plotDecisionBoundary(theta, X, y):
    #PLOTDECISIONBOUNDARY Plots the data points X and y into a new figure with
    #the decision boundary defined by theta
    #   PLOTDECISIONBOUNDARY(theta, X,y) plots the data points with + for the
    #   positive examples and o for the negative examples. X is assumed to be
    #   either
    #   1) Mx3 matrix, where the first column is an all-ones column for the
    #      intercept.
    #   2) MxN, N>3 matrix, where the first column is all-ones

    # Plot Data
    plt, p1, p2 = pd.plotData(X[:, 1:3], y)

    if X.shape[1] <= 3:
        # Only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([min(X[:, 1]) - 2, max(X[:, 1]) + 2])

        # Calculate the decision boundary line
        plot_y = (-1.0 / theta[2]) * (theta[1] * plot_x + theta[0])

        # Plot, and adjust axes for better viewing
        p3 = plt.plot(plot_x, plot_y)

        # Legend, specific for the exercise
        plt.legend((p1, p2, p3[0]),
                   ('Admitted', 'Not Admitted', 'Decision Boundary'),
                   numpoints=1,
                   handlelength=0.5)
        plt.axis([30, 100, 30, 100])
    else:
        # Here is the grid range
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)

        z = np.zeros((len(u), len(v)))
        # Evaluate z = theta*x over the grid
        for i in range(len(u)):
            for j in range(len(v)):
                z[i, j] = np.dot(
                    mf.mapFeature(np.array([u[i]]), np.array([v[j]])), theta)
        z = z.transpose()  # important to transpose z before calling contour

        # Plot z = 0
        # Notice you need to specify the level 0
        # we get collections[0] so that we can display a legend properly
        p3 = plt.contour(u, v, z, levels=[0], linewidths=2).collections[0]

        # Legend, specific for the exercise
        plt.legend((p1, p2, p3), ('y = 1', 'y = 0', 'Decision Boundary'),
                   numpoints=1,
                   handlelength=0)

    return plt, p1, p2
def plotDecisionBoundary(theta, X, y, labels):
    #PLOTDECISIONBOUNDARY Plots the data points X and y into a new figure with
    #the decision boundary defined by theta
    #   PLOTDECISIONBOUNDARY(theta, X,y) plots the data points with + for the
    #   positive examples and o for the negative examples. X is assumed to be
    #   either
    #   1) Mx3 matrix, where the first column is an all-ones column for the
    #      intercept.
    #   2) MxN, N>3 matrix, where the first column is all-ones

    # Plot Data
    pos_handle, neg_handle = plotData(X[:, 1:3], y, labels)
    #hold on

    if X.shape[1] <= 3:
        # Only need 2 points to define a line, so choose two endpoints
        plot_x = [np.min(X[:, 1]) - 2, np.max(X[:, 1]) + 2]

        # Calculate the decision boundary line
        plot_y = np.dot((-1.0 / theta[2]),
                        (np.dot(theta[1], plot_x) + theta[0]))

        # Plot, and adjust axes for better viewing
        boundary_handle = plt.plot(plot_x, plot_y,
                                   label='Decision Boundary')[0]

        # Legend, specific for the exercise
        #axis([30, 100, 30, 100])
    else:
        # Here is the grid range
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)

        z = np.zeros((u.size, v.size))
        # Evaluate z = theta*x over the grid
        for i in range(u.size):
            for j in range(v.size):
                z[i,
                  j] = np.dot(mapFeature(np.array([u[i]]), np.array([v[j]])),
                              theta)
        z = z.T  # important to transpose z before calling contour

        # Plot z = 0
        # Notice you need to specify the range [0, 0]
        u, v = np.meshgrid(u, v)
        boundary_handle = plt.contour(u, v, z, [0], linewidths=2).collections[0]
        boundary_handle.set_label('Decision Boundary')
    #hold off
    return (pos_handle, neg_handle, boundary_handle)
def plotDecisionBoundary(theta, X, y):

#PLOTDECISIONBOUNDARY Plots the data points X and y into a new figure with
#the decision boundary defined by theta
#   PLOTDECISIONBOUNDARY(theta, X,y) plots the data points with + for the
#   positive examples and o for the negative examples. X is assumed to be
#   either
#   1) Mx3 matrix, where the first column is an all-ones column for the
#      intercept.
#   2) MxN, N>3 matrix, where the first column is all-ones

# Plot Data
    plotData(X[:,1:], y)

    if np.size(X, 1) <= 3:
        #Only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([np.min(X[:,1])-2,  np.max(X[:,1])+2])

        # Calculate the decision boundary line
        plot_y = (-1./theta[2])*(theta[1]*plot_x + theta[0])

        # Plot, and adjust axes for better viewing
        plt.plot(plot_x, plot_y)

    else:
        # Here is the grid range
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)

        grid_num_u = u.shape[0]
        grid_num_v = v.shape[0]

        z = np.zeros((grid_num_u, grid_num_v))
        # Evaluate z = theta*x over the grid
        for i in range(0,grid_num_u):
            for j in range(0,grid_num_v):
                z[i,j] = mapFeature(u[i], v[j]).dot(theta)


        z = z.T # important to transpose z before calling contour

        # Plot z = 0
        # Notice you need to specify the level [0]
        plt.contour(u, v, z, levels=[0])
def plotDecisionBoundary(theta, X, y):
    # plotDecisionBoundary plots the data points with + for the positive examples
    # and o for the negative examples. X is assumed to be either
    # 1) Mx3 matrix, where the first column is an all-ones column for the intercept.
    # 2) MxN, N>3 matrix, where the first column is all-ones

    # Plot the data

    plotData(X[:, 1:3], y)

    if X.shape[1] <= 3:
        # only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([np.min(X[:, 1]) - 2, np.max(X[:, 1]) + 2])

        # Calculate the decision boundary line

        plot_y = np.array((-1 / theta[2]) * (theta[1] * plot_x + theta[0]))

        # Plot and adjust axes for better viewing

        plt.plot(plot_x, plot_y)

        # Legend, specific

        # plt.legend(['Admitted', 'Not admitted', 'Decision Boundary'])
        plt.axis([30, 105, 30, 105])
    else:
        # Here is the grid range

        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)

        z = np.zeros((u.size, v.size))
        # Evaluate z = theta*x over the grid

        for i in range(u.size):
            for j in range(v.size):
                z[i, j] = np.dot(
                    mapFeature(u[i].reshape((1, 1)), v[j].reshape(1, 1)),
                    theta)

        z = z.T  # important to transpose z before calling contour

        # Plot z = 0
        # Notice you need to specify range [0,0]

        plt.contour(u, v, z, [0], linewidths=2)
def plotDecisionBoundary(theta, X, y):
    """PLOTDECISIONBOUNDARY Plots the data points X and y into a new figure with
     the decision boundary defined by theta
     PLOTDECISIONBOUNDARY(theta, X,y) plots the data points with + for the 
     positive examples and o for the negative examples. X is assumed to be 
     either
     1) Mx3 matrix, where the first column is an all-ones column for the 
         intercept.
     2) MxN, N>3 matrix, where the first column is all-ones"""

    if X.shape[1] <= 3:
        #Only need 2 points to define a line, so choose two endpoints
        plot_x = np.c_[np.min(X[:, 1]) - 2,
                       np.max(X[:, 1]) + 2]

        #Calculate the decision boundary line
        left = -1 / theta[2]
        right = theta[1] * plot_x + theta[0]
        plot_y = left * right

        #Plot, and adjust axes for better viewing
        line, = plt.plot(plot_x.flatten(),
                         plot_y.flatten(),
                         c='b',
                         label="Decision Boundary",
                         marker="*")

        plt.axis([30, 100, 30, 100])

        return line
    else:
        #Here is the grid range
        u = np.linspace(-1, 1.5, num=50)
        v = np.linspace(-1, 1.5, num=50)

        A, B = np.meshgrid(u, v)

        z = np.zeros((u.size, v.size))
        #Evaluate z = theta*x over the grid
        for i in range(u.size):
            for j in range(v.size):
                z[i, j] = np.dot(mapFeature(u[i], v[j]), theta)

        z = z.T  # important to transpose z before calling contour
        #Plot z = 0
        #Notice you need to specify the level [0]
        return plt.contour(A, B, z, levels=[0])
Example #20
def plotDecisionBoundary(theta, X, y):
    #PLOTDECISIONBOUNDARY Plots the data points X and y into a new figure with
    #the decision boundary defined by theta
    #   PLOTDECISIONBOUNDARY(theta, X,y) plots the data points with + for the
    #   positive examples and o for the negative examples. X is assumed to be
    #   either
    #   1) Mx3 matrix, where the first column is an all-ones column for the
    #      intercept.
    #   2) MxN, N>3 matrix, where the first column is all-ones

    # Plot Data
    fig = plotData(X[:,1:], y)

    if size(X, 1) <= 3:
        # Only need 2 points to define a line, so choose two endpoints
        plot_x = array([min(X[:,1])-2,  max(X[:,1])+2])

        # Calculate the decision boundary line
        plot_y = (-1/theta[2]) * (theta[1] * plot_x + theta[0])

        # Plot, and adjust axes for better viewing
        plot(plot_x, plot_y)

        # Legend, specific for the exercise
        legend(('Admitted', 'Not admitted', 'Decision Boundary'), numpoints=1)
        axis([30, 100, 30, 100])
    else:
        # Here is the grid range
        u = linspace(-1, 1.5, 50)
        v = linspace(-1, 1.5, 50)

        z = zeros((len(u), len(v)))
        # Evaluate z = theta*x over the grid
        for i in range(len(u)):
            for j in range(len(v)):
                z[i,j] = dot(mapFeature(u[i], v[j]), theta)

        z = z.T # important to transpose z before calling contour

        # Plot z = 0
        # Notice you need to specify the level as [0]
        contour(u, v, z, [0], linewidths=2)


    return fig
def plotDecisionBoundary(theta, X, y, degree):
    #PLOTDECISIONBOUNDARY Plots the data points X and y into a new figure with
    #the decision boundary defined by theta
    #   PLOTDECISIONBOUNDARY(theta, X,y) plots the data points with + for the
    #   positive examples and o for the negative examples. X is assumed to be
    #   either
    #   1) Mx3 matrix, where the first column is an all-ones column for the
    #      intercept.
    #   2) MxN, N>3 matrix, where the first column is all-ones

    # plot Data
    index0 = np.where(y == 0)[0]
    index1 = np.where(y == 1)[0]

    plt.plot(X[index0, 1], X[index0, 2], 'ro')
    plt.plot(X[index1, 1], X[index1, 2], 'g+')

    if X.shape[1] <= 3:
        # only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([np.amin(X[:, 1]) - 2, np.amax(X[:, 1]) + 2])

        # calculate the decision boundary line
        plot_y = (-1 / theta[2]) * (theta[1] * plot_x + theta[0])

        # plot, and adjust axes for better viewing
        plt.plot(plot_x, plot_y)

        # legend, specific for the exercise
        plt.legend(['Admitted', 'Not admitted', 'Decision Boundary'])
        plt.axis([30, 100, 30, 100])

    else:
        # here is the grid range
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)
        z = np.zeros((len(u), len(v)))

        # evaluate z = theta*x over the grid
        for i in range(len(u)):
            for j in range(len(v)):
                model = mapFeature(u[i], v[j], degree)
                z[i, j] = np.dot(model, theta)

        z = z.T
        plt.contour(u, v, z, levels=[0])
def plotDecisionBoundary(theta, X, y):
    '''Plots the data points X and y into a new figure with the decision boundary defined by theta'''
#   plots the data points with + for the
#   positive examples and o for the negative examples. X is assumed to be
#   either
#   1) Mx3 matrix, where the first column is an all-ones column for the
#      intercept.
#   2) MxN, N>3 matrix, where the first column is all-ones

# Plot Data
    if X.shape[1] <= 3:
        # Only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([np.min(X[:, 1])-2, np.max(X[:, 1])+2])

        # Calculate the decision boundary line
        # plot_y = (-1./theta(3)).*(theta(2).*plot_x + theta(1))
        plot_y = -(plot_x*theta[1]+theta[0])/theta[2]

        # Plot, and adjust axes for better viewing
        plt.plot(plot_x, plot_y, label='Decision Boundary')

        # Legend, specific for the exercise
        plt.axis([30, 100, 30, 100])

    else:
        # Here is the grid range
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)

        z = np.zeros((len(u), len(v)))
        # Evaluate z = theta*x over the grid
        # for i = 1:length(u)
        #     for j = 1:length(v)
        #         z(i,j) = mapFeature(u(i), v(j))*theta
        #     end
        # end

        for i in range(len(u)):
            for j in range(len(v)):
                z[i, j] = mapFeature(np.array(u[i]), np.array(v[j])).dot(theta)
        u, v = np.meshgrid(u, v)
        z = z.T  # important to transpose z before calling contour

        # Plot z = 0
        # Notice you need to specify the range [0, 0]
        plt.contour(u, v, z, levels=[0])
Example #23
def plotDecisionBoundary(theta, X, y):
    
    if(np.size(X, 1) <= 3):
        plot_x = np.array([np.min(X[:, 1]) - 2, np.max(X[:, 1]) + 2])
        plot_y = (-1 / theta[2]) * (theta[1] * plot_x + theta[0])
        plt.plot(plot_x, plot_y)
    else:
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)
        
        z = np.zeros((len(u), len(v)))
        
        for i,ui in enumerate(u):
            for j,vi in enumerate(v):
                z[i, j] = mapFeature(ui.reshape(-1,1), vi.reshape(-1,1)) @ theta
                
        plt.contour(u, v, z.T, levels=[0])
        return z
def plotDecisionBoundary(theta, X, y):
    # Plots the data points X and y into a new figure with
    # the decision boundary defined by theta
    # ------------------------------
    # This function plots the data points with + for the
    # positive examples and o for the negative examples.
    # X is assumed to be either
    # 1) Mx3 Matrix, where the first column is an all-ones
    #    column for the intercept
    # 2) MxN, N > 3 matrix, where the first column is all-ones

    # plot data
    plotData(X[:, 1:], y)

    if X.shape[1] <= 3:
        # Only need 2 points to define line, so choose two endpoints
        plot_x = np.array([[np.min(X[:, 1]) - 2], [np.max(X[:, 1]) + 2]])

        # Calculate the decision boundary line
        plot_y = -1. / theta[2] * (theta[1] * plot_x + theta[0])

        # Plot, and adjust axes for better viewing
        plt.plot(plot_x, plot_y)

    else:
        # Here is the grid range
        u = np.linspace(-1, 1.25, 50)
        v = np.linspace(-1, 1.25, 50)

        u = u.reshape((50, 1))
        v = v.reshape((50, 1))

        z = np.zeros((len(u), len(v)))
        # Evaluate z = theta*x over the grid
        for i in range(len(u)):
            for j in range(len(v)):
                f = mapFeature(u[i], v[j])
                f = f.reshape((len(f), 1))

                z[i][j] = f.T.dot(theta)

        uu, vv = np.meshgrid(u, v)

        plt.contour(uu, vv, z.T, levels=[0])  # transpose so rows follow v
Example #25
def plotDecisionBoundary(theta, X, y):
    PD.plotData(X[:,[1,2]], y)
    if X.shape[1] <= 3:
        slope     = -theta[1]/theta[2]
        intercept = -theta[0]/theta[2]
        plot_x    = np.array([min(X[:,1])-2, max(X[:,1])+2])  # x-coordinates of the endpoints of the decision line
        plot_y    = slope*plot_x + intercept  # decision line
        plt.plot(plot_x, plot_y, c="orange", label="decision boundary")
        plt.legend()
    else:
        u_vals = np.linspace(-1, 1.5, 50)
        v_vals = np.linspace(-1, 1.5, 50)
        z = np.zeros((len(u_vals), len(v_vals)))
        for i in range(len(u_vals)):
            for j in range(len(v_vals)):
                X1 = np.array([u_vals[i]])
                X2 = np.array([v_vals[j]])
                z[i, j] = MP.mapFeature(X1,X2)@theta
        plt.contour(u_vals, v_vals, z.T, levels=[0])
def plotDecisionBoundary(X, y, theta):

    if X.shape[1] <= 3:
        plotData(X[:, 1:], y)
        plot_x = np.array([[np.min(X[:, 1]), np.max(X[:, 1])]])
        plot_y = (-1./theta[2])*(theta[1]*plot_x + theta[0])
        plt.plot(np.squeeze(plot_x), np.squeeze(plot_y))
    else:
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)
        z = np.zeros((u.shape[0], v.shape[0]))


        for i in range(u.shape[0]):
            for j in range(v.shape[0]):
                z[i, j] = mapFeature( np.array( [[u[i]]] ) ,  np.array( [[v[j]]]  ) ).dot(theta)

        cset = plt.contour(u, v, z.T, levels=[0])
        cset.collections[0].set_label('Decision Boundary')
def plotDecisionBoundary(ax, theta, X, y):
    '''
    %PLOTDECISIONBOUNDARY Plots the data points X and y into a new figure with
    %the decision boundary defined by theta
    %   PLOTDECISIONBOUNDARY(theta, X,y) plots the data points with + for the 
    %   positive examples and o for the negative examples. X is assumed to be 
    %   either
    %   1) Mx3 matrix, where the first column is an all-ones column for the 
    %      intercept.
    %   2) MxN, N>3 matrix, where the first column is all-ones
    '''

    if X.shape[1] <= 3:
        # Only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([min(X[:, 1]), max(X[:, 1])])

        # Calculate the decision boundary line
        plot_y = (-1. / theta[2]) * (theta[1] * plot_x + theta[0])

        # Plot, and adjust axes for better viewing
        ax.plot(plot_x, plot_y)

        # Legend, specific for the exercise
        plt.legend(['Admitted', 'Not admitted'],
                   loc='upper right',
                   fontsize='x-small',
                   numpoints=1)
        plt.axis([30, 100, 30, 100])
    else:
        # Here is the grid range
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)
        z = np.array([
            mapFeature(u[i], v[j]).dot(theta) for i in range(len(u))
            for j in range(len(v))
        ])
        #Reshape to get a 2D array; transpose so rows correspond to v
        z = np.reshape(z, (50, 50)).T

        #Draw the plot
        plt.contour(u, v, z, levels=[0.0])
def plot(data, theta):
    
    t1 = np.arange(-1,1.5,0.01)
    t2 = np.arange(-1,1.5,0.01)

    T1, T2 = np.meshgrid(t1,t2)

    T1_flat = T1.ravel()
    T2_flat = T2.ravel()

    pontos = np.array([T1_flat, T2_flat]).T

    X_pontos = mapFeature(pontos[:,0],pontos[:,1], 6)

    y_pred = predict(X_pontos,theta)

    Z = np.array(y_pred, ndmin=2)
    Z = Z.reshape(T1.shape)

    plot_ex2data2.plot(data)
    plt.contour(T1, T2, Z, levels=[0], linewidths=2, colors='g',alpha=0.8)
Example #30
def plotDecisionBoundary(theta, X, y, legend, label, title=None):
    plt.ion()
    plotData(X[:, 1:], y, legend, label)
    n = X.shape[1]
    if n <= 3:
        plot_x = np.array([X[:, 1].min(axis=0) - 2, X[:, 1].max(axis=0) + 2])
        plot_y = -1 / theta[2] * (theta[1] * plot_x + theta[0])
        plt.plot(plot_x, plot_y, label='Decision Boundary')
    else:
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)
        z = np.zeros([len(u), len(v)])
        for i in range(len(u)):
            for j in range(len(v)):
                z[i, j] = mapFeature(u[i], v[j], 1) @ theta
        plt.contour(u, v, z.T, [0], linewidths=2)
    if title:
        plt.title(title)
    plt.legend()
    plt.ioff()
    plt.show()
    plt.close('all')
def plotDecisionBoundary(theta, X, y):
    # importing some useful modules
    import plotData as pD
    # linear algebra computations using numpy
    import numpy as np
    # plotting module
    import pylab as plt
    import mapFeature as mF
    # plot the data
    pD.plotData(X[:, 1:3], y)
    # getting the shape of the matrix X
    m, n = np.shape(X)
    # starting an interactive mode in pylab
    plt.ion()

    if n <= 3:
        # Only need 2 points to define a line, so choose two endpoints
        plot_x = np.min(X[:, 1]) - 2, np.max(X[:, 1]) + 2
        # plot
        plot_y = np.dot((-1. / theta[2]), np.dot(theta[1], plot_x) + theta[0])
        plt.plot(plot_x, plot_y)
    else:
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)

        z = np.zeros((len(u), len(v)))

        for i in range(len(u)):
            for j in range(len(v)):
                z[i, j] = np.dot(mF.mapFeature(u[i], v[j]), theta)

        # plotting a contour for the decision boundary; z is transposed
        cs = plt.contour(u, v, np.transpose(z), levels=[0])
        cs.collections[0].set_label('decision')
Example #32
def plotDecisionBoundary(theta, X, y):
    f2 = plotData(X[:, 1:], y)
    # print(X[:, 1:])
    m, n = X.shape
    if n <= 3:
        # Only need 2 points to define a line, so choose two endpoints
        minVals = X[:, 1].min(0) - 2
        maxVals = X[:, 1].max(0) + 2
        plot_x = np.array([minVals, maxVals])
        plot_y = (-1 / theta[2]) * (plot_x.dot(theta[1]) + theta[0])
        f2.plot(plot_x, plot_y, label="Test Data", color='b')
        plt.show()

    else:
        # Here is the grid range
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)

        z = np.zeros((len(u), len(v)))

        for i in range(len(u)):
            for j in range(len(v)):
                z[i, j] = np.dot(mapFeature.mapFeature(u[i], v[j]), theta)

        z = z.T  # transpose before calling contour
        plt.contour(u, v, z, levels=[0])
        plt.show()
Example #33
def plotDecisionBoundary(theta, X, y):

    theta = np.array(theta)

    if X.shape[1] <= 3:
        plot_x = np.array([np.min(X[:, 1]) - 2, np.max(X[:, 1]) + 2])

        # compute the decision boundary line
        plot_y = (-1. / theta[2]) * (theta[1] * plot_x + theta[0])

        # plot, and adjust axes
        plt.plot(plot_x, plot_y)

        # legend
        plt.legend(['Admitted', 'Not admitted', 'Decision boundary'])
        plt.xlim([30, 100])
        plt.ylim([30, 100])
    else:
        # grid range
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-0.8, 1.2, 50)

        z = np.zeros((u.size, v.size))

        for i, ui in enumerate(u):
            for j, vj in enumerate(v):
                z[i, j] = np.dot(mapFeature(ui, vj), theta)

        z = z.T

        plt.contour(u, v, z, levels=[0], linewidths=2, colors='g')
        plt.contourf(u,
                     v,
                     z,
                     levels=[np.min(z), 0, np.max(z)],
                     cmap='Greens',
                     alpha=0.4)
Beispiel #34
0
def plotDecisionBoundary(theta, X, y):

    # Plot Data
    plotData(X[:, 1:3], y)

    if size(X, 1) <= 3:
        # Only need 2 points to define a line, so choose two endpoints
        plot_x = array([min(X[:, 1]) - 2, max(X[:, 1]) + 2])

        # Calculate the decision boundary line
        plot_y = (-1 / theta[2]) * (theta[1] * plot_x + theta[0])

        # Plot, and adjust axes for better viewing
        plt.plot(plot_x, plot_y)

        # Legend, specific for the exercise
        plt.legend(['Admitted', 'Not admitted', 'Decision Boundary'])
        plt.xlim([30, 100])
        plt.ylim([30, 100])
    else:
        # Here is the grid range
        u = linspace(-1, 1.5, 50)
        v = linspace(-1, 1.5, 50)

        z = zeros((u.size, v.size))
        # Evaluate z = theta*x over the grid

        for i in range(size(u, 0)):
            for j in range(size(v, 0)):
                z[i, j] = mapFeature(array([u[i]]), array([v[j]])) @ theta


        z = z.T  # important to transpose z before calling contour

        # Plot z = 0
        # Notice you need to specify the range [0, 0]
        plt.contour(u, v, z, levels=[0], linewidths=2, colors='c')
def plotDecisionBoundary(theta, X, y):
    """Plots the data points with + for the positive examples and o for the
    negaive examples. X is assumed to be a either
    1) Mx3 matrix, where the first column is an all-ones column for the
       intercept.
    2) MxN, N>3 matrix, where the first column is all-ones
    """
    # Plot Data
    plotData(X[:, 1:], y)
    plt.ion()

    if X.shape[1] <= 3:
        # Only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([min(X[:, 1]) - 2, max(X[:, 1]) + 2])

        # Calculate the decision boundary line
        plot_y = (-1 / theta[2]) * (theta[1] * plot_x + theta[0])

        # Plot
        plt.plot(plot_x, plot_y)
    else:
        # Here is the grid range
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)

        z = np.zeros((len(u), len(v)))
        # Evaluate z = theta*x over the grid
        for i, u_ in enumerate(u):
            for j, v_ in enumerate(v):
                z[i, j] = mapFeature(u_, v_) @ theta

        z = z.T  # important to transpose z before calling contour
        # Plot z = 0
        # Notice you need to specify the range [0, 0]
        plt.contour(u, v, z, levels=[0], linewidths=2)

    plt.ioff()
Example #36
def main():
    config_path = './config.json'
    model_path = './model.json'
    names_path = './price.json'
    accuracy_path = './accuracy.json'

    config = pd.read_json(config_path, typ='series')

    data_path = config['Dataset']
    theta = np.array([config['Theta']]).T
    alpha = config['Alpha']
    lamb = 0
    num_iter = config['NumIter']

    with open(data_path, 'rb') as csvfile:
        data = np.loadtxt(csvfile, delimiter=',')
        new_data = mF.mapFeature(data[:, 0], data[:, 1])
    length = len(new_data)
    X = np.array(new_data)
    y = np.array(data[:, -1]).reshape((length, 1))

    theta = gradientDescent(X, y, theta, lamb, alpha, num_iter)

    THETA = theta.T
    THETA = THETA.tolist()
    model = {'Cost': computeCost(X, y, theta, lamb), 'Theta': THETA}
    MODEL = json.dumps(model)
    with open(model_path, 'w') as fm:
        fm.write(MODEL)

    accuracy = {'Accuracy': precison(y, predict(X, theta))}
    ACCURACY = json.dumps(accuracy)
    with open(accuracy_path, 'w') as fa:
        fa.write(ACCURACY)

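The ./config.json this script expects is not shown anywhere in the listing. A plausible shape, inferred purely from the keys read above (values are illustrative; 'Theta' must supply one initial parameter per column of the mapped feature matrix, 28 for the usual degree-6 mapFeature):

import json

config = {
    'Dataset': 'ex2data2.txt',  # path to the training data
    'Theta': [0.0] * 28,        # initial parameters, one per mapped feature
    'Alpha': 0.01,              # learning rate
    'NumIter': 400,             # gradient-descent iterations
}
with open('./config.json', 'w') as f:
    json.dump(config, f)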
def plotDecisionBoundary(theta, X, y):
    plt.figure()
    plotData(X[:, 1:], y)

    if X.shape[1] <= 3:
        #Only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([min(X[:, 1]), max(X[:, 1])])

        #Calculate the decision boundary line
        #theta0 + theta1*x1 + theta2*x2 = 0
        #y=mx+b is replaced by x2 = (-1/theta2)*(theta0 + theta1*x1)
        plot_y = (-1 / theta[2]) * (theta[1] * plot_x + theta[0])
        plt.plot(plot_x, plot_y)
    else:
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)
        z = np.zeros((len(u), len(v)))
        for i in range(len(u)):
            for j in range(len(v)):
                z[i,
                  j] = np.dot(mapFeature(np.array([u[i]]), np.array([v[j]])),
                              theta)
        z = np.transpose(z)
        plt.contour(u, v, z, levels=[0], linewidths=2)
import numpy as np
from scipy.optimize import fmin_bfgs
from plotData import plotData
from plotDecisionBoundary import plotDecisionBoundary
from mapFeature import mapFeature
from costFunctionReg import costFunctionReg
from predict import predict

data = np.loadtxt("ex2data2.txt", usecols=(0,1,2), delimiter=',',dtype=None)

X = data[:, 0:2]
y = data[:, 2]
y = y[:, np.newaxis]
l = 1

m, n = X.shape
plotData(X, y)

X = mapFeature(X[:, 0][np.newaxis].T, X[:, 1][np.newaxis].T)
m, n = X.shape

theta = np.zeros((1, n))

#find out why there is such a huge difference between fmin and fmin_bfgs
#fmin gives a totally wrong result
options = {'full_output': True, 'retall': True}
theta, cost, _, _, _, _, _, allvecs = fmin_bfgs(lambda t: costFunctionReg(X, y, t, l), theta, maxiter=400, **options)
plotDecisionBoundary(X, y, theta)
#plt.show()

p = predict(X, theta[np.newaxis])[np.newaxis]
print(np.mean(p.T == y) * 100)
import numpy as np
import readData
import plotData
from computeCost import computeRegularizedCost
from gradientDescent import regularizedLogisiticDeriv
from mapFeature import mapFeature
from scipy.optimize import fmin_bfgs

if __name__=="__main__":
    (x,y,nexamples) = readData.readSecond()

    #plotData.plotPoints(x,y)

    degree = 6

    (X,nfeatures) = mapFeature(x,degree)

    theta = np.zeros(nfeatures+1)

    lam = 1

    # should return 0.693 for the second data set
    print(computeRegularizedCost(theta, X, y, lam))

    # this code is only used for my gradient descent
    # which converges quite slowly compared to bfgs method
    #iterations = 100000
    #alpha = 0.001
    #gradientDescent.gradientDescent(X,y,theta,alpha,iterations)
    theta=fmin_bfgs(computeRegularizedCost,theta,fprime=regularizedLogisiticDeriv,args=(X,y,lam))
from cost_function import cost_function
from batch_gradient_update import batch_gradient_update
from sigmoid_function import sigmoid_function
from featureNormalize import featureNormalize
import numpy as np
from mapFeature import mapFeature
from regularized_cost_function import regularize_cost_function
import scipy as sp
X,y=read_data("ex2data2.txt")
# after featureNormalize the accuracy could reach 89%
X,X_mu,X_sigma=featureNormalize(X)

#plot_data(X,y)
y=np.reshape(y,(y.size,1))
#*********** mapFeature 2D-->28D
X=mapFeature(X)
X=np.concatenate((np.ones([len(X[0,:]),1]),X.T),axis=1)
llambda=1
m,n=X.shape
initial_theta=np.zeros([n,1])
cost,grad=regularize_cost_function(initial_theta,X,y,llambda)
theta=batch_gradient_update(initial_theta,X,y,llambda)
print(theta)
prob=sigmoid_function(np.dot(X,theta))
print(prob)
prob[prob>0.5]=1.0
prob[prob<0.5]=0.0
print(prob)
y=np.reshape(y,prob.shape)
print("accuracy:", tuple(1-sum(abs(prob-y))/100))
Example #41
    # =========== Part 1: Regularized Logistic Regression ============
    # In this part, you are given a dataset with data points that are not
    # linearly separable. However, you would still like to use logistic
    # regression to classify the data points.
    #
    # To do so, you introduce more features to use -- in particular, you add
    # polynomial features to our data matrix (similar to polynomial
    # regression).
    #

    # Add Polynomial Features

    # Note that mapFeature also adds a column of ones for us, so the intercept
    # term is handled
    X = mapFeature(X[:, 0], X[:, 1])

    # Initialize fitting parameters
    initial_theta = np.zeros(X.shape[1])

    # Set regularization parameter lambda to 1
    lmbda = 1

    # Compute and display initial cost and gradient for regularized logistic
    # regression
    cost, grad = costFunctionReg(initial_theta, X, y, lmbda)

    print('Cost at initial theta (zeros): %f' % cost)

    input('Program paused. Press enter to continue.\n')
Example #42
if __name__ == "__main__":
    # Load Data
    filename = 'data/data2.dat'
    data = loadtxt(filename, delimiter=',')
    X = data[:, 0:2]
    y = np.array([data[:, 2]]).T
    n,d = X.shape
    
    # Standardize the data
    mean = X.mean(axis=0)
    std = X.std(axis=0)
    X = (X - mean) / std
    
    # map features into a higher dimensional feature space
    X = mapFeature(X[:,0],X[:,1])
    
    # train logistic regression
    logregModel = LogisticRegression()
    logregModel.fit(X,y)
    
    
    # reload the data for 2D plotting purposes
    data = loadtxt(filename, delimiter=',')
    PX = data[:, 0:2]
    y = data[:, 2]
    
    # Standardize the data
    mean = PX.mean(axis=0)
    std = PX.std(axis=0)
    PX = (PX - mean) / std
plotData(X, y, xlabel='Microchip Test 1', ylabel='Microchip Test 2',
         legends=['y = 1', 'y = 0'])


# =========== Part 1: Regularized Logistic Regression ============
# In this part, you are given a dataset with data points that are not
# linearly separable. However, you would still like to use logistic
# regression to classify the data points.
#
# To do so, you introduce more features to use -- in particular, you add
# polynomial features to our data matrix (similar to polynomial
# regression).

# Note that mapFeature also adds a column of ones for us, so the intercept
# term is handled
X = mapFeature(X[:, [0]], X[:, [1]])

# Initialize fitting parameters
initial_theta = np.zeros(X.shape[1])

# Set regularization parameter lambda to 1
lamda = 1

cost, _ = costFunctionReg(initial_theta, X, y, lamda)
print('Cost at initial theta (zeros):', cost)


# ============= Part 2: Regularization and Accuracies =============
# Optional Exercise:
# In this part, you will get to try different values of lambda and
# see how regularization affects the decision boundary
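A sketch of the optimization and plotting step that usually follows this comment, hedged: it assumes costFunctionReg returns (cost, gradient) as in the call above, and reuses the plotDecisionBoundary variant from Example #7 that takes axis labels and legends:

from scipy.optimize import minimize

res = minimize(lambda t: costFunctionReg(t, X, y, lamda)[0],
               initial_theta, method='BFGS', options={'maxiter': 400})
theta = res.x

plotDecisionBoundary(theta, X, y,
                     xlabel='Microchip Test 1', ylabel='Microchip Test 2',
                     legends=['y = 1', 'y = 0', 'Decision boundary'])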
def plotDecisionBoundary(theta, X, y):
#PLOTDECISIONBOUNDARY Plots the data points X and y into a new figure with
#the decision boundary defined by theta
#   PLOTDECISIONBOUNDARY(theta, X,y) plots the data points with + for the 
#   positive examples and o for the negative examples. X is assumed to be 
#   either
#   1) Mx3 matrix, where the first column is an all-ones column for the 
#      intercept.
#   2) MxN, N>3 matrix, where the first column is all-ones

    import matplotlib.pyplot as plt
    import numpy as np
    import mapFeature as mf
    import plotData as pd

    # Plot Data
    fig = plt.figure()

    plt, p1, p2 = pd.plotData(X[:,1:3], y)


    if X.shape[1] <= 3:
        # Only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([min(X[:,1])-2,  max(X[:,1])+2])

        # Calculate the decision boundary line
        plot_y = (-1./theta[2])*(theta[1]*plot_x + theta[0])

        # Plot, and adjust axes for better viewing
        p3 = plt.plot(plot_x, plot_y)
        
        # Legend, specific for the exercise
        plt.legend((p1, p2, p3[0]), ('Admitted', 'Not Admitted', 'Decision Boundary'), numpoints=1, handlelength=0.5)

        plt.axis([30, 100, 30, 100])

        plt.show(block=False)
    else:
        # Here is the grid range
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)

        z = np.zeros(( len(u), len(v) ))
        # Evaluate z = theta*x over the grid
        for i in range(len(u)):
            for j in range(len(v)):
                z[i,j] = np.dot(mf.mapFeature(np.array([u[i]]), np.array([v[j]])),theta)
        z = np.transpose(z) # important to transpose z before calling contour

        # Plot z = 0
        # Notice you need to specify the level 0
        # we get collections[0] so that we can display a legend properly
        p3 = plt.contour(u, v, z, levels=[0], linewidths=2).collections[0]
        
        # Legend, specific for the exercise
        plt.legend((p1,p2, p3),('y = 1', 'y = 0', 'Decision Boundary'), numpoints=1, handlelength=0)

        plt.show(block=False)

Example #45
## =========== Part 1: Regularized Logistic Regression ============
#  In this part, you are given a dataset with data points that are not
#  linearly separable. However, you would still like to use logistic
#  regression to classify the data points.
#
#  To do so, you introduce more features to use -- in particular, you add
#  polynomial features to our data matrix (similar to polynomial
#  regression).
#

# Add Polynomial Features

# Note that mapFeature also adds a column of ones for us, so the intercept
# term is handled
X = mapFeature(X[:,0], X[:,1])

# Initialize fitting parameters
initial_theta = zeros(size(X, 1))

# Set regularization parameter lambda to 1
lambda_ = 1.

# Compute and display initial cost and gradient for regularized logistic
# regression
cost, grad = costFunctionReg(initial_theta, X, y, lambda_)

print('Cost at initial theta (zeros): %f' % cost)

print('\nProgram paused. Press enter to continue.')
input()
Example #46
# Note that mapFeature also adds a column of ones for us, so the intercept term is handled
X = mF.mapFeature(X[:,0], X[:,1])

# Initialize fitting parameters
m, n = np.shape(X)
# initial guess
initial_theta = np.zeros((n, 1), dtype=float)
# regularization parameter; this can be changed
labda = 1.
# import the optimization module and the regularized cost function
from scipy import optimize
import costFunctionReg as cFR
# getting the cost from the function
cost = cFR.costFunctionReg(initial_theta, X, y, labda)
# optimize using BFGS; jac=False means no analytic Jacobian is supplied
res = optimize.minimize(cFR.costFunctionReg, initial_theta, args=(X, y, labda),
                        method='BFGS', jac=False, options={'maxiter': 400})