Example no. 1
def visualizeBoundary(X, y, model):
    '''
    VISUALIZEBOUNDARY(X, y, model) plots a non-linear decision
    boundary learned by the SVM and overlays the data on it
    '''

    import numpy as np
    from plotData import plotData
    import matplotlib.pyplot as plt
    from sklearn import svm
    from gaussianKernelGramMatrix import gaussianKernelGramMatrix

    # Make classification predictions over a grid of values
    # Meshgrid return coordinate matrices from coordinate vectors
    x_min, x_max = X[:, 0].min(), X[:, 0].max()
    y_min, y_max = X[:, 1].min(), X[:, 1].max()
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
                         np.linspace(y_min, y_max, 100))

    Z = model.predict(
        gaussianKernelGramMatrix(np.c_[xx.ravel(), yy.ravel()], X))

    # alternative without the Gaussian Matrix implementation
    #Z = model.predict(np.c_[xx.ravel(), yy.ravel()])

    Z = Z.reshape(xx.shape)

    plotData(X, y)
    plt.contour(xx, yy, Z, colors='b', levels=[0.5])
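Example no. 1 feeds model.predict a precomputed Gram matrix, so it assumes the SVM was trained with kernel='precomputed' against the same training set X. The imported helper is not listed anywhere in these examples; a minimal sketch of what it plausibly computes (a Gaussian/RBF Gram matrix; the sigma default is an assumption):

import numpy as np

def gaussianKernelGramMatrix(X1, X2, sigma=0.1):
    """Gram matrix K[i, j] = exp(-||x1_i - x2_j||^2 / (2 sigma^2))."""
    K = np.zeros((X1.shape[0], X2.shape[0]))
    for i, x1 in enumerate(X1):
        for j, x2 in enumerate(X2):
            K[i, j] = np.exp(-np.sum((x1 - x2) ** 2) / (2.0 * sigma ** 2))
    return K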
Example no. 2
def visualizeBoundary(X, y, model):
    """
    Plots a non-linear decision boundary learned by the SVM and overlays the data on it.
    Parameters
    ----------
    X : array_like
        (m x 2) The training data with two features (to plot in a 2-D plane).
    y : array_like
        (m, ) The data labels.
    model : dict
        Dictionary of model variables learned by SVM.
    """
    plotData(X, y)

    # make classification predictions over a grid of values
    x1plot = np.linspace(min(X[:, 0]), max(X[:, 0]), 100)
    x2plot = np.linspace(min(X[:, 1]), max(X[:, 1]), 100)
    X1, X2 = np.meshgrid(x1plot, x2plot)

    vals = np.zeros(X1.shape)
    for i in range(X1.shape[1]):
        this_X = np.stack((X1[:, i], X2[:, i]), axis=1)
        vals[:, i] = svmPredict(model, this_X)

    plt.contour(X1, X2, vals, colors='y', linewidths=2)
    plt.pcolormesh(X1,
                   X2,
                   vals,
                   cmap='YlGnBu',
                   alpha=0.25,
                   edgecolors='None',
                   lw=0)
    plt.grid(False)
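This variant relies on an svmPredict helper that evaluates the dict-based model over each grid column; that helper is never listed here. A minimal sketch for the linear-kernel case (the key names 'w' and 'b' follow the other examples; the course's full version also handles Gaussian-kernel models by summing kernel evaluations over the support vectors):

import numpy as np

def svmPredict(model, X):
    """Predicts 0/1 labels from a linear SVM stored as {'w': ..., 'b': ...}."""
    scores = X @ np.asarray(model['w']).reshape(-1, 1) + model['b']
    return (scores.ravel() >= 0).astype(float)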
Example no. 3
def plotDecisionBoundary(theta, X, y):

    # Create New Figure
    plotData(X[:, 1:3], y.T[0])

    if X.shape[1] <= 3:
        # Only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([np.min(X[:, 1]), np.max(X[:, 1])])

        # Calculate the decision boundary line
        plot_y = (-1 / theta[2]) * (theta[1] * plot_x + theta[0])

        # Plot, and adjust axes for better viewing
        plt.plot(plot_x, plot_y)

        # Legend, specific for the exercise
        plt.legend(['Admitted', 'Not admitted', 'Decision Boundary'])
        plt.axis([30, 100, 30, 100])
    else:
        # Here is the grid range
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)

        z = np.zeros((u.shape[0], v.shape[0]))
        # Evaluate z = theta*x over the grid
        for i in range(0, u.shape[0]):
            for j in range(0, v.shape[0]):
                z[i, j] = np.dot(theta.T, mapFeature(u[i], v[j]))

        # !!! important for plot
        u, v = np.meshgrid(u, v)

        # Plot z = 0
        # Notice you need to specify the range [0, 0]
        plt.contour(u, v, z.T, (0, ), colors='g', linewidths=2)
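The polynomial branch above dots theta against mapFeature(u[i], v[j]), which is also not listed. A sketch of the usual degree-6 polynomial feature mapping from the course exercises (the degree parameter is an assumption; theta must have one entry per generated term, 28 for degree 6):

import numpy as np

def mapFeature(x1, x2, degree=6):
    """Maps two features to all polynomial terms up to the given degree,
    starting with the bias term: 1, x1, x2, x1^2, x1*x2, x2^2, ..."""
    x1 = np.atleast_1d(x1).ravel()
    x2 = np.atleast_1d(x2).ravel()
    out = [np.ones_like(x1)]
    for i in range(1, degree + 1):
        for j in range(i + 1):
            out.append((x1 ** (i - j)) * (x2 ** j))
    return np.stack(out, axis=1)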
Example no. 4
def visualizeBoundary(X, y, model):
    #VISUALIZEBOUNDARY plots a non-linear decision boundary learned by the SVM
    #   VISUALIZEBOUNDARY(X, y, model) plots a non-linear decision
    #   boundary learned by the SVM and overlays the data on it

    # Plot the training data on top of the boundary
    plotData(X, y)

    # Make classification predictions over a grid of values
    x1plot = np.linspace(min(X[:, 0]), max(X[:, 0]), 100)
    x2plot = np.linspace(min(X[:, 1]), max(X[:, 1]), 100)
    X1, X2 = np.meshgrid(x1plot, x2plot)
    vals = np.zeros(X1.shape)
    for i in range(X1.shape[1]):
        this_X = np.stack((X1[:, i], X2[:, i]), axis=1)
        vals[:, i] = svmPredict(model, this_X)
    # Plot the SVM boundary
    plt.contour(X1, X2, vals, colors='y', linewidths=2)
    plt.pcolormesh(X1,
                   X2,
                   vals,
                   cmap='YlGnBu',
                   alpha=0.25,
                   edgecolors='None',
                   lw=0)
    plt.grid(False)
def visualizeBoundaryLinear(X, y, model):
    
    w = model['w']
    b = model['b']
    xp = np.linspace(np.min(X[:,0]), np.max(X[:,0]), 100).reshape(-1,1)
    yp = -(w[0]*xp + b)/w[1]
    plotData(X, y.T[0])
    plt.plot(xp, yp, 'b-')
Example no. 9
def visualizeBoundaryLinear(X, y, model):
    """plots a linear decision boundary
    learned by the SVM and overlays the data on it
    """

    w = model.coef_.flatten()
    b = model.intercept_.flatten()
    xp = np.linspace(min(X[:, 0]), max(X[:, 0]), 100)
    yp = -(w[0] * xp + b) / w[1]
    plotData(X, y)
    plt.plot(xp, yp, '-b')
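Unlike the dict-based variants, this one reads coef_ and intercept_, i.e. it expects a fitted scikit-learn estimator with a linear kernel (those attributes only exist for linear models). A short usage sketch, with X and y standing in for the loaded dataset:

from sklearn import svm

model = svm.SVC(C=1.0, kernel='linear')
model.fit(X, y.ravel())
visualizeBoundaryLinear(X, y, model)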
def visualizeBoundaryLinear(X, y, model):
    #VISUALIZEBOUNDARYLINEAR plots a linear decision boundary learned by the
    #SVM
    #   VISUALIZEBOUNDARYLINEAR(X, y, model) plots a linear decision boundary 
    #   learned by the SVM and overlays the data on it

    w, b = model['w'], model['b']
    xp = np.linspace(min(X[:, 0]), max(X[:, 0]), 100)
    yp = -(w[0] * xp + b)/w[1]
    plotData(X, y)
    plt.plot(xp, yp, '-b')
Example no. 11
def plotDecisionBoundary(x, y, theta):

    plotData(x[:, 1:3], y)
    plot_min_x = x[:, 2].min() - 2
    plot_max_x = x[:, 2].max() + 2

    plot_min_y = (-1.0 / theta[2]) * (theta[1] * plot_min_x + theta[0])
    plot_max_y = (-1.0 / theta[2]) * (theta[1] * plot_max_x + theta[0])

    plt.plot([plot_min_x, plot_max_x], [plot_min_y, plot_max_y])
    plt.xlabel('X1')
    plt.ylabel('X2')
    plt.show()
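Nearly every snippet in this collection calls a plotData helper from the course exercises. One minimal version consistent with how it is used above (some variants instead return the figure and the two scatter handles for legend building):

import numpy as np
import matplotlib.pyplot as plt

def plotData(X, y):
    """Scatter plot: '+' for positive (y == 1) examples, 'o' for negative."""
    y = np.asarray(y).ravel()
    pos, neg = y == 1, y == 0
    plt.plot(X[pos, 0], X[pos, 1], 'k+', linewidth=2, markersize=7)
    plt.plot(X[neg, 0], X[neg, 1], 'ko', markerfacecolor='y', markersize=7)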
Example no. 12
def visualizeBoundaryLinear(X, y, model):
    #VISUALIZEBOUNDARYLINEAR plots a linear decision boundary learned by the
    #SVM
    #   VISUALIZEBOUNDARYLINEAR(X, y, model) plots a linear decision boundary
    #   learned by the SVM and overlays the data on it

    w = model.w
    b = model.b
    xp = linspace(min(X[:,0]), max(X[:,0]), 100)
    yp = -(w[0]*xp + b) / w[1]
    plotData(X, y)
    plot(xp, yp, '-b')
Example no. 13
def plotDecisionBoundary(theta, X, y):

#PLOTDECISIONBOUNDARY Plots the data points X and y into a new figure with
#the decision boundary defined by theta
#   PLOTDECISIONBOUNDARY(theta, X,y) plots the data points with + for the
#   positive examples and o for the negative examples. X is assumed to be
#   a either
#   1) Mx3 matrix, where the first column is an all-ones column for the
#      intercept.
#   2) MxN, N>3 matrix, where the first column is all-ones

# Plot Data
    plotData(X[:,1:], y)

    if np.size(X, 1) <= 3:
        #Only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([np.min(X[:,1])-2,  np.max(X[:,1])+2])

        # Calculate the decision boundary line
        plot_y = (-1./theta[2])*(theta[1]*plot_x + theta[0])

        # Plot, and adjust axes for better viewing
        plt.plot(plot_x, plot_y)

    else:
        # Here is the grid range
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)

        grid_num_u = u.shape[0]
        grid_num_v = v.shape[0]

        z = np.zeros((grid_num_u, grid_num_v))
        # Evaluate z = theta*x over the grid
        for i in range(0,grid_num_u):
            for j in range(0,grid_num_v):
                z[i,j] = mapFeature(u[i], v[j]).dot(theta)


        z = z.T # important to transpose z before calling contour

        # Plot z = 0
        # Notice you need to specify the levels as [0]
        plt.contour(u, v, z, [0])
    def inf(self):

        self.topicSample(init=True)

        cg = self.colGibSamp()

        plt = plDa.plotData('Iteration', 'Log Probability')
        plt.updatePlot(0, cg)

        print('\nIteration %s: %s' % (0, cg))
        self.printResults()

        for s in range(1, self.iterations + 1):

            sys.stdout.write('.')

            if s % 10 == 0:

                cg = self.colGibSamp()
                print("cg ", cg)
                plt.updatePlot(s, cg)

                print('\nIteration %s: %s' % (s, cg))
                self.printResults()

            self.topicSample()
Example no. 15
def plotDecisionBoundary(theta, X, y):
    # Plot Data
    f,j1,a,b=plotData.plotData(X[:,1:3], y)

    if X.shape[1] <= 3:
        ## Only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([np.amin(X[:, 1]) - 2, np.amax(X[:, 1]) + 2])

        ## Calculate the decision boundary line
        plot_y = (-1 / theta[2, 0]) * (theta[1, 0] * plot_x + theta[0, 0])

        ## Plot, and adjust axes for better viewing
        c, = j1.plot(plot_x, plot_y)
        c.set_label('Decision Boundary')
        plt.axis([30, 100, 30, 100])
    else:
## Here is the grid range
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)
        z = np.zeros((len(u), len(v)))
        ## Evaluate z = theta*x over the grid
        for i in range(len(u)):
            for j in range(len(v)):
                qa = mapFeature.mapFeature(u[i], v[j])
                z[i, j] = qa @ theta
        z = z.T  ## important to transpose z before calling contour
        c = j1.contour(u, v, z, levels=[0])
        c.collections[0].set_label('Decision Boundary')
    return f, j1, a, b
def visualizeBoundary(X, y, model):
    
    # Plot the training data on top of the boundary
    plotData(X,y.T[0])

    # Make classification predictions over a grid of values
    x1plot = np.linspace(np.min(X[:,0]), np.max(X[:,0]), 100).reshape(-1,1)
    x2plot = np.linspace(np.min(X[:,1]), np.max(X[:,1]), 100).reshape(-1,1)
    X1, X2 = np.meshgrid(x1plot, x2plot)
    vals = np.zeros(X1.shape)
    for i in range(X1.shape[1]):
        this_X = np.vstack((X1[:,i], X2[:,i])).T
        vals[:,i] = svmPredict(model, this_X).ravel()

    # Plot the SVM boundary
    plt.contour(X1, X2, vals, colors='b', linewidths=1)
Example no. 17
def plotDecisionBoundary(theta, X, y):
    plt, pos, neg = plotData(X[:, 1:3], y)
    r, c = X.shape
    if c <= 3:
        plot_x = np.array([min(X[:, 1]) - 2, max(X[:, 1]) + 2])
        plot_y = (-1. / theta[2]) * (theta[1] * plot_x + theta[0])
        db = plt.plot(plot_x, plot_y)[0]
        plt.legend((pos, neg, db),
                   ('Admitted', 'Not Admitted', 'Decision Boundary'),
                   loc='lower left')
        plt.axis([30, 100, 30, 100])
    else:
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)
        z = np.zeros((len(u), len(v)))
        for i in range(len(u)):
            for j in range(len(v)):
                z[i, j] = np.dot(
                    mapFeature(np.asarray([[u[i]]]), np.asarray([[v[j]]])),
                    theta)
        z = z.T
        #print(z)
        CS = plt.contour(u, v, z, levels=[0])
        db = CS.collections[0]
        plt.clabel(CS, fmt='%2.1d', colors='g', fontsize=14)
        plt.legend((pos, neg, db), ('y = 1', 'y = 0', 'Decision Boundary'),
                   loc='upper right')

    return plt
def plotDecisionBoundary(theta, X, y):
    # plotDecisionBoundary plots the data points with + for the positive examples
    # and o for the negative examples. X is assumed to be a either
    # 1) Mx3 matrix, where the first column is an all-ones column for the intercept.
    # 2) MxN, N>3 matrix, where the first column is all-ones

    # Plot the data

    plotData(X[:, 1:3], y)

    if X.shape[1] <= 3:
        # only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([np.min(X[:, 1]) - 2, np.max(X[:, 1]) + 2])

        # Calculate the decision boundary line

        plot_y = np.array((-1 / theta[2]) * (theta[1] * plot_x + theta[0]))

        # Plot and adjust axes for better viewing

        plt.plot(plot_x, plot_y)

        # Legend, specific

        # plt.legend(['Admitted', 'Not admitted', 'Decision Boundary'])
        plt.axis([30, 105, 30, 105])
    else:
        # Here is the grid range

        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)

        z = np.zeros((u.size, v.size))
        # Evaluate z = theta*x over the grid

        for i in range(u.size):
            for j in range(v.size):
                z[i, j] = np.dot(
                    mapFeature(u[i].reshape((1, 1)), v[j].reshape(1, 1)),
                    theta)

        z = z.T  # important to transpose z before calling contour

        # Plot z = 0
        # Notice you need to specify range [0,0]

        plt.contour(u, v, z, [0], linewidths=2)
def plotDecisionBoundary(theta, X, y):

    import matplotlib.pyplot as plt
    import numpy as np
    import mapFeature as mf
    import plotData as pd

    # Plot Data
    fig = plt.figure()

    plt, p1, p2 = pd.plotData(X[:, 1:3], y)


    if X.shape[1] <= 3:
        # Only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([min(X[:, 1]), max(X[:, 1])])

        # Calculate the decision boundary line
        plot_y = (-1. / theta[2]) * (theta[1] * plot_x + theta[0])

        # Plot, and adjust axes for better viewing
        p3 = plt.plot(plot_x, plot_y)

        # Legend, specific for the exercise
        plt.legend((p1, p2, p3[0]),
                   ('Admitted', 'Not Admitted', 'Decision Boundary'),
                   numpoints=1,
                   handlelength=0.5)

        plt.axis([30, 100, 30, 100])

        plt.show()
    else:
        # Here is the grid range
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)

        z = np.zeros((len(u), len(v)))
        # Evaluate z = theta*x over the grid
        for i in range(len(u)):
            for j in range(len(v)):
                z[i, j] = np.dot(
                    mf.mapFeature(np.array([u[i]]), np.array([v[j]])), theta)
        z = np.transpose(z)  # important to transpose z before calling contour

        # Plot z = 0
        # Notice you need to specify the level 0
        # we get collections[0] so that we can display a legend properly
        p3 = plt.contour(u, v, z, levels=[0], linewidths=2).collections[0]

        # Legend, specific for the exercise
        plt.legend((p1, p2, p3), ('y = 1', 'y = 0', 'Decision Boundary'),
                   numpoints=1,
                   handlelength=0)

        plt.show()

def visualizeBoundaryLinear(X, y, model):
    #VISUALIZEBOUNDARYLINEAR plots a linear decision boundary learned by the
    #SVM
    #   VISUALIZEBOUNDARYLINEAR(X, y, model) plots a linear decision boundary
    #   learned by the SVM and overlays the data on it

    # plot decision boundary
    # right assignments from http://stackoverflow.com/a/22356267/583834
    w = model.coef_[0]
    b = model.intercept_[0]
    xp = np.linspace(X[:, 0].min(), X[:, 0].max(), 100)
    yp = -(w[0] * xp + b) / w[1]

    plt.plot(xp, yp, 'b-')

    # plot training data
    pd.plotData(X, y)
def visualizeBoundaryLinear(X, y, model):
    """
    Plots a linear decision boundary learned by the SVM.
    Parameters
    ----------
    X : array_like
        (m x 2) The training data with two features (to plot in a 2-D plane).
    y : array_like
        (m, ) The data labels.
    model : dict
        Dictionary of model variables learned by SVM.
    """
    w, b = model['w'], model['b']
    xp = np.linspace(min(X[:, 0]), max(X[:, 0]), 100)
    yp = -(w[0] * xp + b)/w[1]

    plotData(X, y)
    plt.plot(xp, yp, '-b')
Example no. 23
def plot_svc(svc, X, y, h=0.02, pad=0.25):
    x_min, x_max = X[:, 0].min()-pad, X[:, 0].max()+pad
    y_min, y_max = X[:, 1].min()-pad, X[:, 1].max()+pad
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = svc.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.2)

    plotData(X, y)
    #plt.scatter(X[:,0], X[:,1], s=70, c=y, cmap=mpl.cm.Paired)
    # Support vectors indicated in plot by vertical lines
    sv = svc.support_vectors_
    #plt.scatter(sv[:,0], sv[:,1], c='k', marker='|', s=100, linewidths='1')
    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)
    plt.xlabel('X1')
    plt.ylabel('X2')
    print('Number of support vectors: ', svc.support_.size)
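A usage sketch for plot_svc, assuming a scikit-learn classifier fitted beforehand (the C and gamma values are placeholders):

from sklearn import svm

svc = svm.SVC(C=1.0, kernel='rbf', gamma=10)
svc.fit(X, y.ravel())
plot_svc(svc, X, y)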
def plotDecisionBoundary(theta, X, y):
    # Plots the data points X and y into a new figure with
    # the decision boundary defined by theta
    # ------------------------------
    # This function plots the data point + for the
    # positive examples and o the negative examples.
    # X is assumed to be a either
    # 1) Mx3 Matrix, where the first column is an all ones
    #    columns for the intercept
    # 2) MxN, N > 3 matrix, where the first column is all-ones

    # plot data
    plotData(X[:, 1:], y)

    if X.shape[1] <= 3:
        # Only need 2 points to define line, so choose two endpoints
        plot_x = np.array([[np.min(X[:, 1]) - 2], [np.max(X[:, 1]) + 2]])

        # Calculate the decision boundary line
        plot_y = -1. / theta[2] * (theta[1] * plot_x + theta[0])

        # Plot, and adjust axes for better viewing
        plt.plot(plot_x, plot_y)

    else:
        # Here is the grid range
        u = np.linspace(-1, 1.25, 50)
        v = np.linspace(-1, 1.25, 50)

        u = u.reshape((50, 1))
        v = v.reshape((50, 1))

        z = np.zeros((len(u), len(v)))
        # Evaluate z = theta*x over the grid
        for i in range(len(u)):
            for j in range(len(v)):
                f = mapFeature(u[i], v[j])
                f = f.reshape((len(f), 1))

                z[i][j] = f.T.dot(theta)

        uu, vv = np.meshgrid(u, v)

        plt.contour(uu, vv, z.T, [0])
def plotDecisionBoundary(theta, X, y, Lambda):
    """
    Plots the data points X and y into a new figure with the decision boundary 
    defined by theta     
      PLOTDECISIONBOUNDARY(theta, X,y) plots the data points with + for the
      positive examples and o for the negative examples. X is assumed to be
      a either
      1) Mx3 matrix, where the first column is an all-ones column for the
         intercept.
      2) MxN, N>3 matrix, where the first column is all-ones
    """

    # Plot Data
    plt.figure()
    plotData(X[:,1:], y)

    if X.shape[1] <= 3:
        # Only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([min(X[:, 2]),  max(X[:, 2])])

        # Calculate the decision boundary line
        plot_y = (-1./theta[2])*(theta[1]*plot_x + theta[0])

        # Plot, and adjust axes for better viewing
        plt.plot(plot_x, plot_y)

    else:

        xvals = np.linspace(-1,1.5,50)
        yvals = np.linspace(-1,1.5,50)
        zvals = np.zeros((len(xvals),len(yvals)))
        for i in range(len(xvals)):
            for j in range(len(yvals)):
                myfeaturesij = mapFeature(np.array([xvals[i]]),np.array([yvals[j]]))
                zvals[i][j] = np.dot(theta.flatten(),myfeaturesij.T)
        zvals = zvals.transpose()
    
        u, v = np.meshgrid( xvals, yvals )
        mycontour = plt.contour( xvals, yvals, zvals, [0])
        #Kind of a hacky way to display a text on top of the decision boundary
        myfmt = { 0:'Lambda = %d'% Lambda}
        plt.clabel(mycontour, inline=1, fontsize=15, fmt=myfmt)
        plt.title("Decision Boundary")
        plt.show()
Example no. 26
def plotDecisionBoundary(theta, X, y):
    PD.plotData(X[:,[1,2]], y)
    if X.shape[1] <= 3:
        slope     = -theta[1]/theta[2]
        intercept = -theta[0]/theta[2]
        plot_x    = np.array([min(X[:,1])-2, max(X[:,1])+2])  # x-coordinates of the decision line's endpoints
        plot_y    = slope*plot_x + intercept  # Decision line
        plt.plot(plot_x, plot_y, c="orange", label="decision boundary")
        plt.legend()
    else:
        u_vals = np.linspace(-1, 1.5, 50)
        v_vals = np.linspace(-1, 1.5, 50)
        z = np.zeros((len(u_vals), len(v_vals)))
        for i in range(len(u_vals)):
            for j in range(len(v_vals)):
                X1 = np.array([u_vals[i]])
                X2 = np.array([v_vals[j]])
                z[i, j] = MP.mapFeature(X1,X2)@theta
        plt.contour(u_vals, v_vals, z.T, 0)
def plotDecisionBoundary(X, y, theta):

    if X.shape[1] <= 3:
        plotData(X[:, 1:], y)
        plot_x = np.array([[np.min(X[:, 2]), np.max(X[:,2])]])
        plot_y = (-1./theta[2])*(theta[1]*plot_x + theta[0])
        plt.plot(np.squeeze(plot_x), np.squeeze(plot_y))
    else:
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)
        z = np.zeros((u.shape[0], v.shape[0]))


        for i in range(u.shape[0]):
            for j in range(v.shape[0]):
                z[i, j] = mapFeature( np.array( [[u[i]]] ) ,  np.array( [[v[j]]]  ) ).dot(theta)

        cset = plt.contour(u, v, z.T, [0])
        cset.collections[0].set_label('Decision Boundary')
Example no. 28
def visualizeBoundary(X, y, model):
    """plots a non-linear decision boundary learned by the
    SVM and overlays the data on it"""

    # Plot the training data on top of the boundary
    plotData(X, y)

    # Make classification predictions over a grid of values
    x1plot = np.linspace(min(X[:, 0]), max(X[:, 0]), X.shape[0]).T
    x2plot = np.linspace(min(X[:, 1]), max(X[:, 1]), X.shape[0]).T
    X1, X2 = np.meshgrid(x1plot, x2plot)
    vals = np.zeros(X1.shape)

    for i in range(X1.shape[1]):
        this_X = np.column_stack((X1[:, i], X2[:, i]))
        vals[:, i] = model.predict(this_X)

    # Plot the SVM boundary
    plt.contourf(X1, X2, vals, cmap=plt.cm.Paired, alpha=0.2)
Example no. 29
def visualizeBoundary(X, y, model, varargin=0):
    #VISUALIZEBOUNDARY plots a non-linear decision boundary learned by the SVM
    #   VISUALIZEBOUNDARY(X, y, model) plots a non-linear decision
    #   boundary learned by the SVM and overlays the data on it

    # Plot the training data on top of the boundary
    pd.plotData(X, y)

    # Make classification predictions over a grid of values
    x1plot = np.linspace(X[:, 0].min(), X[:, 0].max(), 100).T
    x2plot = np.linspace(X[:, 1].min(), X[:, 1].max(), 100).T
    X1, X2 = np.meshgrid(x1plot, x2plot)
    vals = np.zeros(X1.shape)
    for i in range(X1.shape[1]):
        this_X = np.column_stack((X1[:, i], X2[:, i]))
        vals[:, i] = model.predict(gkgm.gaussianKernelGramMatrix(this_X, X))

    # Plot the SVM boundary
    plt.contour(X1, X2, vals, colors="blue", levels=[0])
    plt.show(block=False)
def visualizeBoundary(X, y, model):
    """plots a non-linear decision boundary learned by the
    SVM and overlays the data on it"""

    # Plot the training data on top of the boundary
    plotData(X, y)

    # Make classification predictions over a grid of values
    x1plot = np.linspace(min(X[:,0]), max(X[:,0]), X.shape[0]).T
    x2plot = np.linspace(min(X[:,1]), max(X[:,1]), X.shape[0]).T
    X1, X2 = np.meshgrid(x1plot, x2plot)
    vals = np.zeros(X1.shape)

    for i in range(X1.shape[1]):
        this_X = np.column_stack((X1[:, i], X2[:, i]))
        vals[:, i] = model.predict(this_X)

    # Plot the SVM boundary
    plt.contour(X1, X2, vals, levels=[0.0])
def visualizeBoundaryLinear(X, y, model):
    '''
    VISUALIZEBOUNDARYLINEAR(X, y, model) plots a linear decision boundary 
    learned by the SVM and overlays the data on it
    '''

    import numpy as np
    from plotData import plotData
    import matplotlib.pyplot as plt

    b = model.intercept_
    w = model.coef_
    xp = np.linspace(np.min(X[:, 0]), np.max(X[:, 0]), num=50)
    # Calculate the decision boundary line:
    # g(z) = 1/2 >> e^(-z) = 1 >> z = 0
    # theta0 + theta1X1 + theta2X2 = 0
    # x2 plays as y >> y = - (theta0 + theta1X1) / theta2
    yp = -(w.item(0) * xp + b) / w.item(1)
    plotData(X, y)
    plt.plot(xp, yp, '-b')
Example no. 33
def plotDecisionBoundary(theta, X, y):
    #PLOTDECISIONBOUNDARY Plots the data points X and y into a new figure with
    #the decision boundary defined by theta
    #   PLOTDECISIONBOUNDARY(theta, X,y) plots the data points with + for the
    #   positive examples and o for the negative examples. X is assumed to be
    #   a either
    #   1) Mx3 matrix, where the first column is an all-ones column for the
    #      intercept.
    #   2) MxN, N>3 matrix, where the first column is all-ones

    # Plot Data
    plt, p1, p2 = pd.plotData(X[:, 1:3], y)

    if X.shape[1] <= 3:
        # Only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([min(X[:, 1]) - 2, max(X[:, 1]) + 2])

        # Calculate the decision boundary line
        plot_y = (-1.0 / theta[2]) * (theta[1] * plot_x + theta[0])

        # Plot, and adjust axes for better viewing
        p3 = plt.plot(plot_x, plot_y)

        # Legend, specific for the exercise
        plt.legend((p1, p2, p3[0]),
                   ('Admitted', 'Not Admitted', 'Decision Boundary'),
                   numpoints=1,
                   handlelength=0.5)
        plt.axis([30, 100, 30, 100])
    else:
        # Here is the grid range
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)

        z = np.zeros((len(u), len(v)))
        # Evaluate z = theta*x over the grid
        for i in range(len(u)):
            for j in range(len(v)):
                z[i, j] = np.dot(
                    mf.mapFeature(np.array([u[i]]), np.array([v[j]])), theta)
        z = z.transpose()  # important to transpose z before calling contour

        # Plot z = 0
        # Notice you need to specify the level 0
        # we get collections[0] so that we can display a legend properly
        p3 = plt.contour(u, v, z, levels=[0], linewidths=2).collections[0]

        # Legend, specific for the exercise
        plt.legend((p1, p2, p3), ('y = 1', 'y = 0', 'Decision Boundary'),
                   numpoints=1,
                   handlelength=0)

    return plt, p1, p2
Example no. 34
def visualizeBoundaryLinear(X, y, clf):
    """
    Plots a linear decision boundary learned by the SVM.
    Parameters
    ----------
    X : ndarray, shape (n_samples, n_features)
        Samples, where n_samples is the number of samples and n_features is the number of features.
    y : ndarray, shape (n_samples,)
        Labels.
    clf : Support Vector Classification
        The trained SVM.
    """
    plotData(X, y)

    coef = clf.coef_.ravel()
    intercept = clf.intercept_.ravel()

    xp = np.linspace(np.min(X[:, 0]), np.max(X[:, 0]), 100)
    yp = -1.0 * (coef[0] * xp + intercept[0]) / coef[1]

    plt.plot(xp, yp, linestyle='-', color='b')
Example no. 35
def visualizeBoundary(X, y, model):
    #VISUALIZEBOUNDARY plots a non-linear decision boundary learned by the SVM
    #   VISUALIZEBOUNDARY(X, y, model) plots a non-linear decision
    #   boundary learned by the SVM and overlays the data on it

    # Plot the training data on top of the boundary
    plotData(X, y)

    # Make classification predictions over a grid of values
    x1plot = linspace(min(X[:,0]), max(X[:,0]), 100)
    x2plot = linspace(min(X[:,1]), max(X[:,1]), 100)
    [X1, X2] = meshgrid(x1plot, x2plot)
    vals = zeros(shape(X1))
    for i in range(size(X1, 1)):
       this_X = column_stack((X1[:, i], X2[:, i]))
       vals[:, i] = svmPredict(model, this_X)

    # Plot the SVM boundary
    contour(X1, X2, vals, [0], colors='b')
Example no. 36
def visualizeBoundary(X, y, model):
    """plots a non-linear decision boundary learned by the
    SVM and overlays the data on it"""

    m = np.size(X, 0)

    # Plot the training data on top of the boundary
    plotData(X, y)

    # Make classification predictions over a grid of values
    x1plot = np.linspace(min(X[:, 0]), max(X[:, 0]), m)
    x2plot = np.linspace(min(X[:, 1]), max(X[:, 1]), m)
    X1, X2 = np.meshgrid(x1plot, x2plot)
    vals = np.zeros((m, m))

    for i in range(m):
        this_X = np.c_[X1[:, i], X2[:, i]]  # (863, 2)
        vals[:, i] = svmPredict(model, this_X)

    # Plot the SVM boundary
    plt.contour(X1, X2, vals, colors='blue')
Example no. 37
def visualizeBoundary(X, y, model):
    #VISUALIZEBOUNDARY plots a non-linear decision boundary learned by the SVM
    #   VISUALIZEBOUNDARY(X, y, model) plots a non-linear decision
    #   boundary learned by the SVM and overlays the data on it

    # Plot the training data on top of the boundary
    plotData(X, y)

    # Make classification predictions over a grid of values
    x1plot = linspace(min(X[:, 0]), max(X[:, 0]), 100)
    x2plot = linspace(min(X[:, 1]), max(X[:, 1]), 100)
    [X1, X2] = meshgrid(x1plot, x2plot)
    vals = zeros(shape(X1))
    for i in range(size(X1, 1)):
        this_X = column_stack((X1[:, i], X2[:, i]))
        vals[:, i] = svmPredict(model, this_X)

    # Plot the SVM boundary
    contour(X1, X2, vals, [0], colors='b')
Example no. 38
def plotDecisionBoundary(X, y, theta, mu, sigma):
    """
        Plots the data with a + marker for positive examples and an o
        marker for negative examples, together with the decision boundary
        for the given set of model parameters theta, feature matrix X
        and label vector y
    """

    n = X.shape[1]
    X = repmat(sigma, X.shape[0], 1) * X[:, 1:n] + repmat(mu, X.shape[0], 1)
    plotData(X, y)

    Theta = np.zeros(theta.shape)

    Theta[1, 0] = theta[1, 0] / sigma[0]
    Theta[2, 0] = theta[2, 0] / sigma[1]
    Theta[0, 0] = theta[0, 0] - mu[0] * Theta[1, 0] - mu[1] * Theta[2, 0]

    plot_x = np.array([min(X[:, 1]), max(X[:, 1])])
    plot_y = (-1. / Theta[2, 0]) * (Theta[1, 0] * plot_x + Theta[0, 0])
    plt.plot(plot_x, plot_y)
Example no. 39
def plotDecisionBoundary(theta, X, y, legend, label, title=None):
    plt.ion()
    plotData(X[:, 1:], y, legend, label)
    n = X.shape[1]
    if n <= 3:
        plot_x = np.array([X[:, 1].min(axis=0) - 2, X[:, 1].max(axis=0) + 2])
        plot_y = -1 / theta[2] * (theta[1] * plot_x + theta[0])
        plt.plot(plot_x, plot_y, label='Decision Boundary')
    else:
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)
        z = np.zeros([len(u), len(v)])
        for i in range(len(u)):
            for j in range(len(v)):
                z[i, j] = mapFeature(u[i], v[j], 1) @ theta
        plt.contour(u, v, z.T, [0], linewidths=2)
    if title:
        plt.title(title)
    plt.legend()
    plt.ioff()
    plt.show()
    plt.close('all')
Example no. 40
def plotDecisionBoundary(theta,X,y):
    #importing some useful modules
    import plotData as pD
    #linear algebra computations using numpy
    import numpy as np
    #plotting module
    import pylab as plt
    import mapFeature as mF
    # plot the Data
    pD.plotData(X[:,1:3],y)
    #getting the shape of the matrix x
    m,n=np.shape(X)
    #starting an interactive mode in pylab
    plt.ion()

    if n <= 3:
        #Only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([np.min(X[:,1])-2, np.max(X[:,1])+2])
        #calculate and plot the decision boundary line
        plot_y = (-1./theta[2])*(theta[1]*plot_x + theta[0])
        plt.plot(plot_x, plot_y)
    else:
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)

        z = np.zeros((len(u), len(v)))

        for i in range(len(u)):
            for j in range(len(v)):
                z[i,j] = np.dot(mF.mapFeature(u[i], v[j]), theta)

        #plotting a contour for the decision boundary; z is transposed
        plt.contour(u, v, np.transpose(z), [0])
Example no. 41
def plotDecisionBoundary(theta, X, y):
    #PLOTDECISIONBOUNDARY Plots the data points X and y into a new figure with
    #the decision boundary defined by theta
    #   PLOTDECISIONBOUNDARY(theta, X,y) plots the data points with + for the
    #   positive examples and o for the negative examples. X is assumed to be
    #   a either
    #   1) Mx3 matrix, where the first column is an all-ones column for the
    #      intercept.
    #   2) MxN, N>3 matrix, where the first column is all-ones

    # Plot Data
    fig = plotData(X[:,1:], y)

    if size(X, 1) <= 3:
        # Only need 2 points to define a line, so choose two endpoints
        plot_x = array([min(X[:,1])-2,  max(X[:,1])+2])

        # Calculate the decision boundary line
        plot_y = (-1/theta[2]) * (theta[1] * plot_x + theta[0])

        # Plot, and adjust axes for better viewing
        plot(plot_x, plot_y)

        # Legend, specific for the exercise
        legend(('Admitted', 'Not admitted', 'Decision Boundary'), numpoints=1)
        axis([30, 100, 30, 100])
    else:
        # Here is the grid range
        u = linspace(-1, 1.5, 50)
        v = linspace(-1, 1.5, 50)

        z = zeros((len(u), len(v)))
        # Evaluate z = theta*x over the grid
        for i in range(len(u)):
            for j in range(len(v)):
                z[i,j] = dot(mapFeature(u[i], v[j]), theta)

        z = z.T # important to transpose z before calling contour

        # Plot z = 0
        # Notice you need to specify the level as [0]
        contour(u, v, z, [0], linewidths=2)

    return fig
Example no. 42
print('5x5 Identity Matrix: ')
print(warmUpExercise())

print('Program paused. Press enter to continue.')
input()


## ======================= Part 2: Plotting =======================
print('Plotting Data ...')
data = loadtxt('./ex1data1.txt', delimiter=',')
X = data[:, 0]; y = data[:, 1]
m = len(y) # number of training examples

# Plot Data
# Note: You have to complete the code in plotData.py
firstPlot = plotData(X, y)
firstPlot.show()

print('Program paused. Press enter to continue.')
input()



## =================== Part 3: Gradient descent ===================
print('Running Gradient Descent ...')

X = column_stack((ones(m), data[:,0])) # Add a column of ones to x
theta = zeros(2) # initialize fitting parameters

# Some gradient descent settings
iterations = 1500
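The warmUpExercise() call above only has to match its banner; consistent with the printed '5x5 Identity Matrix', the helper reduces to:

import numpy as np

def warmUpExercise():
    """Returns the 5x5 identity matrix, as the banner above advertises."""
    return np.eye(5)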
Example no. 43
#load the first column of variable data into variable X
#unlike MATLAB, Python indexing starts at 0
X = data[:, 0]

#load the second column of variable data into variable y
y = data[:, 1]
#get the length of y
m=len(y)
#set y as an m-by-1 matrix: numpy stores it as a 1-D array, and to perform linear algebra we need it as an m-by-1 matrix

y=y.reshape(m,1)


#calling the function to plot data 

ax=plt.plotData(X,y)
"""%% =================== Part 3: Gradient descent =================== """
#initializing theta to zeros, i.e. the initial values for theta are all zero, with a float data type
theta=np.zeros((2,1),dtype=float)

#adding a column of ones to X to account for the intercept term
X=np.c_[np.ones(m),X]


#compute the cost of the initial values
J=cC.computeCost(X, y,theta)
#setting the variables needed by gradient descent: X, y, theta, alpha and num_iters
iterations = 1500
alpha = 0.01
#printing the cost when theta is initialized to zeros; it should be approximately 32.07
print("the cost should be approximately equal to 32.07 \n %s" % (J))
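The script expects computeCost to return roughly 32.07 for the all-zeros theta; a sketch of the standard squared-error cost that assumption implies:

import numpy as np

def computeCost(X, y, theta):
    """Linear-regression cost J(theta) = 1/(2m) * sum((X @ theta - y)^2)."""
    m = y.shape[0]
    residuals = X @ theta - y
    return float((residuals ** 2).sum() / (2 * m))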
Example no. 44
import costFunction as cF
#importing MapFeature
import mapFeature as mF
#importing the sigmoid function
import sigmoid as sg
#loading the training data from text file
data = np.loadtxt("ex2data1.txt", delimiter=",")
X= data[:,:2]
y = data[:, 2]
"""=========================plotting=========================="""

# We start the exercise by first plotting the data to understand the   the problem we are working with.

print('Plotting data with + indicating (y = 1) examples and o indicating (y = 0) examples.\n')
#plotting the Data
pD.plotData(X, y)

#Put some labels
pl.xlabel('Exam 1 score')
pl.ylabel('Exam 2 score')
pl.show()

print('\nProgram paused. Press enter to continue.\n')

input()
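Of the modules imported above, sigmoid is the simplest; the function the logistic-regression code needs is one line:

import numpy as np

def sigmoid(z):
    """Element-wise logistic function 1 / (1 + e^(-z))."""
    return 1.0 / (1.0 + np.exp(-z))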

def plotDecisionBoundary(theta, X, y):
#PLOTDECISIONBOUNDARY Plots the data points X and y into a new figure with
#the decision boundary defined by theta
#   PLOTDECISIONBOUNDARY(theta, X,y) plots the data points with + for the 
#   positive examples and o for the negative examples. X is assumed to be 
#   a either 
#   1) Mx3 matrix, where the first column is an all-ones column for the 
#      intercept.
#   2) MxN, N>3 matrix, where the first column is all-ones

    import matplotlib.pyplot as plt
    import numpy as np
    import mapFeature as mf
    import plotData as pd

    # Plot Data
    fig = plt.figure()

    plt, p1, p2 = pd.plotData(X[:,1:3], y)


    if X.shape[1] <= 3:
        # Only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([min(X[:,1])-2,  max(X[:,1])+2])

        # Calculate the decision boundary line
        plot_y = (-1./theta[2])*(theta[1]*plot_x + theta[0])

        # Plot, and adjust axes for better viewing
        p3 = plt.plot(plot_x, plot_y)
        
        # Legend, specific for the exercise
        plt.legend((p1, p2, p3[0]), ('Admitted', 'Not Admitted', 'Decision Boundary'), numpoints=1, handlelength=0.5)

        plt.axis([30, 100, 30, 100])

        plt.show(block=False)
    else:
        # Here is the grid range
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)

        z = np.zeros(( len(u), len(v) ))
        # Evaluate z = theta*x over the grid
        for i in range(len(u)):
            for j in range(len(v)):
                z[i,j] = np.dot(mf.mapFeature(np.array([u[i]]), np.array([v[j]])),theta)
        z = np.transpose(z) # important to transpose z before calling contour

        # Plot z = 0
        # Notice you need to specify the level 0
        # we get collections[0] so that we can display a legend properly
        p3 = plt.contour(u, v, z, levels=[0], linewidths=2).collections[0]
        
        # Legend, specific for the exercise
        plt.legend((p1,p2, p3),('y = 1', 'y = 0', 'Decision Boundary'), numpoints=1, handlelength=0)

        plt.show(block=False)

import predict as pr
import plotData as pd
import mapFeature as mf
import plotDecisionBoundary as pdb
import numpy as np 
from scipy.optimize import fmin_bfgs

## Load Data
#  The first two columns contains the exam scores and the third column
#  contains the label.

data = np.loadtxt('ex2data2.txt', delimiter=",")
X = data[:,:2]
y = data[:,2]

plt, p1, p2 = pd.plotData(X, y)

# # Labels and Legend
plt.xlabel('Microchip Test 1')
plt.ylabel('Microchip Test 2')
plt.legend((p1, p2), ('y = 1', 'y = 0'), numpoints=1, handlelength=0)

plt.show(block=False) # prevents having to close the graph to move forward with ex2_reg.py

input('Program paused. Press enter to continue.\n')


## =========== Part 1: Regularized Logistic Regression ============
#  In this part, you are given a dataset with data points that are not
#  linearly separable. However, you would still like to use logistic 
#  regression to classify the data points. 
Example no. 47
from costFunction import costFunction
from plotDecisionBoundary import plotDecisionBoundary
from predict import predict


data = np.loadtxt('ex2data1.txt', delimiter=',')
X = data[:, [0, 1]]
y = data[:, [2]]


# ==================== Part 1: Plotting ====================
# We start the exercise by first plotting the data to understand
# the problem we are working with.
print('Plotting data with + indicating (y = 1) examples,',
      'and o indicating (y = 0) examples.\n')
plotData(X, y, xlabel='Exam 1 score', ylabel='Exam 2 score',
         legends=['Admitted', 'Not Admitted'])


# ============ Part 2: Compute Cost and Gradient ============
# In this part of the exercise, you will implement the cost and gradient
# for logistic regression. You need to complete the code in
# costFunction.py
m, n = X.shape
X = np.hstack((np.ones((m, 1)), X))
initial_theta = np.zeros(n + 1)

cost, grad = costFunction(initial_theta, X, y)
print('Cost at initial theta (zeros):', cost)
print('Gradient at initial theta (zeros):', grad, '\n')
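costFunction is called as costFunction(initial_theta, X, y) and returns both the cost and the gradient. A sketch matching that call, assuming y is the (m, 1) column loaded above:

import numpy as np

def costFunction(theta, X, y):
    """Unregularized logistic-regression cost and gradient."""
    m = y.shape[0]
    h = 1.0 / (1.0 + np.exp(-(X @ theta.reshape(-1, 1))))
    J = float(-(y.T @ np.log(h) + (1 - y).T @ np.log(1 - h)) / m)
    grad = (X.T @ (h - y)).ravel() / m
    return J, grad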

Example no. 48
import numpy as np
from scipy.optimize import fmin_bfgs

from mapFeature import mapFeature
from plotData import plotData
from costFunctionReg import costFunctionReg
from plotDecisionBoundary import plotDecisionBoundary
from predict import predict


# Plot Data
data = np.loadtxt('ex2data2.txt', delimiter=',')
X = data[:, [0, 1]]
y = data[:, [2]]
plotData(X, y, xlabel='Microchip Test 1', ylabel='Microchip Test 2',
         legends=['y = 1', 'y = 0'])


# =========== Part 1: Regularized Logistic Regression ============
# In this part, you are given a dataset with data points that are not
# linearly separable. However, you would still like to use logistic
# regression to classify the data points.
#
# To do so, you introduce more features to use -- in particular, you add
# polynomial features to our data matrix (similar to polynomial
# regression).

# Note that mapFeature also adds a column of ones for us, so the intercept
# term is handled
X = mapFeature(X[:, [0]], X[:, [1]])
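costFunctionReg, imported above, adds an L2 penalty that leaves the intercept term unpenalized. A sketch under the same shape conventions (the lam argument name is an assumption):

import numpy as np

def costFunctionReg(theta, X, y, lam):
    """Logistic-regression cost and gradient with L2 regularization;
    theta[0] (the intercept) is not penalized."""
    m = y.shape[0]
    t = theta.reshape(-1, 1)
    h = 1.0 / (1.0 + np.exp(-(X @ t)))
    J = float(-(y.T @ np.log(h) + (1 - y).T @ np.log(1 - h)) / m)
    J += lam / (2.0 * m) * float(t[1:].T @ t[1:])
    grad = (X.T @ (h - y)) / m
    grad[1:] += (lam / m) * t[1:]
    return J, grad.ravel()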
Example no. 49
clf.fit(X,y)
print('Coefficients from scikit learn LinearRegression: \n',clf.intercept_,' ',clf.coef_)

clf = linear_model.Ridge(alpha=0.5)
clf.fit(X, y)
print('Coefficients from scikit learn RidgeRegression: \n', clf.intercept_, ' ', clf.coef_)

clf = linear_model.Lasso(alpha=0.1)
clf.fit(X, y)
print('Coefficients from scikit learn Lasso: \n', clf.intercept_, ' ', clf.coef_)



# Plot Data
# Note: You have to complete the code in plotData.py
plotData(X, y)

# =================== Part 3: Gradient descent ===================
print('Running Gradient Descent ...\n')
# Add a column of ones to X for the intercept term
ones = np.ones((m,1))
X = np.hstack((ones,X))

theta = np.zeros((2, 1)) # initialize fitting parameters

# Some gradient descent settings
iterations = 1500
alpha = 0.01

# compute and display initial cost
computeCost(X, y, theta)
Example no. 50
print('Running warmUpExercise ...')
print('5x5 Identity Matrix:')
warmup = warmUpExercise()
print(warmup)
#raw_input("Program paused. Press Enter to continue...")

# ======================= Part 2: Plotting =======================
data = np.loadtxt(r'C:\Users\HTDA\Coursera-Stanford-ML-Python\ex1\ex1data1.txt', delimiter=',')
m = data.shape[0]
X = np.column_stack((np.ones(m), data[:, 0]))
y = data[:, 1]

# Plot Data
# Note: You have to complete the code in plotData.py
print('Plotting Data ...')
plotData(data)
#show()

#raw_input("Program paused. Press Enter to continue...")

# =================== Part 3: Gradient descent ===================
print('Running Gradient Descent ...')
theta = np.zeros(2)
# compute and display initial cost
J = computeCost(X, y, theta)
print('cost: %0.4f ' % J)

# Some gradient descent settings
iterations = 1500
alpha = 0.01
Example no. 51
import code
import numpy as np
import plotData
import computeCost
import gradientDescent

# ============================== Load and Plot data ==============================

print("Plotting Data ...\n")
data = np.genfromtxt("../data/ex1data1.txt", delimiter = ",")
X = data[:, 0]
y = data[:, 1]
plotData.plotData(X, y)

pause = code.InteractiveConsole()
pause.raw_input(prompt = "Press Enter to continue: ")

# ============================== Gradient descent ================================

print("Running Gradient Descent ...\n")
m = len(y)
X = np.c_[np.ones((m, 1)), data[:, 0]]
X = np.reshape(X, (m, 2))
y = np.reshape(y, (m, 1))
theta = np.zeros((2, 1))

iterations = 1500
alpha = 0.01

temp = computeCost.computeCost(X, y, theta)
print("The first J: ", temp)
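The gradientDescent module is imported but never listed in these examples; a minimal batch-gradient-descent sketch, assuming a (X, y, theta, alpha, num_iters) signature consistent with the settings defined above:

import numpy as np

def gradientDescent(X, y, theta, alpha, num_iters):
    """Batch gradient descent for linear regression; returns the final theta
    and the per-iteration cost history. The signature is an assumption."""
    m = y.shape[0]
    J_history = []
    for _ in range(num_iters):
        theta = theta - (alpha / m) * (X.T @ (X @ theta - y))
        J_history.append(float(((X @ theta - y) ** 2).sum() / (2 * m)))
    return theta, J_history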
X = data[:, 0:2] #x refers to the population size in 10,000s
y = data[:, 2] #y refers to the profit in $10,000s

m = y.size #number of training examples

y = y.reshape((m,1))


"""## Part 1: Plotting ====================
We start the exercise by first plotting the data to understand
  the problem we are working with."""

#scatter plot
print("Plotting data with + indicating (y = 1) examples and o indicating (y = 0) examples.")

line_pos, line_neg = plotData(X, y, "Exam 1","Exam 2", "Admitted","Not Admitted")

plt.legend(handles=[line_pos,line_neg])

plt.show(block=False)


print("\nProgram paused. Press enter to continue.\n")
pause()


"""## Part 2: Compute Cost and Gradient """

#  Setup the data matrix appropriately, and add ones for the intercept term
m, n = X.shape
Example no. 53
from mapFeature import mapFeature
from costFunctionReg import costFunctionReg

if __name__ == '__main__':
    # Initialization
    os.system('cls' if os.name == 'nt' else 'clear')
    plt.ion()

    # Load Data
    # The first two columns contains the X values and the third column
    # contains the label (y).

    data = np.asmatrix(np.loadtxt('ex2data2.txt', delimiter=','))
    X, y = data[:, :2], data[:, 2]

    plotData(X, y, ['y = 1', 'y = 0'])

    # Put some labels and Legend
    plt.xlabel('Microchip Test 1')
    plt.ylabel('Microchip Test 2')
    plt.legend(numpoints=1)

    plt.show()

    input('Program paused. Press enter to continue.\n')

    # =========== Part 1: Regularized Logistic Regression ============
    # In this part, you are given a dataset with data points that are not
    # linearly separable. However, you would still like to use logistic
    # regression to classify the data points.
    #
plt.ion()

## =============== Part 1: Loading and Visualizing Data ================
#  We start the exercise by first loading and visualizing the dataset. 
#  The following code will load the dataset into your environment and plot
#  the data.
#

print('Loading and Visualizing Data ...\n')

# Load from ex6data1: 
# You will have X, y in your environment
data = sio.loadmat('ex6data1.mat')
X = data['X']; y = data['y']
# Plot training data
plotData(X, y.T[0])

input('\nProgram paused. Press enter to continue.\n')

## ==================== Part 2: Training Linear SVM ====================
#  The following code will train a linear SVM on the dataset and plot the
#  decision boundary learned.
#

# Load from ex6data1: 
# You will have X, y in your environment
data = sio.loadmat('ex6data1.mat')
X = data['X']; y = data['y']
y = y.astype(int)

print('\nTraining Linear SVM ...\n')
Example no. 55
    plt.ion()

    # Load Data
    # The first two columns contains the exam scores and the third column
    # contains the label.
    data = np.asmatrix(np.loadtxt('ex2data1.txt', delimiter=','))
    X, y = data[:, :2], data[:, 2]

    # ==================== Part 1: Plotting ====================
    #  We start the exercise by first plotting the data to understand
    #  the problem we are working with.

    print('Plotting data with + indicating (y = 1) examples and o ' +
          'indicating (y = 0) examples.')

    plotData(X, y, ['Admitted', 'Not admitted'])

    # Put some labels and Legend
    plt.xlabel('Exam 1 score')
    plt.ylabel('Exam 2 score')
    plt.legend(numpoints=1)

    plt.show()

    input('Program paused. Press enter to continue.\n')

    # ============ Part 2: Compute Cost and Gradient ============
    #  In this part of the exercise, you will implement the cost and gradient
    #  for logistic regression. You need to complete the code in
    #  costFunction.py
print(wue.warmUpExercise())

input('Program paused. Press enter to continue.\n')

## ======================= Part 2: Plotting =======================
print('Plotting Data...')

data = np.loadtxt('ex1data1.txt', delimiter=",")
X = data[:,0]
y = data[:,1]
m = len(y) # number of training examples

# Plot Data
# Note: You have to complete the code in plotData.py

pd.plotData(X, y)

input('Program paused. Press enter to continue.\n')

## =================== Part 3: Gradient descent ===================
print('Running Gradient Descent...')

X_padded = np.column_stack((np.ones((m,1)), X)) # Add a column of ones to x
theta = np.zeros((2, 1)) # initialize fitting parameters

# Some gradient descent settings
iterations = 1500
alpha = 0.01

# compute and display initial cost
print(cc.computeCost(X_padded, y, theta))
Example no. 57
from plotData import plotData
from costFunctionReg import costFunctionReg
from mapFeature import mapFeature
from plotDecisionBoundary import plotDecisionBoundary
from predict import predict

## Load Data
#  The first two columns contains the X values and the third column
#  contains the label (y).

data = loadtxt('ex2data2.txt', delimiter=',')
X = data[:,:2]
y = data[:, 2]

fig = plotData(X, y)

# Put some labels
# Labels and Legend
xlabel('Microchip Test 1')
ylabel('Microchip Test 2')

# Specified in plot order
legend(('y = 1', 'y = 0'), numpoints=1)
fig.show()



## =========== Part 1: Regularized Logistic Regression ============
weights_index = len(var_names) - 2
plotData.plotWeights(dir_names, 'weights.png', max_delta_time, options.dir_name_to_ignore)

legend_index = len(var_names) - 1
plotLegend.plotLegend(dir_names, 'legend.png', max_delta_time, options.dir_name_to_ignore)

filename_multiCoeff = 'constraint_set_card.txt'

labels =  plotData.getLabels(dir_names, var_names[0] + '.txt', max_delta_time, options.dir_name_to_ignore)

list_plots = []

# do not plot legend (need to use the plotLegend function called above)
for i in range(0,len(var_names) - 1):
    if use_multi_coeff[i]:
        l = plotData.plotData(dir_names, var_names[i] + '.txt', max_delta_time, options.dir_name_to_ignore, show_legend, col_idx[i],'',filename_multiCoeff, coeff_y_lim)
    else:
        l = plotData.plotData(dir_names, var_names[i] + '.txt', max_delta_time, options.dir_name_to_ignore, show_legend, col_idx[i],'', '', coeff_y_lim)
    list_plots.append(l)

# add empty plot for legend
list_plots.append([])

var_names_cross = [ 'd_norm_w',
                    'd_angle_w'
                    ]

#for i in range(0,len(var_names_cross)):
for i in range(0,0):
    l = plotCrossData.plotCrossData(dir_names, 'parameter_vector0/', i, var_names_cross[i] + '.png')
    list_plots.append(l)