Example #1
def logicRegressionRegularized(data):
    # Expand the two raw features into degree-6 polynomial terms so the
    # linear model can fit a non-linear decision boundary.
    X = mapFeature(data[:, :-1], 6)
    y = data[:, -1]
    theta = np.zeros(shape=X.shape[1])

    theta, loss = gradientDescent(X, y, theta, options)
    # Optional: evaluate accuracy on the training data; the commented-out
    # loop re-ran gradientDescent in chunks to track accuracy over time.
    # predict = (np.round(sigmoid(np.dot(X, theta))) == y)
    # print(np.mean(predict))   # fraction classified correctly
    # print(np.mean(y == 0))    # baseline: always predict class 0

    plotLoss(loss, options["iterations"])
    plotSortBlock(data, theta)
    plotSortScatter(data)
    plotShow()
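
The mapFeature helper used above is not shown here. A minimal sketch, assuming it expands two raw features into all polynomial terms up to the given degree, bias term included (which would explain why Example #1 does not append a ones column itself):

import numpy as np

def mapFeature(X, degree):
    # Expand (x1, x2) into all monomials x1^(i-j) * x2^j for i = 0..degree;
    # i = 0, j = 0 yields the bias column of ones.
    x1, x2 = X[:, 0], X[:, 1]
    cols = []
    for i in range(degree + 1):
        for j in range(i + 1):
            cols.append((x1 ** (i - j)) * (x2 ** j))
    return np.column_stack(cols)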
Example #2
def simpleLinerRegression(data):
    # Append a bias column of ones; the last entry of theta is the intercept.
    X = np.c_[data[:, :-1], np.ones(shape=data.shape[0])]
    y = data[:, -1]
    theta = np.ones(shape=data.shape[1])

    theta, loss = gradientDescent(X, y, theta, options)
    print(theta)

    plotLine(data, theta)
    plotLoss(loss, options["iterations"])
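
Every example delegates to the same gradientDescent helper, whose definition is not shown. A plausible minimal sketch, assuming options is a dict carrying the learning rate, the iteration count, and an optional hypothesis function (identity for the linear-regression examples, sigmoid for the logistic ones); the real signature in the source repo may differ:

import numpy as np

def gradientDescent(X, y, theta, options):
    m = len(y)
    h = options.get("hypothesis", lambda z: z)  # identity: plain linear model
    loss = []
    for _ in range(options["iterations"]):
        error = h(np.dot(X, theta)) - y
        theta = theta - options["alpha"] / m * np.dot(X.T, error)  # batch step
        loss.append(np.sum(error ** 2) / (2.0 * m))  # squared-error cost
    return theta, loss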
Example #3
def logicRegressionLine(data):
    X = np.c_[data[:, :-1], np.ones(shape=data.shape[0])]
    y = data[:, -1]
    theta = np.zeros(shape=data.shape[1])

    theta, loss = gradientDescent(X, y, theta, options)
    print(theta)

    # test (on the training data)
    predict = (np.round(sigmoid(np.dot(X, theta))) == y)
    print(np.mean(predict))  # training accuracy

    plotLoss(loss, options["iterations"])
    plotSortScatter(data)
    plotSortLine(data, theta)
    plotShow()
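
sigmoid here is the standard logistic function, included for completeness:

import numpy as np

def sigmoid(z):
    # Logistic function: maps any real score into (0, 1).
    return 1.0 / (1.0 + np.exp(-z))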
Example #4
def mulLinerRegression(data):
    X = data[:, :-1]
    # Normalize features to zero mean / unit variance, then append the bias
    # column after normalization so it stays a constant 1.
    X_norm, mu, sigma = featureNormalize(X)
    X_norm = np.c_[X_norm, np.ones(shape=data.shape[0])]
    y = data[:, -1]
    theta = np.zeros(shape=data.shape[1])

    theta, loss = gradientDescent(X_norm, y, theta, options)
    plotLoss(loss, options["iterations"])
    print(theta, mu, sigma)

    plot3D(data, theta, mu, sigma)

    # test: predict for three new inputs, rescaled with the training mu/sigma
    x = np.array([[1380, 3], [1494, 3], [1940, 4]])
    x = np.c_[(x - mu) / sigma, np.ones(3)]
    print(np.dot(x, theta))
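
featureNormalize is inferred from its call site to return the normalized matrix along with the per-column mean and standard deviation, so new inputs can be rescaled the same way at prediction time. A minimal sketch (the original may use a sample rather than population standard deviation):

import numpy as np

def featureNormalize(X):
    mu = X.mean(axis=0)      # per-feature mean
    sigma = X.std(axis=0)    # per-feature (population) standard deviation
    return (X - mu) / sigma, mu, sigma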
Example #5
def oneVsAll(images, labels, K):
    # Train K binary classifiers (one per class) on bias-augmented images.
    images = np.c_[images, np.ones(images.shape[0])]
    print(images.shape)
    all_theta = np.zeros(shape=(K, images.shape[1]))

    # Integer division: np.split needs a whole number of batches.
    splices = images.shape[0] // 10
    data = np.split(images, splices, axis=0)
    y = np.split(labels, splices, axis=0)

    for i in range(K):
        for j in range(splices):
            # One-vs-all labels: class i is positive, everything else negative.
            # Note the fix here: update only row i, not the whole all_theta.
            all_theta[i], loss = gradientDescent(data[j], (y[j] == i),
                                                 all_theta[i], options)
        plotLoss(loss, options["iterations"])
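
oneVsAll only trains the K classifiers; prediction is not shown. A hedged sketch of the usual follow-up step, with predictOneVsAll as a hypothetical helper name: score every class and take the argmax, assuming images already carry the bias column as above:

import numpy as np

def predictOneVsAll(all_theta, images):
    # Hypothetical helper: one sigmoid score per class, highest score wins.
    scores = sigmoid(np.dot(images, all_theta.T))  # shape (m, K)
    return np.argmax(scores, axis=1)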
Example #6
if __name__ == "__main__":
    trainData = data.classifyCircleData(100, 0)
    testData = data.classifyCircleData(100, 0.1)
    iteration = 400
    alpha = 0.03
    net = buildNetwork([2, 3, 2, 1],
                       activation=Activation.tanh,
                       outputActivation=Activation.tanh,
                       regularization=None,
                       inputIds=["x1", "x2"])

    # trainLoss, testLoss = train(network=net, iteration=iteration, trainData=trainData)
    trainLoss = []
    testLoss = []
    for i in range(iteration):
        oneStep(network=net, iteration=iteration, trainData=trainData)
        trainLoss.append(getLoss(network=net, dataPoints=trainData))
        testLoss.append(getLoss(network=net, dataPoints=testData))
        print("step:", i, "  loss:", trainLoss[-1])

    plotLoss(trainLoss, iteration, '-')
    plotLoss(testLoss, iteration, '--')
    plt.show()

    predictPlot(network=net, data=testData)
    result = predict(network=net, data=testData[:, :-1])
    # Labels are +-1, so sign agreement between prediction and label counts
    # as a correct classification.
    print(np.mean(result * testData[:, -1] > 0))
    # for i in range(len(result)):
    #     print(testData[i], result[i])
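
getLoss is used as the training/test metric above but not defined in this example. A representative sketch, assuming a hypothetical forward(network, x) helper that returns the network's scalar output for one input; the real API in the source repo may differ:

def getLoss(network, dataPoints):
    # Mean squared error between the network output and the +-1 label.
    total = 0.0
    for point in dataPoints:
        output = forward(network, point[:-1])     # hypothetical forward pass
        total += 0.5 * (output - point[-1]) ** 2  # squared-error term
    return total / len(dataPoints)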