from optim import * import matplotlib.pyplot as plt from Activation import sigmoid import numpy as np from loadData import Data from functools import partial from linReg import * # data closer to y = x0 + x1 + 2* x2 data1 = [[[1, 1], [4.2]], [[1, 2], [6.0]], [[2, 1], [5.8]], [[2, 2], [7.2]], [[2, 3], [8.5]], [[3, 3], [9.2]], [[1, 3], [8.1]], [[3, 1], [6.1]], [[4, 1], [7.1]], [[1, 4], [10.3]], [[4, 2], [9.5]], [[2, 4], [10.8]]] d1 = Data() d1.loadList(data1) d1.addBiasRow() print d1.X theta_init = np.matrix(np.zeros((d1.n, 1))) cost = partial(linRegCost,data = d1,theta = theta_init, regLambda = 0.001) J, theta = gradDesc(cost, theta_init, 500, 0.1) print 'model is :', list(np.transpose(theta).flat) # test the trained model with data testLinReg(theta, d1) # test the model (1, 1, 2) testLinReg(np.matrix([[1], [1], [2]]), d1)
# 3-input parity pattern: target is 1 exactly when an odd number of the
# three binary inputs are 1.
pat1 = [
    [[0, 0, 0], [0]], [[0, 0, 1], [1]],
    [[0, 1, 0], [1]], [[0, 1, 1], [0]],
    [[1, 0, 0], [1]], [[1, 0, 1], [0]],
    [[1, 1, 0], [0]], [[1, 1, 1], [1]],
]

ActSig = sigmoid(4)  # presumably 4 is the sigmoid slope/gain -- confirm in Activation
# ActSig.view()

d1 = Data()
d1.loadList(pat1)

# NOTE(review): the original script built a second dataset and network here
# from `pat2`, but `pat2` is never defined anywhere in this file, so the
# script crashed with a NameError before any training ran.  Since d2/n2 are
# never used afterwards (training below uses n1 on d1), the dead demo is
# disabled until a real `pat2` is supplied:
# d2 = Data()
# d2.loadList(pat2, 4)
# n2 = ANN([3, 3, 4], ActSig)
# n2.displaySynpWt()

arch1 = [3, 4, 1]  # 3 inputs, one hidden layer of 4 units, 1 output
n1 = ANN(arch1, ActSig)
# n1.displaySynpWt()

##########################################################
########## Training With Gradient DescentPortion #########
cost = partial(n1.bpCost, data=d1, regLambda=0.003)
J, Wt = gradDesc(cost, init_x=n1.W, maxEpochs=500, lr=0.8)
from functools import partial from logReg import * # and function of 3 variables pat1 = [[[0, 0, 0], [0]], [[0, 0, 1], [1]], [[0, 1, 0], [1]], [[0, 1, 1], [1]], [[1, 0, 0], [2]], [[1, 0, 1], [3]], [[1, 1, 0], [3]], [[1, 1, 1], [3]], ] d1 = Data() d1.loadList(pat1, numClasses = 4) #print d1.y act = sigmoid().h # our activation function is simgmoid model, J = trainOneVsAllGD(d1, act,epochs = 5000, lr = 0.25) #print d1.y plt.plot(np.transpose(J)) plt.show() print predictMultiple(model, d1.X, act) # d1.addBiasRow() # theta_init = np.matrix(np.zeros((d1.n, 1))) #