Example 1
 def trainrelu(self,x,y,iterations,learningrate,plot=False,printy=True,printw=True):
     '''ReLU activation for hidden layers and sigmoid on the final output.'''
     if plot:
         cconn,pconn = Pipe(duplex = False)
         pconn2,cconn2 = Pipe(duplex = False)
         cconn3,pconn3 = Pipe(duplex = False)
         Process(target=self.processplotter,args=(cconn,cconn2,cconn3,)).start()
     Wcorr=self.W*0
     lw= len(self.W)
     result=[[] for i in range(lw)]
     #Lsum=[[] for i in range(len(W))]
     if plot:
         nnplotter.plotinit()
     # Recursive forward pass: prepend a bias column of ones at each layer,
     # apply ReLU (mx(..., 0), i.e. numpy.maximum) on hidden layers and the
     # sigmoid (expit) on the final layer z == y.
     q = lambda z, y: (
         mx(matmul(pad(x, ((0, 0), (1, 0)), 'constant', constant_values=1),
                   self.W[z].T), 0) if z == 0
         else expit(matmul(pad(q(z - 1, y), ((0, 0), (1, 0)), 'constant',
                                constant_values=1), self.W[z].T)) if z == y
         else mx(matmul(pad(q(z - 1, y), ((0, 0), (1, 0)), 'constant',
                            constant_values=1), self.W[z].T), 0))
     try:
         for k in range(iterations):
             for i in range(lw-1,-1,-1):
                 result[i]=pad(q(i,lw-1),((0,0),(1,0)),'constant',constant_values=1)
             for i in range(len(x)):
                 X=pad(x[i],((1,0)),'constant',constant_values=1)#input bias
                 for j in range(lw-1,-1,-1):
                     if j==lw-1:
                         Wcorr[j]=array([(result[j][i]-y[i])*(result[j][i]*(1-result[j][i]))])
                     else:
                         Wcorr[j]=(matmul(Wcorr[j+1][0][1:],self.W[j+1])*array([(result[j][i] >0)*1]))
                 for j in range(lw-1,-1,-1):
                     if j==0:
                         self.W[0]=self.W[0]-learningrate*delete(matmul(Wcorr[0].T,array([X])),0,0)
                     else:
                         self.W[j]=self.W[j]-learningrate*delete(matmul(Wcorr[j].T,array([result[j-1][i]])),0,0)
             if plot and pconn2.recv() == "Send":
                 Loss = (np.mean((self.predictrelu(x)-y)**2))/len(x)
                 pconn.send(self.W)
                 pconn3.send([k,Loss])
             if printy:
                 print(self.predictrelu(x))
             print('iteration : {}'.format(k+1))
     except KeyboardInterrupt:
         pass
     if printw:
       for i in range(lw):
           print('W[%d]=\n'%i,self.W[i],'\n')
     if plot:
         nnplotter.plt.close()
     return
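
The bias trick that trainrelu and predictrelu rely on is worth seeing on its own: numpy.pad prepends a column of ones, so the bias weight lives in column 0 of each W[z]. A minimal, self-contained check:

import numpy as np
x = np.array([[0.5, 2.0]])
# Prepend a constant-1 column, exactly as in the recursive forward pass above.
print(np.pad(x, ((0, 0), (1, 0)), 'constant', constant_values=1))
# [[1.  0.5 2. ]]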
Example 2
def ss_err(Y, F, S, scale=1.):
    """Sum of squared residuals between observed Y[i][u] and scale * F[i]·S[u]."""
    err = 0.

    for i in Y:
        for u in Y[i]:
            R = Y[i][u] - scale*mx(F[i], S[u])
            err += sum(R**2)

    return err
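
A minimal usage sketch of the data layout ss_err expects, assuming mx is numpy.matmul in its module: Y is a dict of dicts of column vectors, and F and S are dicts of factor matrices keyed consistently with Y.

import numpy as np

F = {'i0': np.random.random((4, 2))}   # item factors, keyed by item
S = {'u0': np.random.random((2, 1))}   # user factors, keyed by user
Y = {'i0': {'u0': F['i0'] @ S['u0']}}  # observations built from F and S

print(ss_err(Y, F, S))  # ~0, since Y matches scale * F[i]·S[u] exactly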
Example 3
 def predictrelu(self,x):
     '''Returns the network output for a given input.
         NOTE:
             A single input passed to this predict function must have shape
             (1, n), where n is the number of input neurons.
             Example: [1, 1] has shape (2,), which is not accepted,
             but [[1, 1]] has shape (1, 2), which is what a single input needs.
             You know what you are doing :) '''
     l=len(self.W)-1
     # Same recursive forward pass as trainrelu: ReLU on hidden layers,
     # sigmoid on the output layer z == y.
     q = lambda z, y: (
         mx(matmul(pad(x, ((0, 0), (1, 0)), 'constant', constant_values=1),
                   self.W[z].T), 0) if z == 0
         else expit(matmul(pad(q(z - 1, y), ((0, 0), (1, 0)), 'constant',
                                constant_values=1), self.W[z].T)) if z == y
         else mx(matmul(pad(q(z - 1, y), ((0, 0), (1, 0)), 'constant',
                            constant_values=1), self.W[z].T), 0))
     return q(l,l)
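
The shape requirement from the docstring, shown on its own with plain numpy:

import numpy as np
print(np.array([1, 1]).shape)    # (2,)   -> rejected by predictrelu
print(np.array([[1, 1]]).shape)  # (1, 2) -> accepted for a single input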
Example 4
 def predictrelu(self, x):
     """returns the network output of a specific input specifically using relu activation"""
     l = len(self.W) - 1
     # Recursive forward pass (same as Example 3): ReLU on hidden layers,
     # sigmoid on the output layer z == y.
     q = lambda z, y: (
         mx(matmul(pad(x, ((0, 0), (1, 0)), 'constant', constant_values=1),
                   self.W[z].T), 0) if z == 0
         else expit(matmul(pad(q(z - 1, y), ((0, 0), (1, 0)), 'constant',
                                constant_values=1), self.W[z].T)) if z == y
         else mx(matmul(pad(q(z - 1, y), ((0, 0), (1, 0)), 'constant',
                            constant_values=1), self.W[z].T), 0))
     return q(l, l)
Example 5
def solveS(Yt, F):
    h = F[next(iter(F))].shape[1]  # factor dimension (columns of any F[i])
    
    S = {}
    
    for u in Yt:
        Su = np.zeros((h,1))
        Isize = 0

        for i in Yt[u]:
            d = F[i].shape[0]

            if h <= d:
                # Tall F[i] (h <= d): ordinary least squares,
                # s = (F^T F)^-1 F^T y.
                tv = mx(np.transpose(F[i]), F[i])
                tv = np.linalg.inv(tv)
                tv = mx(tv, np.transpose(F[i]))
                tv = mx(tv, Yt[u][i])
                Su = Su + tv
            else:
                # Wide F[i] (h > d): minimum-norm solution,
                # s = F^T (F F^T)^-1 y, since F^T F is singular here.
                tv = np.transpose(F[i])
                tv = mx(tv, np.linalg.inv(mx(F[i], np.transpose(F[i]))))
                tv = mx(tv, Yt[u][i])
                Su = Su + tv
            Isize += 1

        Su = Su / Isize

        S[u] = Su

    return S
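
A toy check of the closed-form least squares used in the tall branch above, in plain numpy and independent of solveS: for y = F s with h <= d, the estimate (F^T F)^-1 F^T y matches numpy's own lstsq answer.

import numpy as np

d, h = 6, 3                       # tall case: h <= d
F = np.random.random((d, h))
s_true = np.random.random((h, 1))
y = F @ s_true

s_hat = np.linalg.inv(F.T @ F) @ F.T @ y      # closed form from the branch
lstsq = np.linalg.lstsq(F, y, rcond=None)[0]  # numpy's reference answer
assert np.allclose(s_hat, lstsq)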
Example 6
def readData(src):
	f = open(src)
	data_list = f.read().splitlines()

	exp = len(data_list)
	attr = len(data_list[0].split(','))
	m = mx(np.empty([exp, attr+1]))
	for i in range(exp):
		# Parse one CSV row and append the bias unit as the last column.
		m[i] = list(map(float, data_list[i].split(','))) + [BIAS]

	f.close()

	return m
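
A hypothetical round trip for readData, assuming mx is numpy.matrix (as in `from numpy import matrix as mx` in Example 8) and BIAS = -1 as in Example 10's constants block:

with open('demo.csv', 'w') as fh:
    fh.write('1.0,2.0\n3.0,4.0\n')
print(readData('demo.csv'))
# [[ 1.  2. -1.]
#  [ 3.  4. -1.]]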
Example 7
def solveF(Y, S):
    F = {}
    
    for i in Y:
        Sit = []
        Yit = []
        for u in Y[i]:
            Sit.append(S[u])
            Yit.append(Y[i][u])

        Si = np.concatenate(Sit, axis=1)
        Yi = np.concatenate(Yit, axis=1)

        # Least squares over item i's observed columns:
        # F_i = Y_i S_i^T (S_i S_i^T)^-1
        Fi = mx(mx(Yi, np.transpose(Si)), np.linalg.inv(mx(Si, np.transpose(Si))))

        F[i] = Fi

    return F
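
The same kind of toy check for solveF's normal equations, in plain numpy: with fully observed Y_i = F_i S_i and S_i of full row rank, Y_i S_i^T (S_i S_i^T)^-1 recovers F_i exactly.

import numpy as np

h, n, d = 3, 8, 5
S = np.random.random((h, n))       # stacked user factors
F_true = np.random.random((d, h))
Y = F_true @ S                     # fully observed item rows

F_hat = Y @ S.T @ np.linalg.inv(S @ S.T)
assert np.allclose(F_hat, F_true)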
Example 8
# Hong Liu   # Include a section with your name

import numpy as np
from numpy import matrix as mx
A = np.random.random(15)  # Create matrix A with size (3,5) containing random numbers
A = A.reshape(3, 5)
A = mx(A)

A.size  # Find the size of matrix A
len(A)  # Find the length of matrix A

A.resize((3, 4))  # Resize (crop/slice) matrix A to size (3,4)

B = A.transpose()  # Find the transpose of matrix A and assign it to B

B[:, 0].min()  # Find the minimum value in column 1 of matrix B

A.min()  # Find the minimum values for the entire matrix A
A.max()  # Find the maximum values for the entire matrix A

X = np.random.random(4)  # Create vector X (an array) with 4 random numbers

from numpy import dot


def my_calculate(A, X):  # Create a function and pass vector X and matrix A in it
    D = dot(A, A.T) * dot(X, X.T)  # Gram matrix A*A.T scaled by the scalar X*X.T
    return D
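
A hypothetical call with the A and X built above: A is (3,4) after the resize, so dot(A, A.T) is (3,3), and dot(X, X.T) on the length-4 vector X is a scalar.

print(my_calculate(A, X))  # (3,3) matrix: A*A.T scaled by X*X.T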
Example 9
 def trainrelu(self,
               x,
               y,
               iterations,
               learningrate,
               plot=False,
               printy=True,
               printw=True,
               vmode="queue",
               boost=0,
               L2=0):
     """Relu Activation for Hidden layers and Sigmoid on final output."""
     if plot:
         if vmode == "queue":
             event_q = Queue()
             send_q = Queue()
             p = Process(target=self.processplotterqueue,
                         args=(
                             event_q,
                             send_q,
                         ))
             p.start()
             send_q.get(block=True, timeout=3)  #checking startsignal
         elif vmode == "pipe":
             cconn, pconn = Pipe(duplex=False)
             pconn2, cconn2 = Pipe(duplex=False)
             cconn3, pconn3 = Pipe(duplex=False)
             Process(target=self.processplotterpipe,
                     args=(
                         cconn,
                         cconn2,
                         cconn3,
                     )).start()
         else:
             print("visualization mode unknown. Turning off plotting")
             plot = False
     Wcorr = self.W * 0
     lw = len(self.W)
     result = [[] for i in range(lw)]
     #Lsum=[[] for i in range(len(W))]
     # Recursive forward pass (see Example 1): ReLU on hidden layers,
     # sigmoid on the output layer z == y.
     q = lambda z, y: (
         mx(matmul(pad(x, ((0, 0), (1, 0)), 'constant', constant_values=1),
                   self.W[z].T), 0) if z == 0
         else expit(matmul(pad(q(z - 1, y), ((0, 0), (1, 0)), 'constant',
                                constant_values=1), self.W[z].T)) if z == y
         else mx(matmul(pad(q(z - 1, y), ((0, 0), (1, 0)), 'constant',
                            constant_values=1), self.W[z].T), 0))
     try:
         for k in range(iterations):
             for i in range(lw - 1, -1, -1):
                 result[i] = pad(q(i, lw - 1), ((0, 0), (1, 0)),
                                 'constant',
                                 constant_values=1)
             for i in range(len(x)):
                 X = pad(x[i], ((1, 0)), 'constant',
                         constant_values=1)  #input bias
                 for j in range(lw - 1, -1, -1):
                     if j == lw - 1:
                         Wcorr[j] = array([
                             (result[j][i] - y[i]) * (result[j][i] *
                                                      (1 - result[j][i]))
                         ])
                     else:
                         Wcorr[j] = (
                             matmul(Wcorr[j + 1][0][1:], self.W[j + 1]) *
                             array([(result[j][i] > 0) * 1]))
                 for j in range(lw - 1, -1, -1):
                     if j == 0:
                         self.W[0] = self.W[0] - learningrate * delete(
                             matmul(Wcorr[0].T, array([X])), 0, 0)
                     else:
                         self.W[j] = self.W[j] - learningrate * delete(
                             matmul(Wcorr[j].T, array([result[j - 1][i]])),
                             0, 0)
             Loss = (mean((self.predictrelu(x) - y)**2))
             if plot:
                 if vmode == "queue":
                     try:
                         if send_q.get_nowait(
                         ) == "Send" and k != iterations - 1:
                             event_q.put([self.W, k, Loss])
                     except Exception:
                         pass
                 else:
                     if pconn2.recv() == "Send":
                         pconn.send(self.W)
                         pconn3.send([k, Loss])
             if printy:
                 print(str(self.predictrelu(x)) + '\n iteration :' + str(k + 1))
             else:
                 print('iteration : {}'.format(k + 1))
     except KeyboardInterrupt:
         pass
     if printw:
         for i in range(lw):
             print('W[%d]=\n' % i, self.W[i], '\n')
     if plot and vmode == "queue":
         event_q.put("close")
         nnplotter.plt.close()
         p.join()
     return 0
Example 10
# constants
ALPHA = 1 	# learning rate
IN_NO = 46	# num of input nodes
LAYER = 2	# num of hidden layers
NODES = 30	# num of nodes in each hidden layers
OUT_NO = 2	# num of output nodes

BOUND = 0.5 	# the boundary of initial weights

MAX_ITER = int(sys.argv[4])
MODE = int(sys.argv[5])
BIAS = -1

# weights stored in list
getArr = lambda x: [random.uniform(-BOUND, BOUND) for _ in range(x)]
W = [mx(getArr((IN_NO+1)*NODES)).reshape(IN_NO+1, NODES)]
W += [mx(getArr((NODES+1)*NODES)).reshape(NODES+1, NODES) for _ in range(LAYER-1)]
W += [mx(getArr((NODES+1)*OUT_NO)).reshape(NODES+1, OUT_NO)]
DELTA = copy.deepcopy(W)
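
With the constants as given above (46 inputs, two hidden layers of 30 nodes, 2 outputs), the weight list should come out with these shapes; a quick check:

for i, w in enumerate(W):
    print(i, w.shape)   # expects: 0 (47, 30), 1 (31, 30), 2 (31, 2)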

# cache
gradient = mx(np.empty([LAYER, NODES]))
gradientOut = mx(np.empty([1, OUT_NO]))
a = mx(np.empty([LAYER, NODES+1])) 	# activation
for i in range(len(a)):		# bias units
	a[i, NODES] = BIAS

# functions
def readData(src):
	f = open(src)
	data_list = f.read().splitlines()

	exp = len(data_list)
	attr = len(data_list[0].split(','))
	m = mx(np.empty([exp, attr+1]))
	for i in range(exp):
		m[i] = list(map(float, data_list[i].split(','))) + [BIAS]

	f.close()

	return m

Example 11
# ax = Axes3D(plt.gcf())
# ax.plot_surface(alpha, beta, Area[:,:,0])

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(alpha, beta, Area[:,0,:], rstride=1, cstride=1)
cset = ax.contour(alpha, beta, Area[:,0,:], zdir='x', offset=0)
cset = ax.contour(alpha, beta, Area[:,0,:], zdir='y', offset=90)
cset = ax.contour(alpha, beta, Area[:,0,:], zdir='z', offset=0)
ax.set_xlabel(r'alpha $[\degree]$')
ax.set_xlim(0, 90)
ax.set_ylabel(r'gamma $[\degree]$')
ax.set_ylim(0, 90)
ax.set_zlabel(r'area $[m^2]$')
ax.set_zlim(0, 60)
plt.show()

plt.plot(gamma, Area[0,:,0])
plt.plot(gamma, Area[:,-1,0])
plt.plot(gamma, Area[0,0,:])
plt.plot([rad2deg(arctan(6/7)),rad2deg(arctan(6/7))],[24,38])
plt.show()

mxA = mx(Area)        # here mx is the array maximum (e.g. numpy's amax)
A = where(Area==mxA)  # indices of the maximizing angles
# print(Area[7,5,5])

# alpha = 90
# beta  = 0
# gamma = rad2deg(arctan(4/7))
# print(A(alpha,beta,gamma))
Example 12
	# Methods of a `_file_operations` helper class (the class statement
	# itself is not shown in this snippet).
	def write_my_file(A):
		file = open('my_array', 'w')
		file.writelines('%s' %str(A))
		file.close()
		
	def read_my_file():
		file = open('my_array', 'r')
		B = file.read()
		file.close()
		return B

B = random.randint(4,45,6)  # numpy's random (size=6); ar, int16 and mx are assumed numpy aliases
B.tofile('my_array', sep=',', format='%s')
B = _file_operations.read_my_file()
B = ar(B.split(','), dtype=int16)
C = mx([[3],[2],[5]])
D = C*B
_file_operations.write_my_file(D)
print(_file_operations.read_my_file())

"""
HW assignment 2 Official solution below
1. Include a sec4on with your name
2. Create matrix A with size (3,5) containing random numbers
3. Find the size and length of matrix A
4. Resize (crop) matrix A to size (3,4)
5. Find the transpose of matrix A and assign it to B
6. Find the minimum value in column 1 of matrix B
7. Find the minimum and maximum values for the en4re matrix A
8. Create Vector X (an array) with 4 random numbers
9. Create a func4on and pass Vector X and matrix A in it
Example 13
    def kalman_filter(self, pos):
        # Sort data
        F = np.array([[1., self.deltaT, 0.5 * self.deltaT**2],
                      [0., 1., self.deltaT], [0., 0., 1.]])
        H = np.array([[1., 0., 0.]])

        z = np.array([[pos]])

        #Prediction
        self.x_k1_k = mx(F, self.x_k_k)
        self.P_k1_k = mx(F, mx(self.P_k_k, np.transpose(F))) + self.Q

        # Gain, computed from the predicted covariance before the update
        self.K = mx(mx(self.P_k1_k, np.transpose(H)),
                    inv(mx(H, mx(self.P_k1_k, np.transpose(H))) + self.R))

        # Update
        self.x_k_k = self.x_k1_k + mx(self.K, (z - mx(H, self.x_k1_k)))
        #self.P_k_k = mx(mx((self.I - mx(self.K, H)), self.P_k1_k), np.transpose(self.I - mx(self.K, H))) + mx(self.K, mx(self.R, np.transpose(self.K)))
        self.P_k_k = mx(self.I - mx(self.K, H), self.P_k1_k)
        self.q = self.x_k_k[0][0]
        self.qd = self.x_k_k[1][0]
        self.qdd = self.x_k_k[2][0]
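
Since the class that owns deltaT, Q, R, I and the state is not shown, here is a minimal self-contained sketch of the same constant-acceleration, position-only filter in plain numpy; all numeric values are hypothetical.

import numpy as np

dt = 0.01
F = np.array([[1., dt, 0.5 * dt**2], [0., 1., dt], [0., 0., 1.]])
H = np.array([[1., 0., 0.]])
Q = np.eye(3) * 1e-5   # hypothetical process noise
R = np.array([[1e-2]]) # hypothetical measurement noise

x = np.zeros((3, 1))   # [position, velocity, acceleration]
P = np.eye(3)

for pos in [0.0, 0.01, 0.04, 0.09]:   # toy position measurements
    # Predict
    x = F @ x
    P = F @ P @ F.T + Q
    # Gain from the predicted covariance, then update
    K = P @ H.T @ np.linalg.inv(H @ P @ H.T + R)
    x = x + K @ (np.array([[pos]]) - H @ x)
    P = (np.eye(3) - K @ H) @ P

print(x.ravel())  # estimated position, velocity, acceleration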
Example 14
 def trainrelu(self,
               x,
               y,
               iterations,
               learningrate,
               plot=False,
               printy=True,
               printw=True):
     '''ReLU activation for hidden layers and sigmoid on the final output.'''
     if plot:
         event_q = Queue()
         send_q = Queue()
         p = Process(target=self.processplotter, args=(
             event_q,
             send_q,
         ))
         p.start()
         send_q.get(block=True, timeout=3)  #checking startsignal
     Wcorr = self.W * 0
     lw = len(self.W)
     result = [[] for i in range(lw)]
     #Lsum=[[] for i in range(len(W))]
     if plot:
         nnplotter.plotinit()
     # Recursive forward pass (see Example 1): ReLU on hidden layers,
     # sigmoid on the output layer z == y.
     q = lambda z, y: (
         mx(matmul(pad(x, ((0, 0), (1, 0)), 'constant', constant_values=1),
                   self.W[z].T), 0) if z == 0
         else expit(matmul(pad(q(z - 1, y), ((0, 0), (1, 0)), 'constant',
                                constant_values=1), self.W[z].T)) if z == y
         else mx(matmul(pad(q(z - 1, y), ((0, 0), (1, 0)), 'constant',
                            constant_values=1), self.W[z].T), 0))
     try:
         for k in range(iterations):
             for i in range(lw - 1, -1, -1):
                 result[i] = pad(q(i, lw - 1), ((0, 0), (1, 0)),
                                 'constant',
                                 constant_values=1)
             for i in range(len(x)):
                 X = pad(x[i], ((1, 0)), 'constant',
                         constant_values=1)  #input bias
                 for j in range(lw - 1, -1, -1):
                     if j == lw - 1:
                         Wcorr[j] = array([
                             (result[j][i] - y[i]) * (result[j][i] *
                                                      (1 - result[j][i]))
                         ])
                     else:
                         Wcorr[j] = (
                             matmul(Wcorr[j + 1][0][1:], self.W[j + 1]) *
                             array([(result[j][i] > 0) * 1]))
                 for j in range(lw - 1, -1, -1):
                     if j == 0:
                         self.W[0] = self.W[0] - learningrate * delete(
                             matmul(Wcorr[0].T, array([X])), 0, 0)
                     else:
                         self.W[j] = self.W[j] - learningrate * delete(
                             matmul(Wcorr[j].T, array([result[j - 1][i]])),
                             0, 0)
             Loss = (np.mean((self.predictrelu(x) - y)**2)) / len(x)
             if plot:
                 try:
                     if send_q.get_nowait() == "Send" and k != iterations - 1:
                         event_q.put([self.W, k, Loss])
                 except Exception:
                     pass
             if printy:
                 print(self.predictrelu(x))
             print('iteration : {}'.format(k + 1))
     except KeyboardInterrupt:
         pass
     if printw:
         for i in range(lw):
             print('W[%d]=\n' % i, self.W[i], '\n')
     event_q.put("close")
     p.join()
     return