def processplotterpipe(self, cconn, cconn2, cconn3):
    '''Plotting loop meant to run in a separate process, fed over pipes:
    cconn delivers the list of weight matrices, cconn3 delivers the
    (iteration, loss) pair, and cconn2 carries the "Send" handshake back
    to the trainer. Any pipe error shuts the plotter down cleanly.'''
    nnplotter.plotinit()
    cconn2.send("Send")
    while True:
        nnplotter.ax.clear()
        try:
            tmp = cconn.recv()
            cconn2.send("")
            k = cconn3.recv()
        except Exception:
            cconn.close()
            cconn2.close()
            cconn3.close()
            nnplotter.plt.close()
            break
        for i in range(len(tmp)):
            nnplotter.plotweights(tmp[i], i)
        nnplotter.ax.text(0, -0.25, s="iteration {: <5} Loss = {: <10}".format(
            str(k[0]), str(k[1])))
        nnplotter.plt.pause(0.00000001)
        try:
            cconn2.send("Send")
        except Exception:
            cconn.close()
            cconn2.close()
            cconn3.close()
            nnplotter.plt.close()
            break
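# A minimal usage sketch for the pipe-based plotter above, kept as a comment
# because the surrounding class and its instance (`net` below) are assumptions
# not shown in this file. The trainer waits for the "Send" handshake before
# pushing each frame, then closes its pipe ends to shut the plotter down.
#
#     from multiprocessing import Pipe, Process
#
#     conn_w, child_w = Pipe()      # list of weight matrices
#     conn_ack, child_ack = Pipe()  # "Send" handshake from the plotter
#     conn_k, child_k = Pipe()      # (iteration, loss) pair
#     plotproc = Process(target=net.processplotterpipe,
#                        args=(child_w, child_ack, child_k))
#     plotproc.start()
#     if conn_ack.recv() == "Send":     # plotter is ready for a frame
#         conn_w.send(net.W)            # weights to draw
#         conn_k.send((0, 0.5))         # iteration and loss to display
#     conn_w.close(); conn_ack.close(); conn_k.close()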
def weights(self, plot=False):
    '''Prints every weight matrix in self.W; if plot is True, also draws
    them with nnplotter first.'''
    if plot:
        nnplotter.plotinit()
        for i in range(len(self.W)):
            nnplotter.plotweights(self.W[i], i)
        nnplotter.plt.show()
    for i in range(len(self.W)):
        print('W[%d]=\n' % i, self.W[i], '\n')
    return
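# Hedged usage note: assuming `net` is an instance of the class this method
# belongs to (not shown here), the call below prints every weight matrix and,
# with plot=True, draws each layer with nnplotter first.
#
#     net.weights(plot=True)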
def weights(plot=False):
    '''Prints the weight matrices w1 and w2; if plot is True, also draws
    them with nnplotter first.'''
    global w1, w2
    if plot:
        nnplotter.plotinit()
        nnplotter.plotweights(w1, 0)
        nnplotter.plotweights(w2, 1)
        nnplotter.plt.show()
    print('w1:\n', w1, '\nw2:\n', w2)
    return
def weights():
    '''Prints the weight matrices w1 and w2 and draws them briefly.'''
    global w1, w2
    print('w1:\n', w1, '\nw2:\n', w2)
    nnplotter.plotinit()
    nnplotter.plotweights(w1, 0)
    nnplotter.plotweights(w2, 1)
    nnplotter.plt.pause(0.5)
    return
def train(x, y, iterations, learningrate=0.1, printy=True, printw=True,
          plot=False, plot_delay=0.00000001):
    '''Over the given iterations, optimizes w1 and w2 to reduce the output
    error. If printy is True (default), the network output is printed on
    each iteration; if printw is True (default), the weight matrices are
    printed at the end of training.

    NOTE: the learning rate defaults to 0.1, which is sometimes more than
    required (gradient descent will not converge) or less than required
    (slow training). Feel free to experiment with different values, as this
    module is for basic understanding :)
    '''
    global w1, w2
    if plot:
        nnplotter.plotinit()
    for j in range(iterations):
        for i in range(len(x)):
            # Forward pass: hidden layer, then a bias unit is prepended.
            Hsum = np.matmul(x[i], w1.T)
            result1 = expit(Hsum)
            result1 = np.append([1], result1)
            Osum = np.matmul(result1, w2.T)  # renamed from `sum`, which shadowed the builtin
            result = expit(Osum)
            # Backward pass: delta of the sigmoid output layer.
            e0 = result - y[i]
            dtotal_dsum = e0 * result * (1 - result)
            w2corr = np.matmul(dtotal_dsum.reshape(-1, 1), [result1])
            w1corr = np.matmul(
                np.array([np.matmul(dtotal_dsum, w2)
                          * (result1 * (1 - result1))]).T, [x[i]])
            # Gradient-descent step; the bias row is dropped from w1corr.
            w2 = w2 - learningrate * w2corr
            w1 = w1 - learningrate * np.delete(w1corr, 0, 0)
        if plot:
            nnplotter.ax.clear()
            nnplotter.plotweights(w1, 0)
            nnplotter.plotweights(w2, 1)
            nnplotter.plt.pause(plot_delay)
        if printy:
            print('y= ', expit(np.matmul(
                np.pad(expit(np.matmul(x, w1.T)), ((0, 0), (1, 0)),
                       'constant', constant_values=(1)), w2.T)))
    if printw:
        print('w1:\n', w1, '\nw2:\n', w2)
    return
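# A minimal, hedged training sketch for the two-layer train() above: XOR with
# a bias-plus-2-feature input layout, since this version multiplies x directly
# against w1.T without padding. The 4-hidden-unit shapes below are an
# illustration, not documented defaults of this module.
def _xor_example():
    global w1, w2
    # Columns: bias weight first, then one weight per input feature.
    w1 = 2 * np.random.rand(4, 3) - 1   # 4 hidden units; 2 inputs + bias
    w2 = 2 * np.random.rand(1, 5) - 1   # 1 output unit; 4 hidden + bias
    x = np.array([[1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]])  # leading 1 = bias
    y = np.array([[0], [1], [1], [0]])
    train(x, y, iterations=5000, learningrate=0.5, printy=False)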
def weights(plot=False):
    '''Prints the weight matrices W[0], W[1] and W[2]; if plot is True,
    also draws them with nnplotter first.'''
    global W
    if plot:
        nnplotter.plotinit()
        for i in range(len(W)):
            nnplotter.plotweights(W[i], i)
        nnplotter.plt.show()
    for i in range(len(W)):
        print('W[%d]=\n' % i, W[i], '\n')
    return
def train(self, x, y, iterations, learningrate=0.1, plot=False, Plotfreq=1,
          Plotmaxm=0, printy=True, printw=True, plot_delay=0.00000001):
    '''Over the given iterations, optimizes all the weight matrices to
    reduce the output error. If printy is True (default), the network
    output is printed on each iteration; if printw is True (default), the
    weight matrices are printed at the end of training.

    NOTE: the learning rate defaults to 0.1, which is sometimes more than
    required (gradient descent will not converge) or less than required
    (slow training). So feel free to experiment with different values, as
    this module is for basic understanding and experiments :)
    '''
    Wcorr = self.W * 0
    lw = len(self.W)
    result = [[] for i in range(lw)]
    if plot:
        nnplotter.plotinit()
    # p(z) recursively computes the activations of layer z for all of x,
    # prepending a bias column of ones before each matrix product.
    p = lambda z: expit(matmul(pad(x, ((0, 0), (1, 0)), 'constant',
                                   constant_values=1), self.W[z].T)) \
        if z == 0 else expit(matmul(pad(p(z - 1), ((0, 0), (1, 0)),
                                        'constant', constant_values=1),
                                    self.W[z].T))
    for k in range(iterations):
        # Forward pass: cache every layer's activations with a bias column.
        for i in range(lw - 1, -1, -1):
            result[i] = pad(p(i), ((0, 0), (1, 0)), 'constant',
                            constant_values=1)
        for i in range(len(x)):
            X = pad(x[i], ((1, 0)), 'constant', constant_values=1)
            # Backward pass: deltas from the output layer downwards; the
            # [1:] slice drops the bias element when chaining layers.
            for j in range(lw - 1, -1, -1):
                if j == lw - 1:
                    Wcorr[j] = array([(result[j][i] - y[i])
                                      * (result[j][i] * (1 - result[j][i]))])
                else:
                    Wcorr[j] = (matmul(Wcorr[j + 1][0][1:], self.W[j + 1])
                                * array([(result[j][i] * (1 - result[j][i]))]))
            # Gradient-descent step; the bias row of each correction is dropped.
            for j in range(lw - 1, -1, -1):
                if j == 0:
                    self.W[0] = self.W[0] - learningrate * delete(
                        matmul(Wcorr[0].T, array([X])), 0, 0)
                else:
                    self.W[j] = self.W[j] - learningrate * delete(
                        matmul(Wcorr[j].T, array([result[j - 1][i]])), 0, 0)
        if plot:
            if k == 0 and Plotmaxm:
                # Maximize the plot window once, if requested.
                figManager = nnplotter.plt.get_current_fig_manager()
                figManager.window.showMaximized()
            if k % Plotfreq == 0:
                nnplotter.ax.clear()
                for i in range(lw):
                    nnplotter.plotweights(self.W[i], i)
                nnplotter.ax.text(0, 0, s='iteration {}'.format(k))
                nnplotter.plt.pause(plot_delay)
        if printy:
            print(self.predict(x))
            print('iteration : {}'.format(k + 1))
    if printw:
        for i in range(lw):
            print('W[%d]=\n' % i, self.W[i], '\n')
    return
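# Hedged usage sketch for the class-based train() above, kept as a comment
# because the class constructor is not shown in this file. The essential
# contract is that net.W is a NumPy object array of per-layer weight matrices,
# each with a leading bias column; the 2-4-1 topology below is illustrative.
#
#     import numpy as np
#     net = Network([2, 4, 1])   # hypothetical constructor
#     x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
#     y = np.array([[0], [1], [1], [0]])
#     net.train(x, y, iterations=5000, learningrate=0.5,
#               plot=True, Plotfreq=100, printy=False)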
def processplotterqueue(self, event_q, send_q):
    '''Plotting loop meant to run in a separate process, fed over queues:
    event_q delivers (weights, iteration, loss) tuples or the string
    "close", and send_q carries the start/ready handshakes back.'''
    nnplotter.plotinit()
    send_q.put("Startsignal")
    send_q.put("Send")
    while True:
        nnplotter.ax.clear()
        tmp = event_q.get(block=True)
        if isinstance(tmp, str) and tmp == "close":
            event_q.close()
            send_q.close()
            nnplotter.plt.close()
            break
        for i in range(len(tmp[0])):
            nnplotter.plotweights(tmp[0][i], i)
        nnplotter.ax.text(0, -0.25, s="iteration {: <5} Loss = {: <10}".format(
            tmp[1], tmp[2]))
        nnplotter.plt.pause(0.00000001)
        send_q.put("Send")
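# A minimal usage sketch for the queue-based plotter above, as a comment
# because the owning instance (`net`) is an assumption. The trainer consumes
# the "Startsignal"/"Send" handshakes, pushes (weights, iteration, loss)
# tuples, and finally sends "close" to end the plotting process.
#
#     from multiprocessing import Process, Queue
#
#     event_q, send_q = Queue(), Queue()
#     plotproc = Process(target=net.processplotterqueue, args=(event_q, send_q))
#     plotproc.start()
#     send_q.get()                       # "Startsignal"
#     if send_q.get() == "Send":         # plotter ready for the first frame
#         event_q.put((net.W, 0, 0.5))   # (weights, iteration, loss)
#     event_q.put("close")
#     plotproc.join()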
def train(x, y, iterations, learningrate=0.1, plot=False, printy=True,
          printw=True, plot_delay=0.00000001):
    '''Over the given iterations, optimizes W[0], W[1] and W[2] to reduce
    the output error. If printy is True (default), the network output is
    printed on each iteration; if printw is True (default), the weight
    matrices are printed at the end of training.

    NOTE: the learning rate defaults to 0.1, which is sometimes more than
    required (gradient descent will not converge) or less than required
    (slow training). Feel free to experiment with different values, as this
    module is for basic understanding :)
    '''
    global W
    Wcorr = W * 0
    result = [[] for i in range(len(W))]
    if plot:
        nnplotter.plotinit()
    # p(z) recursively computes the activations of layer z for all of x,
    # prepending a bias column of ones before each matrix product.
    p = lambda z: expit(
        np.matmul(np.pad(x, ((0, 0), (1, 0)), 'constant', constant_values=1),
                  W[z].T)) if z == 0 else expit(
        np.matmul(np.pad(p(z - 1), ((0, 0), (1, 0)), 'constant',
                         constant_values=1), W[z].T))
    for j in range(iterations):
        # Forward pass: cache every layer's activations; hidden layers get
        # a bias column so they can feed the next matrix product.
        for i in range(len(W) - 1, -1, -1):
            result[i] = p(i)
            if i != len(W) - 1:
                result[i] = np.pad(result[i], ((0, 0), (1, 0)), 'constant',
                                   constant_values=1)
        for i in range(len(x)):
            X = np.pad(x[i], ((1, 0)), 'constant', constant_values=1)
            # Backward pass: chain the output delta down through W[2] and
            # W[1]; the intermediate names replace a vestigial inner loop
            # that repeated these identical expressions.
            delta2 = np.array([(result[2][i] - y[i])
                               * (result[2][i] * (1 - result[2][i]))])
            Wcorr[2] = np.matmul(delta2.T, np.array([result[1][i]]))
            delta1 = np.matmul(delta2, W[2]) * np.array(
                [(result[1][i] * (1 - result[1][i]))])
            Wcorr[1] = np.matmul(delta1.T, np.array([result[0][i]]))
            Wcorr[0] = np.matmul(
                (np.matmul(delta1[0][1:], W[1])
                 * np.array([(result[0][i] * (1 - result[0][i]))])).T, [X])
            # Gradient-descent step with a per-layer scaled learning rate;
            # the bias row of each hidden-layer correction is dropped.
            W[2] = W[2] - learningrate * Wcorr[2]
            W[1] = W[1] - (learningrate / 2) * np.delete(Wcorr[1], 0, 0)
            W[0] = W[0] - (learningrate / 4) * np.delete(Wcorr[0], 0, 0)
        if plot:
            nnplotter.ax.clear()
            for i in range(len(W)):
                nnplotter.plotweights(W[i], i)
            nnplotter.plt.pause(plot_delay)
        if printy:
            print(predict(x))
    if printw:
        for i in range(len(W)):
            print('W[%d]=\n' % i, W[i], '\n')
    return
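# A minimal, hedged training sketch for the three-layer train() above. It
# assumes the module-level W is a NumPy object array of three weight matrices
# with bias columns; the 2-4-4-1 shapes are illustrative, not documented
# defaults, and predict() is the module's own helper used by printy.
def _three_layer_xor_example():
    global W
    W = np.array([2 * np.random.rand(4, 3) - 1,   # layer 0: 2 inputs + bias
                  2 * np.random.rand(4, 5) - 1,   # layer 1: 4 hidden + bias
                  2 * np.random.rand(1, 5) - 1],  # layer 2: 4 hidden + bias
                 dtype=object)
    x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y = np.array([[0], [1], [1], [0]])
    train(x, y, iterations=2000, learningrate=0.5, printy=False)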