Code example #1
    def feedforward(self, s_inst, s_trans):

        # Instantaneous (regular) branch
        y_r = act.sigmoid(s_inst, self.V_r)
        if self.H_r != 0:
            y_h_r = act.sigmoid(y_r, self.W_r)
        else:
            y_h_r = None

        # Transient (memory) branch: leaky accumulation of past inputs
        y_m, self.cumulative_memory = act.sigmoid_acc_leaky(
            s_trans, self.V_m, self.cumulative_memory, self.memory_leak)
        if self.H_m != 0:
            y_h_m = act.sigmoid(y_m, self.W_m)
        else:
            y_h_m = None

        # Build the output-layer input from whichever hidden layers are present
        if self.H_r != 0 and self.H_m != 0:
            y_tot = np.concatenate((y_h_r, y_h_m), axis=1)
            W_tot = np.concatenate((self.W_h_r, self.W_h_m), axis=0)
        elif self.H_r == 0 and self.H_m != 0:
            y_tot = np.concatenate((y_r, y_h_m), axis=1)
            W_tot = np.concatenate((self.W_r, self.W_h_m), axis=0)
        elif self.H_r != 0 and self.H_m == 0:
            y_tot = np.concatenate((y_h_r, y_m), axis=1)
            W_tot = np.concatenate((self.W_h_r, self.W_m), axis=0)
        else:
            y_tot = np.concatenate((y_r, y_m), axis=1)
            W_tot = np.concatenate((self.W_r, self.W_m), axis=0)
        #print(y_tot)
        Q = act.linear(y_tot, W_tot)

        return y_r, y_m, Q, y_h_r, y_h_m
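All of these excerpts call into a shared activation module `act` that is not reproduced here. As a point of reference, the following is a minimal sketch of what `sigmoid`, `sigmoidal`, `linear`, and `sigmoid_acc_leaky` might look like, inferred purely from how they are called above; the actual definitions (in particular the exact leak/gate form of `sigmoid_acc_leaky`, and the `softmax`/`hard_sigmoid` gates used in later examples) may differ.

import numpy as np

def sigmoidal(x):
    # Element-wise logistic function
    return 1.0 / (1.0 + np.exp(-x))

def sigmoid(s, V):
    # Sigmoid units driven by input row vector s through weight matrix V
    return sigmoidal(np.dot(s, V))

def linear(y, W):
    # Linear read-out of activities y through weights W
    return np.dot(y, W)

def sigmoid_acc_leaky(s, V, cum, leak, gate=1.0):
    # Leaky accumulation of the gated, weighted input followed by a sigmoid
    # read-out; 'cum' is the running memory trace (assumed form).
    cum = leak * cum + gate * np.dot(s, V)
    return sigmoidal(cum), cum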
Code example #2
    def feedforward(self, s_inst, s_trans):

        y_r = act.sigmoid(s_inst, self.V_r)

        g = act.softmax(s_inst, self.W_g, self.g_strength, self.level_bias)
        g = np.transpose(g)
        l_sel = self.select_level(g)

        y_m = np.zeros((self.L, 1, self.M))
        for l in np.arange(self.L):
            if l == l_sel:
                # Only the selected level integrates the new transient input
                y_m[l, :, :], self.cumulative_memory[l, :, :] = act.sigmoid_acc_leaky(
                    s_trans, self.V_m[l, :, :], self.cumulative_memory[l, :, :],
                    self.LEAK[l, 0, 0], g[l, 0])
            else:
                # The other levels simply decay according to their leak constant
                self.cumulative_memory[l, :, :] *= self.LEAK[l, 0, 0]
                y_m[l, :, :] = act.sigmoidal(self.cumulative_memory[l, :, :])
            print('\t\t\t\t MEMORY_LEVEL ', l, '\t ', y_m[l, :, :])

        # Hidden-layer input: accumulate the contributions of all memory levels
        inp_h = np.zeros((1, self.H))
        for l in np.arange(self.L):
            inp_h += act.linear(y_m[l, :, :], self.W_m[l, :, :])
        y_h = act.sigmoidal(inp_h)

        Q = act.linear(y_r, self.W_r) + act.linear(y_h, self.W_h)

        return y_r, y_m, y_h, g, l_sel, Q
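Code example #2 also relies on `self.select_level(g)`, whose definition is not shown. Since `g` is a softmax distribution over the L memory levels, one plausible reading is an argmax (or a sample) over the gate values; the sketch below is a hypothetical stand-in for illustration only.

import numpy as np

def select_level(g, greedy=True, rng=None):
    # g: gate values over the L memory levels, shape (L, 1), summing to 1
    # Hypothetical rule: pick the argmax, or sample a level with probability g
    p = g[:, 0]
    if greedy:
        return int(np.argmax(p))
    rng = rng or np.random.default_rng()
    return int(rng.choice(len(p), p=p))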
Code example #3
	def feedforward(self,s_inst,s_trans):

		# Instantaneous branch and leaky-memory branch, read out jointly below
		y_r = act.sigmoid(s_inst, self.V_r)
		y_m,self.cumulative_memory = act.sigmoid_acc_leaky(s_trans, self.V_m, self.cumulative_memory,self.memory_leak)

		y_tot = np.concatenate((y_r, y_m),axis=1)
		W_tot = np.concatenate((self.W_r, self.W_m),axis=0)
		Q = act.linear(y_tot, W_tot)

		return y_r, y_m, Q
Code example #4
    def feedforward(self, s_inst, s_trans):

        y_r = act.sigmoid(s_inst, self.V_r)
        g = act.softmax(s_inst, self.W_g, self.g_strength)
        g = np.transpose(g)

        y_m = 1e-6 * np.ones((self.L, 1, self.M))
        Q = act.linear(y_r, self.W_r)
        for l in np.arange(self.L):
            # The leak is tied to the gate (leak = 1 - g): a fully open gate
            # overwrites the old memory content, a closed gate preserves it
            y_m[l, :, :], self.memory_content[l, :, :] = act.sigmoid_acc_leaky(
                s_trans, self.V_m[l, :, :], self.memory_content[l, :, :],
                1 - g[l, 0], g[l, 0])
            Q += act.linear(y_m[l, :, :], self.W_m[l, :, :])
            #print('\t MEM STATE ',str(l),':', y_m[l,:,:],'\t alpha=',self.ALPHA[l,0,0],'\t gate=',g[l,:],'\t forget=',f[l,:])

        return y_r, y_m, g, Q
Code example #5
    def feedforward(self, s_inst, s_trans):

        g = act.hard_sigmoid(s_inst, self.W_g, self.g_strength)
        g = np.transpose(g)
        #g = np.ones((self.L,1))

        y_m = 1e-6 * np.ones((self.L, 1, self.M))
        inp_r = 1e-6 * np.ones((self.L, 1, self.R))
        for l in np.arange(self.L):
            y_m[l, :, :], self.memory_content[l, :, :] = act.sigmoid_acc_leaky(
                s_trans, self.V_m[l, :, :], self.memory_content[l, :, :],
                self.memory_leak, g[l, 0])
            inp_r[l, :, :] = act.linear(y_m[l, :, :], self.W_m[l, :, :])
            #print('\t MEM STATE ',str(l),':', y_m[l,:,:],'\t alpha=',self.ALPHA[l,0,0],'\t gate=',g[l,:])

        inp_r = np.sum(inp_r, axis=0)
        inp_r += act.linear(s_inst, self.V_r)
        y_r = act.sigmoidal(inp_r)
        #print(y_r)

        Q = act.linear(y_r, self.W_r)

        return y_r, y_m, g, Q
Code example #6
	def feedforward(self,s_inst,s_trans):

		y_r = act.sigmoid(s_inst, self.V_r)
		if self.H_r!=0:		
			y_h_r = act.sigmoid(y_r,self.W_r)
			if self.perc_active!=1:
				max_ind = np.argsort(-np.abs(y_h_r))[0,:self.num_active_reg]
				y_h_r_filtered = np.zeros(np.shape(y_h_r))
				y_h_r_filtered[0,max_ind] = y_h_r[0,max_ind]
			else:
				y_h_r_filtered = y_h_r
		else:		
			y_h_r_filtered = None

		y_m,self.cumulative_memory = act.sigmoid_acc_leaky(s_trans, self.V_m, self.cumulative_memory,self.memory_leak)
		if self.H_m!=0:		
			y_h_m = act.sigmoid(y_m,self.W_m)
			if self.perc_active!=1:
				max_ind = np.argsort(-np.abs(y_h_m))[0,:self.num_active_mem]
				y_h_m_filtered = np.zeros(np.shape(y_h_m))
				y_h_m_filtered[0,max_ind] = y_h_m[0,max_ind]
			else:
				y_h_m_filtered = y_h_m
		else:		
			y_h_m_filtered = None

		if self.H_r!=0 and self.H_m!=0:
			y_tot = np.concatenate((y_h_r, y_h_m),axis=1)
			W_tot = np.concatenate((self.W_h_r, self.W_h_m),axis=0)
		elif self.H_r==0 and self.H_m!=0:
			y_tot = np.concatenate((y_r, y_h_m),axis=1)
			W_tot = np.concatenate((self.W_r, self.W_h_m),axis=0)
		elif self.H_r!=0 and self.H_m==0:
			y_tot = np.concatenate((y_h_r, y_m),axis=1)
			W_tot = np.concatenate((self.W_h_r, self.W_m),axis=0)
		else:
			y_tot = np.concatenate((y_r, y_m),axis=1)
			W_tot = np.concatenate((self.W_r, self.W_m),axis=0)
		Q = act.linear(y_tot, W_tot)


		return y_r, y_m, Q, y_h_r_filtered, y_h_m_filtered 
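Code example #6 sparsifies the hidden layers by keeping only the `num_active_reg` / `num_active_mem` units with the largest absolute activation and zeroing the rest. The same masking step, isolated as a small standalone function (the name `keep_top_k` is illustrative, not from the original code):

import numpy as np

def keep_top_k(y, k):
    # Zero out all but the k entries of the row vector y with the
    # largest absolute value, as in the perc_active filtering above.
    top = np.argsort(-np.abs(y))[0, :k]
    filtered = np.zeros(np.shape(y))
    filtered[0, top] = y[0, top]
    return filtered

# Example: keep the 2 strongest of 5 hidden activations
y = np.array([[0.1, -0.8, 0.3, 0.9, -0.2]])
print(keep_top_k(y, 2))   # -> [[ 0.  -0.8  0.   0.9  0. ]]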
Code example #7
    def feedforward(self, s_inst, s_trans):

        g_strength = 4

        y_r = act.sigmoid(s_inst, self.V_r)
        g = act.hard_sigmoid(s_inst, self.W_g, g_strength)
        g = np.transpose(g)

        y_m = 1e-6 * np.ones((self.L, 1, self.M))
        Q = act.linear(y_r, self.W_r)
        for l in np.arange(self.L):
            y_m[l, :, :], self.cumulative_memory[l, :, :] = act.sigmoid_acc_leaky(
                s_trans, self.V_m[l, :, :], self.cumulative_memory[l, :, :],
                self.LEAK[l, 0, 0], g[l, 0])
            Q += act.linear(y_m[l, :, :], self.W_m[l, :, :])
            print('\t MEM STATE ', str(l), ':', y_m[l, :, :], '\t gate=',
                  g[l, :], '\t alpha=', self.ALPHA[l, 0, 0], '\t leak=',
                  self.LEAK[l, 0, 0])

        return y_r, y_m, g, Q
Code example #8
    def feedforward(self, s_inst, s_trans):

        y_r = act.sigmoid(s_inst, self.V_r)

        g = act.softmax(s_inst, self.W_g, self.g_strength)
        g = np.transpose(g)
        #g = np.ones((self.L,1))

        y_m = np.zeros((self.L, 1, self.M))
        inp_h = np.zeros((1, self.H))
        for l in np.arange(self.L):
            y_m[l, :, :], self.cumulative_memory[l, :, :] = act.sigmoid_acc_leaky(
                s_trans, self.V_m[l, :, :], self.cumulative_memory[l, :, :],
                self.LEAK[l, 0, 0], g[l, 0])
            inp_h += act.linear(y_m[l, :, :], self.W_m[l, :, :])
            #print('\t MEM STATE ',str(l),':', y_m[l,:,:],'\t alpha=',self.ALPHA[l,0,0],'\t gate=',g[l,:],'\t forget=',f[l,:])
        y_h = act.sigmoidal(inp_h)

        Q = act.linear(y_r, self.W_r) + act.linear(y_h, self.W_h)

        return y_r, y_m, y_h, g, Q
Code example #9
	def feedforward(self,s_inst,s_trans):

		g_strength = 3
		f_strength = 3

		y_r = act.sigmoid(s_inst, self.V_r)
		g = act.softmax(s_inst,self.W_g, g_strength)
		g = np.transpose(g)
		f = act.hard_sigmoid(s_inst,self.W_f, f_strength)
		f = np.transpose(f)
		y_m = 1e-6*np.ones((self.L,1,self.M))
		Q = act.linear(y_r, self.W_r)
		for l in np.arange(self.L):
			y_m[l,:,:], self.memory_content[l,:,:] = act.sigmoid_acc_leaky(s_trans, self.V_m[l,:,:], self.memory_content[l,:,:],f[l,0],g[l,0])
			Q += act.linear(y_m[l,:,:], self.W_m[l,:,:])
			#print('\t MEM STATE ',str(l),':', y_m[l,:,:],'\t alpha=',self.ALPHA[l,0,0],'\t gate=',g[l,:],'\t forget=',f[l,:])

		return y_r, y_m, g, f, Q

######### TRAINING + TEST FOR CPT TASKS LIKE 12AX

	def training(self,N_ep,p_c,conv_criterion='strong',stop=True,verbose=False):

		self.initialize_weights_and_tags()

		zero = np.zeros((1,self.S))
		E = np.zeros(N_ep)

		correct = 0
Code example #10
	def update_memory(self, s_trans):

		# Leaky accumulation of the transient input into the running memory trace
		y_m,self.cumulative_memory = act.sigmoid_acc_leaky(s_trans, self.memory_weights, self.cumulative_memory, self.memory_leak)

		return y_m
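Code example #10 exposes the leaky memory update as a stand-alone step. A hypothetical usage sketch, with toy dimensions and the same assumed form of `sigmoid_acc_leaky` as in the sketch after code example #1, shows how it would be driven once per time step over a sequence of transient inputs:

import numpy as np

def sigmoid_acc_leaky(s, V, cum, leak, gate=1.0):
    # Same assumed form as in the sketch after code example #1
    cum = leak * cum + gate * np.dot(s, V)
    return 1.0 / (1.0 + np.exp(-cum)), cum

rng = np.random.default_rng(0)
V_m = rng.normal(size=(4, 3))    # stands in for self.memory_weights
cum = np.zeros((1, 3))           # stands in for self.cumulative_memory
leak = 0.7                       # stands in for self.memory_leak

# Drive the memory with a short sequence of transient inputs,
# exactly as update_memory would be called once per time step
for t in range(5):
    s_trans = rng.integers(0, 2, size=(1, 4)).astype(float)
    y_m, cum = sigmoid_acc_leaky(s_trans, V_m, cum, leak)
    print(t, np.round(y_m, 3))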