Example no. 1
	def addConnections(self, connections):
		global delta, Wmin, Wmax, awe, generate
		self.connections = self.connections + connections
		for i, c in enumerate(connections):
			j = self.i + i  # global index of this connection across all calls
			# Feedforward weights for this connection
			self.weights.append(
				theano.shared(
					sp.csc_matrix(
					np.asarray(
					c.generateConnectionMatrix(self.o_shape, generate),
					dtype=self.input.dtype)), name='Wi_' + str(j)))
			# Constant upper bound on every weight
			self.Wmax.append(
				theano.shared(
					sp.csc_matrix(
					np.asarray(
					np.ones((sizeFromShape(c.i_shape), sizeFromShape(self.o_shape)))*Wmax,
					dtype=self.input.dtype)), name='WM_' + str(j)))
			# Constant lower bound on every weight
			self.Wmin.append(
				theano.shared(
					sp.csc_matrix(
					np.asarray(
					np.ones((sizeFromShape(c.i_shape), sizeFromShape(self.o_shape)))*Wmin,
					dtype=self.input.dtype)), name='Wm_' + str(j)))
			# yw
			# out: nx1
			# Wi: mxn
			# outT x WiT : 1xm
			self.yw.append(
				sparse.structured_dot(
					sparse.transpose(self.output),
					sparse.transpose(self.weights[j])))
			# x_yw
			# in: mx1
			self.x_yw.append(
				sparse.sub(
					sparse.transpose(c.input),
					self.yw[j]))
			
			print len(self.weights)
			print self.weights[j].type
			print self.weights[j].type.ndim
			print
			if self.weights:
				# Distances of the current weights from their bounds
				auxX = sparse.sub(self.Wmax[j], self.weights[j])  # Wmax - W
				auxY = sparse.sub(self.Wmin[j], self.weights[j])  # Wmin - W
				# Learning rate term; structured_pow(., 1) is an identity,
				# kept as a placeholder for a tunable exponent
				self.LR.append(delta*(
					sparse.sub(
						sparse.structured_pow(auxX, 1),
						sparse.structured_pow(auxY, 1))))
				self.xy.append(
					self.LR[j]*sparse.structured_dot(
						c.input,
						sparse.transpose(self.output)))
				self.AWW.append(
					awe*delta*sparse.structured_pow(auxX, 1)*self.weights[j])
		self.i += len(connections)  # advance the running connection count
		self.params[2] = self.weights
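
The shape bookkeeping in the comments above (out: nx1, Wi: mxn, outT x WiT: 1xm) is easy to verify with a small scipy sketch of the same algebra; all names below are illustrative and not part of the class:

import numpy as np
import scipy.sparse as sp

m, n = 6, 4                                  # m input units, n output units
out = sp.csc_matrix(np.random.rand(n, 1))    # out: n x 1
Wi = sp.csc_matrix(np.random.rand(m, n))     # Wi: m x n

yw = out.T.dot(Wi.T)                         # outT x WiT : 1 x m
x = sp.csc_matrix(np.random.rand(m, 1))      # in: m x 1
x_yw = x.T - yw                              # 1 x m, input minus back-projected output

print yw.shape    # (1, 6)
print x_yw.shape  # (1, 6)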
Example no. 2
    def propagate_thy_beliefs(self):
        '''
        Builds and returns a compiled theano function implementing the belief
        propagation algorithm from the paper mentioned in the comments above,
        together with the symbols (factors) it uses.

        Pseudocode:
            -> Create an empty theano vector whose definition will be iteratively changed.
            -> Call compile_message_node_to_factor from the o node of the head predicate.
            -> Let the functions recursively call each other.
            -> Collect their results. @TODO: decide the format; shall we use theano variables altogether or not?
            -> Return said results.
        '''

        # print "graph:bp: Starting belief propagation."
        equation = self._comiple_message_node_(self.head_predicate.o,
                                               "Fictional Label")
        symbols = self._comiple_message_symbols_node_(self.head_predicate.o,
                                                      "Fictional Label")

        # Define an empty sparse matrix to be used as the 'y' label (it will later contain n-hot information about desired entities)
        y = sparse.csr_dmatrix('y')

        # Do a softmax over the final BP Equation
        equation = sparse.structured_exp(equation)
        equation = sparse.row_scale(equation,
                                    1.0 / sparse.sp_sum(equation, axis=1))

        # Collect all the parameters (shared variables) found in the factors of this graph.
        # parameters is a list of relation matrices.
        parameters = [x.M for x in symbols]

        # Cross-entropy loss
        # loss = - y * T.log(equation) + (y - 1)*T.log(1-equation) # unregularized cross-entropy loss in theano
        a = sparse.mul(y, sparse.structured_log(equation))
        b = sparse.mul(
            sparse.structured_add(y, -1.0),
            sparse.structured_log(
                sparse.structured_add(sparse.neg(equation), 1.0)))  # log(1 - equation)
        loss = sparse.sub(b, a)

        # Unregularized loss
        loss_dense = sparse.dense_from_sparse(loss)
        cost = loss_dense.mean()
        # cost = sparse.sp_sum(loss, axis = 1)/float(ne)

        gradients = theano.grad(cost, parameters)

        # One plain gradient-descent step per parameter (learning rate 0.1)
        updated_matrices = [
            sparse.sub(parameters[i], 0.1 * gradients[i])
            for i in range(len(parameters))
        ]
        # updated_matrices = [sparse.sub(parameters[i], sparse.row_scale(gradients[i], 0.1)) for i in range(len(parameters))]
        # updated_matrices = [parameters[i] - 0.1 * gradients[i] for i in range(len(parameters))]

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        #  DEBUG
        # print "Equation: ", equation
        # print "Type of equation: ",type(equation)
        # print "Symbols: ", symbols
        # print "graph:bp: Belief propagation complete."

        # print "Parameters are"
        # for p in parameters:
        # 	print p," and the type is :",type(p)

        # print gradients
        # print "Updated Matrices are :", type(updated_matrices[0])

        # print colored(type(self.head_predicate.i.u),'red')

        # print "Inputs: \n"
        # print type(self.head_predicate.i.u)
        # print type(y)
        # print [ type(x) for x in parameters ]

        # raw_input("Verify Symbols and Gradients ")
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        function = theano.function(
            inputs=[self.head_predicate.i.u, y] +
            parameters,  # inputs are the head predicate's symbolic var, the label matrix y, and the relation matrices
            # inputs = [self.head_predicate.i.u, parameters[0]],
            # outputs = updated_matrices
            outputs=[equation] + updated_matrices  # outputs are the BP expression and the updated matrices
            # mode=theano.compile.MonitorMode(
            #               pre_func=self.inspect_inputs,
            #               post_func=self.inspect_outputs)
            # updates=tuple([(parameters[i], parameters[i] - 0.1 * gradients[i]) for i in range(len(parameters))])  # updates would apply the gradient steps in place
        )

        return function, symbols
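
The structured_exp / row_scale pair above implements a row-wise softmax over the stored non-zeros of the sparse equation. The same normalization can be checked with plain scipy; every name here is local to the sketch:

import numpy as np
import scipy.sparse as sp

eq = sp.csr_matrix(np.random.rand(3, 5))         # stand-in for the BP equation output
expd = eq.copy()
expd.data = np.exp(expd.data)                    # structured_exp: exp over non-zeros only
row_sums = np.asarray(expd.sum(axis=1)).ravel()
normalized = sp.diags(1.0 / row_sums).dot(expd)  # row_scale: each row now sums to 1

print normalized.sum(axis=1)

Note that the "structured" ops only touch explicitly stored entries, so true zeros stay zero instead of becoming exp(0) = 1; that is what keeps the computation sparse.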
Example no. 3
	def __init__(self, input, filter_shape, sigma, i_shape, o_shape, Wi=False, Wr=False):
		global generate
		# Mean neuron density ~80k/mm^3 in V2 (Skoglund 1996)
		# Synapse lengths follow a power law
		# Interareal feedback synapse lengths are ~10-40mm; feedforward ones are similar, but with fewer connections
		# Synapse lengths are found by Sholl analysis.
		# Should compare RF data with Van den Bergh 2010

		# Initialize weights as a shared variable
		#n_col=input.shape[1]

		try:
			if generate:
				np.load('asd')  # deliberately nonexistent file: raises IOError so new weights are generated
			else:
				Wi = np.load(i_file)
				print '[info] Weights loaded from file!'
				print 'Shape = ' + str(Wi.shape)
		except IOError:
			print "[info] Weights file wasn't found. Generating new connections"
			kern1 = gkern2(filter_shape, sigma)
			Wi = kernel2connection(kern1, i_shape, o_shape)
			#Wi /= np.sum(Wi,1).reshape((Wi.shape[0],1))*15
			print 'Shape = ' + str(Wi.shape)
			np.save(i_file, Wi)

		try:
			if generate:
				np.load('asd')  # same trick: force regeneration of the recurrent weights
			else:
				Wr = np.load(r_file)
				print '[info] Weights loaded from file!'
		except IOError:
			print "[info] Weights file wasn't found. Generating new connections"
			kern2 = gkern2(filter_shape, sigma)
			Wr = kernel2connection(kern2, o_shape, o_shape)
			#Wr /= np.sum(Wr,1)
			np.save(r_file, Wr)

		# Row-normalize the connection matrices unless they are already normalized
		if np.sum(Wi, 1)[0] != 1:
			Wi /= np.sum(Wi, 1).reshape((Wi.shape[0], 1))*5
		if np.sum(Wr, 1)[0] != 1:
			Wr /= np.sum(Wr, 1).reshape((Wr.shape[0], 1))
		# Debug: inspect the column/row sums and one receptive field
		print np.sum(Wi, 0)
		print np.sum(Wi, 1)
		plt.plot(Wi[1, :])
		plt.show()


		self.Wi= theano.shared( 
				sp.csc_matrix(
				np.asarray( 
				Wi, 
				dtype=input.dtype) ), name ='Wi')
		self.Wr = theano.shared( 
				sp.csc_matrix(
				np.asarray( 
				Wr, 
				dtype=input.dtype) ), name ='Wr')
		# Internal state of the layer: one unit per output neuron, initialized to zeros
		self.state = theano.shared(
			sp.csc_matrix(
			np.asarray(
			np.zeros((o_shape[0]*o_shape[1], 1)),
			dtype=input.dtype)), name='St')

		self.input = input

		# I could do the same with biases if needed
		#print self.input.get_value().shape
		#print self.Wi.get_value().shape

		# Output of the layer: the sigmoid of the weighted input, initialized to zeros
		self.output = theano.shared(
			sp.csc_matrix(
			np.asarray(
			np.zeros((o_shape[0]*o_shape[1], 1)),
			dtype=input.dtype)), name='Out')
		#sparse.structured_sigmoid(sparse.structured_dot(self.input, self.Wi))  #T.dot(self.input, self.Wi))
		# input = external + recursive (from layer)
		# self.input = T.dot(input, self.Wi) #+ T.sum(T.dot(self.state,self.Wr),1)

		# out: nx1
		# Wi: mxn
		# outT x WiT : 1xm
		self.yw = sparse.structured_dot(
						sparse.transpose(self.output),
						sparse.transpose(self.Wi))
		# in: mx1
		self.x_yw = sparse.sub(
						sparse.transpose(self.input),
						self.yw)


		# optional: self.output = T.nnet.sigmoid(conv_out+self.output)
		self.params = [self.Wi, self.Wr, self.state, self.output]
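
gkern2 and kernel2connection are project helpers that are not shown in this example. A hypothetical minimal stand-in, which unrolls a 2-D Gaussian kernel into a dense (inputs x outputs) connection matrix for equally sized grids, could look like the sketch below; it is purely illustrative, not the project's actual implementation:

import numpy as np

def gkern2_sketch(shape, sigma):
    # 2-D Gaussian kernel, normalized to sum to 1 (illustrative stand-in for gkern2)
    ys = np.arange(shape[0]) - shape[0] // 2
    xs = np.arange(shape[1]) - shape[1] // 2
    k = np.exp(-(ys[:, None]**2 + xs[None, :]**2) / (2.0 * sigma**2))
    return k / k.sum()

def kernel2connection_sketch(kern, i_shape, o_shape):
    # One row per input unit, one column per output unit: each output unit
    # sees the kernel centred on its (row, col) position in the input grid.
    n_in = i_shape[0] * i_shape[1]
    n_out = o_shape[0] * o_shape[1]
    W = np.zeros((n_in, n_out))
    kh, kw = kern.shape
    for o in range(n_out):
        oy, ox = divmod(o, o_shape[1])
        for dy in range(kh):
            for dx in range(kw):
                iy = oy + dy - kh // 2
                ix = ox + dx - kw // 2
                if 0 <= iy < i_shape[0] and 0 <= ix < i_shape[1]:
                    W[iy * i_shape[1] + ix, o] = kern[dy, dx]
    return W

W = kernel2connection_sketch(gkern2_sketch((3, 3), 1.0), (5, 5), (5, 5))
print W.shape  # (25, 25)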
Example no. 4
def sparse_mean_squared_error(y_true, y_pred):
    _assert_sparse_module()
    # Elementwise squared difference, averaged over the last axis
    return sparse_mean(
        th_sparse_module.sqr(th_sparse_module.sub(y_true, y_pred)),
        axis=-1)
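
Here th_sparse_module and sparse_mean come from the surrounding backend module and are not shown. Assuming th_sparse_module is theano.sparse, the same loss can be built and evaluated directly; a minimal sketch:

import numpy as np
import scipy.sparse as sp
import theano
import theano.sparse as ts

y_true = ts.csr_matrix('y_true', dtype='float64')
y_pred = ts.csr_matrix('y_pred', dtype='float64')

# Squared difference, densified so we can take a plain mean along the last axis
mse = ts.dense_from_sparse(ts.sqr(ts.sub(y_true, y_pred))).mean(axis=-1)
f = theano.function([y_true, y_pred], mse)

a = sp.csr_matrix(np.array([[1.0, 0.0], [0.0, 2.0]]))
b = sp.csr_matrix(np.array([[0.5, 0.0], [0.0, 1.0]]))
print f(a, b)  # per-row mean squared error -> [ 0.125  0.5 ]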