Example #1
    def doForwardPropagation(self, X, weights, biases):
        '''
        Computes the forward propagation of the input in the SMNN:

            Z{l+1} = W{l}*H{l} + B{l}
            H{l+1} = f(Z{l+1})

        where {l} and {l+1} denote layers,
        B is the bias matrix, the bias vector repeated column-wise once per sample,
        Z is the output matrix of the neurons before the activation function is applied,
        f(.) is the activation function,
        H is the output matrix of the neurons after the activation function is applied (H{1}=X).

        Arguments
        X           : data matrix in the form [input dim., number of samples]
        weights     : list of weight matrices of each layer
        biases      : list of bias vectors of each layer

        Returns
        outputs     : list of output matrices (z) of each layer (output of the neurons before the activation function)
        activities  : list of activation matrices (h) of each layer (output of the neurons after the activation function)
        '''
        assert self.isInitialized, 'ERROR:SMNN:doForwardPropagation: The instance is not properly initialized'

        # Defaulting to the stored parameters here would be bad practice:
        #if len(weights)==0 or len(biases)==0:
        #	[weights, biases] = self.unrollParameters(self.params);

        assert AuxFunctions.checkNetworkParameters(
            weights, self.weightPrototypes
        ), 'ERROR:SMNN:doForwardPropagation: weight dimension does not match the network topology'
        assert AuxFunctions.checkNetworkParameters(
            biases, self.biasPrototypes
        ), 'ERROR:SMNN:doForwardPropagation: bias dimension does not match the network topology'

        outputs = []
        activities = []
        for layer in range(self.nLayers - 1):

            # Input to this layer: the data matrix for the first layer,
            # the previous layer's activations otherwise
            if layer == 0:
                x = X
            else:
                x = activities[layer - 1]

            # Z{l+1} = W{l}*H{l} + B{l}; the bias vector is repeated
            # column-wise to cover all samples
            z = np.dot(weights[layer], x) + np.repeat(biases[layer], x.shape[1], 1)

            if self.activation_fun == SMNN_ACTIVATION_FUNCTIONS[SMNN_ACTFUN_SIGMOID]:
                h = AuxFunctions.sigmoid(z)
            else:
                # Should not be here
                print('ERROR:SMNN:doForwardPropagation: Wrong activation function')
                sys.exit()

            outputs.append(z)
            activities.append(h)

        return [outputs, activities]
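
For reference, the recurrence from the docstring can be tried outside the class. The sketch below is a minimal standalone version, assuming a sigmoid activation; the topology, data, and random parameters are made up for illustration, and NumPy broadcasting stands in for the np.repeat call above.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

rng = np.random.default_rng(0)
layer_sizes = [4, 5, 3]              # hypothetical topology: input, hidden, output
X = rng.standard_normal((4, 10))     # [input dim., number of samples]

weights = [rng.standard_normal((layer_sizes[l + 1], layer_sizes[l]))
           for l in range(len(layer_sizes) - 1)]
biases = [rng.standard_normal((layer_sizes[l + 1], 1))
          for l in range(len(layer_sizes) - 1)]

h = X                                # H{1} = X
for W, b in zip(weights, biases):
    z = np.dot(W, h) + b             # Z{l+1} = W{l}*H{l} + B{l}
    h = sigmoid(z)                   # H{l+1} = f(Z{l+1})

print(h.shape)                       # (3, 10): [output dim., number of samples]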
Example #2
import numpy as np
import scipy.signal

# Module constants assumed by this snippet: rows are axis 0, columns axis 1;
# AuxFunctions is a project module providing sigmoid
INDEX_X = 0
INDEX_Y = 1

def convolve(filterDim, numFilters, X, W, b):
    '''
    Returns the convolution of the features given by W and b with the given data X

    Arguments
    filterDim           : filter (feature) dimensions in the form (rows, cols)
    numFilters          : number of feature maps
    X                   : input data in the form images(r, c, image number)
    W                   : weights i.e. features, of shape (filterDim, filterDim, numFilters)
    b                   : biases, of shape (numFilters, 1)

    Returns
    convolvedFeatures   : matrix of convolved features in the form convolvedFeatures(imageRow, imageCol, featureNum, imageNum)
    '''
    inputDimX = X.shape[INDEX_X]
    inputDimY = X.shape[INDEX_Y]
    numData   = X.shape[2]

    # A 'valid' convolution shrinks each dimension by (filter size - 1)
    convDimX = inputDimX - filterDim[INDEX_X] + 1
    convDimY = inputDimY - filterDim[INDEX_Y] + 1

    convolvedFeatures = np.zeros([convDimX, convDimY, numFilters, numData])

    for i in range(numData):
        for filterNum in range(numFilters):

            # Obtain the feature needed during the convolution
            # ('filt' avoids shadowing the built-in filter)
            filt = W[:, :, filterNum]

            # Flip the feature matrix because of the definition of convolution
            filt = np.rot90(filt, 2)

            # Obtain data
            data = X[:, :, i]

            # Convolve 'filt' with 'data'; 'valid' keeps only full overlaps
            convolvedImage = scipy.signal.convolve2d(data, filt, mode='valid')

            # Add the bias unit, then apply the sigmoid function to get the
            # hidden activation
            convolvedImage = AuxFunctions.sigmoid(convolvedImage + b[filterNum])

            convolvedFeatures[:, :, filterNum, i] = convolvedImage

    return convolvedFeatures
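
As a quick shape check, the function can be called with random data. This is a sketch under stated assumptions: convolve is defined in the same script, a SimpleNamespace stands in for the project's AuxFunctions module, and the image and filter sizes are arbitrary.

import types
import numpy as np

# Hypothetical stand-in for the project's AuxFunctions module
AuxFunctions = types.SimpleNamespace(sigmoid=lambda z: 1.0 / (1.0 + np.exp(-z)))

rng = np.random.default_rng(0)
filterDim = (8, 8)                           # (rows, cols) of each feature
numFilters = 4
X = rng.random((28, 28, 5))                  # five 28x28 images
W = rng.standard_normal((filterDim[0], filterDim[1], numFilters))
b = rng.standard_normal((numFilters, 1))

feats = convolve(filterDim, numFilters, X, W, b)
# 'valid' convolution: 28 - 8 + 1 = 21 rows and columns per feature map
print(feats.shape)                           # (21, 21, 4, 5)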