def __init__(self, model_type='SparseFilter', weight_dims=(100, 256), layer_input=None,
                 p=None, group_size=None, step=None, lr=0.01, c='n', weights=None):
        
        """
        Builds a layer for the network by constructing a model. 
        
        Parameters: 
        ----------
        model_type : str
            The model type to build into a given layer. 
        weight_dims : list of tuples
            The dimensions of the weight matrices for each layer. 
            fully connected: [neurons x input_dim ^ 2]
            convolutional: [filters x dim x dim]
        layer_input : ndarray (symbolic Theano variable)
            The input to a given layer. 
        p : int
            The pooling size (assumed to be square).
        group_size : int
            The group size for group sparse filtering. 
        step : int
            The step size for group sparse filtering. 
        lr : int
            The learning rate for gradient descent. 
        """
        
        # assign network inputs to layer
        self.m = model_type
        self.weight_dims = weight_dims
        self.x = layer_input
        self.p = p    
        self.lr = lr
        self.c = c

        if weights is None:
            self.w = init_weights(weight_dims)
        else:
            self.w = weights
        
        # build model based on model_type
        self.model = None
        if model_type == 'SparseFilter':
            self.model = SparseFilter(self.w, self.x)
        elif model_type == 'ConvolutionalSF':
            self.model = ConvolutionalSF(self.w, self.x)
        elif model_type == 'GroupSF':
            self.g_mat = connections.gMatToroidal(self.weight_dims[0], group_size, step, centered='n')
            self.model = GroupSF(self.w, self.x, self.g_mat)
        elif model_type == 'MultiGroupSF':
            self.g_mat1 = connections.groupMat(self.weight_dims[0], group_size, step)
            self.g_mat2 = connections.groupMat(
                np.square(np.sqrt(self.weight_dims[0]) / 2),
                group_size, step,
            )
            self.model = MultiGroupSF(self.w, self.x, self.g_mat1, self.g_mat2)
        elif model_type == 'GroupConvolutionalSF':
            self.g_mat = connections.gMatToroidal(self.weight_dims[0], group_size, step, centered='n')
            self.model = GroupConvolutionalSF(self.w, self.x, self.g_mat)
        assert self.model is not None
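
A minimal usage sketch (assuming this __init__ belongs to a class named Layer, that Theano is installed, and that init_weights is defined in the same module); the symbolic input and dimensions below are illustrative:

import theano.tensor as t

x = t.fmatrix('x')                        # symbolic input: examples x input_dim
layer = Layer(model_type='SparseFilter',  # fully connected sparse filtering
              weight_dims=(100, 256),     # e.g. 100 neurons over 16x16-pixel patches
              layer_input=x,
              lr=0.01)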
Example #3
    # visualize the receptive fields of the first layer
    visualize.drawplots(weights['layer0'].T,
                        color='gray',
                        convolution=convolution,
                        pad=0,
                        examples=None,
                        channels=channels)

    # get activations of first layer and save in dictionary
    f_hat, _, _, _, _, _ = outputs[0](data)
    f_hats[model_type[0]] = f_hat

# project activations of both networks up using local connections
group_matrix = connections.gMatToroidal(n_filters,
                                        topographic_parameters[0],
                                        topographic_parameters[1],
                                        centered='n')
gf_hats = {}
for model in model_type_meta:
    model = model[0]
    gf_hats[model] = np.dot(f_hats[model].T, group_matrix)
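
    # Shape sketch (hypothetical sizes) of the projection above: each f_hat is stored
    # as filters x examples and the group matrix is square (filters x filters), so the
    # dot product yields examples x filters, i.e. one grouped activation per neuron.
    _demo_f_hat = np.random.rand(100, 50)                     # filters x examples
    _demo_g = (np.random.rand(100, 100) > .5).astype(float)   # stand-in for gMatToroidal output
    assert np.dot(_demo_f_hat.T, _demo_g).shape == (50, 100)  # examples x filters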

# evaluate the sparseness of the distributions
pl.figure(1)
bins = np.linspace(0, 1, 100)
pl.hist(gf_hats['SparseFilter'].flatten(),
        bins=bins,
        alpha=0.5,
        label='Sparse Filtering')
pl.hist(gf_hats['GroupSF'].flatten(),
        bins=bins,