Example #1
    def __init__(self, **kwargs):
        """Initializes an inner product layer.

        kwargs:
            num_output: the number of outputs.
            reg: the regularizer to be used to add regularization terms.
                Should be a sparse.base.Regularizer instance. Default None.
            filler: a filler to initialize the weights. Should be a
                sparse.base.Filler instance. Default None.
            bias_filler: a filler to initialize the bias. Should be a
                sparse.base.Filler instance. Default None.
            bias: if True, the inner product will contain a bias term.
                Default True.
        """
        base.Layer.__init__(self, **kwargs)
        self._num_output = self.spec.get('num_output', 0)
        if self._num_output <= 0:
            raise base.InvalidLayerError(
                'Incorrect or unspecified num_output for %s' % self.name)
        self._reg = self.spec.get('reg', None)
        self._filler = self.spec.get('filler', None)
        self._weight = base.Blob(filler=self._filler)
        self._has_bias = self.spec.get('bias', True)
        if self._has_bias:
            self._bias_filler = self.spec.get('bias_filler', None)
            self._bias = base.Blob(filler=self._bias_filler)
            self._param = [self._weight, self._bias]
        else:
            self._param = [self._weight]
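A minimal usage sketch (the class name core_layers.InnerProductLayer is an
assumption based on the naming in the other examples here):

layer = core_layers.InnerProductLayer(name='ip1', num_output=128)
# omitting num_output (or passing 0) raises base.InvalidLayerError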
Example #2
def whitened_images(dtype=np.float64):
    """Returns the whitened images provided in the Sparsenet website:
        http://redwood.berkeley.edu/bruno/sparsenet/
    The returned data will be in the shape (10,512,512,1) to fit
    the blob convension.
    """
    npzdata = np.load(os.path.join(_DATA_PATH, 'whitened_images.npz'))
    blob = base.Blob(npzdata['images'].shape, dtype)
    blob.data().flat = npzdata['images'].flat
    return blob
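A minimal usage sketch, assuming the function is exposed via the smalldata
module as in Example #7:

images = smalldata.whitened_images()
print(images.data().shape)  # (10, 512, 512, 1), per the blob convention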
Example #3
    def __init__(self, **kwargs):
        """Initializes a Dropout layer.

        kwargs:
            name: the layer name.
            ratio: the ratio to carry out dropout.
            debug_freeze: a debug flag. If set True, the mask will only
                be generated once when running. You should not use it for
                purposes other than gradient checking.
        """
        base.Layer.__init__(self, **kwargs)
        filler = fillers.DropoutFiller(ratio=self.spec['ratio'])
        self._mask = base.Blob(filler=filler)
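A minimal usage sketch (the class name core_layers.DropoutLayer is an
assumption based on the naming in the other examples):

dropout = core_layers.DropoutLayer(name='drop1', ratio=0.5)
# debug_freeze is left at its default; set it only for gradient checks.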
Example #4
    def initialize_status(self):
        """Initializes the status."""
        # we need to maintain the momentum history
        params = self._sparse_net.params()
        self._accum = [
            base.Blob(p.data().shape,
                      p.data().dtype) for p in params
        ]
        for accum in self._accum:
            accum_data = accum.data()
            accum_data[:] = self.spec['base_accum']
            # we initialize the diff as a buffer used in later computation.
            accum.init_diff()
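The accumulator initialized above typically backs an Adagrad-style update; a
numpy sketch of that rule follows (the actual solver step is not shown in this
snippet, so the exact formula is an assumption):

import numpy as np

def adagrad_step(data, diff, accum, base_lr):
    # grow the squared-gradient history, then scale the step by its sqrt
    accum += diff ** 2
    data -= base_lr * diff / np.sqrt(accum)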
Example #5
    def __init__(self, **kwargs):
        """Initializes the layer.
        
        kwargs:
            k, alpha, beta: as defined in the equation (see the sketch after
                this example).
            size: the local range.
        """
        base.Layer.__init__(self, **kwargs)
        self._k = self.spec['k']
        self._alpha = self.spec['alpha']
        self._beta = self.spec['beta']
        self._size = self.spec['size']
        self._scale = base.Blob()
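For reference, a numpy sketch of the response normalization equation these
parameters plug into (this standard LRN form is an assumption; the snippet
itself does not show the equation):

import numpy as np

def lrn_reference(data, k, alpha, beta, size):
    # data: (num, channels); each channel is normalized by a window of
    # `size` neighboring channels:
    #     out = x / (k + alpha / size * sum(x_j ** 2)) ** beta
    num, channels = data.shape
    scale = np.empty_like(data)
    for c in range(channels):
        start = max(0, c - size // 2)
        end = min(channels, start + size)
        scale[:, c] = k + alpha / size * (data[:, start:end] ** 2).sum(axis=1)
    return data / scale ** beta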
Example #6
    def _collect_params(self, realloc=False):
        """Collects the network parameters into a long vector."""
        params_list = self._sparse_net.params()
        if self._param is None or realloc:
            total_size = sum(p.data().size for p in params_list)
            dtype = max(p.data().dtype for p in params_list)
            self._param = base.Blob(shape=total_size, dtype=dtype)
            self._param.init_diff()
        current = 0
        collected_param = self._param.data()
        collected_diff = self._param.diff()
        for param in params_list:
            size = param.data().size
            collected_param[current:current + size] = param.data().flat
            # If we are running under MPI, we need to allreduce the diff.
            diff = param.diff()
            if mpi.SIZE > 1:
                part = collected_diff[current:current + size]
                part.shape = diff.shape
                mpi.COMM.Allreduce(diff, part)
            else:
                collected_diff[current:current + size] = diff.flat
            current += size
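A minimal numpy sketch of the flattening idea without MPI (the arrays are
illustrative stand-ins for the blobs' data):

import numpy as np

params = [np.ones((2, 3)), np.zeros(4)]
flat = np.empty(sum(p.size for p in params))
current = 0
for p in params:
    flat[current:current + p.size] = p.flat   # copy each array contiguously
    current += p.size
# flat now holds all parameters back to back, as _collect_params builds it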
Example #7
NUM_HIDDEN = 25
INIT_SCALE = np.sqrt(6. / (NUM_HIDDEN + PSIZE * PSIZE + 1))
MAXFUN = 500
np.random.seed(1701)

################################################
# Generating training data.
################################################

logging.getLogger().setLevel(logging.INFO)
logging.info('*** Get patches ***')
images = smalldata.whitened_images()
patch_extractor = core_layers.RandomPatchLayer(name='extractor',
                                               psize=PSIZE,
                                               factor=NUM_PATCHES / 10)
patches = base.Blob()
patch_extractor.forward([images], [patches])
logging.info('*** Patch stats: %s', str(patches.data().shape))
logging.info('*** Normalize patches ***')
patches_data = patches.data()
# subtract mean
patches_data -= patches_data.mean(axis=0)
std = patches_data.std()
np.clip(patches_data, -std * 3, std * 3, out=patches_data)
# We shrink the patch range a little, to [0.1, 0.9]
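# after clipping to [-3*std, 3*std], scaling by 0.4 / (3 * std) maps the data
# to [-0.4, 0.4]; adding 0.5 then shifts it into [0.1, 0.9].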
patches_data *= 0.4 / std / 3.
patches_data += 0.5
logging.info('*** Finished Patch Preparation ***')

#############################################
# Creating the decaf net for the autoencoder.
Example #8
    def __getstate__(self):
        """When pickling, we will remove the intermediate data."""
        self._scale = base.Blob()
        return self.__dict__
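A minimal sketch of the effect, assuming `layer` is an instance of this class:

import pickle

restored = pickle.loads(pickle.dumps(layer))
# restored._scale is a fresh, empty base.Blob: the cached scale data was
# dropped in __getstate__, keeping the pickled layer small.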
Example #9
    def __init__(self, **kwargs):
        base.LossLayer.__init__(self, **kwargs)
        self._prob = base.Blob()
Example #10
else:
    # EXECUTION
    file_name = net.layers['input-layer'].input_file.split('/')[-1].split('.')[0]
    net.load_from(NET_MODEL)
    net.layers['fc8']._num_output = 2
    fp = open('feat.%s'%(file_name), 'w')
    fp_rnorm1 = open('plibsvm_1.%s'%(file_name), 'w')
    fp_rnorm2 = open('plibsvm_2.%s'%(file_name), 'w')
    fp_conv3 = open('plibsvm_3.%s'%(file_name), 'w')
    fp_conv4 = open('plibsvm_4.%s'%(file_name), 'w')
    fp_conv5 = open('plibsvm_5.%s'%(file_name), 'w')
    fp_flatten = open('plibsvm_6.%s'%(file_name), 'w')

    for i in range(net.layers['input-layer'].get_num_images()):
        print "\nprocessing image %d"%(i)
        top = [base.Blob(), base.Blob()]
        
        feat = net.predict(output_blobs=['rnorm1_cudanet_out', 'rnorm2_cudanet_out', 'conv3_neuron_cudanet_out', 
                    'conv4_neuron_cudanet_out', 'conv5_neuron_cudanet_out', '_sparse_fc6_flatten_out', 'labels',])
        
        label = np.where(feat['labels'][0] == 1.)[0][0]
        fp_rnorm1.write('%d '%(label))
        fp_rnorm2.write('%d '%(label))
        fp_conv3.write('%d '%(label))
        fp_conv4.write('%d '%(label))
        fp_conv5.write('%d '%(label))
        fp_flatten.write('%d '%(label))
        
        #first layer
        cnt = 1
        for val in feat['rnorm1_cudanet_out'].reshape(
                (10, np.prod(feat['rnorm1_cudanet_out'].shape[1:])))[4]:
            # the original loop body is truncated here; writing features in
            # libsvm "index:value" format is an assumption based on the
            # plibsvm_* file names and the cnt counter above
            fp_rnorm1.write('%d:%f ' % (cnt, val))
            cnt += 1
Example #11
    def __init__(self, **kwargs):
        """Initializes the convolution layer. Strictly, this is a correlation
        layer since the kernels are not reversed spatially as in a classical
        convolution operation.

        kwargs:
            name: the name of the layer.
            num_kernels: the number of kernels.
            ksize: the kernel size. Kernels will be square shaped and have the
                same number of channels as the data.
            stride: the kernel stride.
            mode: 'valid', 'same', or 'full'.
            pad: if set, this value will override the mode, and the given
                pad size will be used. Default None.
            reg: the regularizer to be used to add regularization terms.
                Should be a sparse.base.Regularizer instance. Default None.
            filler: a filler to initialize the weights. Should be a
                sparse.base.Filler instance. Default None.
            has_bias: specifying if the convolution layer should have a
                bias term. Note that the same bias is going to be applied
                regardless of the location. Default True.
            bias_filler: a filler to initialize the bias. Should be a
                sparse.base.Filler instance. Default None.
            large_mem: if set True, the layer will consume a lot of memory by
                storing all the intermediate im2col results, but will speed up
                the backward operation. Default False.

        When computing convolutions, we will always start from the top left
        corner, and any rows/columns on the right and bottom sides that do not
        fit the stride will be discarded. To ensure that the 'same' mode
        returns results of the same size as the data, we require the kernel
        size to be an odd number in that mode.
        """
        base.Layer.__init__(self, **kwargs)
        # self.spec now holds the kwargs passed in
        self._num_kernels = self.spec['num_kernels']
        self._ksize = self.spec['ksize']
        self._stride = self.spec['stride']  # how far the kernel moves per step
        self._large_mem = self.spec.get('large_mem', False)
        self._reg = self.spec.get('reg', None)
        self._has_bias = self.spec.get('has_bias', True)
        if self._ksize <= 1:
            raise ValueError('Invalid kernel size. Kernel size should be > 1.')
        # since the im2col operation often creates large intermediate matrices,
        # we will process them in batches.
        self._padded = base.Blob()
        self._col = base.Blob()
        # set up the parameter
        self._kernels = base.Blob(filler=self.spec.get('filler', None))
        self._base_kernels = base.Blob(filler=self.spec.get('filler', None))

        if self._has_bias:
            self._bias = base.Blob(filler=self.spec.get('bias_filler', None))
            self._param = [self._kernels, self._bias]
        else:
            self._param = [self._kernels]
        self._pad_size = self.spec.get('pad', None)
        if self._pad_size is None:
            self._mode = self.spec['mode']
            if self._mode == 'same' and self._ksize % 2 == 0:
                raise ValueError(
                    'The "same" mode should have an odd kernel size.')
            if self._mode == 'valid':
                self._pad_size = 0
            elif self._mode == 'full':
                self._pad_size = self._ksize - 1
            elif self._mode == 'same':
                self._pad_size = int(self._ksize / 2)
            else:
                raise ValueError('Unknown mode: %s' % self._mode)
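A minimal usage sketch (the class name core_layers.ConvolutionLayer is an
assumption based on the naming in the other examples):

conv = core_layers.ConvolutionLayer(
    name='conv1', num_kernels=32, ksize=5, stride=1, mode='same')
# mode='same' with ksize=5 (odd, as required) resolves to pad = int(5 / 2) = 2,
# which preserves the spatial size at stride 1.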