def add_border_rectification(self, name, kernelsize, weights, inplace = True ):
    """Append a border-rectification layer and load its precomputed weights.

    name:       layer name.
    kernelsize: kernel size of the convolution the rectification corrects for.
    weights:    precomputed parameters (computed based on a convolution mask).
    inplace:    if True, the layer overwrites its input blob.
    """
    layer_args = (gpudm.LayerParameter(), kernelsize)
    self.add_layer(name, gpudm.BorderRectifyLayerFloat, layer_args, inplace=inplace)
    # weights were computed based on a convolution mask
    self.set_parameters({name: weights}, verbose=0)
def add_convolution(self, name, kernelsize, num_output, stride = 1, group = 1, pad = 0,
                    biasterm = True, weight_filler = None, bias_filler = None, weights=None):
    """Append a square convolution layer.

    name:        layer name.
    kernelsize:  kernel height and width (square kernel).
    num_output:  number of output channels.
    stride:      convolution stride (same in h and w); must be > 0.
    group:       number of filter groups.
    pad:         zero-padding (same in h and w).
    biasterm:    whether the layer has a bias term.
    weight_filler, bias_filler: filler-parameter dicts forwarded to
        set_filler_params (None means the default filler, i.e. an empty dict).
    weights:     if given, parameters loaded into the layer after creation.
    """
    # FIX: the former '{}' defaults were mutable default arguments shared
    # across calls; None sentinels are safe and backward compatible.
    if weight_filler is None: weight_filler = {}
    if bias_filler is None: bias_filler = {}
    cp = gpudm.ConvolutionParameter()
    cp.set_kernel_h(kernelsize)
    cp.set_kernel_w(kernelsize)
    cp.set_num_output(num_output)
    assert stride > 0, 'error: stride is 0 for ' + name
    cp.set_stride_h(stride)
    cp.set_stride_w(stride)
    cp.set_group(group)
    cp.set_pad_h(pad)
    cp.set_pad_w(pad)
    cp.set_bias_term(biasterm)
    self.set_filler_params(cp.mutable_weight_filler(), weight_filler)
    self.set_filler_params(cp.mutable_bias_filler(), bias_filler)
    lp = gpudm.LayerParameter()
    lp.set_allocated_convolution_param(cp)
    cp.this.disown()  # otherwise it will be freed 2 times
    self.add_layer(name, gpudm.ConvolutionLayerFloat, lp)
    # FIX: 'if weights:' raises ValueError for numpy arrays (ambiguous truth
    # value); test explicitly against None instead.
    if weights is not None:
        self.set_parameters({name: weights}, verbose=0)
def add_sparse_convolution(self, name, sp_pattern, use_sp_data = True, kernelsize = 1,
                           stride = 1, pad = 0, biasterm = False, weight_filler = None,
                           bias_filler = None, blobs_lr = None, weight_decays = None,
                           weights = None):
    """Append a sparse convolution layer whose connectivity follows sp_pattern.

    sp_pattern:  scipy sparse matrix (CSR or BSR); its first dimension gives
                 the number of output channels.
    use_sp_data: if True, the sparse matrix values are also passed to the
                 layer; otherwise only the sparsity structure is used.
    kernelsize, stride, pad, biasterm: usual convolution parameters.
    weight_filler, bias_filler: filler-parameter dicts (None -> empty dict).
    blobs_lr:    per-blob learning-rate multipliers (None -> [1.0, 2.0]).
    weight_decays: per-blob weight decays (None -> [1.0, 1.0]).
    weights:     if given, parameters loaded into the layer after creation.
    """
    from scipy import sparse
    # FIX: '{}' and list literals as defaults were mutable default arguments
    # shared across calls; build fresh objects per call instead.
    if weight_filler is None: weight_filler = {}
    if bias_filler is None: bias_filler = {}
    if blobs_lr is None: blobs_lr = [1.0, 2.0]
    if weight_decays is None: weight_decays = [1.0, 1.0]
    assert sparse.isspmatrix(sp_pattern)
    num_output = sp_pattern.shape[0]  # sparsity pattern is given in input
    cp = gpudm.ConvolutionParameter()
    cp.set_kernel_h(kernelsize)
    cp.set_kernel_w(kernelsize)
    cp.set_num_output(num_output)
    assert stride > 0, 'error: stride is 0 for ' + name
    cp.set_stride_h(stride)
    cp.set_stride_w(stride)
    cp.set_pad_h(pad)
    cp.set_pad_w(pad)
    cp.set_bias_term(biasterm)
    self.set_filler_params(cp.mutable_weight_filler(), weight_filler)
    self.set_filler_params(cp.mutable_bias_filler(), bias_filler)
    lp = gpudm.LayerParameter()
    lp.set_allocated_convolution_param(cp)
    cp.this.disown()  # otherwise it will be freed 2 times

    def arrToBlob(arr):
        # dirty function but simpler for now: flatten any array into a 1x1x1xN blob
        bb = gpudm.BlobFloat(1, 1, 1, arr.size)
        bb.mutable_to_numpy_ref().view(arr.dtype)[:] = arr.ravel()
        return bb

    if sparse.isspmatrix_csr(sp_pattern):
        sparsity_args = (sp_pattern.nnz,
                         arrToBlob(sp_pattern.indptr),
                         arrToBlob(sp_pattern.indices),
                         arrToBlob(sp_pattern.data) if use_sp_data else None)
        self.add_layer(name, gpudm.CSR_SparseConvolutionLayerFloat, lp)
    elif sparse.isspmatrix_bsr(sp_pattern):
        br, bc = sp_pattern.blocksize
        assert br == bc, "error: not implemented for non-square blocks"
        # FIX: '/' is float division on Python 3; the block count must be an
        # integer, so use floor division (nnz is a multiple of br*bc in BSR).
        sparsity_args = (sp_pattern.nnz // (br * bc), br,
                         arrToBlob(sp_pattern.indptr),
                         arrToBlob(sp_pattern.indices),
                         arrToBlob(sp_pattern.data) if use_sp_data else None)
        self.add_layer(name, gpudm.BSR_SparseConvolutionLayerFloat, lp)
    else:
        assert False, "This sparse matrix type is not implemented"
    # define sparsity pattern now
    self.layers[-1][1].SetSparsityPattern( *sparsity_args )
    self.layers[-1][1].blobs_lr = blobs_lr
    self.layers[-1][1].weight_decays = weight_decays
    # FIX: explicit None test ('if weights:' breaks on numpy arrays)
    if weights is not None:
        self.set_parameters({name: weights}, verbose=0)
def add_power_law(self, name, power, scale=1, shift=0, inplace=False):
    """Append a power-law layer computing (shift + scale * x) ** power."""
    # NOTE(review): the formula above follows the usual PowerParameter
    # convention — confirm against the gpudm implementation.
    params = gpudm.PowerParameter()
    params.set_power(power)
    params.set_scale(scale)
    params.set_shift(shift)
    layer_param = gpudm.LayerParameter()
    layer_param.set_allocated_power_param(params)
    # disown so the wrapped object is not freed twice
    params.this.disown()
    self.add_layer(name, gpudm.PowerLayerFloat, layer_param, inplace=inplace)
def add_patch_correlation(self, name, kernelsize = 1, pad = None, nghrad = -1, normalize_borders = 'dynamic' ):
    """Append a patch-correlation (patch convolution) layer.

    normalize_borders: 'dynamic'/'d', 'static'/'s', or 'none'.
    If nghrad >= 0 and pad is not given, pad defaults to nghrad.
    """
    if nghrad >= 0 and pad is None:
        pad = nghrad  # smart default
    # map the user-facing mode names onto the layer's mode codes
    norm_modes = {'dynamic': 'd', 'd': 'd', 'static': 's', 's': 's', 'none': 0}
    mode = norm_modes[normalize_borders]
    layer_param = gpudm.LayerParameter()
    self.add_layer(name, gpudm.PatchConvolutionLayerFloat,
                   (layer_param, kernelsize, pad, nghrad, mode))
def add_dm_argmax(self, name, shape, nlevels, nghrad, tag='pow%d', step=4):
    """Append a DeepMatching arg-max layer and attach the activation blobs
    of the matching pyramid levels to it.

    name:    layer name.
    shape:   (height, width) pair; shape[0] and shape[1] are passed to the layer.
    nlevels: expected total number of blobs attached to the layer (checked below).
    nghrad:  neighborhood radius forwarded to the layer.
    tag:     printf-style pattern naming the pyramid-level layers, e.g. 'pow%d'.
    step:    step parameter forwarded to the layer.
    """
    lp = gpudm.LayerParameter()
    self.add_layer(name, gpudm.DeepMatchingArgMaxLayerFloat, (lp, shape[0], shape[1], step, nghrad) )
    # append activation blobs of previous layers
    blobs = self.layers[-1][1].blobs()
    # NOTE: len(blobs) grows as we push_back, so each pass through the list
    # looks for the tag with the *next* index (e.g. 'pow1', then 'pow2', ...);
    # activation_blobs must therefore be ordered by level for this to work.
    for layer_name,activation_blob in self.activation_blobs:
        if layer_name == tag%len(blobs):
            blobs.push_back(activation_blob)
    # sanity check: every expected pyramid level was found and attached
    assert blobs.size() == nlevels
def add_reshape_layer(self, name, dims, inplace=True):
    """Append a reshape layer that views its input with the given dims."""
    blob_shape = gpudm.BlobShape()
    for dim in dims:
        blob_shape.add_dim(dim)
    reshape_param = gpudm.ReshapeParameter()
    reshape_param.set_allocated_shape(blob_shape)
    blob_shape.this.disown()  # avoid double free of the wrapped object
    layer_param = gpudm.LayerParameter()
    layer_param.set_allocated_reshape_param(reshape_param)
    reshape_param.this.disown()  # avoid double free of the wrapped object
    self.add_layer(name, gpudm.ReshapeLayerFloat, layer_param, inplace=inplace)
def add_pooling(self, name, kernelsize, stride=1, pool=MAX, pad=0):
    """Append a (square) pooling layer.

    kernelsize: kernel side, or 'full' to pool over the whole (square)
                previous activation map.
    pool:       pooling mode constant (default MAX).
    """
    if kernelsize == 'full':
        # global pooling: kernel covers the entire previous activation,
        # which must be square
        prev_shape = blob_shape(self.activation_blobs[-1][1])
        assert prev_shape[2] == prev_shape[3]
        kernelsize = prev_shape[-1]
    pooling_param = gpudm.PoolingParameter()
    pooling_param.set_kernel_h(kernelsize)
    pooling_param.set_kernel_w(kernelsize)
    pooling_param.set_stride_h(stride)
    pooling_param.set_stride_w(stride)
    pooling_param.set_pad_h(pad)
    pooling_param.set_pad_w(pad)
    pooling_param.set_pool(pool)
    layer_param = gpudm.LayerParameter()
    layer_param.set_allocated_pooling_param(pooling_param)
    pooling_param.this.disown()  # avoid double free of the wrapped object
    self.add_layer(name, gpudm.PoolingLayerFloat, layer_param)
def add_pixel_norm(self, name, norm=1.0, inplace=True):
    """Append a per-pixel normalization layer with the given norm value."""
    layer_param = gpudm.LayerParameter()
    self.add_layer(name, gpudm.PixelNormLayerFloat, (layer_param, norm),
                   inplace=inplace)
def add_rectified_sigmoid(self, name):
    """Append a rectified-sigmoid activation layer."""
    layer_param = gpudm.LayerParameter()
    self.add_layer(name, gpudm.RectifiedSigmoidLayerFloat, layer_param)
def add_relu(self, name, inplace = True ):
    """Append a ReLU activation layer (in-place by default)."""
    layer_param = gpudm.LayerParameter()
    self.add_layer(name, gpudm.ReLULayerFloat, layer_param, inplace=inplace)