def residual_block(m, a, b, leakiness=0.01, dimensions=2):
    """
    Append a residual block to a Sequential module: produce the output of the
    [identity, 3x3+3x3] branches, then add them together.

    inputs
    ------
    m [scn.Sequential]: network to add layers to
    a [int]: number of input channels
    b [int]: number of output channels
    leakiness [float]: leakiness of ReLU activations
    dimensions [int]: dimensions of input sparse tensor

    modifies
    --------
    m: adds layers
    """
    m.add(scn.ConcatTable()
          .add(scn.Identity() if a == b else scn.NetworkInNetwork(a, b, False))
          .add(scn.Sequential()
               .add(scn.BatchNormLeakyReLU(a, leakiness=leakiness))
               .add(scn.SubmanifoldConvolution(dimensions, a, b, 3, False))
               .add(scn.BatchNormLeakyReLU(b, leakiness=leakiness))
               .add(scn.SubmanifoldConvolution(dimensions, b, b, 3, False)))
          ).add(scn.AddTable())
def U(nPlanes, n_input_planes=-1):  # Recursive function
    m = scn.Sequential()
    for i in range(reps):
        block(m, n_input_planes if n_input_planes != -1 else nPlanes[0],
              nPlanes[0])
        n_input_planes = -1
    if len(nPlanes) > 1:
        m.add(scn.ConcatTable()
              .add(scn.Identity())
              .add(scn.Sequential()
                   .add(scn.BatchNormLeakyReLU(nPlanes[0], leakiness=leakiness))
                   .add(scn.Convolution(dimension, nPlanes[0], nPlanes[1],
                                        downsample[0], downsample[1], False))
                   .add(U(nPlanes[1:]))
                   .add(scn.BatchNormLeakyReLU(nPlanes[1], leakiness=leakiness))
                   .add(scn.Deconvolution(dimension, nPlanes[1], nPlanes[0],
                                          downsample[0], downsample[1], False))))
        m.add(scn.JoinTable())
        for i in range(reps):
            block(m, nPlanes[0] * (2 if i == 0 else 1), nPlanes[0])
    return m
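# --- Usage sketch (added for clarity, not from the original source) ---------
# The recursive U above closes over `reps`, `block`, `leakiness`, `dimension`
# and `downsample`. The bindings below are assumptions chosen only to show how
# the helper is typically driven; `residual_block` is the helper defined above.
import sparseconvnet as scn

dimension = 3
reps = 2
leakiness = 0.0
downsample = [2, 2]  # [filter size, filter stride]

def block(m, a, b):
    residual_block(m, a, b, leakiness=leakiness, dimensions=dimension)

# Build a three-level U-Net trunk with 16 -> 32 -> 64 features per level.
unet_trunk = U([16, 32, 64])
print(unet_trunk)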
def residual_block(m, ninputchs, noutputchs, leakiness=0.01, dimensions=2):
    """
    Residual Module Block

    Intended to be appended to a Sequential module (m): produces the output of
    the [identity, 3x3+3x3] branches, then adds the two paths together.

    inputs
    ------
    m [scn.Sequential]: network to add layers to
    ninputchs [int]: number of input channels
    noutputchs [int]: number of output channels
    leakiness [float]: leakiness of ReLU activations
    dimensions [int]: dimensions of input sparse tensor

    modifies
    --------
    m: adds layers
    """
    inoutsame = ninputchs == noutputchs
    m.add(scn.ConcatTable()
          .add(scn.Identity() if inoutsame
               else scn.NetworkInNetwork(ninputchs, noutputchs, False))
          .add(scn.Sequential()
               .add(scn.BatchNormLeakyReLU(ninputchs, leakiness=leakiness))
               .add(scn.SubmanifoldConvolution(dimensions, ninputchs,
                                               noutputchs, 3, False))
               .add(scn.BatchNormLeakyReLU(noutputchs, leakiness=leakiness))
               .add(scn.SubmanifoldConvolution(dimensions, noutputchs,
                                               noutputchs, 3, False)))
          ).add(scn.AddTable())
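# --- Usage sketch (added for clarity, not from the original source) ---------
# Minimal end-to-end example of the residual block above. It assumes the
# sparseconvnet package is installed and follows the convention used in the
# other snippets that the last coordinate column is the batch index; the sizes
# and coordinates are made up for illustration.
import torch
import sparseconvnet as scn

dimensions = 2
spatial_size = 32
nfeatures = 8

net = scn.Sequential()
net.add(scn.InputLayer(dimensions, spatial_size, mode=3))
net.add(scn.SubmanifoldConvolution(dimensions, 1, nfeatures, 3, False))
residual_block(net, nfeatures, nfeatures, leakiness=0.01, dimensions=dimensions)
net.add(scn.OutputLayer(dimensions))

# Five active sites in a single-sample batch: (row, col, batch_index).
coords = torch.LongTensor([[0, 0, 0], [1, 2, 0], [3, 3, 0], [10, 7, 0], [31, 31, 0]])
feats = torch.randn(5, 1)

out = net([coords, feats])
print(out.shape)  # expected: torch.Size([5, 8]), one feature vector per active site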
def block(m, a, b):
    if residual_blocks:  # ResNet style blocks
        m.add(scn.ConcatTable()
              .add(scn.Identity() if a == b
                   else scn.NetworkInNetwork(a, b, False))
              .add(scn.Sequential()
                   .add(scn.BatchNormLeakyReLU(
                       a, momentum=bn_momentum, leakiness=leakiness,
                       track_running_stats=track_running_stats))
                   .add(scn.SubmanifoldConvolution(dimension, a, b, 3, False))
                   .add(scn.BatchNormLeakyReLU(
                       b, momentum=bn_momentum, leakiness=leakiness,
                       track_running_stats=track_running_stats))
                   .add(scn.SubmanifoldConvolution(dimension, b, b, 3, False)))
              ).add(scn.AddTable())
    else:  # VGG style blocks
        m.add(scn.Sequential()
              .add(scn.BatchNormLeakyReLU(
                  a, momentum=bn_momentum, leakiness=leakiness,
                  track_running_stats=track_running_stats))
              .add(scn.SubmanifoldConvolution(dimension, a, b, 3, False)))
    operation = {'kernel': [1, 1, 1], 'stride': [1, 1, 1]}
    return operation
def block(self, n_in, n_out):
    m = scn.Sequential()
    if self.residual_blocks:  # ResNet style blocks
        m.add(scn.ConcatTable()
              .add(scn.Identity() if n_in == n_out
                   else scn.NetworkInNetwork(n_in, n_out, False))
              .add(scn.Sequential()
                   .add(scn.BatchNormLeakyReLU(n_in, leakiness=self.leakiness))
                   .add(scn.SubmanifoldConvolution(
                       self.dimension, n_in, n_out, 3, False))
                   .add(scn.BatchNormLeakyReLU(n_out, leakiness=self.leakiness))
                   .add(scn.SubmanifoldConvolution(
                       self.dimension, n_out, n_out, 3, False))))
        m.add(scn.AddTable())
    else:  # VGG style blocks
        m.add(scn.BatchNormLeakyReLU(n_in, leakiness=self.leakiness))
        m.add(scn.SubmanifoldConvolution(self.dimension, n_in, n_out, 3, False))
    return m
def __init__(self, cfg, name='yresnet_decoder'):
    super(YResNetDecoder, self).__init__(cfg, name='network_base')
    self.model_config = cfg[name]
    self.reps = self.model_config.get('reps', 2)  # Conv block repetition factor
    self.kernel_size = self.model_config.get('kernel_size', 2)
    self.num_strides = self.model_config.get('num_strides', 5)
    self.num_filters = self.model_config.get('filters', 16)
    self.nPlanes = [
        i * self.num_filters for i in range(1, self.num_strides + 1)
    ]
    self.downsample = [self.kernel_size, 2]  # [filter size, filter stride]
    self.concat = scn.JoinTable()
    self.add = scn.AddTable()
    dropout_prob = self.model_config.get('dropout_prob', 0.5)

    self.encoder_num_filters = self.model_config.get('encoder_num_filters', None)
    if self.encoder_num_filters is None:
        self.encoder_num_filters = self.num_filters
    self.encoder_nPlanes = [
        i * self.encoder_num_filters for i in range(1, self.num_strides + 1)
    ]

    # Define Sparse YResNet Decoder.
    self.decoding_block = scn.Sequential()
    self.decoding_conv = scn.Sequential()
    for idx, i in enumerate(list(range(self.num_strides - 2, -1, -1))):
        if idx == 0:
            m = scn.Sequential().add(
                scn.BatchNormLeakyReLU(self.encoder_nPlanes[i + 1],
                                       leakiness=self.leakiness)).add(
                scn.Deconvolution(self.dimension, self.encoder_nPlanes[i + 1],
                                  self.nPlanes[i], self.downsample[0],
                                  self.downsample[1], self.allow_bias))
        else:
            m = scn.Sequential().add(
                scn.BatchNormLeakyReLU(self.nPlanes[i + 1],
                                       leakiness=self.leakiness)).add(
                scn.Deconvolution(self.dimension, self.nPlanes[i + 1],
                                  self.nPlanes[i], self.downsample[0],
                                  self.downsample[1], self.allow_bias)).add(
                scn.Dropout(p=dropout_prob))
        self.decoding_conv.add(m)
        m = scn.Sequential()
        for j in range(self.reps):
            self._resnet_block(
                m,
                self.nPlanes[i] + (self.encoder_nPlanes[i] if j == 0 else 0),
                self.nPlanes[i])
        self.decoding_block.add(m)
def block(m, a, b):  # ResNet style blocks
    m.add(scn.ConcatTable()
          .add(scn.Identity() if a == b else scn.NetworkInNetwork(a, b, False))
          .add(scn.Sequential()
               .add(scn.BatchNormLeakyReLU(a, leakiness=leakiness))
               .add(scn.SubmanifoldConvolution(self._dimension, a, b, 3, False))
               .add(scn.BatchNormLeakyReLU(b, leakiness=leakiness))
               .add(scn.SubmanifoldConvolution(self._dimension, b, b, 3, False)))
          ).add(scn.AddTable())
def iter_unet(self, n_input_planes):
    # Iterative version; differs from the scn implementation, which is recursive.
    enc_convs = scn.Sequential()
    dec_convs = scn.Sequential()
    for n_planes_in, n_planes_out in zip(self.n_planes[:-1], self.n_planes[1:]):
        # encode
        conv1x1 = scn.Sequential()
        for i in range(self.block_reps):
            conv1x1.add(
                self.block(
                    n_input_planes if n_input_planes != -1 else n_planes_in,
                    n_planes_in))
            n_input_planes = -1

        conv = scn.Sequential()
        conv.add(scn.BatchNormLeakyReLU(n_planes_in, leakiness=self.leakiness))
        conv.add(scn.Convolution(self.dimension, n_planes_in, n_planes_out,
                                 self.downsample[0], self.downsample[1], False))

        enc_conv = scn.Sequential()
        enc_conv.add(conv1x1)
        enc_conv.add(conv)
        enc_convs.add(enc_conv)

        # decode (corresponding stage of encode; symmetric with U)
        b_join = scn.Sequential()  # before_join
        b_join.add(scn.BatchNormLeakyReLU(n_planes_out, leakiness=self.leakiness))
        b_join.add(scn.Deconvolution(self.dimension, n_planes_out, n_planes_in,
                                     self.downsample[0], self.downsample[1],
                                     False))

        join_table = scn.JoinTable()

        a_join = scn.Sequential()  # after_join
        for i in range(self.block_reps):
            a_join.add(self.block(n_planes_in * (2 if i == 0 else 1),
                                  n_planes_in))

        dec_conv = scn.Sequential()
        dec_conv.add(b_join)
        dec_conv.add(join_table)
        dec_conv.add(a_join)
        dec_convs.add(dec_conv)

    middle_conv = scn.Sequential()
    for i in range(self.block_reps):
        middle_conv.add(
            self.block(
                n_input_planes if n_input_planes != -1 else self.n_planes[-1],
                self.n_planes[-1]))
        n_input_planes = -1

    return enc_convs, middle_conv, dec_convs
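# --- Forward-pass sketch (added for clarity, not from the original class) ----
# One plausible way to consume the three Sequentials returned by iter_unet.
# It assumes they are stored as self.enc_convs, self.middle_conv and
# self.dec_convs, and that scn.Sequential supports integer indexing like
# torch.nn.Sequential.
def forward_unet(self, x):
    skips = []
    for enc in self.enc_convs:
        x = enc[0](x)              # conv1x1: blocks at this resolution
        skips.append(x)            # keep pre-downsample features for the skip
        x = enc[1](x)              # BN + LeakyReLU, then strided Convolution
    x = self.middle_conv(x)
    # Decode the deepest level first, joining matching encoder features back in.
    for dec, skip in zip(reversed(list(self.dec_convs)), reversed(skips)):
        x = dec[0](x)              # b_join: BN + LeakyReLU, then Deconvolution
        x = dec[1]([x, skip])      # JoinTable concatenates decoder and skip features
        x = dec[2](x)              # a_join: blocks after the join
    return x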
def block(self, m, a, b, dimension=3, residual_blocks=False, leakiness=0):
    # default using residual_block
    if residual_blocks:  # ResNet style blocks
        m.add(scn.ConcatTable()
              .add(scn.Identity() if a == b else scn.NetworkInNetwork(a, b, False))
              .add(scn.Sequential()
                   .add(scn.BatchNormLeakyReLU(a, leakiness=leakiness))
                   .add(scn.SubmanifoldConvolution(dimension, a, b, 3, False))
                   .add(scn.BatchNormLeakyReLU(b, leakiness=leakiness))
                   .add(scn.SubmanifoldConvolution(dimension, b, b, 3, False)))
              ).add(scn.AddTable())
    else:  # VGG style blocks
        m.add(scn.Sequential()
              .add(scn.BatchNormLeakyReLU(a, leakiness=leakiness))
              .add(scn.SubmanifoldConvolution(dimension, a, b, 3, False)))
def foo(m, np):
    for _ in range(reps):
        if residual:  # ResNet style blocks
            m.add(scn.ConcatTable()
                  .add(scn.Identity())
                  .add(scn.Sequential()
                       .add(scn.BatchNormLeakyReLU(np, leakiness=leakiness))
                       .add(scn.SubmanifoldConvolution(dimension, np, np, 3, False))
                       .add(scn.BatchNormLeakyReLU(np, leakiness=leakiness))
                       .add(scn.SubmanifoldConvolution(dimension, np, np, 3, False)))
                  ).add(scn.AddTable())
        else:  # VGG style blocks
            m.add(scn.BatchNormLeakyReLU(np, leakiness=leakiness)
                  ).add(scn.SubmanifoldConvolution(dimension, np, np, 3, False))
def __init__(self, inplanes, outplanes, batch_norm, leaky_relu):
    nn.Module.__init__(self)
    self.batch_norm = batch_norm
    self.leaky_relu = leaky_relu

    self.conv1 = scn.SubmanifoldConvolution(dimension=3,
                                            nIn=inplanes,
                                            nOut=outplanes,
                                            filter_size=3,
                                            bias=False)
    if self.batch_norm:
        if self.leaky_relu:
            self.bn1 = scn.BatchNormLeakyReLU(outplanes)
        else:
            self.bn1 = scn.BatchNormReLU(outplanes)

    self.conv2 = scn.SubmanifoldConvolution(dimension=3,
                                            nIn=outplanes,
                                            nOut=outplanes,
                                            filter_size=3,
                                            bias=False)
    if self.batch_norm:
        self.bn2 = scn.BatchNormalization(outplanes)

    self.residual = scn.Identity()

    if self.leaky_relu:
        self.relu = scn.LeakyReLU()
    else:
        self.relu = scn.ReLU()

    self.add = scn.AddTable()
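# --- Forward-pass sketch (added for clarity; the original forward is not
# shown in this snippet). A plausible wiring of the layers created above:
def forward(self, x):
    residual = self.residual(x)
    out = self.conv1(x)
    out = self.bn1(out) if self.batch_norm else self.relu(out)
    out = self.conv2(out)
    if self.batch_norm:
        out = self.bn2(out)          # plain BatchNormalization, no activation
    out = self.add([out, residual])  # AddTable sums the two branches
    return self.relu(out)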
def block(self, m, a, b, dimension=3, residual_blocks=False, leakiness=0,
          kernel_size=3, use_batch_norm=True):
    # default using residual_block
    if use_batch_norm:
        Activation = lambda channels: scn.BatchNormLeakyReLU(
            channels, leakiness=leakiness)
    else:
        Activation = lambda channels: scn.LeakyReLU(leakiness)

    if residual_blocks:  # ResNet style blocks
        m.add(scn.ConcatTable()
              .add(scn.Identity() if a == b else scn.NetworkInNetwork(a, b, False))
              .add(scn.Sequential()
                   .add(Activation(a))
                   .add(scn.SubmanifoldConvolution(dimension, a, b,
                                                   kernel_size, False))
                   .add(Activation(b))
                   .add(scn.SubmanifoldConvolution(dimension, b, b,
                                                   kernel_size, False)))
              ).add(scn.AddTable())
    else:  # VGG style blocks
        m.add(scn.Sequential()
              .add(Activation(a))
              .add(scn.SubmanifoldConvolution(dimension, a, b, kernel_size, False)))
def make_encoder_layer(self, ninputchs, noutputchs, nreps,
                       leakiness=0.01, downsample=[2, 2]):
    """
    inputs
    ------
    ninputchs [int]: number of features going into the layer
    noutputchs [int]: number of features output by the layer
    nreps [int]: number of times the residual modules are repeated
    leakiness [float]: leakiness of the LeakyReLU layers
    downsample [length-2 list of int]: stride in the [height, width] dims

    outputs
    -------
    scn.Sequential module with resnet and downsampling layers
    """
    encode_blocks = create_resnet_layer(nreps, ninputchs, noutputchs,
                                        downsample=downsample)
    if downsample is not None:
        # if we specify a downsize factor for each dimension, we apply
        # it to the output of the residual layers
        encode_blocks.add(
            scn.BatchNormLeakyReLU(noutputchs, leakiness=leakiness))
        encode_blocks.add(
            scn.Convolution(self.dimension, noutputchs, noutputchs,
                            downsample[0], downsample[1], False))
    return encode_blocks
def __init__(self, cfg, name='yresnet_encoder'):
    super(YResNetEncoder, self).__init__(cfg, name='network_base')
    self.model_config = cfg[name]

    # YResNet Configurations
    self.reps = self.model_config.get('reps', 2)  # Conv block repetition factor
    self.kernel_size = self.model_config.get('kernel_size', 2)
    self.num_strides = self.model_config.get('num_strides', 5)
    self.num_filters = self.model_config.get('filters', 16)
    self.nPlanes = [
        i * self.num_filters for i in range(1, self.num_strides + 1)
    ]
    # [filter size, filter stride]
    self.downsample = [self.kernel_size, 2]
    dropout_prob = self.model_config.get('dropout_prob', 0.5)

    # Define Sparse YResNet Encoder
    self.encoding_block = scn.Sequential()
    self.encoding_conv = scn.Sequential()
    for i in range(self.num_strides):
        m = scn.Sequential()
        for _ in range(self.reps):
            self._resnet_block(m, self.nPlanes[i], self.nPlanes[i])
        self.encoding_block.add(m)
        m = scn.Sequential()
        if i < self.num_strides - 1:
            m.add(scn.BatchNormLeakyReLU(self.nPlanes[i],
                                         leakiness=self.leakiness)).add(
                scn.Convolution(self.dimension, self.nPlanes[i],
                                self.nPlanes[i + 1], self.downsample[0],
                                self.downsample[1], self.allow_bias)).add(
                scn.Dropout(p=dropout_prob))
        self.encoding_conv.add(m)
def __init__(self):
    super(CNN, self).__init__()

    ###############################
    # Hardcoded settings
    ###############################
    self._dimension = 3
    reps = 2
    kernel_size = 2
    num_strides = 7
    init_num_features = 8
    nInputFeatures = 1
    spatial_size = 128  # padding the rest for 169 PMTs
    num_classes = 2  # good versus ghost
    nPlanes = [(2**i) * init_num_features for i in range(0, num_strides)
               ]  # every layer doubles the number of features
    downsample = [kernel_size, 2]
    leakiness = 0

    #################################
    # Input layer
    #################################
    self.input = scn.Sequential().add(
        scn.InputLayer(self._dimension, spatial_size, mode=3)).add(
        scn.SubmanifoldConvolution(self._dimension, nInputFeatures,
                                   init_num_features, 3, False))  # Kernel size 3, no bias
    self.concat = scn.JoinTable()

    #################################
    # Encode layers
    #################################
    self.encoding_conv = scn.Sequential()
    for i in range(num_strides):
        if i < 4:  # hardcoded
            self.encoding_conv.add(
                scn.BatchNormLeakyReLU(nPlanes[i], leakiness=leakiness)).add(
                scn.Convolution(self._dimension, nPlanes[i], nPlanes[i + 1],
                                downsample[0], downsample[1], False))
        elif i < num_strides - 1:
            self.encoding_conv.add(scn.MaxPooling(self._dimension, 2, 2))

    self.output = scn.Sequential().add(
        scn.SparseToDense(self._dimension, nPlanes[-1]))

    ###################################
    # Final linear layer
    ###################################
    self.deepest_layer_num_features = int(
        nPlanes[-1] * np.power(spatial_size / (2**(num_strides - 1)), 3.))
    self.classifier = torch.nn.Sequential(
        torch.nn.ReLU(),
        torch.nn.Linear(self.deepest_layer_num_features, 2),
    )
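# --- Forward-pass sketch (added for clarity; the original forward is not
# shown here). Assumes each input row is laid out as (x, y, z, batch_id, value),
# as in the other snippets in this collection.
def forward(self, point_cloud):
    coords = point_cloud[:, :self._dimension + 1].long()   # (x, y, z, batch_id)
    features = point_cloud[:, self._dimension + 1:].float()
    x = self.input((coords, features))
    x = self.encoding_conv(x)      # strided convolutions, then max pooling
    x = self.output(x)             # SparseToDense -> (B, C, D, H, W)
    x = x.view(x.size(0), -1)      # flatten to deepest_layer_num_features
    return self.classifier(x)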
def _block(self, module, a, b, kernel=3):
    '''
    Utility method for attaching 2 x (BatchNorm + SubmanifoldConvolution) blocks.

    INPUTS:
        - module (scn Module): network module to attach the block to.
        - a (int): number of input feature dimensions
        - b (int): number of output feature dimensions

    RETURNS:
        None (operation is in-place)
    '''
    module.add(scn.Sequential()
               .add(scn.BatchNormLeakyReLU(a, leakiness=self.leakiness))
               .add(scn.SubmanifoldConvolution(self.dimension, a, b, kernel,
                                               self.allow_bias))
               .add(scn.BatchNormLeakyReLU(b, leakiness=self.leakiness))
               .add(scn.SubmanifoldConvolution(self.dimension, b, b, kernel,
                                               self.allow_bias)))
def _resnet_block(self, module, a, b):
    '''
    Utility Method for attaching ResNet-Style Blocks.

    INPUTS:
        - module (scn Module): network module to attach ResNet block to.
        - a (int): number of input feature dimensions
        - b (int): number of output feature dimensions

    RETURNS:
        None (operation is in-place)
    '''
    module.add(scn.ConcatTable()
               .add(scn.Identity() if a == b
                    else scn.NetworkInNetwork(a, b, self.allow_bias))
               .add(scn.Sequential()
                    .add(scn.BatchNormLeakyReLU(a, leakiness=self.leakiness))
                    .add(scn.SubmanifoldConvolution(self.dimension, a, b, 3,
                                                    self.allow_bias))
                    .add(scn.BatchNormLeakyReLU(b, leakiness=self.leakiness))
                    .add(scn.SubmanifoldConvolution(self.dimension, b, b, 3,
                                                    self.allow_bias)))
               ).add(scn.AddTable())
def _nin_block(self, module, a, b):
    '''
    Utility method for attaching feature-dimension-reducing
    BN + NetworkInNetwork blocks.

    INPUTS:
        - module (scn Module): network module to attach the block to.
        - a (int): number of input feature dimensions
        - b (int): number of output feature dimensions

    RETURNS:
        None (operation is in-place)
    '''
    module.add(scn.Sequential()
               .add(scn.BatchNormLeakyReLU(a, leakiness=self.leakiness))
               .add(scn.NetworkInNetwork(a, b, self.allow_bias)))
def baz(nPlanes):
    m = scn.Sequential()
    foo(m, nPlanes[0])
    if len(nPlanes) == 1:
        bar(m, nPlanes[0], True)
    else:
        a = scn.Sequential()
        bar(a, nPlanes, False)
        b = scn.Sequential(
            scn.BatchNormLeakyReLU(nPlanes[0], leakiness=leakiness),
            scn.Convolution(dimension, nPlanes[0], nPlanes[1],
                            downsample[0], downsample[1], False),
            baz(nPlanes[1:]),
            scn.UnPooling(dimension, downsample[0], downsample[1]))
        m.add(scn.ConcatTable(a, b))
        m.add(scn.AddTable())
    # return the assembled module so the recursive call above can embed it
    return m
def make_decoder_layer(self, ilayer, ninputchs, noutputchs, nreps,
                       leakiness=0.01, downsample=[2, 2], islast=False):
    """
    Defines two layers:
      1) the deconv layer pre-concat
      2) residual blocks post-concat

    inputs
    ------
    ilayer [int]: layer ID
    ninputchs [int]: number of features going into the layer
    noutputchs [int]: number of features output by the layer
    nreps [int]: number of times the residual modules should repeat
    leakiness [float]: leakiness of the LeakyReLU activation functions
    downsample [list of 2 ints]: upsampling factor in width and height
    islast [bool]: the last decoder layer does not have a skip connection
    """
    # resnet block
    decode_blocks = create_resnet_layer(nreps, ninputchs, 2 * noutputchs,
                                        downsample=downsample)

    # deconv
    decode_blocks.add(
        scn.BatchNormLeakyReLU(2 * noutputchs, leakiness=leakiness))
    decode_blocks.add(
        scn.Deconvolution(self.dimension, 2 * noutputchs, noutputchs,
                          downsample[0], downsample[1], False))
    setattr(self, "deconv%d" % (ilayer), decode_blocks)
    if self._verbose:
        print("DecoderLayer[", ilayer, "] inputchs[", ninputchs,
              " -> resout[", 2 * noutputchs, "] -> deconv output[",
              noutputchs, "]")

    if not islast:
        # joiner for skip connections
        joiner = scn.JoinTable()
        setattr(self, "skipjoin%d" % (ilayer), joiner)
    else:
        joiner = None
    return decode_blocks, joiner
def up(m, nPlane_in, nPlane_uped, scale):
    # print(f'up, scale={scale}, feature={nPlane_in}->{nPlane_uped}, '
    #       f'kernel={self.down_kernels[scale]}, stride={self.down_strides[scale]}')
    m.add(scn.BatchNormLeakyReLU(
        nPlane_in, momentum=bn_momentum, leakiness=leakiness,
        track_running_stats=track_running_stats)).add(
        scn.Deconvolution(dimension, nPlane_in, nPlane_uped,
                          self.down_kernels[scale], self.down_strides[scale],
                          False))
    operation = {
        'kernel': self.down_kernels[scale],
        'stride': self.down_strides[scale]
    }
    return operation
def __init__(self):
    super(Model, self).__init__()
    self.inputLayer = scn.InputLayer(dimension, spatial_size=512, mode=3)
    self.initialconv = scn.SubmanifoldConvolution(dimension, nPlanes, 64, 7, False)
    self.residual = scn.Identity()
    self.add = scn.AddTable()

    self.sparsebl11 = scn.Sequential().add(
        scn.SubmanifoldConvolution(dimension, 64, 64, 3, False)).add(
        scn.BatchNormLeakyReLU(64)).add(
        scn.SubmanifoldConvolution(dimension, 64, 64, 3, False))
    self.sparsebl12 = scn.Sequential().add(
        scn.SubmanifoldConvolution(dimension, 64, 64, 3, False)).add(
        scn.BatchNormLeakyReLU(64)).add(
        scn.SubmanifoldConvolution(dimension, 64, 64, 3, False))
    self.sparsebl21 = scn.Sequential().add(
        scn.SubmanifoldConvolution(dimension, 128, 128, 3, False)).add(
        scn.BatchNormLeakyReLU(128)).add(
        scn.SubmanifoldConvolution(dimension, 128, 128, 3, False))
    self.sparsebl22 = scn.Sequential().add(
        scn.SubmanifoldConvolution(dimension, 128, 128, 3, False)).add(
        scn.BatchNormLeakyReLU(128)).add(
        scn.SubmanifoldConvolution(dimension, 128, 128, 3, False))

    self.relu1 = scn.LeakyReLU(64)
    self.relu2 = scn.LeakyReLU(128)

    self.downsample1 = scn.Sequential().add(
        scn.Convolution(dimension, 64, 64, [2, 2, 2], [2, 2, 2], False)).add(
        scn.BatchNormLeakyReLU(64))
    self.downsample2 = scn.Sequential().add(
        scn.Convolution(dimension, 64, 128, [2, 2, 2], [2, 2, 2], False)).add(
        scn.BatchNormLeakyReLU(128))
    self.downsample3 = scn.Sequential().add(
        scn.Convolution(dimension, 128, 64, [4, 4, 4], [4, 4, 4], False)).add(
        scn.BatchNormLeakyReLU(64))
    self.downsample4 = scn.Sequential().add(
        scn.Convolution(dimension, 64, 2, [4, 4, 4], [4, 4, 4], False)).add(
        scn.BatchNormLeakyReLU(2))

    self.sparsetodense = scn.SparseToDense(dimension, 2)
    self.dropout1 = nn.Dropout(0.5)
    self.dropout2 = nn.Dropout(0.5)
    self.linear2 = nn.Linear(2 * 8 * 8 * 8, 2)
    self.linear3 = nn.Linear(2, 1)
def make_decoder_layer(self, ilayer, ninputchs, noutputchs, nreps,
                       leakiness=0.01, downsample=[2, 2], islast=False):
    """
    Defines two layers:
      1) the deconv layer pre-concat
      2) residual blocks post-concat

    inputs
    ------
    ninputchs: number of features going into the layer
    noutputchs: number of features output by the layer
    """
    # resnet block
    decode_blocks = create_resnet_layer(nreps, ninputchs, 2 * noutputchs,
                                        downsample=downsample)

    # deconv
    decode_blocks.add(
        scn.BatchNormLeakyReLU(2 * noutputchs, leakiness=leakiness))
    decode_blocks.add(
        scn.Deconvolution(self.dimension, 2 * noutputchs, noutputchs,
                          downsample[0], downsample[1], False))
    setattr(self, "deconv%d" % (ilayer), decode_blocks)
    if self._verbose:
        print("DecoderLayer[", ilayer, "] inputchs[", ninputchs,
              " -> resout[", 2 * noutputchs, "] -> deconv output[",
              noutputchs, "]")

    if not islast:
        # joiner for skip connections
        joiner = scn.JoinTable()
        setattr(self, "skipjoin%d" % (ilayer), joiner)
    else:
        joiner = None
    return decode_blocks, joiner
def __init__(self, inplanes, outplanes, batch_norm, leaky_relu, nplanes=1):
    nn.Module.__init__(self)
    self.batch_norm = batch_norm
    self.leaky_relu = leaky_relu

    self.conv1 = scn.SubmanifoldConvolution(dimension=3,
                                            nIn=inplanes,
                                            nOut=outplanes,
                                            filter_size=[nplanes, 3, 3],
                                            bias=False)
    if self.batch_norm:
        if self.leaky_relu:
            self.bn1 = scn.BatchNormLeakyReLU(outplanes)
        else:
            self.bn1 = scn.BatchNormReLU(outplanes)
    else:
        if self.leaky_relu:
            self.relu = scn.LeakyReLU()
        else:
            self.relu = scn.ReLU()
def get_batchnorm_leaky_relu(num_dims, sparse, input_channels, *,
                             eps=1e-4, momentum=0.9, leakiness=0):
    stride = np.full(num_dims, 1)
    if sparse:
        if leakiness:
            layer = scn.BatchNormLeakyReLU(input_channels, eps, momentum,
                                           leakiness)
        else:
            layer = scn.BatchNormReLU(input_channels, eps, momentum)
    else:
        batchnorm_class = get_dense_batchnorm_class(num_dims)
        batchnorm = batchnorm_class(input_channels, eps, momentum)
        if leakiness:
            relu = nn.LeakyReLU(leakiness, inplace=True)
        else:
            relu = nn.ReLU(inplace=True)
        layer = nn.Sequential(batchnorm, relu)
    return sparse, stride, input_channels, layer
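# --- Usage sketch (added for clarity, not from the original source) ---------
# Assumes the companion get_dense_batchnorm_class helper returns, e.g.,
# torch.nn.BatchNorm3d when num_dims == 3; the channel counts are illustrative.
_, stride, channels, sparse_layer = get_batchnorm_leaky_relu(
    num_dims=3, sparse=True, input_channels=32, leakiness=0.01)
print(sparse_layer)   # scn.BatchNormLeakyReLU(32, eps=1e-4, momentum=0.9, leakiness=0.01)

_, _, _, dense_layer = get_batchnorm_leaky_relu(
    num_dims=3, sparse=False, input_channels=32, leakiness=0.01)
print(dense_layer)    # nn.Sequential(BatchNorm3d(32, ...), LeakyReLU(0.01))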
def __init__(self, cfg):
    super(PPNUResNet, self).__init__()
    import sparseconvnet as scn

    self._model_config = cfg['modules']['uresnet_ppn_type']
    self._dimension = self._model_config.get('data_dim', 3)
    nInputFeatures = self._model_config.get('features', 1)
    spatial_size = self._model_config.get('spatial_size', 512)
    num_classes = self._model_config.get('num_classes', 5)
    m = self._model_config.get('filters', 16)  # UNet number of features
    num_strides = self._model_config.get('num_strides', 5)

    reps = 2  # Conv block repetition factor
    kernel_size = 2  # Use input_spatial_size method for other values?
    nPlanes = [i * m for i in range(1, num_strides + 1)
               ]  # UNet number of features per level
    # nPlanes = [(2**i) * m for i in range(1, num_strides+1)]  # UNet number of features per level
    downsample = [kernel_size, 2]  # downsample = [filter size, filter stride]
    self.last = None
    leakiness = 0

    def block(m, a, b):  # ResNet style blocks
        m.add(scn.ConcatTable()
              .add(scn.Identity() if a == b else scn.NetworkInNetwork(a, b, False))
              .add(scn.Sequential()
                   .add(scn.BatchNormLeakyReLU(a, leakiness=leakiness))
                   .add(scn.SubmanifoldConvolution(self._dimension, a, b, 3, False))
                   .add(scn.BatchNormLeakyReLU(b, leakiness=leakiness))
                   .add(scn.SubmanifoldConvolution(self._dimension, b, b, 3, False)))
              ).add(scn.AddTable())

    self.input = scn.Sequential().add(
        scn.InputLayer(self._dimension, spatial_size, mode=3)).add(
        scn.SubmanifoldConvolution(self._dimension, nInputFeatures, m, 3,
                                   False))  # Kernel size 3, no bias
    self.concat = scn.JoinTable()

    # Encoding
    self.bn = scn.BatchNormLeakyReLU(nPlanes[0], leakiness=leakiness)
    # self.encoding = []
    self.encoding_block = scn.Sequential()
    self.encoding_conv = scn.Sequential()
    module = scn.Sequential()
    for i in range(num_strides):
        module = scn.Sequential()
        for _ in range(reps):
            block(module, nPlanes[i], nPlanes[i])
        self.encoding_block.add(module)
        module2 = scn.Sequential()
        if i < num_strides - 1:
            module2.add(scn.BatchNormLeakyReLU(nPlanes[i],
                                               leakiness=leakiness)).add(
                scn.Convolution(self._dimension, nPlanes[i], nPlanes[i + 1],
                                downsample[0], downsample[1], False))
        # self.encoding.append(module)
        self.encoding_conv.add(module2)
    self.encoding = module

    # Decoding
    self.decoding_conv, self.decoding_blocks = scn.Sequential(), scn.Sequential()
    for i in range(num_strides - 2, -1, -1):
        module1 = scn.Sequential().add(
            scn.BatchNormLeakyReLU(nPlanes[i + 1], leakiness=leakiness)).add(
            scn.Deconvolution(self._dimension, nPlanes[i + 1], nPlanes[i],
                              downsample[0], downsample[1], False))
        self.decoding_conv.add(module1)
        module2 = scn.Sequential()
        for j in range(reps):
            block(module2, nPlanes[i] * (2 if j == 0 else 1), nPlanes[i])
        self.decoding_blocks.add(module2)

    self.output = scn.Sequential().add(scn.BatchNormReLU(m)).add(
        scn.OutputLayer(self._dimension))
    self.linear = torch.nn.Linear(m, num_classes)

    # PPN stuff
    self.half_stride = int(num_strides / 2.0)
    self.ppn1_conv = scn.SubmanifoldConvolution(self._dimension, nPlanes[-1],
                                                nPlanes[-1], 3, False)
    self.ppn1_scores = scn.SubmanifoldConvolution(self._dimension, nPlanes[-1],
                                                  2, 3, False)
    self.selection1 = Selection()
    self.selection2 = Selection()
    self.unpool1 = scn.Sequential()
    for i in range(num_strides - self.half_stride - 1):
        self.unpool1.add(
            scn.UnPooling(self._dimension, downsample[0], downsample[1]))
    self.unpool2 = scn.Sequential()
    for i in range(self.half_stride):
        self.unpool2.add(
            scn.UnPooling(self._dimension, downsample[0], downsample[1]))

    middle_filters = int(m * self.half_stride * (self.half_stride + 1) / 2.0)
    self.ppn2_conv = scn.SubmanifoldConvolution(self._dimension, middle_filters,
                                                middle_filters, 3, False)
    self.ppn2_scores = scn.SubmanifoldConvolution(self._dimension,
                                                  middle_filters, 2, 3, False)
    self.multiply1 = Multiply()
    self.multiply2 = Multiply()

    self.ppn3_conv = scn.SubmanifoldConvolution(self._dimension, nPlanes[0],
                                                nPlanes[0], 3, False)
    self.ppn3_pixel_pred = scn.SubmanifoldConvolution(self._dimension,
                                                      nPlanes[0],
                                                      self._dimension, 3, False)
    self.ppn3_scores = scn.SubmanifoldConvolution(self._dimension, nPlanes[0],
                                                  2, 3, False)
    self.ppn3_type = scn.SubmanifoldConvolution(self._dimension, nPlanes[0],
                                                num_classes, 3, False)
    self.add_labels1 = AddLabels()
    self.add_labels2 = AddLabels()
def __init__(self, cfg, name="uresnet_lonely"): super(UResNet, self).__init__() import sparseconvnet as scn if 'modules' in cfg: self.model_config = cfg['modules'][name] else: self.model_config = cfg # Whether to compute ghost mask separately or not self._dimension = self.model_config.get('data_dim', 3) reps = self.model_config.get('reps', 2) # Conv block repetition factor kernel_size = self.model_config.get('kernel_size', 2) num_strides = self.model_config.get('num_strides', 5) m = self.model_config.get('filters', 16) # Unet number of features nInputFeatures = self.model_config.get('features', 1) spatial_size = self.model_config.get('spatial_size', 512) leakiness = self.model_config.get('leak', 0.0) nPlanes = [i * m for i in range(1, num_strides + 1) ] # UNet number of features per level print("nPlanes: ", nPlanes) downsample = [kernel_size, 2] # [filter size, filter stride] self.last = None def block(m, a, b): # ResNet style blocks m.add(scn.ConcatTable().add(scn.Identity( ) if a == b else scn.NetworkInNetwork(a, b, False)).add( scn.Sequential().add( scn.BatchNormLeakyReLU(a, leakiness=leakiness)).add( scn.SubmanifoldConvolution( self._dimension, a, b, 3, False)).add( scn.BatchNormLeakyReLU( b, leakiness=leakiness)).add( scn.SubmanifoldConvolution( self._dimension, b, b, 3, False)))).add(scn.AddTable()) self.input = scn.Sequential().add( scn.InputLayer(self._dimension, spatial_size, mode=3)).add( scn.SubmanifoldConvolution(self._dimension, nInputFeatures, m, 3, False)) # Kernel size 3, no bias self.concat = scn.JoinTable() # Encoding self.bn = scn.BatchNormLeakyReLU(nPlanes[0], leakiness=leakiness) self.encoding_block = scn.Sequential() self.encoding_conv = scn.Sequential() module = scn.Sequential() for i in range(num_strides): module = scn.Sequential() for _ in range(reps): block(module, nPlanes[i], nPlanes[i]) self.encoding_block.add(module) module2 = scn.Sequential() if i < num_strides - 1: module2.add( scn.BatchNormLeakyReLU( nPlanes[i], leakiness=leakiness)).add( scn.Convolution(self._dimension, nPlanes[i], nPlanes[i + 1], downsample[0], downsample[1], False)) self.encoding_conv.add(module2) self.encoding = module # Decoding self.decoding_conv, self.decoding_blocks = scn.Sequential( ), scn.Sequential() for i in range(num_strides - 2, -1, -1): module1 = scn.Sequential().add( scn.BatchNormLeakyReLU(nPlanes[i + 1], leakiness=leakiness)).add( scn.Deconvolution( self._dimension, nPlanes[i + 1], nPlanes[i], downsample[0], downsample[1], False)) self.decoding_conv.add(module1) module2 = scn.Sequential() for j in range(reps): block(module2, nPlanes[i] * (2 if j == 0 else 1), nPlanes[i]) self.decoding_blocks.add(module2) self.output = scn.Sequential().add(scn.BatchNormReLU(m)).add( scn.OutputLayer(self._dimension))
def __init__(self, cfg, name="uresnet_clustering"): super(UResNet, self).__init__() import sparseconvnet as scn self._model_config = cfg['modules'][name] # Whether to compute ghost mask separately or not self._ghost = self._model_config.get('ghost', False) self._dimension = self._model_config.get('data_dim', 3) reps = self._model_config.get('reps', 2) # Conv block repetition factor kernel_size = self._model_config.get('kernel_size', 2) num_strides = self._model_config.get('num_strides', 5) m = self._model_config.get('filters', 16) # Unet number of features nInputFeatures = self._model_config.get('features', 1) spatial_size = self._model_config.get('spatial_size', 512) num_classes = self._model_config.get('num_classes', 5) self._N = self._model_config.get('num_cluster_conv', 0) self._simpleN = self._model_config.get('simple_conv', True) self._add_coordinates = self._model_config.get('cluster_add_coords', False) self._density_estimate = self._model_config.get( 'density_estimate', False) nPlanes = [i * m for i in range(1, num_strides + 1) ] # UNet number of features per level downsample = [kernel_size, 2] # [filter size, filter stride] self.last = None leakiness = 0 def block(m, a, b): # ResNet style blocks m.add(scn.ConcatTable().add(scn.Identity( ) if a == b else scn.NetworkInNetwork(a, b, False)).add( scn.Sequential().add( scn.BatchNormLeakyReLU(a, leakiness=leakiness)).add( scn.SubmanifoldConvolution( self._dimension, a, b, 3, False)).add( scn.BatchNormLeakyReLU( b, leakiness=leakiness)).add( scn.SubmanifoldConvolution( self._dimension, b, b, 3, False)))).add(scn.AddTable()) self.input = scn.Sequential().add( scn.InputLayer(self._dimension, spatial_size, mode=3)).add( scn.SubmanifoldConvolution(self._dimension, nInputFeatures, m, 3, False)) # Kernel size 3, no bias self.concat = scn.JoinTable() # Encoding self.bn = scn.BatchNormLeakyReLU(nPlanes[0], leakiness=leakiness) self.encoding_block = scn.Sequential() self.encoding_conv = scn.Sequential() module = scn.Sequential() for i in range(num_strides): module = scn.Sequential() for _ in range(reps): block(module, nPlanes[i], nPlanes[i]) self.encoding_block.add(module) module2 = scn.Sequential() if i < num_strides - 1: module2.add( scn.BatchNormLeakyReLU( nPlanes[i], leakiness=leakiness)).add( scn.Convolution(self._dimension, nPlanes[i], nPlanes[i + 1], downsample[0], downsample[1], False)) self.encoding_conv.add(module2) self.encoding = module # Decoding self.decoding_conv, self.decoding_blocks = scn.Sequential( ), scn.Sequential() for i in range(num_strides - 2, -1, -1): inFeatures = nPlanes[i + 1] * (2 if (self._N > 0 and i < num_strides - 2) else 1) module1 = scn.Sequential().add( scn.BatchNormLeakyReLU(inFeatures, leakiness=leakiness)).add( scn.Deconvolution(self._dimension, inFeatures, nPlanes[i], downsample[0], downsample[1], False)) self.decoding_conv.add(module1) module2 = scn.Sequential() for j in range(reps): block(module2, nPlanes[i] * (2 if j == 0 else 1), nPlanes[i]) self.decoding_blocks.add(module2) # Clustering convolutions if self._N > 0: self.clustering_conv = scn.Sequential() for i in range(num_strides - 2, -1, -1): conv = scn.Sequential() for j in range(self._N): if self._simpleN: conv.add( scn.SubmanifoldConvolution( self._dimension, nPlanes[i] + (4 if j == 0 and self._add_coordinates else 0), nPlanes[i], 3, False)) conv.add( scn.BatchNormLeakyReLU(nPlanes[i], leakiness=leakiness)) else: block( conv, nPlanes[i] + (4 if j == 0 and self._add_coordinates else 0), nPlanes[i]) self.clustering_conv.add(conv) outFeatures = m * 
(2 if self._N > 0 else 1) self.output = scn.Sequential().add(scn.BatchNormReLU(outFeatures)).add( scn.OutputLayer(self._dimension)) self.linear = torch.nn.Linear(outFeatures, num_classes) if self._density_estimate: self._density_layer = [] for i in range(num_strides - 2, -1, -1): self._density_layer.append(torch.nn.Linear(nPlanes[i], 2)) self._density_layer = torch.nn.Sequential(*self._density_layer)
def __init__(self, cfg, name='ynet_full'):
    super().__init__(cfg, name)
    self.model_config = cfg[name]
    self.num_filters = self.model_config.get('filters', 16)
    self.seed_dim = self.model_config.get('seed_dim', 1)
    self.sigma_dim = self.model_config.get('sigma_dim', 1)
    self.embedding_dim = self.model_config.get('embedding_dim', 3)
    self.inputKernel = self.model_config.get('input_kernel_size', 3)
    self.coordConv = self.model_config.get('coordConv', False)

    # YResNet Configurations
    # operation for mapping latent secondary features to primary features
    self.mapping_op = self.model_config.get('mapping_operation', 'pool')
    assert self.mapping_op in self.supported_mapping_ops

    # Network Freezing Options
    self.encoder_freeze = self.model_config.get('encoder_freeze', False)
    self.embedding_freeze = self.model_config.get('embedding_freeze', False)
    self.seediness_freeze = self.model_config.get('seediness_freeze', False)

    # Input Layer Configurations and commonly used scn operations.
    self.input = scn.Sequential().add(
        scn.InputLayer(self.dimension, self.spatial_size, mode=3)).add(
        scn.SubmanifoldConvolution(self.dimension, self.nInputFeatures,
                                   self.num_filters, self.inputKernel,
                                   self.allow_bias))  # Kernel size 3, no bias
    self.add = scn.AddTable()

    # Preprocessing logic for secondary
    self.t_bn = scn.BatchNormLeakyReLU(1, leakiness=self.leakiness)
    self.netinnet = scn.Sequential()
    self._resnet_block(self.netinnet, 1, self.num_filters)

    # Timing information
    max_seq_len = self.model_config.get('max_seq_len', 5)
    self.pe = SinusoidalPositionalEncoding(max_seq_len, 1)

    # Backbone YResNet. Do NOT change namings!
    self.primary_encoder = YResNetEncoder(cfg, name='yresnet_encoder')
    self.secondary_encoder = YResNetEncoder(cfg, name='yresnet_encoder')

    if self.mapping_op == 'conv':
        self.mapping = ConvolutionalFeatureMapping(self.dimension,
                                                   self.nPlanes[-1],
                                                   self.nPlanes[-1],
                                                   2, 2, False)
    elif self.mapping_op == 'pool':
        self.mapping = PoolFeatureMapping(self.dimension, 2, 2)

    self.seed_net = YResNetDecoder(cfg, name='seediness_decoder')
    self.cluster_net = YResNetDecoder(cfg, name='embedding_decoder')

    # Encoder-Decoder 1x1 Connections
    encoder_planes = [i for i in self.primary_encoder.nPlanes]
    cluster_planes = [i for i in self.cluster_net.nPlanes]
    seed_planes = [i for i in self.seed_net.nPlanes]

    self.skip_mode = self.model_config.get('skip_mode', 'default')
    self.cluster_skip = scn.Sequential()
    self.seed_skip = scn.Sequential()

    # Output Layers
    self.output_cluster = scn.Sequential()
    self._nin_block(self.output_cluster, self.cluster_net.num_filters, 4)
    self.output_cluster.add(scn.OutputLayer(self.dimension))

    self.output_seediness = scn.Sequential()
    self._nin_block(self.output_seediness, self.seed_net.num_filters, 1)
    self.output_seediness.add(scn.OutputLayer(self.dimension))

    if self.skip_mode == 'default':
        for p1, p2 in zip(encoder_planes, cluster_planes):
            self.cluster_skip.add(scn.Identity())
        for p1, p2 in zip(encoder_planes, seed_planes):
            self.seed_skip.add(scn.Identity())
    elif self.skip_mode == '1x1':
        for p1, p2 in zip(encoder_planes, cluster_planes):
            self._nin_block(self.cluster_skip, p1, p2)
        for p1, p2 in zip(encoder_planes, seed_planes):
            self._nin_block(self.seed_skip, p1, p2)
    else:
        raise ValueError('Invalid skip connection mode!')

    # Freeze Layers
    if self.encoder_freeze:
        for p in self.encoder.parameters():
            p.requires_grad = False
    if self.embedding_freeze:
        for p in self.cluster_net.parameters():
            p.requires_grad = False
        for p in self.output_cluster.parameters():
            p.requires_grad = False
    if self.seediness_freeze:
        for p in self.seed_net.parameters():
            p.requires_grad = False
        for p in self.output_seediness.parameters():
            p.requires_grad = False

    # Pytorch Activations
    self.tanh = nn.Tanh()
    self.sigmoid = nn.Sigmoid()
def BatchNormLeakyReLU(nPlanes, eps=1e-4, momentum=0.9, leakiness=0.333):
    return scn.BatchNormLeakyReLU(nPlanes, eps, momentum, leakiness)
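# --- Usage sketch (added for clarity, not from the original source) ---------
# The wrapper only pins project-wide defaults (eps, momentum, leakiness) onto
# scn.BatchNormLeakyReLU, so it drops into an scn.Sequential like the stock
# layer; the channel counts below are illustrative.
import sparseconvnet as scn

m = scn.Sequential()
m.add(BatchNormLeakyReLU(32))                            # leakiness defaults to 0.333
m.add(scn.SubmanifoldConvolution(3, 32, 32, 3, False))   # 3-D submanifold conv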