Example #1
 def v(depth, nPlanes):
     # Recursive U-shaped builder; `reps`, `res`, `dropout_p`,
     # `dropout_width` and `dimension` come from the enclosing scope.
     m = scn.Sequential()
     if depth == 1:
         for _ in range(reps):
             res(m, nPlanes, nPlanes, dropout_p)
     else:
         for _ in range(reps):
             res(m, nPlanes, nPlanes, dropout_p)
         if dropout_width:
             m.add(scn.ConcatTable()
                   .add(scn.Identity())
                   .add(scn.Sequential()
                        .add(scn.BatchNormReLU(nPlanes))
                        # in place of max pooling
                        .add(scn.Convolution(dimension, nPlanes, nPlanes,
                                             2, 2, False))
                        .add(scn.Dropout(dropout_p))
                        .add(v(depth - 1, nPlanes))
                        .add(scn.BatchNormReLU(nPlanes))
                        .add(scn.Deconvolution(dimension, nPlanes, nPlanes,
                                               2, 2, False))))
         else:
             m.add(scn.ConcatTable()
                   .add(scn.Identity())
                   .add(scn.Sequential()
                        .add(scn.BatchNormReLU(nPlanes))
                        .add(scn.Convolution(dimension, nPlanes, nPlanes,
                                             2, 2, False))
                        .add(v(depth - 1, nPlanes))
                        .add(scn.BatchNormReLU(nPlanes))
                        .add(scn.Deconvolution(dimension, nPlanes, nPlanes,
                                               2, 2, False))))
         m.add(scn.JoinTable())
         for i in range(reps):
             res(m, 2 * nPlanes if i == 0 else nPlanes, nPlanes, dropout_p)
     return m
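The builder reads `reps`, `res`, `dropout_p`, `dropout_width` and `dimension` from its enclosing scope. A minimal driving sketch, where the constants and the stub `res` are assumptions for illustration only:

 import sparseconvnet as scn

 # Illustrative stand-ins for the enclosing scope's names.
 dimension, reps, dropout_p, dropout_width = 3, 2, 0.1, True

 def res(m, nIn, nOut, p):  # hypothetical residual stub
     m.add(scn.SubmanifoldConvolution(dimension, nIn, nOut, 3, False))

 tower = v(depth=4, nPlanes=32)  # builds the recursive U-shaped tower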
Example #2
 def block(self, n_in, n_out):
     m = scn.Sequential()
     if self.residual_blocks:  # ResNet style blocks
        m.add(scn.ConcatTable()
              .add(scn.Identity() if n_in == n_out
                   else scn.NetworkInNetwork(n_in, n_out, False))
              .add(scn.Sequential()
                   .add(scn.BatchNormLeakyReLU(n_in, leakiness=self.leakiness))
                   .add(scn.SubmanifoldConvolution(self.dimension, n_in,
                                                   n_out, 3, False))
                   .add(scn.BatchNormLeakyReLU(n_out,
                                               leakiness=self.leakiness))
                   .add(scn.SubmanifoldConvolution(self.dimension, n_out,
                                                   n_out, 3, False))))
         m.add(scn.AddTable())
     else:  # VGG style blocks
         m.add(scn.BatchNormLeakyReLU(n_in, leakiness=self.leakiness))
         m.add(
             scn.SubmanifoldConvolution(self.dimension, n_in, n_out, 3,
                                        False))
     return m
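Because `block` only reads `self.residual_blocks`, `self.leakiness` and `self.dimension`, it can be exercised standalone by faking the host object; the namespace below is an assumption for illustration:

 import types
 import sparseconvnet as scn

 host = types.SimpleNamespace(residual_blocks=True, leakiness=0, dimension=3)
 blk = block(host, n_in=16, n_out=32)  # 16 != 32, so the shortcut is NetworkInNetwork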
Example #3
 def __init__(self, nf_in, nf, input_sparsetensor, return_sparsetensor,
              max_data_size):
     nn.Module.__init__(self)
     data_dim = 3
     self.nf_in = nf_in
     self.nf = nf
     self.input_sparsetensor = input_sparsetensor
     self.return_sparsetensor = return_sparsetensor
     self.max_data_size = max_data_size
     if not self.input_sparsetensor:
         self.p0 = scn.InputLayer(data_dim, self.max_data_size, mode=0)
     self.p1 = scn.SubmanifoldConvolution(data_dim,
                                          nf_in,
                                          nf,
                                          filter_size=FSIZE0,
                                          bias=False)
     # Residual block: identity shortcut + two submanifold convs, then add.
     self.p2 = scn.Sequential()
     self.p2.add(scn.ConcatTable()
                 .add(scn.Identity())
                 .add(scn.Sequential()
                      .add(scn.BatchNormReLU(nf))
                      .add(scn.SubmanifoldConvolution(data_dim, nf, nf,
                                                      FSIZE0, False))
                      .add(scn.BatchNormReLU(nf))
                      .add(scn.SubmanifoldConvolution(data_dim, nf, nf,
                                                      FSIZE0, False))))
     self.p2.add(scn.AddTable())
     self.p2.add(scn.BatchNormReLU(nf))
     # downsample space by factor of 2
     self.p3 = scn.Sequential().add(
         scn.Convolution(data_dim, nf, nf, FSIZE1, 2, False))
     self.p3.add(scn.BatchNormReLU(nf))
     if not self.return_sparsetensor:
         self.p4 = scn.SparseToDense(data_dim, nf)
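A sketch of the forward chaining these layers imply; the method itself is an assumption, written to match the attributes defined above:

 def forward(self, x):
     if not self.input_sparsetensor:
         x = self.p0(x)    # [coords, feats] -> SparseConvNetTensor
     x = self.p1(x)        # nf_in -> nf submanifold convolution
     x = self.p2(x)        # residual block, then BatchNormReLU
     x = self.p3(x)        # strided convolution: spatial size / 2
     if self.return_sparsetensor:
         return x
     return self.p4(x)     # SparseToDense -> regular dense tensor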
Example #4
    def _make_transpose(self, transblock, planes, blocks, stride=1):

        upsample = None
        if stride != 1:
            # Upsample shortcut: round-trip through a dense tensor so a
            # standard ConvTranspose2d can do the strided upsampling.
            upsample = scn.Sequential(
                scn.SparseToDense(2, self.inplanes * transblock.expansion),
                nn.ConvTranspose2d(self.inplanes * transblock.expansion,
                                   planes, kernel_size=2, stride=stride,
                                   padding=0, bias=False),
                scn.DenseToSparse(2),
                scn.BatchNormalization(planes)
            )
        elif self.inplanes * transblock.expansion != planes:
            # Channel-matching shortcut: 1x1 (NetworkInNetwork) + batch norm.
            upsample = scn.Sequential(
                scn.NetworkInNetwork(self.inplanes * transblock.expansion,
                                     planes, False),
                scn.BatchNormalization(planes)
            )

        layers = []
        
        for i in range(1, blocks):
            layers.append(transblock(self.inplanes, self.inplanes * transblock.expansion))

        layers.append(transblock(self.inplanes, planes, stride, upsample))
        self.inplanes = planes // transblock.expansion

        return scn.Sequential(*layers)
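A hypothetical call site, assuming `transblock` is a torchvision-style block class that exposes `.expansion` and accepts `(inplanes, planes, stride, upsample)`:

 # BasicBlockTranspose is a hypothetical block class, shown for illustration.
 up4 = self._make_transpose(BasicBlockTranspose, planes=128, blocks=2, stride=2)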
Example #5
    def block(self,
              m,
              a,
              b,
              dimension=3,
              residual_blocks=False,
              leakiness=0,
              kernel_size=3,
              use_batch_norm=True):  # residual_blocks selects ResNet vs VGG style
        if use_batch_norm:
            Activation = lambda channels: scn.BatchNormLeakyReLU(
                channels, leakiness=leakiness)
        else:
            # channel count is ignored when batch norm is disabled
            Activation = lambda channels: scn.LeakyReLU(leakiness)

        if residual_blocks:  # ResNet style blocks
            m.add(scn.ConcatTable()
                  .add(scn.Identity() if a == b
                       else scn.NetworkInNetwork(a, b, False))
                  .add(scn.Sequential()
                       .add(Activation(a))
                       .add(scn.SubmanifoldConvolution(dimension, a, b,
                                                       kernel_size, False))
                       .add(Activation(b))
                       .add(scn.SubmanifoldConvolution(dimension, b, b,
                                                       kernel_size, False))))
            m.add(scn.AddTable())
        else:  # VGG style blocks
            m.add(scn.Sequential()
                  .add(Activation(a))
                  .add(scn.SubmanifoldConvolution(dimension, a, b,
                                                  kernel_size, False)))
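In-place builders like this mutate the `Sequential` they receive; a usage sketch with assumed values, from inside the host class:

 m = scn.Sequential()
 self.block(m, a=32, b=64, residual_blocks=True, leakiness=0.01)  # 32 -> 64 residual block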
Example #6
 def block(self, nPlanes, n, reps, stride):
     m = scn.Sequential()
     for rep in range(reps):
         if rep == 0:
             m.add(scn.BatchNormReLU(nPlanes))
             m.add(scn.ConcatTable()
                   .add(self.residual(nPlanes, n, stride))
                   .add(scn.Sequential()
                        .add(scn.SubmanifoldConvolution(self.dimension,
                                                        nPlanes, n, 3, False)
                             if stride == 1 else
                             scn.Convolution(self.dimension, nPlanes, n,
                                             2, stride, False))
                        .add(scn.BatchNormReLU(n))
                        .add(scn.SubmanifoldConvolution(self.dimension,
                                                        n, n, 3, False))))
         else:
             m.add(scn.ConcatTable()
                   .add(scn.Sequential()
                        .add(scn.BatchNormReLU(nPlanes))
                        .add(scn.SubmanifoldConvolution(self.dimension,
                                                        nPlanes, n, 3, False))
                        .add(scn.BatchNormReLU(n))
                        .add(scn.SubmanifoldConvolution(self.dimension,
                                                        n, n, 3, False)))
                   .add(scn.Identity()))
         m.add(scn.AddTable())
         nPlanes = n
     return m
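A usage sketch with assumed values; the first rep downsamples through `scn.Convolution` when `stride > 1`, later reps stay submanifold with an identity shortcut:

 stage = self.block(nPlanes=16, n=32, reps=2, stride=2)  # one downsampling stage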
Example #7
 def __init__(self, sgc_config):
     nn.Module.__init__(self)
     # `res`, `UNet6` and `nClasses` are assumed defined at module level.
     self.stage1 = scn.Sequential().add(
         scn.ValidConvolution(3, 1, 16, 3, False))
     self.stage1_2 = scn.MaxPooling(3, 2, 2)
     self.stage2 = scn.Sequential()
     res(self.stage2, 3, 16, 64)
     self.stage2_2 = scn.MaxPooling(3, 2, 2)
     self.stage3 = UNet6(3, nClasses, sgc_config=sgc_config)
     self.densePred = scn.SparseToDense(3, nClasses)
     self.sgc_config = sgc_config
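A forward-pass sketch implied by the stages above; the method itself is an assumption:

 def forward(self, x):
     x = self.stage1(x)        # ValidConvolution stem
     x = self.stage1_2(x)      # MaxPooling, spatial / 2
     x = self.stage2(x)        # residual stage built by res()
     x = self.stage2_2(x)      # MaxPooling, spatial / 2
     x = self.stage3(x)        # UNet6 backbone
     return self.densePred(x)  # SparseToDense logits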
Example #8
    def __init__(self):
        super(CNN, self).__init__()
        ###############################
        # Hardcoded settings
        ###############################
        self._dimension = 3
        reps = 2
        kernel_size = 2
        num_strides = 7
        init_num_features = 8
        nInputFeatures = 1
        spatial_size = 128  #padding the rest for 169 PMTs
        num_classes = 2  # good versus ghost

        nPlanes = [(2**i) * init_num_features
                   for i in range(num_strides)]  # features double at every layer
        downsample = [kernel_size, 2]
        leakiness = 0

        #################################
        # Input layer
        #################################
        self.input = scn.Sequential().add(
            scn.InputLayer(self._dimension, spatial_size, mode=3)
        ).add(
            scn.SubmanifoldConvolution(self._dimension, nInputFeatures,
                                       init_num_features, 3, False)  # kernel 3, no bias
        )
        self.concat = scn.JoinTable()
        #################################
        # Encode layers
        #################################
        self.encoding_conv = scn.Sequential()
        for i in range(num_strides):
            if i < 4:  #hardcoded
                self.encoding_conv.add(
                    scn.BatchNormLeakyReLU(
                        nPlanes[i], leakiness=leakiness)).add(
                            scn.Convolution(self._dimension, nPlanes[i],
                                            nPlanes[i + 1], downsample[0],
                                            downsample[1], False))
            elif i < num_strides - 1:
                self.encoding_conv.add(scn.MaxPooling(self._dimension, 2, 2))

        self.output = scn.Sequential().add(
            scn.SparseToDense(self._dimension, nPlanes[-1]))
        ###################################
        # Final linear layer
        ###################################
        self.deepest_layer_num_features = int(
            nPlanes[-1] * np.power(spatial_size / (2**(num_strides - 1)), 3.))
        self.classifier = torch.nn.Sequential(
            torch.nn.ReLU(),
            torch.nn.Linear(self.deepest_layer_num_features, num_classes),
        )
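A forward pass consistent with the layers above; the `[coords, features]` input convention follows `scn.InputLayer`, and the method is an assumption:

 def forward(self, coords, features):
     x = self.input([coords, features])  # coords: (N, 4) long, features: (N, 1)
     x = self.encoding_conv(x)           # strided convs, then max-pooling
     x = self.output(x)                  # SparseToDense
     x = x.view(x.size(0), -1)           # flatten for the linear classifier
     return self.classifier(x)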
Example #9
 def baz(depth, nPlanes):
     # `dimension`, `l`, `foo` and `bar` come from the enclosing scope.
     if depth == 1:
         return scn.Sequential().add(foo(nPlanes)).add(bar(nPlanes, True))
     else:
         return scn.Sequential().add(foo(nPlanes)).add(
             scn.ConcatTable()
             .add(bar(nPlanes, False))
             .add(scn.Sequential()
                  .add(scn.BatchNormReLU(nPlanes))
                  .add(scn.Convolution(dimension, nPlanes, l(nPlanes),
                                       2, 2, False))
                  .add(baz(depth - 1, l(nPlanes)))
                  .add(scn.UnPooling(dimension, 2, 2)))
         ).add(scn.AddTable())
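The free names `dimension`, `l`, `foo` and `bar` come from the enclosing scope. Illustrative stubs (pure assumptions) under which `baz` composes; `l` is kept at constant width so the two `AddTable` branches match:

 import sparseconvnet as scn

 dimension = 3
 l = lambda n: n                       # assumed: constant width per level
 foo = lambda n: scn.BatchNormReLU(n)  # assumed pre-block
 bar = lambda n, top: scn.SubmanifoldConvolution(dimension, n, n, 3, False)

 net = baz(depth=3, nPlanes=16)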
Example #10
 def decoder_block(self, nPlanes, n, reps, stride):
     # Single-conv residual blocks; note `stride` is accepted but unused.
     m = scn.Sequential()
     for rep in range(reps):
         m.add(scn.ConcatTable().add(
             scn.Sequential().add(scn.BatchNormReLU(nPlanes)).add(
                 scn.SubmanifoldConvolution(self.dimension, nPlanes, n, 3,
                                            False))
             # .add(scn.BatchNormReLU(n))
             # .add(scn.SubmanifoldConvolution(dimension, n, n, 3, False))
         ).add(scn.Identity()))
         m.add(scn.AddTable())
         nPlanes = n
     return m
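A usage sketch with assumed values; since the shortcut branch is a plain `scn.Identity()`, `AddTable` requires `nPlanes == n`:

 dec = self.decoder_block(nPlanes=32, n=32, reps=2, stride=1)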
Example #11
    def iter_unet(self, n_input_planes):
        # unlike the recursive scn.UNet implementation, this builds iteratively
        enc_convs = scn.Sequential()
        dec_convs = scn.Sequential()
        for n_planes_in, n_planes_out in zip(self.n_planes[:-1],
                                             self.n_planes[1:]):
            # encode
            conv1x1 = scn.Sequential()
            for i in range(self.block_reps):
                conv1x1.add(
                    self.block(
                        n_input_planes
                        if n_input_planes != -1 else n_planes_in, n_planes_in))
                n_input_planes = -1

            conv = scn.Sequential()
            conv.add(
                scn.BatchNormLeakyReLU(n_planes_in, leakiness=self.leakiness))
            conv.add(
                scn.Convolution(self.dimension, n_planes_in, n_planes_out,
                                self.downsample[0], self.downsample[1], False))
            enc_conv = scn.Sequential()
            enc_conv.add(conv1x1)
            enc_conv.add(conv)
            enc_convs.add(enc_conv)

            # decode: mirrors the corresponding encode stage (symmetric U)
            b_join = scn.Sequential()  # before_join
            b_join.add(
                scn.BatchNormLeakyReLU(n_planes_out, leakiness=self.leakiness))
            b_join.add(
                scn.Deconvolution(self.dimension, n_planes_out, n_planes_in,
                                  self.downsample[0], self.downsample[1],
                                  False))
            join_table = scn.JoinTable()
            a_join = scn.Sequential()  # after_join
            for i in range(self.block_reps):
                a_join.add(
                    self.block(n_planes_in * (2 if i == 0 else 1),
                               n_planes_in))
            dec_conv = scn.Sequential()
            dec_conv.add(b_join)
            dec_conv.add(join_table)
            dec_conv.add(a_join)
            dec_convs.add(dec_conv)

        middle_conv = scn.Sequential()
        for i in range(self.block_reps):
            middle_conv.add(
                self.block(
                    n_input_planes if n_input_planes != -1 else
                    self.n_planes[-1], self.n_planes[-1]))
            n_input_planes = -1

        return enc_convs, middle_conv, dec_convs
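A forward pass these three pieces suggest, assuming the host stores the returned triple as `self.enc_convs`, `self.middle_conv` and `self.dec_convs` (indexing works because `scn.Sequential` subclasses `nn.Sequential`):

 def forward(self, x):
     skips = []
     for enc in self.enc_convs:
         x = enc[0](x)          # conv1x1 block stack
         skips.append(x)
         x = enc[1](x)          # strided downsample
     x = self.middle_conv(x)
     for dec, skip in zip(reversed(self.dec_convs), reversed(skips)):
         x = dec[0](x)          # b_join: batch norm + deconvolution
         x = dec[1]([x, skip])  # JoinTable concatenates the skip connection
         x = dec[2](x)          # a_join block stack
     return x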
Example #12
 def block(self, m, a, b, dimension=3, residual_blocks=False, leakiness=0):  # residual_blocks selects ResNet vs VGG style
     if residual_blocks:  # ResNet style blocks
         m.add(scn.ConcatTable()
               .add(scn.Identity() if a == b else scn.NetworkInNetwork(a, b, False))
               .add(scn.Sequential()
                    .add(scn.BatchNormLeakyReLU(a, leakiness=leakiness))
                    .add(scn.SubmanifoldConvolution(dimension, a, b, 3, False))
                    .add(scn.BatchNormLeakyReLU(b, leakiness=leakiness))
                    .add(scn.SubmanifoldConvolution(dimension, b, b, 3, False)))
              ).add(scn.AddTable())
     else:  # VGG style blocks
         m.add(scn.Sequential()
               .add(scn.BatchNormLeakyReLU(a, leakiness=leakiness))
               .add(scn.SubmanifoldConvolution(dimension, a, b, 3, False)))
Example #13
 def baz(nPlanes):
     # `foo`, `bar`, `dimension`, `downsample` and `leakiness` come from the
     # enclosing scope; nPlanes is a list of feature counts, one per level.
     m = scn.Sequential()
     foo(m, nPlanes[0])
     if len(nPlanes) == 1:
         bar(m, nPlanes[0], True)
     else:
         a = scn.Sequential()
         bar(a, nPlanes[0], False)
         b = scn.Sequential(
             scn.BatchNormLeakyReLU(nPlanes[0], leakiness=leakiness),
             scn.Convolution(dimension, nPlanes[0], nPlanes[1],
                             downsample[0], downsample[1], False),
             baz(nPlanes[1:]),
             scn.UnPooling(dimension, downsample[0], downsample[1]))
         m.add(scn.ConcatTable(a, b))
         m.add(scn.AddTable())
     return m
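A driving sketch for the fixed `baz`; every stub and constant below is an assumption, and the widths are kept equal so the `AddTable` branches line up:

 import sparseconvnet as scn

 dimension, leakiness, downsample = 3, 0, [2, 2]

 def foo(m, n):  # hypothetical stub
     m.add(scn.SubmanifoldConvolution(dimension, n, n, 3, False))

 def bar(m, n, top):  # hypothetical stub
     m.add(scn.BatchNormLeakyReLU(n, leakiness=leakiness))

 net = baz([16, 16, 16])  # three-level U with constant width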
Example #14
 def __init__(self,
              is_3d,
              num_strides=3,
              base_num_outputs=16,
              num_classes=3,
              spatialSize=192):
     nn.Module.__init__(self)
     dimension = 3 if is_3d else 2
     reps = 2  # Conv block repetition factor
     kernel_size = 2  # Use input_spatial_size method for other values?
     m = base_num_outputs  # Unet number of features
     nPlanes = [i * m for i in range(1, num_strides + 1)]  # UNet features per level
     # nPlanes = [(2**i) * m for i in range(1, num_strides+1)]  # UNet number of features per level
     nInputFeatures = 1
     self.sparseModel = scn.Sequential().add(
         scn.InputLayer(dimension, spatialSize, mode=3)
     ).add(
         scn.SubmanifoldConvolution(dimension, nInputFeatures, m, 3,
                                    False)  # kernel size 3, no bias
     ).add(
         UNet(dimension, reps, nPlanes, residual_blocks=True,
              downsample=[kernel_size, 2])  # downsample = [filter size, filter stride]
     ).add(
         scn.BatchNormReLU(m)
     ).add(
         scn.OutputLayer(dimension)
     )
     self.linear = nn.Linear(m, num_classes)
Example #15
 def __init__(self, flags):
     import sparseconvnet as scn
     super(UResNet, self).__init__()
     self._flags = flags
     dimension = flags.DATA_DIM
     reps = 2  # Conv block repetition factor
     kernel_size = 2  # Use input_spatial_size method for other values?
     m = flags.URESNET_FILTERS  # Unet number of features
     nPlanes = [i * m for i in range(1, flags.URESNET_NUM_STRIDES + 1)]  # UNet features per level
     # nPlanes = [(2**i) * m for i in range(1, num_strides+1)]  # UNet number of features per level
     nInputFeatures = 1
     self.sparseModel = scn.Sequential().add(
         scn.InputLayer(dimension, flags.SPATIAL_SIZE, mode=3)
     ).add(
         scn.SubmanifoldConvolution(dimension, nInputFeatures, m, 3,
                                    False)  # kernel size 3, no bias
     ).add(
         scn.UNet(dimension, reps, nPlanes, residual_blocks=True,
                  downsample=[kernel_size, 2])  # downsample = [filter size, filter stride]
     ).add(
         scn.BatchNormReLU(m)
     ).add(
         scn.OutputLayer(dimension)
     )
     self.linear = torch.nn.Linear(m, flags.NUM_CLASS)
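A hypothetical `flags` object for this constructor; the field names mirror exactly what the code reads:

 import types

 flags = types.SimpleNamespace(DATA_DIM=3, URESNET_FILTERS=16,
                               URESNET_NUM_STRIDES=5, SPATIAL_SIZE=512,
                               NUM_CLASS=5)
 net = UResNet(flags)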
Example #16
    def __init__(self, cfg):
        import sparseconvnet as scn
        super(UResNet, self).__init__()
        self._model_config = cfg['modules']['uresnet']

        self._dimension = self._model_config.get('data_dim', 3)
        num_strides = self._model_config.get('num_strides', 5)
        spatial_size = self._model_config.get('spatial_size', 512)
        num_classes = self._model_config.get('num_classes', 5)
        m = self._model_config.get('filters', 16)  # Unet number of features
        nInputFeatures = self._model_config.get('features', 1)

        reps = 2  # Conv block repetition factor
        kernel_size = 2  # Use input_spatial_size method for other values?
        nPlanes = [i * m for i in range(1, num_strides + 1)]  # UNet features per level
        # nPlanes = [(2**i) * m for i in range(1, num_strides+1)]  # UNet number of features per level

        self.sparseModel = scn.Sequential().add(
            scn.InputLayer(self._dimension, spatial_size, mode=3)
        ).add(
            scn.SubmanifoldConvolution(self._dimension, nInputFeatures, m, 3,
                                       False)  # kernel size 3, no bias
        ).add(
            scn.UNet(self._dimension, reps, nPlanes, residual_blocks=True,
                     downsample=[kernel_size, 2])  # downsample = [filter size, filter stride]
        ).add(
            scn.BatchNormReLU(m)
        ).add(
            scn.OutputLayer(self._dimension)
        )
        self.linear = torch.nn.Linear(m, num_classes)
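A minimal `cfg` for the constructor above; the values simply echo the defaults it falls back to:

 cfg = {'modules': {'uresnet': {
     'data_dim': 3, 'num_strides': 5, 'spatial_size': 512,
     'num_classes': 5, 'filters': 16, 'features': 1}}}
 net = UResNet(cfg)
 # forward input follows scn conventions: [coords (N, data_dim + 1), features (N, features)]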