Code Example #1
File: torch_model.py, Project: sailfish009/molmimic
 def v(depth, nPlanes):
     m = scn.Sequential()
     if depth == 1:
         for _ in range(reps):
             res(m, nPlanes, nPlanes, dropout_p)
     else:
         for _ in range(reps):
             res(m, nPlanes, nPlanes, dropout_p)
         if dropout_width:
             m.add(scn.ConcatTable().add(scn.Identity()).add(
                 scn.Sequential().add(scn.BatchNormReLU(nPlanes)).add(
                     #In place of Maxpooling
                     scn.Convolution(
                         dimension, nPlanes, nPlanes, 2, 2,
                         False)).add(scn.Dropout(dropout_p)).add(
                             v(depth - 1, nPlanes)).add(
                                 scn.BatchNormReLU(nPlanes)).add(
                                     scn.Deconvolution(
                                         dimension, nPlanes, nPlanes, 2, 2,
                                         False))))
         else:
             m.add(scn.ConcatTable().add(scn.Identity()).add(
                 scn.Sequential().add(scn.BatchNormReLU(nPlanes)).add(
                     scn.Convolution(dimension, nPlanes, nPlanes, 2, 2,
                                     False)).add(v(depth - 1, nPlanes)).add(
                                         scn.BatchNormReLU(nPlanes)).add(
                                             scn.Deconvolution(
                                                 dimension, nPlanes,
                                                 nPlanes, 2, 2, False))))
         m.add(scn.JoinTable())
         for i in range(reps):
             res(m, 2 * nPlanes if i == 0 else nPlanes, nPlanes, dropout_p)
     return m
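The v() builder above is an inner function: reps, dropout_p, dropout_width, dimension, and the res() helper are all captured from the enclosing scope. A hypothetical call, assuming those names are bound as in the original file, looks like this:

# Hypothetical usage sketch, not part of the original excerpt; assumes reps,
# dropout_p, dropout_width, dimension and res() exist in the enclosing scope.
unet_body = v(depth=4, nPlanes=32)  # builds the recursive U-shaped sparse encoder/decoder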
Code Example #2
    def __init__(self, inplanes, outplanes, bias, batch_norm):
        nn.Module.__init__(self)

        self.conv1 = scn.SubmanifoldConvolution(dimension=3,
                                                nIn=inplanes,
                                                nOut=outplanes,
                                                filter_size=3,
                                                bias=bias)

        if batch_norm:
            self.activation1 = scn.BatchNormReLU(outplanes, momentum=0.5)
        else:
            self.activation1 = scn.ReLU()

        self.conv2 = scn.SubmanifoldConvolution(dimension=3,
                                                nIn=outplanes,
                                                nOut=outplanes,
                                                filter_size=3,
                                                bias=bias)

        if batch_norm:
            self.activation2 = scn.BatchNormReLU(outplanes, momentum=0.5)
        else:
            self.activation2 = scn.ReLU()

        self.residual = scn.Identity()

        self.add = scn.AddTable()
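The constructor above only registers the layers; the forward pass is not shown in this excerpt. A hedged sketch of how such a residual block is typically wired, assuming scn.AddTable is applied to a list holding the two branches:

    # Assumed forward(), not part of the excerpt above.
    def forward(self, x):
        residual = self.residual(x)          # identity shortcut
        out = self.conv1(x)
        out = self.activation1(out)
        out = self.conv2(out)
        out = self.activation2(out)
        return self.add([out, residual])     # scn.AddTable sums the branch features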
Code Example #3
 def block(self, nPlanes, n, reps, stride):
     m = scn.Sequential()
     for rep in range(reps):
         if rep == 0:
             m.add(scn.BatchNormReLU(nPlanes))
             m.add(scn.ConcatTable().add(self.residual(
                 nPlanes, n, stride)).add(scn.Sequential().add(
                     scn.SubmanifoldConvolution(self.dimension, nPlanes, n,
                                                3, False) if stride ==
                     1 else scn.Convolution(
                         self.dimension, nPlanes, n, 2, stride, False)).add(
                             scn.BatchNormReLU(n)).add(
                                 scn.SubmanifoldConvolution(
                                     self.dimension, n, n, 3, False))))
         else:
             m.add(scn.ConcatTable().add(scn.Sequential().add(
                 scn.BatchNormReLU(nPlanes)).add(
                     scn.SubmanifoldConvolution(
                         self.dimension, nPlanes, n, 3,
                         False)).add(scn.BatchNormReLU(n)).add(
                             scn.SubmanifoldConvolution(
                                 self.dimension, n, n, 3,
                                 False))).add(scn.Identity()))
         m.add(scn.AddTable())
         nPlanes = n
     return m
Code Example #4
 def __init__(self, nf_in, nf, input_sparsetensor, return_sparsetensor,
              max_data_size):
     nn.Module.__init__(self)
     data_dim = 3
     self.nf_in = nf_in
     self.nf = nf
     self.input_sparsetensor = input_sparsetensor
     self.return_sparsetensor = return_sparsetensor
     self.max_data_size = max_data_size
     if not self.input_sparsetensor:
         self.p0 = scn.InputLayer(data_dim, self.max_data_size, mode=0)
     self.p1 = scn.SubmanifoldConvolution(data_dim,
                                          nf_in,
                                          nf,
                                          filter_size=FSIZE0,
                                          bias=False)
     self.p2 = scn.Sequential()
     self.p2.add(scn.ConcatTable().add(scn.Identity()).add(
         scn.Sequential().add(scn.BatchNormReLU(nf)).add(
             scn.SubmanifoldConvolution(
                 data_dim, nf, nf, FSIZE0,
                 False)).add(scn.BatchNormReLU(nf)).add(
                     scn.SubmanifoldConvolution(data_dim, nf, nf, FSIZE0,
                                                False)))).add(
                                                    scn.AddTable())
     self.p2.add(scn.BatchNormReLU(nf))
     # downsample space by factor of 2
     self.p3 = scn.Sequential().add(
         scn.Convolution(data_dim, nf, nf, FSIZE1, 2, False))
     self.p3.add(scn.BatchNormReLU(nf))
     if not self.return_sparsetensor:
         self.p4 = scn.SparseToDense(data_dim, nf)
Code Example #5
    def __init__(self,
                 output_shape,
                 use_norm=True,
                 num_input_features=128,
                 num_filters_down1=[64],
                 num_filters_down2=[64, 64],
                 name='SparseMiddleExtractor'):
        super(SparseMiddleExtractor, self).__init__()
        self.name = name
        if use_norm:
            BatchNorm1d = change_default_args(
                eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
            Linear = change_default_args(bias=False)(nn.Linear)
        else:
            BatchNorm1d = Empty
            Linear = change_default_args(bias=True)(nn.Linear)
        sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
        # sparse_shape[0] = 11
        print(sparse_shape)
        self.scn_input = scn.InputLayer(3, sparse_shape.tolist())
        self.voxel_output_shape = output_shape
        middle_layers = []

        num_filters = [num_input_features] + num_filters_down1
        # num_filters = [64] + num_filters_down1
        filters_pairs_d1 = [[num_filters[i], num_filters[i + 1]]
                            for i in range(len(num_filters) - 1)]

        for i, o in filters_pairs_d1:
            middle_layers.append(scn.SubmanifoldConvolution(3, i, o, 3, False))
            middle_layers.append(scn.BatchNormReLU(o, eps=1e-3, momentum=0.99))
        middle_layers.append(
            scn.Convolution(
                3,
                num_filters[-1],
                num_filters[-1], (3, 1, 1), (2, 1, 1),
                bias=False))
        middle_layers.append(
            scn.BatchNormReLU(num_filters[-1], eps=1e-3, momentum=0.99))
        # assert len(num_filters_down2) > 0
        if len(num_filters_down1) == 0:
            num_filters = [num_filters[-1]] + num_filters_down2
        else:
            num_filters = [num_filters_down1[-1]] + num_filters_down2
        filters_pairs_d2 = [[num_filters[i], num_filters[i + 1]]
                            for i in range(len(num_filters) - 1)]
        for i, o in filters_pairs_d2:
            middle_layers.append(scn.SubmanifoldConvolution(3, i, o, 3, False))
            middle_layers.append(scn.BatchNormReLU(o, eps=1e-3, momentum=0.99))
        middle_layers.append(
            scn.Convolution(
                3,
                num_filters[-1],
                num_filters[-1], (3, 1, 1), (2, 1, 1),
                bias=False))
        middle_layers.append(
            scn.BatchNormReLU(num_filters[-1], eps=1e-3, momentum=0.99))
        middle_layers.append(scn.SparseToDense(3, num_filters[-1]))
        self.middle_conv = Sequential(*middle_layers)
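The extractor above only constructs self.scn_input and self.middle_conv. A hedged forward() sketch (the original method is not shown here; the argument names and coordinate layout are assumptions) that runs the sparse stack and folds the depth axis into channels for a downstream 2D head:

    # Assumed forward(); argument names, shapes and coordinate layout are assumptions.
    def forward(self, voxel_features, coors, batch_size):
        # scn.InputLayer takes (coordinates, features[, batch_size]) with the
        # sample index in the last coordinate column.
        ret = self.scn_input((coors, voxel_features, batch_size))
        ret = self.middle_conv(ret)          # ends in scn.SparseToDense -> dense tensor
        N, C, D, H, W = ret.shape
        return ret.view(N, C * D, H, W)      # fold depth into the channel axis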
Code Example #6
File: sparseinfill.py, Project: NuTufts/sparse_infill
    def __init__(self, inputshape, reps, nin_features, nout_features, nplanes,
                 show_sizes):
        """
        inputs
        ------
        inputshape [list of int]: dimensions of the matrix or image
        reps [int]: number of residual modules per layer (for both encoder and decoder)
        nin_features [int]: number of features in the first convolutional layer
        nout_features [int]: number of features that feed into the regression layer
        nplanes [int]: the depth (number of levels) of the U-Net
        show_sizes [bool]: if True, print sizes while running forward
        """
        nn.Module.__init__(self)
        self._mode = 0
        self._dimension = 2
        self._inputshape = inputshape
        if len(self._inputshape) != self._dimension:
            raise ValueError(
                "expected inputshape to contain size of 2 dimensions only." +
                "given %d values" % (len(self._inputshape)))
        self._reps = reps
        self._nin_features = nin_features
        self._nout_features = nout_features
        self._nplanes = [
            nin_features, 2 * nin_features, 3 * nin_features, 4 * nin_features,
            5 * nin_features
        ]
        self._show_sizes = show_sizes

        self.sparseModel = scn.Sequential().add(
            scn.InputLayer(
                self._dimension, self._inputshape, mode=self._mode)).add(
                    scn.SubmanifoldConvolution(
                        self._dimension, 1, self._nin_features, 3, False)).add(
                            scn.UNet(
                                self._dimension,
                                self._reps,
                                self._nplanes,
                                residual_blocks=True,
                                downsample=[2, 2])).add(
                                    scn.BatchNormReLU(self._nin_features)).add(
                                        scn.OutputLayer(self._dimension))

        self.input = scn.InputLayer(self._dimension,
                                    self._inputshape,
                                    mode=self._mode)
        self.conv1 = scn.SubmanifoldConvolution(self._dimension, 1,
                                                self._nin_features, 3, False)
        self.unet = scn.UNet(self._dimension,
                             self._reps,
                             self._nplanes,
                             residual_blocks=True,
                             downsample=[2, 2])
        self.batchnorm = scn.BatchNormReLU(self._nin_features)
        self.output = scn.OutputLayer(self._dimension)
        self.conv2 = scn.SubmanifoldConvolution(self._dimension,
                                                self._nin_features, 1, 3,
                                                False)
Code Example #7
 def __init__(self, inplanes, kernel, dim=3):
     torch.nn.Module.__init__(self)
     self.bnr1 = scn.BatchNormReLU(inplanes)
     self.subconv1 = scn.SubmanifoldConvolution(dim, inplanes, inplanes,
                                                kernel, 0)
     self.bnr2 = scn.BatchNormReLU(inplanes)
     self.subconv2 = scn.SubmanifoldConvolution(dim, inplanes, inplanes,
                                                kernel, 0)
     self.add = scn.AddTable()
Code Example #8
def res(m, dimension, a, b):
    m.add(scn.ConcatTable()
          .add(scn.Identity() if a == b else scn.NetworkInNetwork(a, b, False))
          .add(scn.Sequential()
               .add(scn.BatchNormReLU(a))
               .add(scn.SubmanifoldConvolution(dimension, a, b, 3, False))
               .add(scn.BatchNormReLU(b))
               .add(scn.SubmanifoldConvolution(dimension, b, b, 3, False))))\
     .add(scn.AddTable())
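Each call to res() above appends one pre-activation residual block to the passed scn.Sequential; the shortcut is an identity when the input and output feature counts match, and a NetworkInNetwork (1x1) projection otherwise. A hypothetical usage sketch with assumed feature counts:

# Hypothetical usage, not from the original file.
import sparseconvnet as scn

dimension, nPlanes = 3, 32
m = scn.Sequential()
res(m, dimension, nPlanes, nPlanes)      # identity shortcut (a == b)
res(m, dimension, nPlanes, 2 * nPlanes)  # NetworkInNetwork shortcut widens the features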
Code Example #9
def SparseResNet(dimension, nInputPlanes, layers):
    """
    pre-activated ResNet
    e.g. layers = [[16, 2, 1], [32, 2, 2]]  # each entry is [nOut, reps, stride]
    """
    import sparseconvnet as scn
    nPlanes = nInputPlanes
    m = scn.Sequential()

    def residual(nIn, nOut, stride):
        if stride > 1:
            return scn.Convolution(dimension, nIn, nOut, 2, stride, False)
        elif nIn != nOut:
            return scn.NetworkInNetwork(nIn, nOut, False)
        else:
            return scn.Identity()

    for n, reps, stride in layers:
        for rep in range(reps):
            if rep == 0:
                m.add(scn.BatchNormReLU(nPlanes))
                tab = scn.ConcatTable()
                tab_seq = scn.Sequential()
                if stride == 1:
                    tab_seq.add(
                        scn.SubmanifoldConvolution(dimension, nPlanes, n, 3,
                                                   False))
                else:
                    tab_seq.add(
                        scn.Convolution(dimension, nPlanes, n, 2, stride,
                                        False))
                tab_seq.add(scn.BatchNormReLU(n))
                tab_seq.add(
                    scn.SubmanifoldConvolution(dimension, n, n, 3, False))
                tab.add(tab_seq)
                tab.add(residual(nPlanes, n, stride))
                m.add(tab)
            else:
                tab = scn.ConcatTable()
                tab_seq = scn.Sequential()
                tab_seq.add(scn.BatchNormReLU(nPlanes))
                tab_seq.add(
                    scn.SubmanifoldConvolution(dimension, nPlanes, n, 3,
                                               False))
                tab_seq.add(scn.BatchNormReLU(n))
                tab_seq.add(
                    scn.SubmanifoldConvolution(dimension, n, n, 3, False))
                tab.add(tab_seq)
                tab.add(scn.Identity())
                m.add(tab)
            nPlanes = n
            m.add(scn.AddTable())
    m.add(scn.BatchNormReLU(nPlanes))
    return m
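The loop above unpacks each layers entry as [nOut, reps, stride], so an illustrative call (the configuration is an assumption, not taken from the original project) for a three-stage backbone with two blocks per stage is:

# Illustrative only; the layer configuration is assumed.
backbone = SparseResNet(dimension=3, nInputPlanes=16,
                        layers=[[16, 2, 1], [32, 2, 2], [64, 2, 2]])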
Code Example #10
 def block(m, a, b):
     if residual_blocks:  #ResNet style blocks
         m.add(scn.ConcatTable().add(scn.Identity(
         ) if a == b else scn.NetworkInNetwork(a, b, False)).add(
             scn.Sequential().add(scn.BatchNormReLU(a)).add(
                 scn.SubmanifoldConvolution(dimension, a, b, 3, False)).add(
                     scn.BatchNormReLU(b)).add(
                         scn.SubmanifoldConvolution(dimension, b, b, 3,
                                                    False)))).add(
                                                        scn.AddTable())
     else:  #VGG style blocks
         m.add(scn.Sequential().add(scn.BatchNormReLU(a)).add(
             scn.SubmanifoldConvolution(dimension, a, b, 3, False)))
Code Example #11
def conv_op3d(kernel, n_filters, sparse=False):

    if sparse:
        m = torch.nn.ModuleList()
        m.append(
            scn.SubmanifoldConvolution(dimension=3,
                                       nIn=n_filters,
                                       nOut=n_filters,
                                       filter_size=kernel,
                                       bias=True))

        m.append(scn.BatchNormReLU(n_filters))
        return m
    else:

        m = torch.nn.ModuleList()
        m.append(
            torch.nn.Conv3d(in_channels=n_filters,
                            out_channels=n_filters,
                            kernel_size=kernel,
                            bias=True))

        m.append(torch.nn.BatchNorm3d(n_filters))

        m.append(torch.nn.ReLU())

        return m
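conv_op3d() returns a torch.nn.ModuleList, which has no forward() of its own, so the returned operations must be applied one by one. A usage sketch for the dense branch with assumed shapes:

# Hypothetical usage of the dense branch; shapes are assumptions.
import torch

ops = conv_op3d(kernel=3, n_filters=8, sparse=False)
x = torch.randn(1, 8, 16, 16, 16)   # (batch, channels, D, H, W)
for op in ops:
    x = op(x)                       # Conv3d -> BatchNorm3d -> ReLU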
Code Example #12
 def __init__(self, flags):
     import sparseconvnet as scn
     super(UResNet, self).__init__()
     self._flags = flags
     dimension = flags.DATA_DIM
     reps = 2  # Conv block repetition factor
     kernel_size = 2  # Use input_spatial_size method for other values?
     m = flags.URESNET_FILTERS  # Unet number of features
     nPlanes = [i * m for i in range(1, flags.URESNET_NUM_STRIDES + 1)
                ]  # UNet number of features per level
     # nPlanes = [(2**i) * m for i in range(1, num_strides+1)]  # UNet number of features per level
     nInputFeatures = 1
     self.sparseModel = scn.Sequential().add(
         scn.InputLayer(dimension, flags.SPATIAL_SIZE, mode=3)).add(
             scn.SubmanifoldConvolution(
                 dimension, nInputFeatures, m, 3,
                 False)).add(  # Kernel size 3, no bias
                     scn.UNet(dimension,
                              reps,
                              nPlanes,
                              residual_blocks=True,
                              downsample=[kernel_size, 2])
                 ).add(  # downsample = [filter size, filter stride]
                     scn.BatchNormReLU(m)).add(scn.OutputLayer(dimension))
     self.linear = torch.nn.Linear(m, flags.NUM_CLASS)
Code Example #13
    def _make_skip_layer(self, inplanes, planes):

        layers = scn.Sequential(
            scn.NetworkInNetwork(inplanes, planes, False),
            scn.BatchNormReLU(planes)
        )
        return layers
Code Example #14
File: models.py, Project: yueqiw/deep-neuro-morpho
 def __init__(self, n_classes):
     nn.Module.__init__(self)
     self.n_classes = n_classes
     self.sparseModel = scn.Sequential(
         # 255x255
         scn.SubmanifoldConvolution(
             2, 2, 16, 3, False),  # dimension, nIn, nOut, filter_size, bias
         scn.MaxPooling(2, 3, 2),  # dimension, pool_size, pool_stride
         # 127x127
         scn.SparseResNet(
             2,
             16,
             [  # dimension, nInputPlanes, layers
                 ['b', 16, 2, 1],  # 63x63  # blockType, n, reps, stride
                 ['b', 32, 2, 2],  # 63x63
                 ['b', 48, 2, 2],  # 31x31
                 ['b', 96, 2, 2],  # 15x15 
                 ['b', 144, 2, 2],  # 7x7
                 ['b', 192, 2, 2]
             ]),  # 3x3
         scn.Convolution(
             2, 192, 256, 3, 1, False
         ),  # 1x1 # dimension, nIn, nOut, filter_size, filter_stride, bias
         scn.BatchNormReLU(256))  # dimension, nPlanes
     self.sparse_to_dense = scn.SparseToDense(2, 256)
     #self.spatial_size= self.sc.input_spatial_size(torch.LongTensor([1, 1]))
     self.spatial_size = self.sparseModel.input_spatial_size(
         torch.LongTensor([1, 1]))
     self.inputLayer = scn.InputLayer(2, self.spatial_size,
                                      2)  # dimension, spatial_size, mode
     self.linear = nn.Linear(256, self.n_classes)
     print(self.spatial_size)
Code Example #15
    def __init__(self, nr_classes):
        super(FBSparseVGGTest, self).__init__()
        self.sparseModel = scn.SparseVggNet(
            2,
            nInputPlanes=2,
            layers=[['C', 16], ['C', 16], 'MP', ['C', 32], ['C', 32], 'MP',
                    ['C', 64], ['C', 64], 'MP', ['C', 128], ['C', 128], 'MP',
                    ['C', 256], ['C', 256], 'MP', ['C', 512]]).add(
                        scn.Convolution(2,
                                        512,
                                        256,
                                        3,
                                        filter_stride=2,
                                        bias=False)).add(
                                            scn.BatchNormReLU(256)).add(
                                                scn.SparseToDense(2, 256))

        cnn_spatial_output_size = [2, 3]
        self.spatial_size = self.sparseModel.input_spatial_size(
            torch.LongTensor(cnn_spatial_output_size))
        self.inputLayer = scn.InputLayer(dimension=2,
                                         spatial_size=self.spatial_size,
                                         mode=2)
        self.linear_input_features = cnn_spatial_output_size[
            0] * cnn_spatial_output_size[1] * 256
        self.linear = nn.Linear(self.linear_input_features, nr_classes)
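Since the sparse stack above ends in scn.SparseToDense, a matching forward() flattens the dense 2x3 output before the linear classifier. A hedged sketch (the original forward() is not shown; the [coordinates, features] input convention of scn.InputLayer is assumed):

    # Assumed forward(), not part of the excerpt above.
    def forward(self, x):
        # x = [coords (N x 3 LongTensor: y, x, sample index), features (N x 2 FloatTensor)]
        x = self.inputLayer(x)
        x = self.sparseModel(x)                      # dense tensor of shape (B, 256, 2, 3)
        x = x.view(-1, self.linear_input_features)   # flatten to (B, 1536)
        return self.linear(x)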
Code Example #16
    def __init__(self, inplanes, outplanes, batch_norm, leaky_relu):
        nn.Module.__init__(self)

        self.batch_norm = batch_norm
        self.leaky_relu = leaky_relu

        self.conv1 = scn.SubmanifoldConvolution(dimension=3,
            nIn         = inplanes,
            nOut        = outplanes,
            filter_size = 3,
            bias=False)

        if self.batch_norm:
            if self.leaky_relu:
                self.bn1 = scn.BatchNormLeakyReLU(outplanes)
            else:
                self.bn1 = scn.BatchNormReLU(outplanes)

        self.conv2 = scn.SubmanifoldConvolution(dimension=3,
            nIn         = outplanes,
            nOut        = outplanes,
            filter_size = 3,
            bias        = False)

        if self.batch_norm:
            self.bn2 = scn.BatchNormalization(outplanes)

        self.residual = scn.Identity()

        if self.leaky_relu:
            self.relu = scn.LeakyReLU()
        else:
            self.relu = scn.ReLU()

        self.add = scn.AddTable()
Code Example #17
 def __init__(self,
              is_3d,
              num_strides=3,
              base_num_outputs=16,
              num_classes=3,
              spatialSize=192):
     nn.Module.__init__(self)
     dimension = 3 if is_3d else 2
     reps = 2  # Conv block repetition factor
     kernel_size = 2  # Use input_spatial_size method for other values?
     m = base_num_outputs  # Unet number of features
     nPlanes = [i * m for i in range(1, num_strides + 1)
                ]  # UNet number of features per level
     # nPlanes = [(2**i) * m for i in range(1, num_strides+1)]  # UNet number of features per level
     nInputFeatures = 1
     self.sparseModel = scn.Sequential().add(
         scn.InputLayer(dimension, spatialSize, mode=3)).add(
             scn.SubmanifoldConvolution(
                 dimension, nInputFeatures, m, 3,
                 False)).add(  # Kernel size 3, no bias
                     UNet(dimension,
                          reps,
                          nPlanes,
                          residual_blocks=True,
                          downsample=[kernel_size, 2])
                 ).add(  # downsample = [filter size, filter stride]
                     scn.BatchNormReLU(m)).add(scn.OutputLayer(dimension))
     self.linear = nn.Linear(m, num_classes)
Code Example #18
 def __init__(self):
     super(get_model, self).__init__()
     self.part_num = 3
     self.resolution = 150
     self.dimension = 3
     self.reps = 2  #Conv block repetition factor
     self.m = 32  #Unet number of features
     self.nPlanes = [
         self.m, 2 * self.m, 3 * self.m, 4 * self.m, 5 * self.m
     ]  #UNet number of features per level
     self.sparseModel = scn.Sequential().add(
         scn.InputLayer(
             self.dimension,
             torch.LongTensor([self.resolution * 8 + 15] * 3),
             mode=3)).add(
                 scn.SubmanifoldConvolution(
                     self.dimension, 1, self.m, 3, False)).add(
                         scn.FullyConvolutionalNet(
                             self.dimension,
                             self.reps,
                             self.nPlanes,
                             residual_blocks=False,
                             downsample=[3, 2])).add(
                                 scn.BatchNormReLU(sum(self.nPlanes))).add(
                                     scn.OutputLayer(self.dimension))
     self.nc = 64
     self.linear = nn.Linear(sum(self.nPlanes), self.nc)
     self.convs1 = torch.nn.Conv1d(self.nc * 3, 128, 1)
     self.convs2 = torch.nn.Conv1d(128, 64, 1)
     self.convs3 = torch.nn.Conv1d(64, self.part_num, 1)
     self.bns1 = nn.BatchNorm1d(128)
     self.bns2 = nn.BatchNorm1d(64)
Code Example #19
File: reproduce.py, Project: junzeliu/SparseConvNet
 def __init__(self):
     nn.Module.__init__(self)
     self.sparseModel = scn.Sequential(
         scn.SubmanifoldConvolution(
             3, 1, 64, 7,
             False),  # sscn(dimension, nIn, nOut, filter_size, bias)
         scn.BatchNormReLU(64),
         scn.MaxPooling(3, 3,
                        2),  # MaxPooling(dimension, pool_size, pool_stride)
         scn.SparseResNet(
             3,
             64,
             [  # SpraseResNet(dimension, nInputPlanes, layers=[])
                 ['b', 64, 2, 1],  # [block_type, nOut, rep, stride]
                 ['b', 64, 2, 1],
                 ['b', 128, 2, 2],
                 ['b', 128, 2, 2],
                 ['b', 256, 2, 2],
                 ['b', 256, 2, 2],
                 ['b', 512, 2, 2],
                 ['b', 512, 2, 2]
             ]),
         scn.SparseToDense(3, 256))
     self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
     # self.spatial_size= self.sparseModel.input_spatial_size(torch.LongTensor([1, 1]))
     self.spatial_size = torch.LongTensor([101, 101, 101])
     self.inputLayer = scn.InputLayer(3, self.spatial_size, mode=3)
Code Example #20
 def __init__(self, flags):
     torch.nn.Module.__init__(self)
     import sparseconvnet as scn
     self._flags = flags
     dimension = self._flags.DATA_DIM
     num_class = self._flags.NUM_CLASS
     image_size = self._flags.SPATIAL_SIZE
     num_filter = self._flags.BASE_NUM_FILTERS
     assert image_size == 128
     net = scn.Sequential()
     net.add(scn.InputLayer(dimension, image_size, mode=3))
     net.add(scn.SubmanifoldConvolution(dimension, 1, num_filter, 3, False))
     net.add(scn.MaxPooling(dimension, 2, 2))
     net.add(
         SparseResNet(dimension, num_filter,
                      [[num_filter * 1, 2, 1], [num_filter * 2, 2, 2],
                       [num_filter * 4, 2, 2], [num_filter * 8, 2, 2]]))
     net.add(
         scn.Convolution(dimension, num_filter * 8, num_filter * 16, 3, 1,
                         False))
     net.add(scn.BatchNormReLU(num_filter * 16))
     net.add(scn.SparseToDense(dimension, num_filter * 16))
     net.add(torch.nn.AvgPool3d(6))
     self._net = net
     self.linear = torch.nn.Linear(num_filter * 16, num_class)
Code Example #21
 def __init__(self,
              dimension=3,
              size=1536,
              nFeatures=16,
              depth=5,
              nClasses=1):
     super(UResNet, self).__init__()
     self.dimension = dimension
     self.size = size
     self.nFeatures = nFeatures
     self.depth = depth
     self.nClasses = nClasses
     reps = 2  # Conv block repetition factor
     kernel_size = 2  # Use input_spatial_size method for other values?
     m = nFeatures  # Unet number of features
     nPlanes = [i * m for i in range(1, depth + 1)
                ]  # UNet number of features per level
     # nPlanes = [(2**i) * m for i in range(1, num_strides+1)]  # UNet number of features per level
     nInputFeatures = 6
     self.sparseModel = scn.Sequential().add(
         scn.InputLayer(dimension, size, mode=3)).add(
             scn.SubmanifoldConvolution(
                 dimension, nInputFeatures, m, 3,
                 False)).add(  # Kernel size 3, no bias
                     scn.UNet(dimension,
                              reps,
                              nPlanes,
                              residual_blocks=True,
                              downsample=[kernel_size, 2])
                 ).add(  # downsample = [filter size, filter stride]
                     scn.BatchNormReLU(m)).add(scn.OutputLayer(dimension))
     #         self.linear = torch.nn.Linear(m, nClasses)
     self.linear = torch.nn.Sequential(torch.nn.Linear(m, m // 2),
                                       torch.nn.ReLU(0.1),
                                       torch.nn.Linear(m // 2, nClasses))
Code Example #22
    def __init__(self, inplanes, outplanes, nplanes=1):
        nn.Module.__init__(self)
        
        
        self.conv1 = scn.SubmanifoldConvolution(dimension=3, 
            nIn         = inplanes, 
            nOut        = outplanes, 
            filter_size = [nplanes,3,3], 
            bias=False)
        

        # if FLAGS.BATCH_NORM:
        self.bn1 = scn.BatchNormReLU(outplanes)

        self.conv2 = scn.SubmanifoldConvolution(dimension=3, 
            nIn         = outplanes,
            nOut        = outplanes,
            filter_size = [nplanes,3,3],
            bias        = False)

        # if FLAGS.BATCH_NORM:
        self.bn2 = scn.BatchNormalization(outplanes)

        self.residual = scn.Identity()
        self.relu = scn.ReLU()

        self.add = scn.AddTable()
Code Example #23
    def __init__(self, *, inplanes, outplanes, nplanes=1, params):
        nn.Module.__init__(self)

        self.conv1 = scn.SubmanifoldConvolution(dimension=3,
                                                nIn=inplanes,
                                                nOut=outplanes,
                                                filter_size=[nplanes, 3, 3],
                                                bias=params.use_bias)

        self.do_batch_norm = False
        if params.batch_norm:
            self.do_batch_norm = True
            self.bn1 = scn.BatchNormReLU(outplanes)

        self.conv2 = scn.SubmanifoldConvolution(dimension=3,
                                                nIn=outplanes,
                                                nOut=outplanes,
                                                filter_size=[nplanes, 3, 3],
                                                bias=False)

        if params.batch_norm:
            self.bn2 = scn.BatchNormalization(outplanes)

        self.residual = scn.Identity()
        self.relu = scn.ReLU()

        self.add = scn.AddTable()
Code Example #24
    def __init__(self, cfg):
        import sparseconvnet as scn
        super(UResNet, self).__init__()
        self._model_config = cfg['modules']['uresnet']

        self._dimension = self._model_config.get('data_dim', 3)
        num_strides = self._model_config.get('num_strides', 5)
        spatial_size = self._model_config.get('spatial_size', 512)
        num_classes = self._model_config.get('num_classes', 5)
        m = self._model_config.get('filters', 16)  # Unet number of features
        nInputFeatures = self._model_config.get('features', 1)

        reps = 2  # Conv block repetition factor
        kernel_size = 2  # Use input_spatial_size method for other values?
        nPlanes = [i * m for i in range(1, num_strides + 1)
                   ]  # UNet number of features per level
        # nPlanes = [(2**i) * m for i in range(1, num_strides+1)]  # UNet number of features per level

        self.sparseModel = scn.Sequential().add(
            scn.InputLayer(self._dimension, spatial_size, mode=3)).add(
                scn.SubmanifoldConvolution(
                    self._dimension, nInputFeatures, m, 3,
                    False)).add(  # Kernel size 3, no bias
                        scn.UNet(self._dimension,
                                 reps,
                                 nPlanes,
                                 residual_blocks=True,
                                 downsample=[kernel_size, 2])
                    ).add(  # downsample = [filter size, filter stride]
                        scn.BatchNormReLU(m)).add(
                            scn.OutputLayer(self._dimension))
        self.linear = torch.nn.Linear(m, num_classes)
Code Example #25
 def bar(nPlanes, bias):
     m = scn.Sequential()
     m.add(scn.BatchNormReLU(nPlanes))
     m.add(scn.NetworkInNetwork(
         nPlanes, nClasses,
         bias))  # accumulate softmax input, only one set of biases
     return m
Code Example #26
File: instance.py, Project: xuhuahaoren/MASC
    def __init__(self, full_scale=127, use_normal=False):
        nn.Module.__init__(self)

        dimension = 3
        m = 32  # 16 or 32
        residual_blocks = True  #True or False
        block_reps = 2  #Conv block repetition factor: 1 or 2

        blocks = [['b', m * k, 2, 2] for k in [1, 2, 3, 4, 5]]
        self.num_final_channels = m * len(blocks)

        self.sparseModel = scn.Sequential().add(
            scn.InputLayer(dimension, full_scale, mode=4)).add(
                scn.SubmanifoldConvolution(
                    dimension, 3 + 3 * int(use_normal), m, 3,
                    False)).add(scn.MaxPooling(dimension, 3, 2)).add(
                        scn.SparseResNet(dimension, m, blocks)).add(
                            scn.BatchNormReLU(self.num_final_channels)).add(
                                scn.SparseToDense(dimension,
                                                  self.num_final_channels))

        self.num_labels = 20
        self.label_encoder = nn.Sequential(nn.Linear(self.num_labels, 64),
                                           nn.ReLU())
        #self.pred = nn.Linear(m * len(blocks), 1)
        self.pred = nn.Sequential(nn.Linear(self.num_final_channels + 64, 64),
                                  nn.ReLU(), nn.Linear(64, 1))
        return
Code Example #27
File: ResNet_sparse.py, Project: mmkekic/NEXT_torch
def SparseResNet(dimension, nInputPlanes, layers, mom=0.99):
    """
    pre-activated ResNet
    e.g. layers = {{'basic',16,2,1},{'basic',32,2}}
    """
    nPlanes = nInputPlanes
    m = scn.Sequential()

    def residual(nIn, nOut, stride):
        if stride > 1:
            return scn.Convolution(dimension, nIn, nOut, 3, stride, False)
        elif nIn != nOut:
            return scn.NetworkInNetwork(nIn, nOut, False)
        else:
            return scn.Identity()

    for blockType, n, reps, stride in layers:
        for rep in range(reps):
            if blockType[0] == 'b':  # basic block
                if rep == 0:
                    m.add(scn.BatchNormReLU(nPlanes, momentum=mom, eps=1e-5))
                    m.add(scn.ConcatTable().add(scn.Sequential().add(
                        scn.SubmanifoldConvolution(dimension, nPlanes, n, 3,
                                                   False) if stride ==
                        1 else scn.Convolution(
                            dimension, nPlanes, n, 3, stride, False)).add(
                                scn.BatchNormReLU(
                                    n, momentum=mom, eps=1e-5)).add(
                                        scn.SubmanifoldConvolution(
                                            dimension, n, n, 3, False))).add(
                                                residual(nPlanes, n, stride)))
                else:
                    m.add(scn.ConcatTable().add(scn.Sequential().add(
                        scn.BatchNormReLU(
                            nPlanes, momentum=mom, eps=1e-5)).add(
                                scn.SubmanifoldConvolution(
                                    dimension, nPlanes, n, 3, False)).add(
                                        scn.BatchNormReLU(
                                            n, momentum=mom, eps=1e-5)).add(
                                                scn.SubmanifoldConvolution(
                                                    dimension, n, n, 3,
                                                    False))).add(
                                                        scn.Identity()))
            nPlanes = n
            m.add(scn.AddTable())
    m.add(scn.BatchNormReLU(nPlanes, momentum=mom, eps=1e-5))
    return m
Code Example #28
 def foo(nPlanes):
     m = scn.Sequential()
     for _ in range(reps):
         m.add(scn.BatchNormReLU(nPlanes))
         m.add(
             scn.SubmanifoldConvolution(dimension, nPlanes, nPlanes, 3,
                                        False))
     return m
Code Example #29
File: unet_add.py, Project: zjhthu/SGC-Release
    def __init__(self, dimension, nPlanes_down, nPlanes_up, reps, type, predict=False, nClasses=None, extract=False, kernel_size=None):
        super(up, self).__init__()
        self.dimension = dimension
        self.type = type
        self.predict = predict
        self.extract = extract
        self.kernel_size = kernel_size

        if type == 'c':
            self.upsample = scn.Sequential().add(scn.BatchNormReLU(nPlanes_down)).add(scn.DenseDeconvolution(dimension, nPlanes_down, nPlanes_up, 2, 2, False))
        elif type == 'v':
            self.upsample = scn.Sequential().add(scn.BatchNormReLU(nPlanes_down)).add(scn.Deconvolution(dimension, nPlanes_down, nPlanes_up, 2, 2, False))
        self.conv = scn.Sequential()
        for i in range(reps):
            res(self.conv, dimension, nPlanes_up, nPlanes_up)
        if predict:
            self.Linear = scn.Sequential().add(scn.BatchNormReLU(nPlanes_up)).add(scn.Linear(dimension, nPlanes_up, nClasses))
Code Example #30
File: VGG-C.py, Project: shlpu/SparseConvNet
 def __init__(self):
     nn.Module.__init__(self)
     self.sparseModel = scn.SparseVggNet(
         2, 3,
         [['C', 16], ['C', 16], 'MP', ['C', 32], ['C', 32], 'MP', ['C', 48],
          ['C', 48], 'MP', ['C', 64], ['C', 64], 'MP', ['C', 96], ['C', 96]
          ]).add(scn.Convolution(2, 96, 128, 3, 2, False)).add(
              scn.BatchNormReLU(128)).add(scn.SparseToDense(2, 128))
     self.linear = nn.Linear(128, 3755)