Example #1
 def __init__(self, shape, algo):
     super().__init__()
     self.net = spconv.SparseSequential(
         spconv.SubMConv3d(3,
                           128,
                           3,
                           bias=False,
                           indice_key="c0",
                           algo=algo),
         # spconv.SubMConv3d(32,
         #                   32,
         #                   3,
         #                   bias=False,
         #                   indice_key="c0",
         #                   algo=algo),
         # # nn.BatchNorm1d(32),
         # # nn.ReLU(),
         # # spconv.SparseConv3d(64, 64, 2, 2, bias=False,
         # #                   algo=algo),
         # spconv.SubMConv3d(32, 64, 3, bias=False, indice_key="c0",
         #                   algo=algo),
         spconv.SubMConv3d(128,
                           128,
                           3,
                           bias=False,
                           indice_key="c0",
                           algo=algo),
         # nn.BatchNorm1d(32),
         # nn.ReLU(),
         # spconv.SparseMaxPool3d(2, 2),
         # spconv.SubMConv3d(256,
         #                   512,
         #                   3,
         #                   bias=False,
         #                   indice_key="c1",
         #                   algo=algo),
         # spconv.SubMConv3d(512,
         #                   512,
         #                   3,
         #                   bias=False,
         #                   indice_key="c1",
         #                   algo=algo),
     )
     max_batch_size = 1
     # The grid (a dense index map) is used for indice generation; using a
     # pre-allocated grid can run faster.
     self.grid = torch.full([max_batch_size, *shape], -1,
                            dtype=torch.int32).cuda()
     # self.grid = None
     self.shape = shape
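
For orientation, here is a hedged usage sketch for a stack like Example #1's: per-voxel features and their integer coordinates are wrapped in a spconv.SparseConvTensor and pushed through the SparseSequential. Everything below (import path, shapes, occupancy pattern) is an illustrative assumption, not part of the example itself.

import torch
import spconv.pytorch as spconv  # assumption: spconv 2.x import path

# Build a synthetic sparse input: unique (z, y, x) coordinates plus a batch index.
shape = [64, 64, 64]                                    # spatial shape, as passed to __init__
mask = torch.rand(shape) > 0.99                         # ~1% occupancy (arbitrary)
coords = mask.nonzero().to(torch.int32)                 # [N, 3] unique voxel coordinates
indices = torch.cat([torch.zeros(coords.shape[0], 1, dtype=torch.int32), coords],
                    dim=1).cuda()                       # [N, 4] = (batch_idx, z, y, x)
features = torch.randn(indices.shape[0], 3).cuda()      # 3 input channels, matching above

x = spconv.SparseConvTensor(features, indices, shape, batch_size=1)
net = spconv.SparseSequential(
    spconv.SubMConv3d(3, 128, 3, bias=False, indice_key="c0"),
    spconv.SubMConv3d(128, 128, 3, bias=False, indice_key="c0"),
).cuda()
out = net(x)                   # another SparseConvTensor with the same active sites
print(out.features.shape)      # torch.Size([N, 128])
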
Example #2
def conv1x1(in_planes, out_planes, stride=1, indice_key=None):
    return spconv.SubMConv3d(in_planes,
                             out_planes,
                             kernel_size=1,
                             stride=stride,
                             padding=1,
                             bias=False,
                             indice_key=indice_key)
Example #3
def conv1x1x3(in_planes, out_planes, stride=1, indice_key=None):
    return spconv.SubMConv3d(in_planes,
                             out_planes,
                             kernel_size=(1, 1, 3),
                             stride=stride,
                             padding=(0, 0, 1),
                             bias=False,
                             indice_key=indice_key)
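
Examples #2 and #3 are thin factory helpers around spconv.SubMConv3d. Below is a hedged composition sketch; the import path, channel sizes, and indice keys are illustrative assumptions. Note that a submanifold convolution keeps exactly the input's active sites, so the padding arguments in the helpers do not change which voxels are produced.

import spconv.pytorch as spconv  # assumption: spconv 2.x import path

# Illustrative block built from the helpers above; channel sizes are arbitrary.
block = spconv.SparseSequential(
    conv1x1(32, 64, indice_key="pw0"),      # pointwise channel mixing
    conv1x1x3(64, 64, indice_key="z0"),     # mixes along the last spatial axis only
    conv1x1x3(64, 64, indice_key="z0"),     # same kernel shape -> can reuse the "z0" indices
)
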
Example #4
 def __init__(self, num_layers, ndim, shape, in_channels, out_channels,
              kernel_size, stride):
     super().__init__()
     layers = [
         spconv.SubMConv3d(in_channels,
                           out_channels,
                           kernel_size,
                           bias=False,
                           indice_key="subm0")
     ]
     for i in range(1, num_layers):
         layers.append(
             spconv.SubMConv3d(out_channels,
                               out_channels,
                               kernel_size,
                               bias=False))
     # nn.Sequential suffices here because every layer is an spconv module;
     # spconv.SparseSequential would be needed if dense layers such as
     # nn.BatchNorm1d or nn.ReLU were mixed in.
     self.net = nn.Sequential(*layers)
     self.shape = shape
Example #5
 def __init__(self,
              num_layers,
              ndim,
              shape,
              in_channels,
              out_channels,
              kernel_size,
              stride,
              padding,
              dilation,
              algo=spconv.ConvAlgo.Native):
     super().__init__()
     layers = [
         spconv.SubMConv3d(in_channels,
                           out_channels,
                           kernel_size,
                           stride,
                           padding=padding,
                           dilation=dilation,
                           bias=False,
                           algo=algo)
     ]
     for i in range(1, num_layers):
         layers.append(
             spconv.SubMConv3d(out_channels,
                               out_channels,
                               kernel_size,
                               stride,
                               padding=padding,
                               dilation=dilation,
                               bias=False,
                               algo=algo))
     self.net = spconv.SparseSequential(*layers)
     # self.grid = torch.full([3, *shape], -1, dtype=torch.int32).cuda()
     self.grid = None
     self.shape = shape
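
Example #5 differs from Example #4 mainly in that it threads stride/padding/dilation through and is parameterized by algo (defaulting to spconv.ConvAlgo.Native). A hedged instantiation sketch follows; the class name SubMConvStack and all argument values are assumptions used purely for illustration.

import spconv.pytorch as spconv  # assumption: spconv 2.x import path

# Hypothetical class name for the module whose __init__ is shown above.
net = SubMConvStack(num_layers=2, ndim=3, shape=[64, 64, 64],
                    in_channels=3, out_channels=32, kernel_size=3,
                    stride=1, padding=1, dilation=1,   # stride 1: submanifold convs do not downsample
                    algo=spconv.ConvAlgo.MaskImplicitGemm)  # alternative to ConvAlgo.Native (spconv 2.x)
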
Example #6
    def __init__(self, shape, algo):
        super().__init__()
        pool_algo = algo
        # pool_algo = ConvAlgo.Native
        self.net = spconv.SparseSequential(
            spconv.SubMConv3d(3, 64, 3, bias=False, indice_key="c0",
                              algo=algo),
            # spconv.SubMConv3d(32,
            #                   32,
            #                   3,
            #                   bias=False,
            #                   indice_key="c0",
            #                   algo=algo),
            # # nn.BatchNorm1d(32),
            # # nn.ReLU(),
            # # spconv.SparseConv3d(64, 64, 2, 2, bias=False,
            # #                   algo=algo),
            # spconv.SubMConv3d(32, 64, 3, bias=False, indice_key="c0",
            #                   algo=algo),
            spconv.SubMConv3d(64,
                              64,
                              3,
                              bias=False,
                              indice_key="c0",
                              algo=algo),
            # nn.BatchNorm1d(32),
            # nn.ReLU(),
            # spconv.SparseConv3d(64, 64, 2, 2, bias=False, indice_key="m0"),
            spconv.SparseMaxPool3d(2, 2, algo=pool_algo),
            spconv.SubMConv3d(64,
                              96,
                              3,
                              bias=False,
                              indice_key="c1",
                              algo=algo),
            spconv.SubMConv3d(96,
                              96,
                              3,
                              bias=False,
                              indice_key="c1",
                              algo=algo),
            # nn.BatchNorm1d(64),
            # nn.ReLU(),
            # spconv.SparseConv3d(96, 96, 2, 2, bias=False, indice_key="m1"),
            spconv.SparseMaxPool3d(2, 2, algo=pool_algo),
            spconv.SubMConv3d(96,
                              128,
                              3,
                              bias=False,
                              indice_key="c2",
                              algo=algo),
            spconv.SubMConv3d(128,
                              128,
                              3,
                              bias=False,
                              indice_key="c2",
                              algo=algo),
            # nn.BatchNorm1d(128),
            # nn.ReLU(),
            # spconv.SparseConv3d(128, 128, 2, 2, bias=False, indice_key="m2"),
            spconv.SparseMaxPool3d(2, 2, algo=pool_algo),
            spconv.SubMConv3d(128,
                              160,
                              3,
                              bias=False,
                              indice_key="c3",
                              algo=algo),
            spconv.SubMConv3d(160,
                              160,
                              3,
                              bias=False,
                              indice_key="c3",
                              algo=algo),
            # nn.BatchNorm1d(128),
            # nn.ReLU(),
            # spconv.SparseConv3d(160, 160, 2, 2, bias=False, indice_key="m3"),
            spconv.SparseMaxPool3d(2, 2, algo=pool_algo),
            spconv.SubMConv3d(160,
                              192,
                              3,
                              bias=False,
                              indice_key="c4",
                              algo=algo),
            spconv.SubMConv3d(192,
                              192,
                              3,
                              bias=False,
                              indice_key="c4",
                              algo=algo),
            # nn.BatchNorm1d(128),
            # nn.ReLU(),
            spconv.SparseMaxPool3d(2, 2, indice_key="m4", algo=pool_algo),
            # spconv.SparseConv3d(192, 192, 2, 2, bias=False, indice_key="m4"),
            spconv.SubMConv3d(192,
                              224,
                              3,
                              bias=False,
                              indice_key="c5",
                              algo=algo),
            spconv.SubMConv3d(224,
                              224,
                              3,
                              bias=False,
                              indice_key="c5",
                              algo=algo),
            # nn.BatchNorm1d(224),
            # nn.ReLU(),
            # spconv.SparseConv3d(224, 224, 2, 2, bias=False, indice_key="m5"),
            spconv.SparseMaxPool3d(2, 2, indice_key="m5", algo=pool_algo),
            spconv.SubMConv3d(224,
                              256,
                              3,
                              bias=False,
                              indice_key="c6",
                              algo=algo),
            spconv.SubMConv3d(256,
                              256,
                              3,
                              bias=False,
                              indice_key="c6",
                              algo=algo),

            # nn.BatchNorm1d(256),
            # nn.ReLU(),

            # spconv.SparseInverseConv3d(256, 128, 2, indice_key="m5", bias=False, algo=algo),
            # # nn.BatchNorm1d(128),
            # # nn.ReLU(),

            # spconv.SparseInverseConv3d(128, 64, 2, indice_key="m4", bias=False, algo=algo),
        )
        max_batch_size = 1
        # The grid (a dense index map) is used for indice generation; using a
        # pre-allocated grid can run faster.
        self.grid = torch.full([max_batch_size, *shape], -1,
                               dtype=torch.int32).cuda()
        # self.grid = None
        self.shape = shape
Example #7
    def __init__(self,
                 output_shape,
                 use_norm=True,
                 num_input_features=128,
                 nclasses=20,
                 n_height=32,
                 strict=False,
                 init_size=16):
        super(Asymm_3d_spconv, self).__init__()
        self.nclasses = nclasses
        self.nheight = n_height
        self.strict = False  # note: the strict constructor argument is not used

        sparse_shape = np.array(output_shape)
        # sparse_shape[0] = 11
        print(sparse_shape)
        self.sparse_shape = sparse_shape

        self.downCntx = ResContextBlock(num_input_features,
                                        init_size,
                                        indice_key="pre")
        self.resBlock2 = ResBlock(init_size,
                                  2 * init_size,
                                  0.2,
                                  height_pooling=True,
                                  indice_key="down2")
        self.resBlock3 = ResBlock(2 * init_size,
                                  4 * init_size,
                                  0.2,
                                  height_pooling=True,
                                  indice_key="down3")
        self.resBlock4 = ResBlock(4 * init_size,
                                  8 * init_size,
                                  0.2,
                                  pooling=True,
                                  height_pooling=False,
                                  indice_key="down4")
        self.resBlock5 = ResBlock(8 * init_size,
                                  16 * init_size,
                                  0.2,
                                  pooling=True,
                                  height_pooling=False,
                                  indice_key="down5")

        self.upBlock0 = UpBlock(16 * init_size,
                                16 * init_size,
                                indice_key="up0",
                                up_key="down5")
        self.upBlock1 = UpBlock(16 * init_size,
                                8 * init_size,
                                indice_key="up1",
                                up_key="down4")
        self.upBlock2 = UpBlock(8 * init_size,
                                4 * init_size,
                                indice_key="up2",
                                up_key="down3")
        self.upBlock3 = UpBlock(4 * init_size,
                                2 * init_size,
                                indice_key="up3",
                                up_key="down2")

        self.ReconNet = ReconBlock(2 * init_size,
                                   2 * init_size,
                                   indice_key="recon")

        self.logits = spconv.SubMConv3d(4 * init_size,
                                        nclasses,
                                        indice_key="logit",
                                        kernel_size=3,
                                        stride=1,
                                        padding=1,
                                        bias=True)
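
Example #7 wires an asymmetric sparse encoder-decoder out of project-specific blocks (ResContextBlock, ResBlock, UpBlock, ReconBlock) that are defined elsewhere in the same codebase. A hedged instantiation sketch; all argument values below are illustrative assumptions, not values taken from the example.

# Illustrative only: requires ResContextBlock / ResBlock / UpBlock / ReconBlock
# from the surrounding project; the grid size and channel counts are made up.
model = Asymm_3d_spconv(output_shape=[480, 360, 32],   # example voxel grid shape
                        num_input_features=16,
                        nclasses=20,
                        init_size=32).cuda()
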