Example 1
0
 def __init__(self, num_layers, ndim, shape, kernel_size, stride, padding,
              dilation):
     """Build a stack of ``num_layers`` identical SparseMaxPool3d layers.

     Args:
         num_layers: number of pooling layers to stack (expected >= 1).
         ndim: unused in this constructor — presumably kept for a shared
             test-harness signature; TODO confirm with callers.
         shape: spatial shape of the input sparse tensor, stored for later use.
         kernel_size, stride, padding, dilation: forwarded verbatim to every
             SparseMaxPool3d layer.
     """
     super().__init__()
     # Every layer has the same configuration, so build them all in one pass
     # instead of seeding the list and appending from index 1.
     layers = [
         spconv.SparseMaxPool3d(kernel_size, stride, padding, dilation)
         for _ in range(num_layers)
     ]
     self.net = spconv.SparseSequential(*layers)
     self.shape = shape
Example 2
0
 def __init__(self, shape, algo):
     """Two submanifold 3D convolutions sharing one indice key, plus a
     pre-allocated indice grid.

     Args:
         shape: spatial shape of the sparse tensor — assumed 3D (D, H, W);
             TODO confirm against callers.
         algo: spconv.ConvAlgo used by both convolutions.
     """
     super().__init__()
     # Both convs reuse indice_key "c0": submanifold convs keep the sparsity
     # pattern, so the second layer can share the first layer's indices.
     self.net = spconv.SparseSequential(
         spconv.SubMConv3d(3, 128, 3, bias=False, indice_key="c0",
                           algo=algo),
         spconv.SubMConv3d(128, 128, 3, bias=False, indice_key="c0",
                           algo=algo),
     )
     max_batch_size = 1
     # grid (dense map) is used for indice generation. Using a pre-allocated
     # grid can run faster than generating it per forward pass.
     self.grid = torch.full([max_batch_size, *shape], -1,
                            dtype=torch.int32).cuda()
     self.shape = shape
Example 3
0
 def __init__(self):
     """Build a sparse 2D conv feature extractor and a dense classifier head."""
     super(Net, self).__init__()
     # Sparse stack: BN -> two submanifold convs with ReLU -> 2x2 max pool,
     # converted to a dense tensor at the end for the fully-connected head.
     feature_layers = [
         nn.BatchNorm1d(1),
         spconv.SubMConv2d(1, 32, 3, 1),
         nn.ReLU(),
         spconv.SubMConv2d(32, 64, 3, 1),
         nn.ReLU(),
         spconv.SparseMaxPool2d(2, 2),
         spconv.ToDense(),
     ]
     self.net = spconv.SparseSequential(*feature_layers)
     # Dense head: flattened 14x14x64 feature map -> 128 -> 10 classes.
     self.fc1 = nn.Linear(14 * 14 * 64, 128)
     self.fc2 = nn.Linear(128, 10)
     self.dropout1 = nn.Dropout2d(0.25)
     self.dropout2 = nn.Dropout2d(0.5)
Example 4
0
 def __init__(self, num_layers, ndim, shape, in_channels, out_channels,
              kernel_size, stride):
     """Downsample with a strided sparse conv, then restore with its inverse.

     num_layers and ndim are accepted but unused here — presumably kept for
     a shared test-harness signature; TODO confirm with callers.
     """
     super().__init__()
     # The inverse conv shares indice_key "cp0" with the forward conv so it
     # exactly reverses that conv's spatial subsampling.
     downsample = spconv.SparseConv3d(in_channels, out_channels, kernel_size,
                                      stride, indice_key="cp0", bias=False)
     restore = spconv.SparseInverseConv3d(out_channels, in_channels,
                                          kernel_size, indice_key="cp0",
                                          bias=False)
     self.net = spconv.SparseSequential(downsample, restore)
     self.todense = spconv.ToDense()
     self.shape = shape
Example 5
0
 def __init__(self, num_layers, ndim, shape, in_channels, out_channels,
              kernel_size, stride, padding, dilation):
     """Stack of SparseConvTranspose3d layers.

     The first layer maps in_channels -> out_channels; every subsequent
     layer maps out_channels -> out_channels with the same hyperparameters.
     """
     super().__init__()
     # One pass builds all layers; only the input channel count differs
     # between the first layer and the rest.
     layers = [
         spconv.SparseConvTranspose3d(
             in_channels if idx == 0 else out_channels,
             out_channels,
             kernel_size,
             stride,
             padding=padding,
             dilation=dilation,
             bias=False) for idx in range(num_layers)
     ]
     self.net = spconv.SparseSequential(*layers)
     self.shape = shape
Example 6
0
 def __init__(self,
              num_layers,
              ndim,
              shape,
              in_channels,
              out_channels,
              kernel_size,
              stride,
              padding,
              dilation,
              algo=spconv.ConvAlgo.MaskSplitImplicitGemm):
     """Stack of strided SparseConv3d layers sharing one conv algorithm.

     The first layer maps in_channels -> out_channels; later layers map
     out_channels -> out_channels with identical hyperparameters.
     """
     super().__init__()
     self.algo = algo
     # Build every layer in a single comprehension; only the first layer's
     # input channel count differs from the rest.
     layers = [
         spconv.SparseConv3d(in_channels if idx == 0 else out_channels,
                             out_channels,
                             kernel_size,
                             stride,
                             padding=padding,
                             dilation=dilation,
                             bias=False,
                             algo=algo) for idx in range(num_layers)
     ]
     self.net = spconv.SparseSequential(*layers)
     # No pre-allocated indice grid: indices are generated on the fly.
     self.grid = None
     self.shape = shape
Example 7
0
    def __init__(self, shape, algo):
        """VGG-style sparse 3D backbone: pairs of submanifold convs separated
        by 2x2x2 max pooling, with a pre-allocated indice grid.

        Args:
            shape: spatial shape of the sparse tensor — assumed 3D (D, H, W);
                TODO confirm against callers.
            algo: spconv.ConvAlgo used by every conv and pool layer.
        """
        super().__init__()
        pool_algo = algo
        # Channel progression: each stage is two SubMConv3d layers sharing
        # indice_key "c{i}", followed by a 2x max pool (except the last stage,
        # which has no trailing pool).
        channels = [3, 64, 96, 128, 160, 192, 224, 256]
        # Only the pools after stages 4 and 5 carry an indice_key ("m4"/"m5"),
        # so inverse convs could later undo those pools if re-enabled.
        pool_keys = [None, None, None, None, "m4", "m5"]
        layers = []
        for stage, (cin, cout) in enumerate(zip(channels[:-1], channels[1:])):
            conv_key = "c{}".format(stage)
            layers.append(
                spconv.SubMConv3d(cin, cout, 3, bias=False,
                                  indice_key=conv_key, algo=algo))
            layers.append(
                spconv.SubMConv3d(cout, cout, 3, bias=False,
                                  indice_key=conv_key, algo=algo))
            if stage < len(pool_keys):
                layers.append(
                    spconv.SparseMaxPool3d(2, 2, indice_key=pool_keys[stage],
                                           algo=pool_algo))
        self.net = spconv.SparseSequential(*layers)
        max_batch_size = 1
        # grid (dense map) is used for indice generation. Using a
        # pre-allocated grid can run faster than generating it per pass.
        self.grid = torch.full([max_batch_size, *shape], -1,
                               dtype=torch.int32).cuda()
        self.shape = shape