Example #1
    def __init__(self):
        super(PointNetFeature, self).__init__()

        k = self.KERNEL_SIZES
        s = self.STRIDES
        c = self.CONV_CHANNELS

        self.stn = STN3d(D=3)
        self.block1 = nn.Sequential(
            ME.MinkowskiConvolution(6,
                                    c[0],
                                    kernel_size=k[0],
                                    stride=s[0],
                                    has_bias=False,
                                    dimension=3),
            ME.MinkowskiInstanceNorm(c[0]), ME.MinkowskiReLU())
        self.block2 = nn.Sequential(
            ME.MinkowskiConvolution(c[0],
                                    c[1],
                                    kernel_size=k[1],
                                    stride=s[1],
                                    has_bias=False,
                                    dimension=3),
            ME.MinkowskiInstanceNorm(c[1]), ME.MinkowskiReLU())
        self.block3 = nn.Sequential(
            ME.MinkowskiConvolution(c[1],
                                    c[2],
                                    kernel_size=k[2],
                                    stride=s[2],
                                    has_bias=False,
                                    dimension=3),
            ME.MinkowskiInstanceNorm(c[2]), ME.MinkowskiReLU())

        self.avgpool = ME.MinkowskiGlobalPooling()
        self.concat = ME.MinkowskiBroadcastConcatenation()
Example #2
    def __init__(self, inc, outc, ks=3, stride=1, dilation=1, D=4):
        super(ResidualBlock4d, self).__init__()
        self.net = nn.Sequential(
            ME.MinkowskiConvolution(
                inc, outc, dimension=D,
                kernel_generator=ME.KernelGenerator(
                    kernel_size=ks, dimension=D,
                    region_type=ME.RegionType.HYBRID,
                    axis_types=(ME.RegionType.HYPERCUBE, ME.RegionType.HYPERCUBE,
                                ME.RegionType.HYPERCUBE, ME.RegionType.HYPERCROSS))),
            ME.MinkowskiBatchNorm(outc),
            ME.MinkowskiReLU(True),
            ME.MinkowskiConvolution(
                outc, outc, dimension=D,
                kernel_generator=ME.KernelGenerator(
                    kernel_size=ks, dimension=D,
                    region_type=ME.RegionType.HYBRID,
                    axis_types=(ME.RegionType.HYPERCUBE, ME.RegionType.HYPERCUBE,
                                ME.RegionType.HYPERCUBE, ME.RegionType.HYPERCROSS))),
            ME.MinkowskiBatchNorm(outc))
        nn.init.constant_(self.net[1].bn.weight, 1.0)
        nn.init.constant_(self.net[1].bn.bias, 0.0)
        nn.init.constant_(self.net[4].bn.weight, 1.0)
        nn.init.constant_(self.net[4].bn.bias, 0.0)

        self.downsample = nn.Sequential() if (inc == outc and stride == 1) else nn.Sequential(
            ME.MinkowskiConvolution(
                inc, outc, kernel_size=1, dilation=1, stride=stride, dimension=D),
            ME.MinkowskiBatchNorm(outc))
        if len(self.downsample) > 0:
            nn.init.constant_(self.downsample[1].bn.weight, 1.0)
            nn.init.constant_(self.downsample[1].bn.bias, 0.0)

        self.relu = ME.MinkowskiReLU(True)
Example #3
    def __init__(self, input_a_dim, input_b_dim, out_dim, kernel_size=2):
        super().__init__()
        '''
        Deconv x_a, concatenate with x_b,
        then apply the output projection
        (see the sketch after this example).
        '''
        self.input_a_dim = input_a_dim
        self.input_b_dim = input_b_dim
        self.out_dim = out_dim
        self.conv_a = nn.Sequential(
            ME.MinkowskiConvolutionTranspose(in_channels=input_a_dim,
                                             out_channels=input_a_dim,
                                             kernel_size=4,
                                             stride=4,
                                             dimension=3),
            ME.MinkowskiBatchNorm(input_a_dim),
            ME.MinkowskiReLU(),
        )

        self.conv_proj = nn.Sequential(
            ME.MinkowskiConvolution(in_channels=input_a_dim + input_b_dim,
                                    out_channels=out_dim,
                                    kernel_size=3,
                                    stride=1,
                                    dimension=3),
            ME.MinkowskiBatchNorm(out_dim),
            ME.MinkowskiReLU(),
        )
Example #4
    def network_initialization(self, in_channels, out_channels, D):

        self.inplanes = self.INIT_DIM
        self.conv1 = ME.MinkowskiConvolution(
            in_channels, self.inplanes, kernel_size=5, stride=1, dimension=D)

        self.bn1 = ME.MinkowskiBatchNorm(self.inplanes)
        self.relu = ME.MinkowskiReLU(inplace=True)
        self.pool = ME.MinkowskiSumPooling(kernel_size=2, stride=2, dimension=D)
        self.layer1 = self._make_layer(
            self.BLOCK, self.PLANES[0], self.LAYERS[0], stride=2)
        self.layer2 = self._make_layer(
            self.BLOCK, self.PLANES[1], self.LAYERS[1], stride=2)
        self.layer3 = self._make_layer(
            self.BLOCK, self.PLANES[2], self.LAYERS[2], stride=2)
        self.layer4 = self._make_layer(
            self.BLOCK, self.PLANES[3], self.LAYERS[3], stride=2)

        self.glob_avg = ME.MinkowskiGlobalPooling(dimension=D)

        self.classification_block = nn.Sequential(
            ME.MinkowskiLinear(self.inplanes, self.inplanes, bias=False),
            ME.MinkowskiBatchNorm(self.inplanes), ME.MinkowskiReLU(),
            ME.MinkowskiLinear(self.inplanes, self.inplanes, bias=False),
            ME.MinkowskiBatchNorm(self.inplanes))

        self.final = ME.MinkowskiLinear(self.inplanes, out_channels, bias=True)
Example #5
    def __init__(self, inc, outc, ks=3, stride=1, dilation=1, D=3):
        super(ResidualBlock, self).__init__()
        self.net = nn.Sequential(
            ME.MinkowskiConvolution(
                inc, outc, kernel_size=ks, dilation=dilation, stride=stride, dimension=D),
            ME.MinkowskiBatchNorm(outc),
            ME.MinkowskiReLU(True),
            ME.MinkowskiConvolution(
                outc, outc, kernel_size=ks, dilation=dilation, stride=1, dimension=D),
            ME.MinkowskiBatchNorm(outc))
        nn.init.constant_(self.net[1].bn.weight, 1.0)
        nn.init.constant_(self.net[1].bn.bias, 0.0)
        nn.init.constant_(self.net[4].bn.weight, 1.0)
        nn.init.constant_(self.net[4].bn.bias, 0.0)

        self.downsample = nn.Sequential() if (inc == outc and stride == 1) else \
            nn.Sequential(
                ME.MinkowskiConvolution(
                    inc, outc, kernel_size=1, dilation=1, stride=stride, dimension=D),
                ME.MinkowskiBatchNorm(outc))
        if len(self.downsample) > 0:
            nn.init.constant_(self.downsample[1].bn.weight, 1.0)
            nn.init.constant_(self.downsample[1].bn.bias, 0.0)

        self.relu = ME.MinkowskiReLU(True)
Example #6
    def __init__(self, D=3):
        super(STN3d, self).__init__()

        k = self.KERNEL_SIZES
        s = self.STRIDES
        c = self.CONV_CHANNELS

        self.block1 = nn.Sequential(
            ME.MinkowskiConvolution(3,
                                    c[0],
                                    kernel_size=k[0],
                                    stride=s[0],
                                    has_bias=False,
                                    dimension=3),
            ME.MinkowskiInstanceNorm(c[0]), ME.MinkowskiReLU())
        self.block2 = nn.Sequential(
            ME.MinkowskiConvolution(c[0],
                                    c[1],
                                    kernel_size=k[1],
                                    stride=s[1],
                                    has_bias=False,
                                    dimension=3),
            ME.MinkowskiInstanceNorm(c[1]), ME.MinkowskiReLU())
        self.block3 = nn.Sequential(
            ME.MinkowskiConvolution(c[1],
                                    c[2],
                                    kernel_size=k[2],
                                    stride=s[2],
                                    has_bias=False,
                                    dimension=3),
            ME.MinkowskiInstanceNorm(c[2]), ME.MinkowskiReLU())

        # Use kernel-size-1 convolutions as linear layers: with kernel_size == 1,
        # MinkowskiEngine internally applies a per-point linear map (see the
        # standalone check after this example).
        self.block4 = nn.Sequential(
            ME.MinkowskiConvolution(c[2],
                                    c[3],
                                    kernel_size=1,
                                    has_bias=False,
                                    dimension=3),
            ME.MinkowskiInstanceNorm(c[3]), ME.MinkowskiReLU())
        self.block5 = nn.Sequential(
            ME.MinkowskiConvolution(c[3],
                                    c[4],
                                    kernel_size=1,
                                    has_bias=False,
                                    dimension=3),
            ME.MinkowskiInstanceNorm(c[4]), ME.MinkowskiReLU())
        self.fc6 = ME.MinkowskiConvolution(c[4],
                                           9,
                                           kernel_size=1,
                                           has_bias=True,
                                           dimension=3)

        self.avgpool = ME.MinkowskiGlobalPooling()
        self.broadcast = ME.MinkowskiBroadcast()
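A small standalone check of the kernel-size-1 trick used in block4/block5 above, assuming MinkowskiEngine 0.5+ (where the keyword is bias= rather than the has_bias= used in this snippet); the coordinates and channel sizes are invented for illustration.

import torch
import MinkowskiEngine as ME

# three points in one batch; the first column is the batch index
coords = torch.IntTensor([[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 2, 1]])
feats = torch.randn(3, 8)
x = ME.SparseTensor(features=feats, coordinates=coords)

# kernel_size=1 only ever sees the center voxel, so every point is mapped
# independently -- effectively a per-point linear layer
pointwise = ME.MinkowskiConvolution(8, 16, kernel_size=1, bias=False, dimension=3)
y = pointwise(x)
assert y.F.shape == (3, 16)
assert torch.equal(y.C, x.C)  # coordinates are unchanged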
Example #7
    def __init__(self,
                 input_nc,
                 output_nc,
                 convolution,
                 dimension=3,
                 reduction=4):
        self.block = (Seq().append(
            convolution(
                in_channels=input_nc,
                out_channels=output_nc // reduction,
                kernel_size=1,
                stride=1,
                dilation=1,
                bias=False,
                dimension=dimension,
            )).append(ME.MinkowskiBatchNorm(output_nc // reduction)).append(
                ME.MinkowskiReLU()).append(
                    convolution(
                        output_nc // reduction,
                        output_nc // reduction,
                        kernel_size=3,
                        stride=1,
                        dilation=1,
                        bias=False,
                        dimension=dimension,
                    )).append(ME.MinkowskiBatchNorm(
                        output_nc //
                        reduction)).append(ME.MinkowskiReLU()).append(
                            convolution(
                                output_nc // reduction,
                                output_nc,
                                kernel_size=1,
                                stride=1,
                                dilation=1,
                                bias=False,
                                dimension=dimension,
                            )).append(ME.MinkowskiBatchNorm(output_nc)).append(
                                ME.MinkowskiReLU()))

        if input_nc != output_nc:
            self.downsample = (Seq().append(
                convolution(
                    in_channels=input_nc,
                    out_channels=output_nc,
                    kernel_size=1,
                    stride=1,
                    dilation=1,
                    bias=False,
                    dimension=dimension,
                )).append(ME.MinkowskiBatchNorm(output_nc)))
        else:
            self.downsample = None
Example #8
 def __init__(self, in_channels, out_channels, D=3):
     nn.Module.__init__(self)
     self.net = nn.Sequential(
         ME.MinkowskiConvolution(in_channels, 32, 3, dimension=D),
         ME.MinkowskiBatchNorm(32),
         ME.MinkowskiReLU(),
         ME.MinkowskiConvolution(32, 64, 3, stride=2, dimension=D),
         ME.MinkowskiBatchNorm(64),
         ME.MinkowskiReLU(),
         ME.MinkowskiConvolutionTranspose(64, 32, 3, stride=2, dimension=D),
         ME.MinkowskiBatchNorm(32),
         ME.MinkowskiReLU(),
         ME.MinkowskiConvolution(32, out_channels, kernel_size=1, dimension=D),
     )
Example #9
    def __init__(self):
        super(PointNetFeature, self).__init__()

        k = self.KERNEL_SIZES
        s = self.STRIDES
        c = self.CONV_CHANNELS

        self.stn = STN3d(D=3)
        self.conv1 = ME.MinkowskiConvolution(6,
                                             c[0],
                                             kernel_size=k[0],
                                             stride=s[0],
                                             has_bias=False,
                                             dimension=3)
        self.conv2 = ME.MinkowskiConvolution(c[0],
                                             c[1],
                                             kernel_size=k[1],
                                             stride=s[1],
                                             has_bias=False,
                                             dimension=3)
        self.conv3 = ME.MinkowskiConvolution(c[1],
                                             c[2],
                                             kernel_size=k[2],
                                             stride=s[2],
                                             has_bias=False,
                                             dimension=3)
        self.bn1 = ME.MinkowskiInstanceNorm(c[0], dimension=3)
        self.bn2 = ME.MinkowskiInstanceNorm(c[1], dimension=3)
        self.bn3 = ME.MinkowskiInstanceNorm(c[2], dimension=3)

        self.relu = ME.MinkowskiReLU(inplace=True)
        self.avgpool = ME.MinkowskiGlobalPooling()
        self.concat = ME.MinkowskiBroadcastConcatenation()
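Here avgpool and concat implement the PointNet-style step of appending a per-batch global feature to every point. A hedged forward sketch of how those two modules compose (illustrative only; the project's actual forward also runs the STN branch, which is omitted here):

    def forward(self, x):
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.relu(self.bn2(self.conv2(x)))
        x = self.relu(self.bn3(self.conv3(x)))
        glob = self.avgpool(x)         # one pooled feature vector per batch item
        return self.concat(x, glob)    # append the global feature to every point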
Example #10
    def network_initialization(self, in_channels, out_channels, D):
        self.inplanes = self.init_dim
        self.conv1 = ME.MinkowskiConvolution(
            in_channels, self.inplanes, kernel_size=5, stride=2, dimension=D)

        self.bn1 = ME.MinkowskiBatchNorm(self.inplanes)
        self.relu = ME.MinkowskiReLU(inplace=True)

        self.pool = ME.MinkowskiAvgPooling(kernel_size=2, stride=2, dimension=D)

        self.layer1 = self._make_layer(
            self.block, self.planes[0], self.layers[0], stride=2)
        self.layer2 = self._make_layer(
            self.block, self.planes[1], self.layers[1], stride=2)
        self.layer3 = self._make_layer(
            self.block, self.planes[2], self.layers[2], stride=2)
        self.layer4 = self._make_layer(
            self.block, self.planes[3], self.layers[3], stride=2)

        self.conv5 = ME.MinkowskiConvolution(
            self.inplanes, self.inplanes, kernel_size=3, stride=3, dimension=D)
        self.bn5 = ME.MinkowskiBatchNorm(self.inplanes)

        self.glob_avg = ME.MinkowskiGlobalMaxPooling()

        self.final = ME.MinkowskiLinear(self.inplanes, out_channels, bias=True)
Example #11
    def _make_stem_layer(self, in_channels):
        self.conv1s = []
        if self.stem_stride == 4:
            s0, s1 = 2, 2
        if self.stem_stride == 2:
            s0, s1 = 2, 1
        if self.stem_stride == 1:
            s0, s1 = 1, 1

        if not DEBUG_CFG.SPARSE_BEV:
            kernels = [(3, 3, 5), (3, 3, 5), (3, 3, 3)]
            strides = [(s0, s0, 2), (1, 1, 2), (s1, s1, 2)]
            paddings = [(1, 1, 0), (1, 1, 0), (1, 1, 0)]
        else:
            kernels = [
                (7, 7, 1),
            ]
            strides = [
                (s0, s0, 1),
            ]
            paddings = [(3, 3, 0)]

        out_planes = [self.basic_planes, self.basic_planes * 2]

        num_layers = len(kernels) - 1
        in_channels_i = in_channels

        self.conv1s = []
        self.norm1s = []
        for i in range(num_layers):
            conv1_i = build_conv_layer(self.conv_cfg,
                                       in_channels_i,
                                       out_planes[i],
                                       kernel_size=kernels[i],
                                       stride=strides[i],
                                       padding=paddings[i],
                                       bias=False)
            in_channels_i = out_planes[i]
            norm1_name, norm1 = build_norm_layer(self.norm_cfg,
                                                 in_channels_i,
                                                 postfix=i + 1)
            self.add_module(f'conv1_{i}', conv1_i)
            self.add_module(norm1_name, norm1)

            self.conv1s.append(conv1_i)
            self.norm1s.append(norm1)

        self.relu = ME.MinkowskiReLU(inplace=True)
        if not DEBUG_CFG.SPARSE_BEV:
            self.maxpool = mink_max_pool(kernel_size=3,
                                         stride=(s1, s1, 2),
                                         padding=(1, 1, 0))
        else:
            self.maxpool = mink_max_pool(kernel_size=(3, 3, 1),
                                         stride=(s1, s1, 1),
                                         padding=1)

        self.stem_stride_z = np.prod([s[-1] for s in strides])
        assert self.stem_stride_z == self.stem_stride_z_design
        assert self.stem_stride == np.prod([s[0] for s in strides])
Example #12
 def __init__(self,
              up_conv_nn=[],
              kernel_size=3,
              stride=1,
              dilation=1,
              has_bias=False,
              activation=ME.MinkowskiReLU(inplace=True),
              bn_momentum=0.01,
              dimension=-1,
              **kwargs):
     """
     Block convolution which consists of a convolution a batch norm a
     block operation and an activation.
     the block operation is usually a resnetBlock
     """
     # instantiate convolution
     # instantiate batchnorm
     # instantiate block
     # activation
     super(SimpleBlockUp, self).__init__()
     self.conv_tr = ME.MinkowskiConvolutionTranspose(
         up_conv_nn[0],
         up_conv_nn[1],
         kernel_size=kernel_size,
         stride=stride,
         dilation=dilation,
         dimension=dimension)
     self.bn = ME.MinkowskiBatchNorm(up_conv_nn[1], momentum=bn_momentum)
     self.block = BasicBlock(up_conv_nn[1],
                             up_conv_nn[1],
                             bn_momentum=bn_momentum,
                             dimension=dimension)
     self.activation = activation
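A hedged sketch of the order the docstring describes: transposed convolution, batch norm, activation, then the ResNet-style block. The project's actual forward may also merge a skip connection, which is not shown here.

    def forward(self, x):
        # transposed convolution -> batch norm -> activation
        x = self.activation(self.bn(self.conv_tr(x)))
        # then the ResNet-style block on the upsampled features
        return self.block(x)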
Example #13
  def network_initialization(self, in_channels, out_channels, config, D):

    def space_n_time_m(n, m):
      return n if D == 3 else [n, n, n, m]

    if D == 4:
      self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)

    dilations = config.dilations
    bn_momentum = config.bn_momentum
    self.inplanes = self.INIT_DIM
    self.conv1 = conv(
        in_channels,
        self.inplanes,
        kernel_size=space_n_time_m(config.conv1_kernel_size, 1),
        stride=1,
        D=D)

    self.bn1 = get_norm(NormType.BATCH_NORM, self.inplanes, D=self.D, bn_momentum=bn_momentum)
    self.relu = ME.MinkowskiReLU(inplace=True)
    self.pool = sum_pool(kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), D=D)

    self.layer1 = self._make_layer(
        self.BLOCK,
        self.PLANES[0],
        self.LAYERS[0],
        stride=space_n_time_m(2, 1),
        dilation=space_n_time_m(dilations[0], 1))
    self.layer2 = self._make_layer(
        self.BLOCK,
        self.PLANES[1],
        self.LAYERS[1],
        stride=space_n_time_m(2, 1),
        dilation=space_n_time_m(dilations[1], 1))
    self.layer3 = self._make_layer(
        self.BLOCK,
        self.PLANES[2],
        self.LAYERS[2],
        stride=space_n_time_m(2, 1),
        dilation=space_n_time_m(dilations[2], 1))
    self.layer4 = self._make_layer(
        self.BLOCK,
        self.PLANES[3],
        self.LAYERS[3],
        stride=space_n_time_m(2, 1),
        dilation=space_n_time_m(dilations[3], 1))

    if self.NETWORK_TYPE == NetworkType.CLASSIFICATION:
      self.glob_avg = ME.MinkowskiGlobalPooling(dimension=D)
      if self.HAS_LAST_BLOCK:
        self.final1 = nn.Linear(self.inplanes, self.inplanes, bias=False)
        self.bnfinal1 = nn.BatchNorm1d(self.inplanes)

        self.final2 = nn.Linear(self.inplanes, self.inplanes, bias=False)
        self.bnfinal2 = nn.BatchNorm1d(self.inplanes)

      self.final = nn.Linear(self.inplanes, out_channels, bias=True)
    else:
      self.final = conv(
          self.PLANES[3] * self.BLOCK.expansion, out_channels, kernel_size=1, bias=True, D=D)
Example #14
 def __init__(self,
              dims,
              use_bn=False,
              use_relu=False,
              use_dropout=False,
              use_bias=True):
     super().__init__()
     layers = []
     last_dim = dims[0]
     counter = 1
     for dim in dims[1:]:
         layers.append(ME.MinkowskiLinear(last_dim, dim, bias=use_bias))
         counter += 1
         if use_bn:
             layers.append(
                 ME.MinkowskiBatchNorm(
                     dim,
                     eps=1e-5,
                     momentum=0.1,
                 ))
          if (counter < len(dims)) and use_relu:
              layers.append(ME.MinkowskiReLU(inplace=True))
          # update the running input width regardless of whether a ReLU was added
          last_dim = dim
         if use_dropout:
             layers.append(MinkowskiDropout.Dropout())
     self.clf = nn.Sequential(*layers)
Example #15
def post_act_block(in_channels,
                   out_channels,
                   kernel_size=1,
                   stride=1,
                   padding=0,
                   dimension=None):
    '''
    Build a sparse convolution -> BatchNorm -> ReLU block.

    :param in_channels: number of input feature channels
    :param out_channels: number of output feature channels
    :param kernel_size: sparse convolution kernel size
    :param stride: convolution stride
    :param padding: padding passed through to the convolution
    :param dimension: spatial dimension of the sparse tensor
    :return: an nn.Sequential module
    '''
    m = nn.Sequential(
        ME.MinkowskiConvolution(in_channels,
                                out_channels,
                                kernel_size,
                                padding=padding,
                                stride=stride,
                                dilation=1,
                                has_bias=False,
                                dimension=dimension),
        ME.MinkowskiBatchNorm(out_channels), ME.MinkowskiReLU())
    return m
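A brief usage sketch for this factory; the values are invented, and it assumes the same MinkowskiEngine build the snippet itself targets (the has_bias= and padding= arguments above belong to an older or patched API rather than ME 0.5+).

# conv -> BatchNorm -> ReLU block that halves the spatial resolution of a 3D sparse tensor
down_block = post_act_block(in_channels=16, out_channels=32,
                            kernel_size=3, stride=2, dimension=3)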
Example #16
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 bn_momentum=0.1,
                 dimension=-1):
        super(Bottleneck, self).__init__()
        assert dimension > 0

        self.conv1 = ME.MinkowskiConvolution(inplanes,
                                             planes,
                                             kernel_size=1,
                                             dimension=dimension)
        self.norm1 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)

        self.conv2 = ME.MinkowskiConvolution(planes,
                                             planes,
                                             kernel_size=3,
                                             stride=stride,
                                             dilation=dilation,
                                             dimension=dimension)
        self.norm2 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)

        self.conv3 = ME.MinkowskiConvolution(planes,
                                             planes * self.expansion,
                                             kernel_size=1,
                                             dimension=dimension)
        self.norm3 = ME.MinkowskiBatchNorm(planes * self.expansion,
                                           momentum=bn_momentum)

        self.relu = ME.MinkowskiReLU(inplace=True)
        self.downsample = downsample
Example #17
    def __init__(self,
                 use_cuda=True,
                 kernel_sizes=[3],
                 channels=[1],
                 symmetric_mode=True,
                 bn=False,
                 dimension=6):
        super(GeometricSparseNeighConsensus, self).__init__()
        self.symmetric_mode = symmetric_mode
        self.kernel_sizes = kernel_sizes
        self.channels = channels
        num_layers = len(kernel_sizes)
        nn_modules = list()
        ch_in = 1
        ch_out = channels[0]
        k_size = kernel_sizes[0]
        nn_modules.append(ME.MinkowskiReLU(inplace=True))
        nn_modules.append(
            ME.MinkowskiConvolution(1,
                                    1,
                                    kernel_size=k_size,
                                    bias=False,
                                    dimension=dimension))

        #nn_modules.append(ME.MinkowskiConvolution(8,1,kernel_size=k_size,bias=False,dimension=dimension))
        #nn_modules.append(ME.MinkowskiSigmoid())
        nn_modules.append(ME.MinkowskiSigmoid())
        self.conv = nn.Sequential(*nn_modules)

        if use_cuda:
            self.conv.cuda()
Example #18
    def network_initialization(self, in_channels, out_channels, D):
        def space_n_time_m(n, m):
            return n if D == 3 else [n, n, n, m]

        if D == 4:
            self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)

        dilations = self.dilations
        bn_momentum = 1
        self.inplanes = self.INIT_DIM
        self.conv1 = conv(in_channels,
                          self.inplanes,
                          kernel_size=space_n_time_m(self.conv1_kernel_size,
                                                     1),
                          stride=1,
                          D=D)

        self.bn1 = get_norm(NormType.BATCH_NORM,
                            self.inplanes,
                            D=self.D,
                            bn_momentum=bn_momentum)
        self.relu = ME.MinkowskiReLU(inplace=True)
        self.pool = sum_pool(kernel_size=space_n_time_m(2, 1),
                             stride=space_n_time_m(2, 1),
                             D=D)

        self.layer1 = self._make_layer(
            self.BLOCK,
            self.PLANES[0],
            self.LAYERS[0],
            stride=space_n_time_m(2, 1),
            dilation=space_n_time_m(dilations[0], 1),
        )
        self.layer2 = self._make_layer(
            self.BLOCK,
            self.PLANES[1],
            self.LAYERS[1],
            stride=space_n_time_m(2, 1),
            dilation=space_n_time_m(dilations[1], 1),
        )
        self.layer3 = self._make_layer(
            self.BLOCK,
            self.PLANES[2],
            self.LAYERS[2],
            stride=space_n_time_m(2, 1),
            dilation=space_n_time_m(dilations[2], 1),
        )
        self.layer4 = self._make_layer(
            self.BLOCK,
            self.PLANES[3],
            self.LAYERS[3],
            stride=space_n_time_m(2, 1),
            dilation=space_n_time_m(dilations[3], 1),
        )

        self.final = conv(self.PLANES[3] * self.BLOCK.expansion,
                          out_channels,
                          kernel_size=1,
                          bias=True,
                          D=D)
Example #19
    def _make_layer(self, inplanes, planes, num_blocks, idx, stride=1):
        print("NUM BLOCKS:", num_blocks, "STRIDE:", stride)
        if self._use_norm:
            BatchNorm2d = change_default_args(
                eps=1e-3, momentum=0.01)(ME.MinkowskiBatchNorm)
            Conv2d = change_default_args(bias=False, dimension=2)(ME.MinkowskiConvolution)
            SubMConv2d = change_default_args(bias=False, dimension=2)(ME.MinkowskiConvolution)
            ConvTranspose2d = change_default_args(bias=False, dimension=2)(
                ME.MinkowskiConvolutionTranspose)
        else:
            BatchNorm2d = Empty
            Conv2d = change_default_args(bias=True, dimension=2)(ME.MinkowskiConvolution)
            SubMConv2d = change_default_args(bias=True, dimension=2)(ME.MinkowskiConvolution)
            ConvTranspose2d = change_default_args(bias=True, dimension=2)(
                ME.MinkowskiConvolutionTranspose)
        ReLU = ME.MinkowskiReLU()

        block = Sequential(    
            # PrintLayer(0),   
            Conv2d(inplanes, planes, 2, stride=stride),
            BatchNorm2d(planes),
            ReLU,
            # PrintLayer(1),
        )
        for j in range(num_blocks):
            block.add(SubMConv2d(planes, planes, 3))
            block.add(BatchNorm2d(planes))
            block.add(ReLU)
            # block.add(PrintLayer(2 + j))

        return block, planes
Example #20
    def __init__(self, channels, block_layers, block):
        nn.Module.__init__(self)
        in_nchannels = 1
        ch = [16, 32, 64, 32, channels]
        if block == 'ResNet':
            self.block = ResNet
        elif block == 'InceptionResNet':
            self.block = InceptionResNet
        else:
            raise ValueError(f'Unknown block type: {block}')

        self.conv0 = ME.MinkowskiConvolution(in_channels=in_nchannels,
                                             out_channels=ch[0],
                                             kernel_size=3,
                                             stride=1,
                                             bias=True,
                                             dimension=3)
        self.down0 = ME.MinkowskiConvolution(in_channels=ch[0],
                                             out_channels=ch[1],
                                             kernel_size=2,
                                             stride=2,
                                             bias=True,
                                             dimension=3)
        self.block0 = self.make_layer(self.block, block_layers, ch[1])

        self.conv1 = ME.MinkowskiConvolution(in_channels=ch[1],
                                             out_channels=ch[1],
                                             kernel_size=3,
                                             stride=1,
                                             bias=True,
                                             dimension=3)
        self.down1 = ME.MinkowskiConvolution(in_channels=ch[1],
                                             out_channels=ch[2],
                                             kernel_size=2,
                                             stride=2,
                                             bias=True,
                                             dimension=3)
        self.block1 = self.make_layer(self.block, block_layers, ch[2])

        self.conv2 = ME.MinkowskiConvolution(in_channels=ch[2],
                                             out_channels=ch[2],
                                             kernel_size=3,
                                             stride=1,
                                             bias=True,
                                             dimension=3)
        self.down2 = ME.MinkowskiConvolution(in_channels=ch[2],
                                             out_channels=ch[3],
                                             kernel_size=2,
                                             stride=2,
                                             bias=True,
                                             dimension=3)
        self.block2 = self.make_layer(self.block, block_layers, ch[3])

        self.conv3 = ME.MinkowskiConvolution(in_channels=ch[3],
                                             out_channels=ch[4],
                                             kernel_size=3,
                                             stride=1,
                                             bias=True,
                                             dimension=3)

        self.relu = ME.MinkowskiReLU(inplace=True)
Example #21
def get_nonlinearity(non_type):
  if non_type == 'ReLU':
    return ME.MinkowskiReLU()
  elif non_type == 'ELU':
    # return ME.MinkowskiInstanceNorm(num_feats, dimension=dimension)
    return ME.MinkowskiELU()
  else:
    raise ValueError(f'Type {non_type}, not defined')
Example #22
 def __init__(self, channel, reduction=16, D=-1):
   # Global coords does not require coords_key
   super(SELayer, self).__init__()
   self.fc = nn.Sequential(
       ME.MinkowskiLinear(channel, channel // reduction), ME.MinkowskiReLU(inplace=True),
       ME.MinkowskiLinear(channel // reduction, channel), ME.MinkowskiSigmoid())
   self.pooling = ME.MinkowskiGlobalPooling(dimension=D)
   self.broadcast_mul = ME.MinkowskiBroadcastMultiplication(dimension=D)
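The layers above follow the usual squeeze-and-excitation pattern: pool to a per-batch descriptor, gate it with the bottleneck MLP, then rescale every point. A hedged forward sketch (not necessarily the project's exact code):

    def forward(self, x):
        y = self.pooling(x)              # squeeze: one descriptor per batch item
        y = self.fc(y)                   # excitation: bottleneck MLP + sigmoid gate
        return self.broadcast_mul(x, y)  # rescale each point by its batch's gate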
Example #23
 def __init__(self, down_channels, skip_channels, out_channels, D=3):
     super().__init__()
     self.deconv = nn.Sequential(
         ME.MinkowskiConvolutionTranspose(down_channels,
                                          down_channels,
                                          kernel_size=3,
                                          stride=2,
                                          dimension=D),
         ME.MinkowskiReLU(inplace=True),
     )
     self.conv = nn.Sequential(
         ME.MinkowskiConvolution(skip_channels + down_channels,
                                 out_channels,
                                 kernel_size=3,
                                 dimension=D),
         ME.MinkowskiReLU(inplace=True),
     )
Example #24
 def __init__(self, in_feat, out_feat, D):
     super(ExampleNetwork, self).__init__(D)
     self.net = nn.Sequential(
         ME.MinkowskiConvolution(in_channels=in_feat,
                                 out_channels=64,
                                 kernel_size=3,
                                 stride=2,
                                 dilation=1,
                                 has_bias=False,
                                 dimension=D), ME.MinkowskiBatchNorm(64),
         ME.MinkowskiReLU(),
         ME.MinkowskiConvolution(in_channels=64,
                                 out_channels=128,
                                 kernel_size=3,
                                 stride=2,
                                 dimension=D), ME.MinkowskiBatchNorm(128),
         ME.MinkowskiReLU(), ME.MinkowskiGlobalPooling(dimension=D),
         ME.MinkowskiLinear(128, out_feat))
Example #25
File: unet.py Project: jgwak/GSDN
    def __init__(self, in_feats, out_feats, D):
        super(UNBlocks, self).__init__()

        self.convs, self.bns = {}, {}
        self.relu = ME.MinkowskiReLU(inplace=True)
        self.conv1 = conv(in_feats, out_feats, kernel_size=3, bias=False, D=D)
        self.bn1 = ME.MinkowskiBatchNorm(out_feats)
        self.conv2 = conv(out_feats, out_feats, kernel_size=3, bias=False, D=D)
        self.bn2 = ME.MinkowskiBatchNorm(out_feats)
Example #26
 def __init__(self, inc, outc, ks=3, stride=1, D=3):
     super(BasicDeconvolutionBlock, self).__init__()
     self.net = nn.Sequential(
         ME.MinkowskiConvolutionTranspose(
             inc, outc, kernel_size=ks, stride=stride, dimension=D),
         ME.MinkowskiBatchNorm(outc),
         ME.MinkowskiReLU(True))
     nn.init.constant_(self.net[1].bn.weight, 1.0)
     nn.init.constant_(self.net[1].bn.bias, 0.0)
Example #27
    def network_initialization(self, in_channels, out_channels, D):

        self.inplanes = self.INIT_DIM
        self.conv1 = nn.Sequential(
            ME.MinkowskiConvolution(in_channels,
                                    self.inplanes,
                                    kernel_size=5,
                                    stride=2,
                                    dimension=D),
            ME.MinkowskiBatchNorm(self.inplanes),
            ME.MinkowskiReLU(inplace=True),
            ME.MinkowskiMaxPooling(kernel_size=2, stride=2, dimension=D),
        )

        self.layer1 = self._make_layer(self.BLOCK,
                                       self.PLANES[0],
                                       self.LAYERS[0],
                                       stride=2)
        self.layer2 = self._make_layer(self.BLOCK,
                                       self.PLANES[1],
                                       self.LAYERS[1],
                                       stride=2)
        self.layer3 = self._make_layer(self.BLOCK,
                                       self.PLANES[2],
                                       self.LAYERS[2],
                                       stride=2)
        self.layer4 = self._make_layer(self.BLOCK,
                                       self.PLANES[3],
                                       self.LAYERS[3],
                                       stride=2)

        self.conv5 = nn.Sequential(
            ME.MinkowskiConvolution(self.inplanes,
                                    self.inplanes,
                                    kernel_size=3,
                                    stride=3,
                                    dimension=D),
            ME.MinkowskiBatchNorm(self.inplanes),
            ME.MinkowskiReLU(inplace=True),
        )

        self.glob_avg = ME.MinkowskiGlobalMaxPooling()

        self.final = ME.MinkowskiLinear(self.inplanes, out_channels, bias=True)
Example #28
    def __init__(self,
                 use_cuda=True,
                 kernel_sizes=[3, 3, 3],
                 channels=[10, 10, 1],
                 symmetric_mode=True,
                 bn=False):
        super(SparseNeighConsensus, self).__init__()
        self.symmetric_mode = symmetric_mode
        self.kernel_sizes = kernel_sizes
        self.channels = channels
        num_layers = len(kernel_sizes)
        nn_modules = list()
        for i in range(num_layers):
            if i == 0:
                nn_modules.append(ME.MinkowskiReLU(inplace=True))
                ch_in = 1
            else:
                ch_in = channels[i - 1]
            ch_out = channels[i]
            k_size = kernel_sizes[i]
            if ch_out == 1 or not bn:
                nn_modules.append(
                    ME.MinkowskiConvolution(ch_in,
                                            ch_out,
                                            kernel_size=k_size,
                                            has_bias=True,
                                            dimension=4))
            else:
                nn_modules.append(
                    torch.nn.Sequential(
                        ME.MinkowskiConvolution(ch_in,
                                                ch_out,
                                                kernel_size=k_size,
                                                has_bias=True,
                                                dimension=4),
                        ME.MinkowskiBatchNorm(ch_out)))
            nn_modules.append(ME.MinkowskiReLU(inplace=True))
        self.conv = nn.Sequential(*nn_modules)
        #        self.add = ME.MinkowskiUnion()

        if use_cuda:
            self.conv.cuda()
Example #29
 def _make_deblock(self, num_out_filters, idx):
     print("CUSTOM MAKE DEBLOCK")
     stride = self._upsample_strides[idx - self._upsample_start_idx]
     if self._use_norm:
         if self._use_groupnorm:
             SparseBatchNorm2d = change_default_args(
                 num_groups=self._num_groups, eps=1e-3)(GroupNorm)
             DenseBatchNorm2d = change_default_args(
                 num_groups=self._num_groups, eps=1e-3)(GroupNorm)
         else:
              SparseBatchNorm2d = change_default_args(
                  eps=1e-3, momentum=0.01)(ME.MinkowskiBatchNorm)
             DenseBatchNorm2d = change_default_args(
                 eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
         SparseConvTranspose2d = change_default_args(bias=False, dimension=2)(
             ME.MinkowskiConvolutionTranspose)
         DenseConvTranspose2d = change_default_args(bias=False)(
             nn.ConvTranspose2d)
     else:
         SparseBatchNorm2d = Empty
         DenseBatchNorm2d = Empty
         SparseConvTranspose2d = change_default_args(bias=True, dimension=2)(
             ME.MinkowskiConvolutionTranspose)
         DenseConvTranspose2d = change_default_args(bias=True)(
             nn.ConvTranspose2d)
     ReLU = ME.MinkowskiReLU()
     stride = np.round(stride).astype(np.int64)
     if (idx <= LAST_SPARSE_IDX):
         deblock = Sequential(
             SparseConvTranspose2d(
                 num_out_filters,
                 self._num_upsample_filters[idx - self._upsample_start_idx],
                 stride,
                 stride=stride),
              SparseBatchNorm2d(
                  self._num_upsample_filters[idx - self._upsample_start_idx]),
             ReLU,
             ME.ToDense()
         )
     else:
         stride = np.round(stride).astype(np.int64)
         deblock = Sequential(
             DenseConvTranspose2d(
                 num_out_filters,
                 self._num_upsample_filters[idx - self._upsample_start_idx],
                 stride,
                 stride=stride),
              DenseBatchNorm2d(
                  self._num_upsample_filters[idx - self._upsample_start_idx]),
             ReLU,
         )
     return deblock
 def get_conv_block(self, in_channel, out_channel, kernel_size, stride):
     return nn.Sequential(
         ME.MinkowskiConvolution(
             in_channel,
             out_channel,
             kernel_size=kernel_size,
             stride=stride,
             dimension=self.D,
         ),
         ME.MinkowskiBatchNorm(out_channel),
         ME.MinkowskiReLU(),
     )