def __init__(self):
    """Build the PointNet feature backbone.

    Registers an STN, three conv -> instance-norm -> ReLU stages, a
    global average pool, and a broadcast-concatenation module; the
    actual wiring happens in ``forward``.
    """
    super(PointNetFeature, self).__init__()
    kernels = self.KERNEL_SIZES
    strides = self.STRIDES
    chans = self.CONV_CHANNELS

    def conv_stage(in_ch, out_ch, ksize, stride):
        # One bias-free conv followed by instance norm and ReLU.
        return nn.Sequential(
            ME.MinkowskiConvolution(
                in_ch, out_ch, kernel_size=ksize, stride=stride,
                has_bias=False, dimension=3),
            ME.MinkowskiInstanceNorm(out_ch),
            ME.MinkowskiReLU())

    self.stn = STN3d(D=3)
    # Input width 6: presumably xyz + color/normal features — confirm with caller.
    self.block1 = conv_stage(6, chans[0], kernels[0], strides[0])
    self.block2 = conv_stage(chans[0], chans[1], kernels[1], strides[1])
    self.block3 = conv_stage(chans[1], chans[2], kernels[2], strides[2])
    self.avgpool = ME.MinkowskiGlobalPooling()
    self.concat = ME.MinkowskiBroadcastConcatenation()
def __init__(self):
    """Build the PointNet feature backbone as flat (non-Sequential) layers.

    Registers an STN, three bias-free convolutions with matching
    instance norms, a shared in-place ReLU, a global average pool, and a
    broadcast-concatenation module; wiring happens in ``forward``.
    """
    super(PointNetFeature, self).__init__()
    kernels = self.KERNEL_SIZES
    strides = self.STRIDES
    chans = self.CONV_CHANNELS
    self.stn = STN3d(D=3)
    conv = ME.MinkowskiConvolution
    # Input width 6: presumably xyz + color/normal features — confirm with caller.
    self.conv1 = conv(6, chans[0], kernel_size=kernels[0],
                      stride=strides[0], has_bias=False, dimension=3)
    self.conv2 = conv(chans[0], chans[1], kernel_size=kernels[1],
                      stride=strides[1], has_bias=False, dimension=3)
    self.conv3 = conv(chans[1], chans[2], kernel_size=kernels[2],
                      stride=strides[2], has_bias=False, dimension=3)
    norm = ME.MinkowskiInstanceNorm
    self.bn1 = norm(chans[0], dimension=3)
    self.bn2 = norm(chans[1], dimension=3)
    self.bn3 = norm(chans[2], dimension=3)
    self.relu = ME.MinkowskiReLU(inplace=True)
    self.avgpool = ME.MinkowskiGlobalPooling()
    self.concat = ME.MinkowskiBroadcastConcatenation()
def __init__(self, D=3):
    """Build the 3-D spatial transformer (STN3d).

    Three strided conv stages are followed by two kernel-size-1 stages
    and a final 9-channel regressor (a flattened 3x3 transform), plus
    global pooling and broadcast modules used in ``forward``.
    """
    super(STN3d, self).__init__()
    kernels = self.KERNEL_SIZES
    strides = self.STRIDES
    chans = self.CONV_CHANNELS

    def conv_stage(in_ch, out_ch, ksize, stride=1):
        # One bias-free conv followed by instance norm and ReLU.
        return nn.Sequential(
            ME.MinkowskiConvolution(
                in_ch, out_ch, kernel_size=ksize, stride=stride,
                has_bias=False, dimension=3),
            ME.MinkowskiInstanceNorm(out_ch),
            ME.MinkowskiReLU())

    self.block1 = conv_stage(3, chans[0], kernels[0], strides[0])
    self.block2 = conv_stage(chans[0], chans[1], kernels[1], strides[1])
    self.block3 = conv_stage(chans[1], chans[2], kernels[2], strides[2])
    # Kernel-size-1 convolutions: Minkowski Engine implements these
    # internally as linear layers, so blocks 4-5 act as FC layers.
    self.block4 = conv_stage(chans[2], chans[3], 1)
    self.block5 = conv_stage(chans[3], chans[4], 1)
    self.fc6 = ME.MinkowskiConvolution(
        chans[4], 9, kernel_size=1, has_bias=True, dimension=3)
    self.avgpool = ME.MinkowskiGlobalPooling()
    self.broadcast = ME.MinkowskiBroadcast()
def get_norm(norm_type, n_channels, D, bn_momentum=0.1):
    """Return a Minkowski normalization layer for ``norm_type``.

    Args:
        norm_type: NormType.BATCH_NORM, INSTANCE_NORM, or
            INSTANCE_BATCH_NORM (instance norm followed by batch norm).
        n_channels: number of feature channels to normalize.
        D: spatial dimension (unused here; kept for a uniform signature).
        bn_momentum: momentum for batch-norm running statistics.

    Raises:
        ValueError: if ``norm_type`` is not one of the supported values.
    """
    if norm_type == NormType.BATCH_NORM:
        return ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum)
    if norm_type == NormType.INSTANCE_NORM:
        return ME.MinkowskiInstanceNorm(n_channels)
    if norm_type == NormType.INSTANCE_BATCH_NORM:
        return nn.Sequential(
            ME.MinkowskiInstanceNorm(n_channels),
            ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum))
    raise ValueError(f'Norm type: {norm_type} not supported')
def get_norm(norm_type, num_feats, bn_momentum=0.05, dimension=-1):
    """Return a Minkowski normalization layer selected by a string key.

    Args:
        num_feats: number of feature channels to normalize.
        norm_type: 'BN' (batch norm), 'IN' (instance norm), or 'INBN'
            (instance norm followed by batch norm).
        bn_momentum: momentum for batch-norm running statistics.
        dimension: unused here; kept for a uniform signature.

    Raises:
        ValueError: if ``norm_type`` is not recognized.
    """
    if norm_type == 'BN':
        return ME.MinkowskiBatchNorm(num_feats, momentum=bn_momentum)
    if norm_type == 'IN':
        return ME.MinkowskiInstanceNorm(num_feats)
    if norm_type == 'INBN':
        return nn.Sequential(
            ME.MinkowskiInstanceNorm(num_feats),
            ME.MinkowskiBatchNorm(num_feats, momentum=bn_momentum))
    raise ValueError(f'Type {norm_type}, not defined')
def __init__(self, D=3):
    """Build the 3-D spatial transformer (STN3d) as flat layers.

    Three strided bias-free convolutions, two kernel-size-1 "linear"
    convolutions, and a final 9-channel regressor (a flattened 3x3
    transform); each conv has a matching instance norm, and pooling /
    broadcast modules are registered for use in ``forward``.
    """
    super(STN3d, self).__init__()
    kernels = self.KERNEL_SIZES
    strides = self.STRIDES
    chans = self.CONV_CHANNELS
    conv = ME.MinkowskiConvolution
    self.conv1 = conv(3, chans[0], kernel_size=kernels[0],
                      stride=strides[0], has_bias=False, dimension=3)
    self.conv2 = conv(chans[0], chans[1], kernel_size=kernels[1],
                      stride=strides[1], has_bias=False, dimension=3)
    self.conv3 = conv(chans[1], chans[2], kernel_size=kernels[2],
                      stride=strides[2], has_bias=False, dimension=3)
    # Kernel-size-1 convolutions: Minkowski Engine implements these
    # internally as linear layers, so fc4-fc6 act as FC layers.
    self.fc4 = conv(chans[2], chans[3], kernel_size=1, has_bias=False,
                    dimension=3)
    self.fc5 = conv(chans[3], chans[4], kernel_size=1, has_bias=False,
                    dimension=3)
    self.fc6 = conv(chans[4], 9, kernel_size=1, has_bias=True, dimension=3)
    self.relu = ME.MinkowskiReLU(inplace=True)
    self.avgpool = ME.MinkowskiGlobalPooling()
    self.broadcast = ME.MinkowskiBroadcast()
    norm = ME.MinkowskiInstanceNorm
    self.bn1 = norm(chans[0], dimension=3)
    self.bn2 = norm(chans[1], dimension=3)
    self.bn3 = norm(chans[2], dimension=3)
    self.bn4 = norm(chans[3], dimension=3)
    self.bn5 = norm(chans[4], dimension=3)
def get_norm(norm_type, n_channels, D, bn_momentum=0.1):
    """Return a Minkowski normalization layer for ``norm_type``.

    Args:
        norm_type: NormType.BATCH_NORM or NormType.SPARSE_INSTANCE_NORM.
        n_channels: number of feature channels to normalize.
        D: spatial dimension, forwarded to the instance norm.
        bn_momentum: momentum for batch-norm running statistics.

    Raises:
        ValueError: if ``norm_type`` is not one of the supported values.
    """
    if norm_type == NormType.BATCH_NORM:
        return ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum)
    if norm_type == NormType.SPARSE_INSTANCE_NORM:
        return ME.MinkowskiInstanceNorm(n_channels, dimension=D)
    raise ValueError(f'Norm type: {norm_type} not supported')
def network_initialization(self, in_channels, config, D):
    """Set up the proposal-head layers.

    Two kernel-size-1 conv/instance-norm pairs project the input to the
    proposal feature width, followed by per-proposal output heads for
    class logits and bounding boxes, and (optionally) rotation.
    """
    feat = config.proposal_feat_size
    self.conv1 = ME.MinkowskiConvolution(
        in_channels, feat, kernel_size=1, dimension=3)
    self.bn1 = ME.MinkowskiInstanceNorm(feat)
    self.conv2 = ME.MinkowskiConvolution(
        feat, feat, kernel_size=1, dimension=3)
    self.bn2 = ME.MinkowskiInstanceNorm(feat)
    # NOTE(review): 2 values per output channel for the logits and 6 per
    # channel for the boxes — confirm the exact layout against the loss.
    self.final_class_logits = ME.MinkowskiConvolution(
        feat, self.out_channels * 2, kernel_size=1, dimension=3,
        has_bias=True)
    self.final_bbox = ME.MinkowskiConvolution(
        feat, self.out_channels * 6, kernel_size=1, dimension=3,
        has_bias=True)
    self.elu = ME.MinkowskiELU()
    self.softmax = ME.MinkowskiSoftmax()
    if self.is_rotation_bbox:
        # Extra head regressing rotation parameters per output channel.
        self.final_rotation = ME.MinkowskiConvolution(
            feat, self.out_channels * self.rotation_criterion.NUM_OUTPUT,
            kernel_size=1, dimension=3, has_bias=True)
def network_initialization(self, in_channels, out_channels, D):
    """Set up a ResNet-style sparse classifier.

    A strided conv stem with max pooling, four residual stages built via
    ``_make_layer`` (each with stride 2), a dropout + strided conv head,
    global max pooling, and a final linear classifier.
    """
    self.inplanes = self.INIT_DIM
    # Stem: stride-2 conv then stride-2 max pool.
    self.conv1 = nn.Sequential(
        ME.MinkowskiConvolution(
            in_channels, self.inplanes, kernel_size=3, stride=2,
            dimension=D),
        ME.MinkowskiInstanceNorm(self.inplanes),
        ME.MinkowskiReLU(inplace=True),
        ME.MinkowskiMaxPooling(kernel_size=2, stride=2, dimension=D),
    )
    # Residual stages layer1..layer4; _make_layer updates self.inplanes.
    for stage in range(4):
        setattr(
            self, f'layer{stage + 1}',
            self._make_layer(self.BLOCK, self.PLANES[stage],
                             self.LAYERS[stage], stride=2))
    self.conv5 = nn.Sequential(
        ME.MinkowskiDropout(),
        ME.MinkowskiConvolution(
            self.inplanes, self.inplanes, kernel_size=3, stride=3,
            dimension=D),
        ME.MinkowskiInstanceNorm(self.inplanes),
        ME.MinkowskiGELU(),
    )
    self.glob_pool = ME.MinkowskiGlobalMaxPooling()
    self.final = ME.MinkowskiLinear(self.inplanes, out_channels, bias=True)
def get_norm_layer(norm_type, num_feats, bn_momentum=0.05, D=-1):
    """Return a Minkowski normalization layer selected by a string key.

    Args:
        norm_type: 'BN' (batch norm) or 'IN' (instance norm).
        num_feats: number of feature channels to normalize.
        bn_momentum: momentum for batch-norm running statistics.
        D: unused here; kept for a uniform signature.

    Raises:
        ValueError: if ``norm_type`` is not recognized.
    """
    if norm_type == 'BN':
        return ME.MinkowskiBatchNorm(num_feats, momentum=bn_momentum)
    if norm_type == 'IN':
        return ME.MinkowskiInstanceNorm(num_feats)
    raise ValueError(f'Type {norm_type}, not defined')
def __init__(self, out_channels, D=3):
    """Build the PointNet classifier head.

    A PointNetFeature backbone followed by three conv -> instance-norm
    -> ReLU stages and a final kernel-size-1 conv producing
    ``out_channels`` values.
    """
    super(PointNet, self).__init__()
    kernels = self.KERNEL_SIZES
    strides = self.STRIDES
    chans = self.CONV_CHANNELS
    self.feat = PointNetFeature()

    def conv_stage(in_ch, out_ch, ksize, stride):
        # One bias-free conv followed by instance norm and ReLU.
        return nn.Sequential(
            ME.MinkowskiConvolution(
                in_ch, out_ch, kernel_size=ksize, stride=stride,
                has_bias=False, dimension=3),
            ME.MinkowskiInstanceNorm(out_ch),
            ME.MinkowskiReLU())

    # NOTE(review): 1280 presumably matches PointNetFeature's
    # concatenated output width — confirm against the backbone.
    self.block1 = conv_stage(1280, chans[0], kernels[0], strides[0])
    self.block2 = conv_stage(chans[0], chans[1], kernels[1], strides[1])
    self.block3 = conv_stage(chans[1], chans[2], kernels[2], strides[2])
    # Last FC layer. Note that kernel_size 1 == linear layer.
    self.conv4 = ME.MinkowskiConvolution(
        chans[2], out_channels, kernel_size=1, has_bias=True, dimension=3)
def __init__(self, in_features, out_features, dimension=3, leakiness=0.0):
    """Build an atrous residual block: two 3x3 convs with dilations 1 and 3.

    The skip path is a linear projection when the channel count changes,
    and an identity otherwise. Each conv has a matching instance norm; a
    shared leaky ReLU is registered for use in ``forward``.
    """
    super(AtrousIIBlock, self).__init__()
    assert dimension > 0
    self.D = dimension
    # Project the residual only when in/out widths differ.
    self.residual = (ME.MinkowskiLinear(in_features, out_features)
                     if in_features != out_features else Identity())
    self.conv1 = ME.MinkowskiConvolution(
        in_features, out_features, kernel_size=3, stride=1, dilation=1,
        dimension=self.D)
    self.norm1 = ME.MinkowskiInstanceNorm(out_features, dimension=self.D)
    self.conv2 = ME.MinkowskiConvolution(
        out_features, out_features, kernel_size=3, stride=1, dilation=3,
        dimension=self.D)
    self.norm2 = ME.MinkowskiInstanceNorm(out_features, dimension=self.D)
    self.leaky_relu = MinkowskiLeakyReLU(negative_slope=leakiness)
def __init__(self, out_channels, D=3):
    """Build the PointNet classifier head as flat (non-Sequential) layers.

    A PointNetFeature backbone, three bias-free convolutions with
    matching instance norms and a shared in-place ReLU, then a final
    kernel-size-1 conv producing ``out_channels`` values.
    """
    super(PointNet, self).__init__()
    kernels = self.KERNEL_SIZES
    strides = self.STRIDES
    chans = self.CONV_CHANNELS
    self.feat = PointNetFeature()
    conv = ME.MinkowskiConvolution
    # NOTE(review): 1280 presumably matches PointNetFeature's
    # concatenated output width — confirm against the backbone.
    self.conv1 = conv(1280, chans[0], kernel_size=kernels[0],
                      stride=strides[0], has_bias=False, dimension=3)
    self.conv2 = conv(chans[0], chans[1], kernel_size=kernels[1],
                      stride=strides[1], has_bias=False, dimension=3)
    self.conv3 = conv(chans[1], chans[2], kernel_size=kernels[2],
                      stride=strides[2], has_bias=False, dimension=3)
    # Last FC layer. Note that kernel_size 1 == linear layer.
    self.conv4 = conv(chans[2], out_channels, kernel_size=1,
                      has_bias=True, dimension=3)
    norm = ME.MinkowskiInstanceNorm
    self.bn1 = norm(chans[0], dimension=3)
    self.bn2 = norm(chans[1], dimension=3)
    self.bn3 = norm(chans[2], dimension=3)
    self.relu = ME.MinkowskiReLU(inplace=True)