def __init__(self, inc, outc, ks=3, stride=1, dilation=1, D=4):
    """Residual block for 4D sparse tensors.

    Both convolutions use a hybrid kernel: hypercubic over the first three
    axes and hypercross over the fourth axis.
    """
    super(ResidualBlock4d, self).__init__()

    def hybrid_kernel():
        # Hypercube on the first 3 axes, hypercross on the last one.
        return ME.KernelGenerator(
            kernel_size=ks,
            dimension=D,
            region_type=ME.RegionType.HYBRID,
            axis_types=(ME.RegionType.HYPERCUBE, ME.RegionType.HYPERCUBE,
                        ME.RegionType.HYPERCUBE, ME.RegionType.HYPERCROSS))

    # NOTE(review): stride/dilation are not applied to the main convolutions;
    # only the skip path uses stride — confirm that is intended.
    self.net = nn.Sequential(
        ME.MinkowskiConvolution(inc, outc, dimension=D,
                                kernel_generator=hybrid_kernel()),
        ME.MinkowskiBatchNorm(outc),
        ME.MinkowskiReLU(True),
        ME.MinkowskiConvolution(outc, outc, dimension=D,
                                kernel_generator=hybrid_kernel()),
        ME.MinkowskiBatchNorm(outc))
    # Identity-style initialization for both batch norms.
    for idx in (1, 4):
        nn.init.constant_(self.net[idx].bn.weight, 1.0)
        nn.init.constant_(self.net[idx].bn.bias, 0.0)
    # Project the skip connection only when channels or resolution change.
    if inc == outc and stride == 1:
        self.downsample = nn.Sequential()
    else:
        self.downsample = nn.Sequential(
            ME.MinkowskiConvolution(inc, outc, kernel_size=1, dilation=1,
                                    stride=stride, dimension=D),
            ME.MinkowskiBatchNorm(outc))
        nn.init.constant_(self.downsample[1].bn.weight, 1.0)
        nn.init.constant_(self.downsample[1].bn.bias, 0.0)
    self.relu = ME.MinkowskiReLU(True)
def __init__(self):
    """Build the sparse-conv feature extractor from the class constants."""
    super(PointNetFeature, self).__init__()
    kernels, strides, chans = self.KERNEL_SIZES, self.STRIDES, self.CONV_CHANNELS
    self.stn = STN3d(D=3)
    # Three bias-free conv stages; input carries 6 feature channels.
    self.conv1 = ME.MinkowskiConvolution(
        6, chans[0], kernel_size=kernels[0], stride=strides[0],
        has_bias=False, dimension=3)
    self.conv2 = ME.MinkowskiConvolution(
        chans[0], chans[1], kernel_size=kernels[1], stride=strides[1],
        has_bias=False, dimension=3)
    self.conv3 = ME.MinkowskiConvolution(
        chans[1], chans[2], kernel_size=kernels[2], stride=strides[2],
        has_bias=False, dimension=3)
    # Instance norm per stage.
    self.bn1 = ME.MinkowskiInstanceNorm(chans[0], dimension=3)
    self.bn2 = ME.MinkowskiInstanceNorm(chans[1], dimension=3)
    self.bn3 = ME.MinkowskiInstanceNorm(chans[2], dimension=3)
    self.relu = ME.MinkowskiReLU(inplace=True)
    self.avgpool = ME.MinkowskiGlobalPooling()
    self.concat = ME.MinkowskiBroadcastConcatenation()
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None,
             bn_momentum=0.1, dimension=-1):
    """ResNet bottleneck (1x1 reduce -> 3x3 -> 1x1 expand) on sparse tensors."""
    super(Bottleneck, self).__init__()
    assert dimension > 0
    expanded = planes * self.expansion
    # 1x1 channel reduction.
    self.conv1 = ME.MinkowskiConvolution(
        inplanes, planes, kernel_size=1, dimension=dimension)
    self.norm1 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
    # 3x3 spatial conv carries the stride and dilation.
    self.conv2 = ME.MinkowskiConvolution(
        planes, planes, kernel_size=3, stride=stride, dilation=dilation,
        dimension=dimension)
    self.norm2 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
    # 1x1 channel expansion.
    self.conv3 = ME.MinkowskiConvolution(
        planes, expanded, kernel_size=1, dimension=dimension)
    self.norm3 = ME.MinkowskiBatchNorm(expanded, momentum=bn_momentum)
    self.relu = ME.MinkowskiReLU(inplace=True)
    self.downsample = downsample
def __init__(self):
    """Build three conv+IN+ReLU stages followed by global pooling."""
    super(PointNetFeature, self).__init__()
    kernels, strides, chans = self.KERNEL_SIZES, self.STRIDES, self.CONV_CHANNELS
    self.stn = STN3d(D=3)

    def stage(cin, cout, ks, st):
        # One conv -> instance-norm -> ReLU stage.
        return nn.Sequential(
            ME.MinkowskiConvolution(cin, cout, kernel_size=ks, stride=st,
                                    has_bias=False, dimension=3),
            ME.MinkowskiInstanceNorm(cout),
            ME.MinkowskiReLU())

    self.block1 = stage(6, chans[0], kernels[0], strides[0])
    self.block2 = stage(chans[0], chans[1], kernels[1], strides[1])
    self.block3 = stage(chans[1], chans[2], kernels[2], strides[2])
    self.avgpool = ME.MinkowskiGlobalPooling()
    self.concat = ME.MinkowskiBroadcastConcatenation()
def __init__(self, inc, outc, ks=3, stride=1, dilation=1, D=3):
    """Two-conv residual block; the skip path is projected on shape change."""
    super(ResidualBlock, self).__init__()
    self.net = nn.Sequential(
        ME.MinkowskiConvolution(inc, outc, kernel_size=ks, dilation=dilation,
                                stride=stride, dimension=D),
        ME.MinkowskiBatchNorm(outc),
        ME.MinkowskiReLU(True),
        ME.MinkowskiConvolution(outc, outc, kernel_size=ks, dilation=dilation,
                                stride=1, dimension=D),
        ME.MinkowskiBatchNorm(outc))
    # Both BN layers start as identity (weight=1, bias=0).
    for idx in (1, 4):
        nn.init.constant_(self.net[idx].bn.weight, 1.0)
        nn.init.constant_(self.net[idx].bn.bias, 0.0)
    if inc == outc and stride == 1:
        self.downsample = nn.Sequential()
    else:
        # 1x1 projection to match channels/resolution of the main path.
        self.downsample = nn.Sequential(
            ME.MinkowskiConvolution(inc, outc, kernel_size=1, dilation=1,
                                    stride=stride, dimension=D),
            ME.MinkowskiBatchNorm(outc))
        nn.init.constant_(self.downsample[1].bn.weight, 1.0)
        nn.init.constant_(self.downsample[1].bn.bias, 0.0)
    self.relu = ME.MinkowskiReLU(True)
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None,
             bn_momentum=0.1, D=3):
    """Basic residual block; the norm layer is selected by self.NORM_TYPE."""
    super(BasicBlockBase, self).__init__()
    # NOTE(review): conv1 omits dilation/has_bias while conv2 sets them —
    # confirm this asymmetry is intended.
    self.conv1 = ME.MinkowskiConvolution(
        inplanes, planes, kernel_size=3, stride=stride, dimension=D)
    self.norm1 = get_norm(self.NORM_TYPE, planes, bn_momentum=bn_momentum, D=D)
    self.conv2 = ME.MinkowskiConvolution(
        planes, planes, kernel_size=3, stride=1, dilation=dilation,
        has_bias=False, dimension=D)
    self.norm2 = get_norm(self.NORM_TYPE, planes, bn_momentum=bn_momentum, D=D)
    self.downsample = downsample
def network_initialization(self, in_channels, out_channels, D):
    """Assemble the backbone: stem, four residual stages, and a linear head."""
    self.inplanes = self.init_dim
    # Stem: strided 5x5 conv, BN, ReLU, then average pooling.
    self.conv1 = ME.MinkowskiConvolution(
        in_channels, self.inplanes, kernel_size=5, stride=2, dimension=D)
    self.bn1 = ME.MinkowskiBatchNorm(self.inplanes)
    self.relu = ME.MinkowskiReLU(inplace=True)
    self.pool = ME.MinkowskiAvgPooling(kernel_size=2, stride=2, dimension=D)
    # Four residual stages, each with stride 2.
    self.layer1 = self._make_layer(self.block, self.planes[0], self.layers[0], stride=2)
    self.layer2 = self._make_layer(self.block, self.planes[1], self.layers[1], stride=2)
    self.layer3 = self._make_layer(self.block, self.planes[2], self.layers[2], stride=2)
    self.layer4 = self._make_layer(self.block, self.planes[3], self.layers[3], stride=2)
    self.conv5 = ME.MinkowskiConvolution(
        self.inplanes, self.inplanes, kernel_size=3, stride=3, dimension=D)
    self.bn5 = ME.MinkowskiBatchNorm(self.inplanes)
    # NOTE(review): attribute is named glob_avg but performs global *max*
    # pooling — confirm the naming is intentional.
    self.glob_avg = ME.MinkowskiGlobalMaxPooling()
    self.final = ME.MinkowskiLinear(self.inplanes, out_channels, bias=True)
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None,
             bn_momentum=0.1, leakiness=0, dimension=-1):
    """Basic two-conv residual block with a leaky-ReLU activation."""
    super(BasicBlock, self).__init__()
    assert dimension > 0
    # First conv carries stride; second keeps resolution.
    self.conv1 = ME.MinkowskiConvolution(
        inplanes, planes, kernel_size=3, stride=stride, dilation=dilation,
        dimension=dimension)
    self.norm1 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
    self.conv2 = ME.MinkowskiConvolution(
        planes, planes, kernel_size=3, stride=1, dilation=dilation,
        dimension=dimension)
    self.norm2 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
    self.relu = ME.MinkowskiLeakyReLU(negative_slope=leakiness)
    self.downsample = downsample
def __init__(self, in_channels=64, out_channels=128, bn_momentum=0.1,
             norm_type='IN', D=3):
    """Two 1x1-conv segmentation head layers with a norm layer in between."""
    ME.MinkowskiNetwork.__init__(self, D)
    # First 1x1 conv keeps the channel count.
    self.seg_head_1 = ME.MinkowskiConvolution(
        in_channels=in_channels, out_channels=in_channels, kernel_size=1,
        stride=1, dilation=1, bias=True, dimension=D)
    self.norm_1 = get_norm_layer(norm_type, in_channels,
                                 bn_momentum=bn_momentum, D=D)
    # Second 1x1 conv projects to the output width.
    self.seg_head_2 = ME.MinkowskiConvolution(
        in_channels=in_channels, out_channels=out_channels, kernel_size=1,
        stride=1, dilation=1, bias=True, dimension=D)
def __init__(self, in_features, out_features, stride=1, dilation=1,
             downsample=None, bn_momentum=0.1, leakiness=0.0, dimension=-1):
    """Residual block whose skip path is a linear projection on width change."""
    super(ResNetBlock, self).__init__()
    assert dimension > 0
    # Skip connection: project only when input/output widths differ.
    self.residual = (ME.MinkowskiLinear(in_features, out_features)
                     if in_features != out_features else Identity())
    self.conv1 = ME.MinkowskiConvolution(
        in_features, out_features, kernel_size=3, stride=stride,
        dilation=dilation, dimension=dimension)
    self.norm1 = ME.MinkowskiBatchNorm(out_features, momentum=bn_momentum)
    self.conv2 = ME.MinkowskiConvolution(
        out_features, out_features, kernel_size=3, stride=1,
        dilation=dilation, dimension=dimension)
    self.norm2 = ME.MinkowskiBatchNorm(out_features, momentum=bn_momentum)
    # NOTE(review): MinkowskiLeakyReLU is referenced without the ME. prefix —
    # presumably imported directly at file level; confirm.
    self.leaky_relu = MinkowskiLeakyReLU(negative_slope=leakiness)
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
    """Five-stage plain conv network (legacy ME API: pixel_dist/has_bias)."""
    super(SimpleNet, self).__init__(in_channels, out_channels, config, D)
    ks = 3
    # Per stage: (in, out, pixel_dist, stride).
    specs = [
        (in_channels, 64, 1, 2),
        (64, 128, 2, 2),
        (128, 128, 4, 1),
        (128, 128, 4, 1),
        (128, out_channels, 4, 1),
    ]
    for i, (cin, cout, pdist, st) in enumerate(specs, start=1):
        setattr(self, 'conv%d' % i, ME.MinkowskiConvolution(
            in_channels=cin, out_channels=cout, pixel_dist=pdist,
            kernel_size=ks, stride=st, dilation=1, has_bias=False,
            dimension=D))
        # NOTE(review): dense BatchNorm1d paired with sparse convs —
        # presumably applied to the feature matrix; confirm.
        setattr(self, 'bn%d' % i, nn.BatchNorm1d(cout))
    self.relu = nn.ReLU(inplace=True)
def __init__(self, flow_dim=3, flow_channels=64, out_channels=3,
             bn_momentum=0.1, conv1_kernel_size=5, D=3):
    """Small bias-free conv stack mapping a flow field to out_channels.

    Args:
        flow_dim: input feature channels of the flow field.
        flow_channels: hidden width shared by the intermediate convs.
        out_channels: output feature channels of the final 1x1 conv.
        bn_momentum: accepted for interface compatibility; not used here.
        conv1_kernel_size: kernel size of the first convolution.
        D: spatial dimension of the sparse tensors.
    """
    ME.MinkowskiNetwork.__init__(self, D)
    # Removed unused locals NORM_TYPE/BLOCK_NORM_TYPE: they were assigned
    # from class attributes but never read.

    def conv(cin, cout, ks):
        # Bias-free convolution, stride 1, no dilation.
        return ME.MinkowskiConvolution(
            in_channels=cin, out_channels=cout, kernel_size=ks,
            stride=1, dilation=1, bias=False, dimension=D)

    self.conv1 = conv(flow_dim, flow_channels, conv1_kernel_size)
    self.conv2 = conv(flow_channels, flow_channels, 3)
    self.conv3 = conv(flow_channels, flow_channels, 3)
    self.conv4 = conv(flow_channels, flow_channels, 3)
    # 1x1 projection to the output width.
    self.final = conv(flow_channels, out_channels, 1)
def test_sum(self):
    """Stress test: repeatedly run a nested MinkowskiStackSum model on GPU.

    Rebuilds the sparse tensor on every iteration to exercise allocation
    and cache paths; there is no assertion — completing without error or
    OOM is the pass condition.
    """
    coords, colors, pcd = load_file("1.ply")
    device = "cuda"
    D = 3
    batch_size = 16
    voxel_size = 0.02
    channels = [3, 64, 128]
    # Quantize the cloud and replicate it batch_size times.
    dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
    bcoords = batched_coordinates([dcoords for _ in range(batch_size)])
    in_feats = torch.rand(len(bcoords), 3).to(0)
    layer = MinkowskiStackSum(
        ME.MinkowskiConvolution(
            channels[0], channels[1], kernel_size=3, stride=1, dimension=3,
        ),
        nn.Sequential(
            ME.MinkowskiConvolution(
                channels[0], channels[1], kernel_size=3, stride=2, dimension=3,
            ),
            ME.MinkowskiStackSum(
                nn.Identity(),
                nn.Sequential(
                    ME.MinkowskiConvolution(
                        channels[1], channels[2], kernel_size=3, stride=2,
                        dimension=3,
                    ),
                    ME.MinkowskiConvolutionTranspose(
                        channels[2], channels[1], kernel_size=3, stride=1,
                        dimension=3,
                    ),
                    ME.MinkowskiPoolingTranspose(
                        kernel_size=2, stride=2, dimension=D
                    ),
                ),
            ),
            ME.MinkowskiPoolingTranspose(kernel_size=2, stride=2, dimension=D),
        ),
    ).cuda()
    # Loop index is unused — iterate with `_`.
    for _ in range(1000):
        torch.cuda.empty_cache()
        sinput = ME.SparseTensor(in_feats, coordinates=bcoords, device=device)
        layer(sinput)
def __init__(self, in_nchannel, out_nchannel, D):
    """Small U-Net: three down blocks, two transpose blocks, 1x1 head."""
    super(UNet, self).__init__(D)

    def down(cin, cout, st):
        # Conv + BN block with the given stride.
        return torch.nn.Sequential(
            ME.MinkowskiConvolution(in_channels=cin, out_channels=cout,
                                    kernel_size=3, stride=st, dimension=D),
            ME.MinkowskiBatchNorm(cout))

    def up(cin, cout):
        # Stride-2 transposed conv + BN block.
        return torch.nn.Sequential(
            ME.MinkowskiConvolutionTranspose(
                in_channels=cin, out_channels=cout, kernel_size=3, stride=2,
                dimension=D),
            ME.MinkowskiBatchNorm(cout))

    self.block1 = down(in_nchannel, 8, 1)
    self.block2 = down(8, 16, 2)
    self.block3 = down(16, 32, 2)
    self.block3_tr = up(32, 16)
    # 32-in presumably 16 (block3_tr) + 16 (block2 skip) — confirm in forward.
    self.block2_tr = up(32, 16)
    # 24-in presumably 16 + 8 (block1 skip) — confirm in forward.
    self.conv1_tr = ME.MinkowskiConvolution(
        in_channels=24, out_channels=out_nchannel, kernel_size=1, stride=1,
        dimension=D)
def network_initialization(self, in_channels, config, D):
    """Build four upsampling heads and four 1x1 feature-projection heads."""
    up_ks = 3

    def up(cin, cout):
        # Stride-2 transposed conv that generates new coordinates, + BN + ELU.
        return nn.Sequential(
            ME.MinkowskiConvolutionTranspose(
                cin, cout, kernel_size=up_ks, stride=2,
                generate_new_coords=True, dimension=3),
            ME.MinkowskiBatchNorm(cout),
            ME.MinkowskiELU())

    def feat(cin):
        # 1x1 projection to the shared upsample feature size.
        return nn.Sequential(
            ME.MinkowskiConvolution(
                cin, config.upsample_feat_size, kernel_size=1, dimension=3),
            ME.MinkowskiBatchNorm(config.upsample_feat_size),
            ME.MinkowskiELU())

    self.conv_up1 = up(in_channels[0], in_channels[0])
    self.conv_up2 = up(in_channels[1], in_channels[0])
    self.conv_up3 = up(in_channels[2], in_channels[1])
    self.conv_up4 = up(in_channels[3], in_channels[2])
    self.conv_feat1 = feat(in_channels[0])
    self.conv_feat2 = feat(in_channels[1])
    self.conv_feat3 = feat(in_channels[2])
    self.conv_feat4 = feat(in_channels[3])
def __init__(self, in_channels, out_channels, D=3):
    """Tiny encoder-decoder: downsample once, upsample back, 1x1 head."""
    nn.Module.__init__(self)
    layers = [
        ME.MinkowskiConvolution(in_channels, 32, 3, dimension=D),
        ME.MinkowskiBatchNorm(32),
        ME.MinkowskiReLU(),
        # Downsample by 2.
        ME.MinkowskiConvolution(32, 64, 3, stride=2, dimension=D),
        ME.MinkowskiBatchNorm(64),
        ME.MinkowskiReLU(),
        # Upsample back to input resolution.
        ME.MinkowskiConvolutionTranspose(64, 32, 3, stride=2, dimension=D),
        ME.MinkowskiBatchNorm(32),
        ME.MinkowskiReLU(),
        # 1x1 projection to the output channels.
        ME.MinkowskiConvolution(32, out_channels, kernel_size=1, dimension=D),
    ]
    self.net = nn.Sequential(*layers)
def __init__(self, in_nchannel, out_nchannel, D):
    """Two-level stacked-sum network ending in a dense linear classifier."""
    ME.MinkowskiNetwork.__init__(self, D)
    ch = [in_nchannel, 16, 32]
    # Inner sum: identity + (downsample conv, conv-transpose, unpool).
    inner = ME.MinkowskiStackSum(
        nn.Identity(),
        nn.Sequential(
            ME.MinkowskiConvolution(
                ch[1], ch[2], kernel_size=3, stride=2, dimension=D),
            ME.MinkowskiConvolutionTranspose(
                ch[2], ch[1], kernel_size=3, stride=1, dimension=D),
            ME.MinkowskiPoolingTranspose(kernel_size=2, stride=2, dimension=D),
        ),
    )
    # Outer sum: stride-1 conv + (downsample, inner sum, unpool) branch.
    outer = ME.MinkowskiStackSum(
        ME.MinkowskiConvolution(
            ch[0], ch[1], kernel_size=3, stride=1, dimension=D),
        nn.Sequential(
            ME.MinkowskiConvolution(
                ch[0], ch[1], kernel_size=3, stride=2, dimension=D),
            inner,
            ME.MinkowskiPoolingTranspose(kernel_size=2, stride=2, dimension=D),
        ),
    )
    self.net = nn.Sequential(
        outer,
        ME.MinkowskiToFeature(),
        nn.Linear(ch[1], out_nchannel, bias=True),
    )
def network_initialization(self, in_channels, out_channels, D):
    """Backbone plus a two-layer classification head (sizes from constants)."""
    self.inplanes = self.INIT_DIM
    # Stem: 5x5 conv, BN, ReLU, sum pooling.
    self.conv1 = ME.MinkowskiConvolution(
        in_channels, self.inplanes, kernel_size=5, stride=1, dimension=D)
    self.bn1 = ME.MinkowskiBatchNorm(self.inplanes)
    self.relu = ME.MinkowskiReLU(inplace=True)
    self.pool = ME.MinkowskiSumPooling(kernel_size=2, stride=2, dimension=D)
    # Four residual stages, stride 2 each.
    self.layer1 = self._make_layer(self.BLOCK, self.PLANES[0], self.LAYERS[0], stride=2)
    self.layer2 = self._make_layer(self.BLOCK, self.PLANES[1], self.LAYERS[1], stride=2)
    self.layer3 = self._make_layer(self.BLOCK, self.PLANES[2], self.LAYERS[2], stride=2)
    self.layer4 = self._make_layer(self.BLOCK, self.PLANES[3], self.LAYERS[3], stride=2)
    self.glob_avg = ME.MinkowskiGlobalPooling(dimension=D)
    # MLP head; note the final BN is not followed by a ReLU here.
    self.classification_block = nn.Sequential(
        ME.MinkowskiLinear(self.inplanes, self.inplanes, bias=False),
        ME.MinkowskiBatchNorm(self.inplanes),
        ME.MinkowskiReLU(),
        ME.MinkowskiLinear(self.inplanes, self.inplanes, bias=False),
        ME.MinkowskiBatchNorm(self.inplanes))
    self.final = ME.MinkowskiLinear(self.inplanes, out_channels, bias=True)
def decomposition():
    """Demonstrate per-batch decomposition of a sparse tensor after a conv."""
    coords0, feats0 = to_sparse_coo(data_batch_0)
    coords1, feats1 = to_sparse_coo(data_batch_1)
    coords, feats = ME.utils.sparse_collate(
        coords=[coords0, coords1], feats=[feats0, feats1])
    # Build the sparse tensor and run one strided convolution over it.
    stensor = ME.SparseTensor(coords=coords, feats=feats)
    conv = ME.MinkowskiConvolution(
        in_channels=1, out_channels=2, kernel_size=3, stride=2, dimension=2)
    out = conv(stensor)
    # Per-batch lists of coordinates and features.
    list_of_coords = out.decomposed_coordinates
    list_of_feats = out.decomposed_features
    list_of_coords, list_of_feats = out.decomposed_coordinates_and_features
    # Access a single batch element by its index.
    batch_index = 1
    coords = out.coordinates_at(batch_index)
    feats = out.features_at(batch_index)
    # An out-of-range batch index yields an empty result.
    batch_index = 3
    print(out.coordinates_at(batch_index))
def _make_layer(self, block, planes, blocks, stride=1, dilation=1,
                bn_momentum=0.1):
    """Stack `blocks` residual blocks; only the first may downsample."""
    downsample = None
    # Project the identity path when resolution or width changes.
    if stride != 1 or self.inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            ME.MinkowskiConvolution(
                self.inplanes, planes * block.expansion,
                kernel_size=1, stride=stride, dimension=self.D),
            ME.MinkowskiBatchNorm(planes * block.expansion))
    # First block carries the stride and the optional projection.
    layers = [block(self.inplanes, planes, stride=stride, dilation=dilation,
                    downsample=downsample, dimension=self.D)]
    self.inplanes = planes * block.expansion
    # Remaining blocks keep the resolution.
    layers.extend(
        block(self.inplanes, planes, stride=1, dilation=dilation,
              dimension=self.D)
        for _ in range(1, blocks))
    return nn.Sequential(*layers)
def __init__(self, use_cuda=True, kernel_sizes=None, channels=None,
             symmetric_mode=True, bn=False, dimension=6):
    """Sparse neighbourhood-consensus filter: ReLU -> 1->1 conv -> sigmoid.

    Args:
        use_cuda: move the conv stack to the GPU when True.
        kernel_sizes: list of kernel sizes; only the first entry is used.
            Defaults to [3]. (Was a mutable default argument stored on
            self — replaced with a None sentinel so instances don't share
            one list.)
        channels: kept for interface compatibility; defaults to [1].
        symmetric_mode: stored for use by the forward pass.
        bn: accepted for interface compatibility; not used here.
        dimension: spatial dimension of the sparse convolution.
    """
    super(GeometricSparseNeighConsensus, self).__init__()
    kernel_sizes = [3] if kernel_sizes is None else kernel_sizes
    channels = [1] if channels is None else channels
    self.symmetric_mode = symmetric_mode
    self.kernel_sizes = kernel_sizes
    self.channels = channels
    # Removed unused locals (num_layers, ch_in, ch_out) and dead
    # commented-out layers from the original implementation.
    k_size = kernel_sizes[0]
    self.conv = nn.Sequential(
        ME.MinkowskiReLU(inplace=True),
        ME.MinkowskiConvolution(1, 1, kernel_size=k_size, bias=False,
                                dimension=dimension),
        ME.MinkowskiSigmoid())
    if use_cuda:
        self.conv.cuda()
def __init__(self, input_a_dim, input_b_dim, out_dim, kernel_size=2):
    """Deconv stream A (4x), concat with stream B, then project to out_dim.

    NOTE(review): the kernel_size parameter is not used by any layer — the
    transpose conv is fixed at 4 and the projection at 3; confirm intended.
    """
    super().__init__()
    self.input_a_dim = input_a_dim
    self.input_b_dim = input_b_dim
    self.out_dim = out_dim
    # 4x upsampling branch for input A.
    self.conv_a = nn.Sequential(
        ME.MinkowskiConvolutionTranspose(
            in_channels=input_a_dim, out_channels=input_a_dim,
            kernel_size=4, stride=4, dimension=3),
        ME.MinkowskiBatchNorm(input_a_dim),
        ME.MinkowskiReLU(),
    )
    # Output projection applied to the concatenated A+B features.
    self.conv_proj = nn.Sequential(
        ME.MinkowskiConvolution(
            in_channels=input_a_dim + input_b_dim, out_channels=out_dim,
            kernel_size=3, stride=1, dimension=3),
        ME.MinkowskiBatchNorm(out_dim),
        ME.MinkowskiReLU(),
    )
def __init__(self, down_conv_nn, kernel_size, stride, dilation, dimension=3,
             bn_momentum=0.01, norm_type=NormType.BATCH_NORM,
             block_norm_type=NormType.BATCH_NORM, **kwargs):
    """Strided conv + norm + residual block at `dimension` spatial dims."""
    ME.MinkowskiNetwork.__init__(self, dimension)
    cin, cout = down_conv_nn[0], down_conv_nn[1]
    # Bias-free downsampling convolution.
    self.conv = ME.MinkowskiConvolution(
        in_channels=cin, out_channels=cout, kernel_size=kernel_size,
        stride=stride, dilation=dilation, has_bias=False, dimension=dimension)
    self.norm = get_norm(norm_type, cout, bn_momentum=bn_momentum, D=dimension)
    self.block = get_block(block_norm_type, cout, cout,
                           bn_momentum=bn_momentum, D=dimension)
def conv(in_channels, out_channels, kernel_size=3, stride=1, dilation=1,
         bias=False, region_type=0, dimension=3):
    """Create a MinkowskiConvolution with an explicit kernel generator.

    Args:
        in_channels, out_channels: feature widths.
        kernel_size, stride, dilation: kernel geometry (dilation is applied
            through the kernel generator).
        bias: whether the convolution has a bias term. Fixed: the original
            accepted this parameter but never forwarded it, so bias=True
            was silently ignored.
        region_type: an ME.RegionType, or an int code
            (0 -> HYPER_CUBE, 1 -> HYPER_CROSS).
        dimension: spatial dimension.

    Raises:
        ValueError: for an unsupported integer region-type code.
    """
    if not isinstance(region_type, ME.RegionType):
        codes = {0: ME.RegionType.HYPER_CUBE, 1: ME.RegionType.HYPER_CROSS}
        if region_type not in codes:
            raise ValueError('Unsupported region type')
        region_type = codes[region_type]
    kernel_generator = ME.KernelGenerator(kernel_size=kernel_size,
                                          stride=stride,
                                          dilation=dilation,
                                          region_type=region_type,
                                          dimension=dimension)
    return ME.MinkowskiConvolution(in_channels,
                                   out_channels,
                                   kernel_size=kernel_size,
                                   stride=stride,
                                   bias=bias,
                                   kernel_generator=kernel_generator,
                                   dimension=dimension)
def conv(in_planes, out_planes, kernel_size, stride=1, dilation=1, bias=False,
         conv_type=ConvType.HYPERCUBE, D=-1):
    """Create a MinkowskiConvolution whose kernel shape follows conv_type."""
    assert D > 0, 'Dimension must be a positive integer'
    # conv_type determines region/axis types and may rewrite kernel_size.
    region_type, axis_types, kernel_size = convert_conv_type(
        conv_type, kernel_size, D)
    generator = ME.KernelGenerator(
        kernel_size, stride, dilation,
        region_type=region_type, axis_types=axis_types, dimension=D)
    return ME.MinkowskiConvolution(
        in_channels=in_planes, out_channels=out_planes,
        kernel_size=kernel_size, stride=stride, dilation=dilation,
        has_bias=bias, kernel_generator=generator, dimension=D)
def network_initialization(self, in_channels, out_channels, D):
    """Two conv pairs separated by a strided max-pool.

    NOTE(review): out_channels is not consumed here — presumably used by a
    subsequent layer or subclass; confirm.
    """
    self.conv1 = ME.MinkowskiConvolution(in_channels, 8, kernel_size=3,
                                         dimension=D)
    self.conv2 = ME.MinkowskiConvolution(8, 8, kernel_size=3, dimension=D)
    # Stride-2 pooling between the two conv pairs.
    self.pool = ME.MinkowskiMaxPooling(kernel_size=3, stride=2, dimension=D)
    self.conv3 = ME.MinkowskiConvolution(8, 16, kernel_size=3, dimension=D)
    self.conv4 = ME.MinkowskiConvolution(16, 16, kernel_size=3, dimension=D)
def post_act_block(in_channels, out_channels, kernel_size=1, stride=1,
                   padding=0, dimension=None):
    """Conv -> BatchNorm -> ReLU block for sparse tensors.

    Args:
        in_channels: input feature channels.
        out_channels: output feature channels.
        kernel_size: convolution kernel size (default 1).
        stride: convolution stride (default 1).
        padding: forwarded to the convolution (default 0).
        dimension: spatial dimension of the sparse tensor.

    Returns:
        nn.Sequential of the convolution, batch norm and ReLU.
    """
    return nn.Sequential(
        ME.MinkowskiConvolution(in_channels, out_channels, kernel_size,
                                padding=padding, stride=stride, dilation=1,
                                has_bias=False, dimension=dimension),
        ME.MinkowskiBatchNorm(out_channels),
        ME.MinkowskiReLU())
def __init__(self, input_dim, out_dim, k=16):
    """Transition-down layer: pointwise MLP branch plus a strided sparse conv.

    Args:
        input_dim: feature dimension of the input.
        out_dim: feature dimension of the output.
        k: neighbourhood size for kNN (default 16).
    """
    super().__init__()
    self.k = k
    self.input_dim = input_dim
    self.out_dim = out_dim
    # Pointwise MLP over (feature + xyz) neighbourhoods.
    self.mlp_convs = nn.ModuleList([
        nn.Conv2d(input_dim + 3, input_dim, 1),
        nn.Conv2d(input_dim, out_dim, 1),
    ])
    self.mlp_bns = nn.ModuleList([
        nn.BatchNorm2d(input_dim),
        nn.BatchNorm2d(out_dim),
    ])
    # Stride-2 sparse convolution performs the spatial downsampling.
    self.conv = ME.MinkowskiConvolution(
        input_dim, out_dim, kernel_size=3, stride=2, bias=False, dimension=3)
def __init__(self, down_conv_nn=None, kernel_size=3, stride=1, dilation=1,
             has_bias=False, activation=None, bn_momentum=0.01, dimension=-1,
             **kwargs):
    """Conv -> BN -> ResNet basic block -> activation.

    Args:
        down_conv_nn: [in_channels, out_channels] for the convolution.
            (Was a mutable [] default — replaced with a None sentinel.)
        kernel_size, stride, dilation: convolution geometry.
        has_bias: accepted for interface compatibility.
            NOTE(review): it is not forwarded to the convolution — confirm
            whether that is intended.
        activation: activation module; defaults to a fresh
            ME.MinkowskiReLU(inplace=True). The original instantiated the
            module in the signature, so every instance that used the
            default shared one activation object created at def time —
            fixed with a None sentinel.
        bn_momentum: batch-norm momentum.
        dimension: spatial dimension (must be provided by the caller).
    """
    super(SimpleBlockDown, self).__init__()
    if down_conv_nn is None:
        down_conv_nn = []
    self.conv = ME.MinkowskiConvolution(
        down_conv_nn[0],
        down_conv_nn[1],
        kernel_size=kernel_size,
        stride=stride,
        dilation=dilation,
        dimension=dimension,
    )
    self.bn = ME.MinkowskiBatchNorm(down_conv_nn[1], momentum=bn_momentum)
    self.block = BasicBlock(down_conv_nn[1], down_conv_nn[1],
                            bn_momentum=bn_momentum, dimension=dimension)
    self.activation = (ME.MinkowskiReLU(inplace=True)
                       if activation is None else activation)
def network_initialization(self, in_channels, config, D):
    """Proposal head: two 1x1 conv+IN stages, then class/bbox output convs."""
    feat = config.proposal_feat_size
    self.conv1 = ME.MinkowskiConvolution(in_channels, feat, kernel_size=1,
                                         dimension=3)
    self.bn1 = ME.MinkowskiInstanceNorm(feat)
    self.conv2 = ME.MinkowskiConvolution(feat, feat, kernel_size=1,
                                         dimension=3)
    self.bn2 = ME.MinkowskiInstanceNorm(feat)
    # 2 class-logit values and 6 bbox values per output channel.
    self.final_class_logits = ME.MinkowskiConvolution(
        feat, self.out_channels * 2, kernel_size=1, dimension=3,
        has_bias=True)
    self.final_bbox = ME.MinkowskiConvolution(
        feat, self.out_channels * 6, kernel_size=1, dimension=3,
        has_bias=True)
    self.elu = ME.MinkowskiELU()
    self.softmax = ME.MinkowskiSoftmax()
    if self.is_rotation_bbox:
        # Extra rotation-regression head when rotated boxes are enabled.
        self.final_rotation = ME.MinkowskiConvolution(
            feat, self.out_channels * self.rotation_criterion.NUM_OUTPUT,
            kernel_size=1, dimension=3, has_bias=True)