def __init__(self, inc, outc, ks=3, stride=1, dilation=1, D=4):
    """Residual block for 4D (3 spatial + 1 temporal axis) sparse tensors.

    Both convolutions use a HYBRID kernel region: HYPERCUBE on the three
    spatial axes and HYPERCROSS on the fourth axis, which keeps the
    cross-axis receptive field sparse.

    Args:
        inc: input feature channels.
        outc: output feature channels.
        ks: kernel size passed to both kernel generators.
        stride: only used to decide whether a projection skip is needed.
            NOTE(review): stride and dilation are NOT forwarded to the
            main-branch convolutions, while the skip conv below does use
            `stride` — with stride != 1 the two branches would operate at
            different resolutions. Confirm this is intentional.
        dilation: accepted but unused (see note above).
        D: tensor dimension (expected 4).
    """
    super(ResidualBlock4d, self).__init__()
    self.net = nn.Sequential(
        ME.MinkowskiConvolution(
            inc,
            outc,
            dimension=D,
            kernel_generator=ME.KernelGenerator(
                kernel_size=ks,
                dimension=D,
                region_type=ME.RegionType.HYBRID,
                axis_types=(ME.RegionType.HYPERCUBE, ME.RegionType.HYPERCUBE,
                            ME.RegionType.HYPERCUBE, ME.RegionType.HYPERCROSS))),
        ME.MinkowskiBatchNorm(outc),
        ME.MinkowskiReLU(True),
        ME.MinkowskiConvolution(
            outc,
            outc,
            dimension=D,
            kernel_generator=ME.KernelGenerator(
                kernel_size=ks,
                dimension=D,
                region_type=ME.RegionType.HYBRID,
                axis_types=(ME.RegionType.HYPERCUBE, ME.RegionType.HYPERCUBE,
                            ME.RegionType.HYPERCUBE, ME.RegionType.HYPERCROSS))),
        ME.MinkowskiBatchNorm(outc))
    # Reset both batch norms (net[1] and net[4]) to identity scale/shift.
    nn.init.constant_(self.net[1].bn.weight, 1.0)
    nn.init.constant_(self.net[1].bn.bias, 0.0)
    nn.init.constant_(self.net[4].bn.weight, 1.0)
    nn.init.constant_(self.net[4].bn.bias, 0.0)
    # Identity skip when shapes match; otherwise a strided 1x1 conv + BN.
    self.downsample = nn.Sequential() if (inc == outc and stride == 1) else nn.Sequential(
        ME.MinkowskiConvolution(
            inc, outc, kernel_size=1, dilation=1, stride=stride, dimension=D),
        ME.MinkowskiBatchNorm(outc))
    if len(self.downsample) > 0:
        nn.init.constant_(self.downsample[1].bn.weight, 1.0)
        nn.init.constant_(self.downsample[1].bn.bias, 0.0)
    self.relu = ME.MinkowskiReLU(True)
def network_initialization(self, in_channels, out_channels, D):
    """Build the ResNet-style backbone: stem, four residual stages, head.

    Expects `self.init_dim`, `self.block`, `self.planes`, `self.layers`
    to be set by the subclass/config before this is called.

    Args:
        in_channels: channels of the input sparse tensor.
        out_channels: number of output classes/features of the final linear.
        D: spatial dimension of the sparse tensors.
    """
    self.inplanes = self.init_dim
    # Stem: stride-2 5x5 conv + BN + ReLU + stride-2 average pooling.
    self.conv1 = ME.MinkowskiConvolution(
        in_channels, self.inplanes, kernel_size=5, stride=2, dimension=D)
    self.bn1 = ME.MinkowskiBatchNorm(self.inplanes)
    self.relu = ME.MinkowskiReLU(inplace=True)
    self.pool = ME.MinkowskiAvgPooling(kernel_size=2, stride=2, dimension=D)
    # Four stages, each halving resolution (stride=2).
    self.layer1 = self._make_layer(
        self.block, self.planes[0], self.layers[0], stride=2)
    self.layer2 = self._make_layer(
        self.block, self.planes[1], self.layers[1], stride=2)
    self.layer3 = self._make_layer(
        self.block, self.planes[2], self.layers[2], stride=2)
    self.layer4 = self._make_layer(
        self.block, self.planes[3], self.layers[3], stride=2)
    self.conv5 = ME.MinkowskiConvolution(
        self.inplanes, self.inplanes, kernel_size=3, stride=3, dimension=D)
    self.bn5 = ME.MinkowskiBatchNorm(self.inplanes)
    # NOTE(review): despite the name, this is global MAX pooling, not avg.
    self.glob_avg = ME.MinkowskiGlobalMaxPooling()
    self.final = ME.MinkowskiLinear(self.inplanes, out_channels, bias=True)
def __init__(self, in_features, out_features, stride=1, dilation=1,
             downsample=None, bn_momentum=0.1, leakiness=0.0, dimension=-1):
    """Residual block with a linear (1x1) projection skip when channels change.

    Args:
        in_features: input feature channels.
        out_features: output feature channels.
        stride: stride of the first 3x3 convolution.
        dilation: dilation of both convolutions.
        downsample: NOTE(review): accepted but never stored/used — the skip
            path is `self.residual` instead. Confirm the parameter is vestigial.
        bn_momentum: momentum for both batch norms.
        leakiness: negative slope of the leaky ReLU.
        dimension: sparse tensor dimension; must be positive.
    """
    super(ResNetBlock, self).__init__()
    assert dimension > 0
    # Channel-matching skip: linear projection if shapes differ, identity otherwise.
    if in_features != out_features:
        self.residual = ME.MinkowskiLinear(in_features, out_features)
    else:
        self.residual = Identity()
    self.conv1 = ME.MinkowskiConvolution(in_features, out_features,
                                         kernel_size=3, stride=stride,
                                         dilation=dilation, dimension=dimension)
    self.norm1 = ME.MinkowskiBatchNorm(out_features, momentum=bn_momentum)
    self.conv2 = ME.MinkowskiConvolution(out_features, out_features,
                                         kernel_size=3, stride=1,
                                         dilation=dilation, dimension=dimension)
    self.norm2 = ME.MinkowskiBatchNorm(out_features, momentum=bn_momentum)
    # Presumably imported directly from MinkowskiEngine (no ME. prefix) — confirm.
    self.leaky_relu = MinkowskiLeakyReLU(negative_slope=leakiness)
def network_initialization(self, in_channels, out_channels, D):
    """Build backbone + MLP classification head.

    Expects class-level constants `self.INIT_DIM`, `self.BLOCK`,
    `self.PLANES`, `self.LAYERS` to be defined by the subclass.

    Args:
        in_channels: channels of the input sparse tensor.
        out_channels: number of outputs of the final linear layer.
        D: spatial dimension of the sparse tensors.
    """
    self.inplanes = self.INIT_DIM
    # Stem: stride-1 5x5 conv + BN + ReLU + stride-2 sum pooling.
    self.conv1 = ME.MinkowskiConvolution(
        in_channels, self.inplanes, kernel_size=5, stride=1, dimension=D)
    self.bn1 = ME.MinkowskiBatchNorm(self.inplanes)
    self.relu = ME.MinkowskiReLU(inplace=True)
    self.pool = ME.MinkowskiSumPooling(kernel_size=2, stride=2, dimension=D)
    # Four residual stages, each at stride 2.
    self.layer1 = self._make_layer(
        self.BLOCK, self.PLANES[0], self.LAYERS[0], stride=2)
    self.layer2 = self._make_layer(
        self.BLOCK, self.PLANES[1], self.LAYERS[1], stride=2)
    self.layer3 = self._make_layer(
        self.BLOCK, self.PLANES[2], self.LAYERS[2], stride=2)
    self.layer4 = self._make_layer(
        self.BLOCK, self.PLANES[3], self.LAYERS[3], stride=2)
    self.glob_avg = ME.MinkowskiGlobalPooling(dimension=D)
    # Two bias-free linear+BN layers before the final classifier.
    self.classification_block = nn.Sequential(
        ME.MinkowskiLinear(self.inplanes, self.inplanes, bias=False),
        ME.MinkowskiBatchNorm(self.inplanes),
        ME.MinkowskiReLU(),
        ME.MinkowskiLinear(self.inplanes, self.inplanes, bias=False),
        ME.MinkowskiBatchNorm(self.inplanes))
    self.final = ME.MinkowskiLinear(self.inplanes, out_channels, bias=True)
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None,
             bn_momentum=0.1, leakiness=0, dimension=-1):
    """Two 3x3 conv + BN pairs with a leaky-ReLU and an optional downsample skip.

    Args:
        inplanes: input channels.
        planes: output channels of both convolutions.
        stride: stride of the first convolution only.
        dilation: dilation of both convolutions.
        downsample: optional module applied on the skip path.
        bn_momentum: batch-norm momentum.
        leakiness: negative slope of the leaky ReLU.
        dimension: sparse tensor dimension; must be positive.
    """
    super(BasicBlock, self).__init__()
    assert dimension > 0

    def conv3x3(cin, cout, conv_stride):
        # Shared builder: 3x3 sparse convolution at this block's dilation.
        return ME.MinkowskiConvolution(cin, cout, kernel_size=3,
                                       stride=conv_stride, dilation=dilation,
                                       dimension=dimension)

    self.conv1 = conv3x3(inplanes, planes, stride)
    self.norm1 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
    self.conv2 = conv3x3(planes, planes, 1)
    self.norm2 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
    self.relu = ME.MinkowskiLeakyReLU(negative_slope=leakiness)
    self.downsample = downsample
def __init__(self, input_a_dim, input_b_dim, out_dim, kernel_size=2):
    """Deconv x_a, concat with x_b, then apply an output projection.

    (This text was previously a bare string placed *after*
    ``super().__init__()`` — a no-op statement, never the docstring.
    Moved here so it is attached to the method.)

    Args:
        input_a_dim: channels of the coarse input that gets upsampled 4x.
        input_b_dim: channels of the fine input concatenated after upsampling.
        out_dim: channels produced by the projection conv.
        kernel_size: NOTE(review): accepted but unused — the transpose conv
            hard-codes kernel_size=4, stride=4. Confirm whether it should
            be forwarded.
    """
    super().__init__()
    self.input_a_dim = input_a_dim
    self.input_b_dim = input_b_dim
    self.out_dim = out_dim
    # 4x upsampling branch for x_a: deconv + BN + ReLU.
    self.conv_a = nn.Sequential(
        ME.MinkowskiConvolutionTranspose(in_channels=input_a_dim,
                                         out_channels=input_a_dim,
                                         kernel_size=4,
                                         stride=4,
                                         dimension=3),
        ME.MinkowskiBatchNorm(input_a_dim),
        ME.MinkowskiReLU(),
    )
    # Projection applied to the concatenation [x_a_upsampled, x_b].
    self.conv_proj = nn.Sequential(
        ME.MinkowskiConvolution(in_channels=input_a_dim + input_b_dim,
                                out_channels=out_dim,
                                kernel_size=3,
                                stride=1,
                                dimension=3),
        ME.MinkowskiBatchNorm(out_dim),
        ME.MinkowskiReLU(),
    )
def __init__(self, inc, outc, ks=3, stride=1, dilation=1, D=3):
    """Standard residual block: (conv-BN-ReLU-conv-BN) + skip, then ReLU.

    Args:
        inc: input channels.
        outc: output channels.
        ks: kernel size of both main-branch convolutions.
        stride: stride of the first convolution (second is stride 1).
        dilation: dilation of both main-branch convolutions.
        D: sparse tensor dimension.
    """
    super(ResidualBlock, self).__init__()
    self.net = nn.Sequential(
        ME.MinkowskiConvolution(
            inc, outc, kernel_size=ks, dilation=dilation, stride=stride,
            dimension=D),
        ME.MinkowskiBatchNorm(outc),
        ME.MinkowskiReLU(True),
        ME.MinkowskiConvolution(
            outc, outc, kernel_size=ks, dilation=dilation, stride=1,
            dimension=D),
        ME.MinkowskiBatchNorm(outc))
    # Reset both batch norms (net[1], net[4]) to identity scale/shift.
    nn.init.constant_(self.net[1].bn.weight, 1.0)
    nn.init.constant_(self.net[1].bn.bias, 0.0)
    nn.init.constant_(self.net[4].bn.weight, 1.0)
    nn.init.constant_(self.net[4].bn.bias, 0.0)
    # Identity skip when shapes match; otherwise a strided 1x1 conv + BN.
    self.downsample = nn.Sequential() if (inc == outc and stride == 1) else \
        nn.Sequential(
            ME.MinkowskiConvolution(
                inc, outc, kernel_size=1, dilation=1, stride=stride,
                dimension=D),
            ME.MinkowskiBatchNorm(outc))
    if len(self.downsample) > 0:
        nn.init.constant_(self.downsample[1].bn.weight, 1.0)
        nn.init.constant_(self.downsample[1].bn.bias, 0.0)
    self.relu = ME.MinkowskiReLU(True)
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None,
             bn_momentum=0.1, dimension=-1):
    """ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x self.expansion).

    `self.expansion` is a class attribute defined on the class (not visible
    in this method); the third conv widens to planes * expansion.

    Args:
        inplanes: input channels.
        planes: bottleneck width; output is planes * self.expansion.
        stride: stride of the middle 3x3 convolution.
        dilation: dilation of the middle 3x3 convolution.
        downsample: optional module for the skip path.
        bn_momentum: momentum for all three batch norms.
        dimension: sparse tensor dimension; must be positive.
    """
    super(Bottleneck, self).__init__()
    assert dimension > 0
    self.conv1 = ME.MinkowskiConvolution(inplanes, planes, kernel_size=1,
                                         dimension=dimension)
    self.norm1 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
    self.conv2 = ME.MinkowskiConvolution(planes, planes, kernel_size=3,
                                         stride=stride, dilation=dilation,
                                         dimension=dimension)
    self.norm2 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
    self.conv3 = ME.MinkowskiConvolution(planes, planes * self.expansion,
                                         kernel_size=1, dimension=dimension)
    self.norm3 = ME.MinkowskiBatchNorm(planes * self.expansion,
                                       momentum=bn_momentum)
    self.relu = ME.MinkowskiReLU(inplace=True)
    self.downsample = downsample
def __init__(self, in_feats, out_feats, D):
    """Two 3x3 conv + BN pairs sharing one in-place ReLU (a UNet stage).

    Args:
        in_feats: input channels of the first convolution.
        out_feats: output channels of both convolutions.
        D: sparse tensor dimension forwarded to the conv helper.
    """
    super(UNBlocks, self).__init__()
    # Kept for compatibility; populated elsewhere if at all.
    self.convs, self.bns = dict(), dict()
    self.relu = ME.MinkowskiReLU(inplace=True)
    # Registration order (conv1, bn1, conv2, bn2) matches the original code.
    for idx, cin in ((1, in_feats), (2, out_feats)):
        setattr(self, 'conv%d' % idx,
                conv(cin, out_feats, kernel_size=3, bias=False, D=D))
        setattr(self, 'bn%d' % idx, ME.MinkowskiBatchNorm(out_feats))
def get_norm(norm_type, n_channels, D, bn_momentum=0.1):
    """Return a normalization layer for the given NormType.

    Args:
        norm_type: one of NormType.BATCH_NORM / INSTANCE_NORM /
            INSTANCE_BATCH_NORM.
        n_channels: number of feature channels to normalize.
        D: sparse tensor dimension (unused by the current layer types).
        bn_momentum: momentum for batch-norm variants.

    Raises:
        ValueError: if `norm_type` is not a supported NormType.
    """
    if norm_type == NormType.BATCH_NORM:
        return ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum)
    if norm_type == NormType.INSTANCE_NORM:
        return ME.MinkowskiInstanceNorm(n_channels)
    if norm_type == NormType.INSTANCE_BATCH_NORM:
        # Instance norm followed by batch norm.
        return nn.Sequential(
            ME.MinkowskiInstanceNorm(n_channels),
            ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum))
    raise ValueError(f'Norm type: {norm_type} not supported')
def __init__(self, in_nchannel, out_nchannel, D):
    """Small 3-level UNet built from conv/deconv + BN blocks.

    Args:
        in_nchannel: input feature channels.
        out_nchannel: output feature channels of the final 1x1 conv.
        D: sparse tensor dimension (passed to the base class).
    """
    super(UNet, self).__init__(D)
    # Encoder: 8 -> 16 -> 32 channels, downsampling by stride-2 convs.
    self.block1 = torch.nn.Sequential(
        ME.MinkowskiConvolution(
            in_channels=in_nchannel,
            out_channels=8,
            kernel_size=3,
            stride=1,
            dimension=D),
        ME.MinkowskiBatchNorm(8))
    self.block2 = torch.nn.Sequential(
        ME.MinkowskiConvolution(
            in_channels=8,
            out_channels=16,
            kernel_size=3,
            stride=2,
            dimension=D),
        ME.MinkowskiBatchNorm(16),
    )
    self.block3 = torch.nn.Sequential(
        ME.MinkowskiConvolution(
            in_channels=16,
            out_channels=32,
            kernel_size=3,
            stride=2,
            dimension=D),
        ME.MinkowskiBatchNorm(32))
    # Decoder: transpose convs back up; block2_tr takes 32 in-channels,
    # presumably the concatenation of a 16-channel skip with block3_tr's
    # 16-channel output — confirm in forward().
    self.block3_tr = torch.nn.Sequential(
        ME.MinkowskiConvolutionTranspose(
            in_channels=32,
            out_channels=16,
            kernel_size=3,
            stride=2,
            dimension=D),
        ME.MinkowskiBatchNorm(16))
    self.block2_tr = torch.nn.Sequential(
        ME.MinkowskiConvolutionTranspose(
            in_channels=32,
            out_channels=16,
            kernel_size=3,
            stride=2,
            dimension=D),
        ME.MinkowskiBatchNorm(16))
    # Final 1x1 conv; 24 in-channels presumably = 16 (decoder) + 8 (skip).
    self.conv1_tr = ME.MinkowskiConvolution(
        in_channels=24,
        out_channels=out_nchannel,
        kernel_size=1,
        stride=1,
        dimension=D)
def get_norm(norm_type, num_feats, bn_momentum=0.05, dimension=-1):
    """Build a normalization layer from its string code.

    Args:
        norm_type: 'BN' (batch), 'IN' (instance), or 'INBN' (instance
            followed by batch norm).
        num_feats: number of feature channels.
        bn_momentum: momentum for batch-norm variants.
        dimension: unused by the current layer types; kept for API parity.

    Raises:
        ValueError: for an unrecognized `norm_type`.
    """
    factories = {
        'BN': lambda: ME.MinkowskiBatchNorm(num_feats, momentum=bn_momentum),
        'IN': lambda: ME.MinkowskiInstanceNorm(num_feats),
        'INBN': lambda: nn.Sequential(
            ME.MinkowskiInstanceNorm(num_feats),
            ME.MinkowskiBatchNorm(num_feats, momentum=bn_momentum)),
    }
    if norm_type not in factories:
        raise ValueError(f'Type {norm_type}, not defined')
    return factories[norm_type]()
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
    """4-level UNet built from UNBlocks stages with concat skip connections.

    Args:
        in_channels: input feature channels.
        out_channels: channels of the final 1x1 conv.
        config: passed through to the base class.
        D: sparse tensor dimension.
    """
    super(UNet2, self).__init__(in_channels, out_channels, config, D)
    PLANES = self.PLANES

    # Output of the first conv concated to conv6
    self.conv1 = conv(in_channels, PLANES[0], kernel_size=3, stride=1,
                      bias=False, D=D)
    self.bn1 = ME.MinkowskiBatchNorm(PLANES[0])
    # Level 1: encoder block, stride-2 down conv (+BN), up conv, and a
    # decoder block taking the 2x-wide concat of skip + upsampled features.
    self.block1 = UNBlocks(PLANES[0], PLANES[0], D)
    self.down1 = conv(PLANES[0], PLANES[1], kernel_size=2, stride=2, D=D)
    self.down1bn = ME.MinkowskiBatchNorm(PLANES[1])
    self.up1 = conv_tr(PLANES[1], PLANES[0], kernel_size=2,
                       upsample_stride=2, D=D)
    self.block1up = UNBlocks(PLANES[0] * 2, PLANES[0], D)
    # Level 2. NOTE(review): unlike down1, down2/down3 have no batch norm
    # after the downsampling conv — confirm this asymmetry is intended.
    self.block2 = UNBlocks(PLANES[1], PLANES[1], D)
    self.down2 = conv(PLANES[1], PLANES[2], kernel_size=2, stride=2, D=D)
    self.up2 = conv_tr(PLANES[2], PLANES[1], kernel_size=2,
                       upsample_stride=2, D=D)
    self.block2up = UNBlocks(PLANES[1] * 2, PLANES[1], D)
    # Level 3.
    self.block3 = UNBlocks(PLANES[2], PLANES[2], D)
    self.down3 = conv(PLANES[2], PLANES[3], kernel_size=2, stride=2, D=D)
    self.up3 = conv_tr(PLANES[3], PLANES[2], kernel_size=2,
                       upsample_stride=2, D=D)
    self.block3up = UNBlocks(PLANES[2] * 2, PLANES[2], D)
    # Bottleneck.
    self.block4 = UNBlocks(PLANES[3], PLANES[3], D)
    self.relu = ME.MinkowskiReLU(inplace=True)
    self.final = conv(PLANES[0], out_channels, kernel_size=1, bias=True, D=D)
def __init__(self, input_nc, output_nc, convolution, dimension=3, reduction=4):
    """ResNet-style bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand, each
    followed by BN + ReLU, with a 1x1+BN downsample skip when channels change.

    NOTE(review): there is no super().__init__() call here. If this class
    subclasses nn.Module directly, assigning submodules below would fail —
    presumably a base class or metaclass handles initialization. Confirm.

    Args:
        input_nc: input channels.
        output_nc: output channels; the inner width is output_nc // reduction.
        convolution: conv class/factory to instantiate (dependency-injected).
        dimension: sparse tensor dimension.
        reduction: bottleneck squeeze factor.
    """
    self.block = (Seq().append(
        convolution(
            in_channels=input_nc,
            out_channels=output_nc // reduction,
            kernel_size=1,
            stride=1,
            dilation=1,
            bias=False,
            dimension=dimension,
        )).append(ME.MinkowskiBatchNorm(output_nc // reduction)).append(
            ME.MinkowskiReLU()).append(
                convolution(
                    output_nc // reduction,
                    output_nc // reduction,
                    kernel_size=3,
                    stride=1,
                    dilation=1,
                    bias=False,
                    dimension=dimension,
                )).append(ME.MinkowskiBatchNorm(
                    output_nc // reduction)).append(ME.MinkowskiReLU()).append(
                        convolution(
                            output_nc // reduction,
                            output_nc,
                            kernel_size=1,
                            stride=1,
                            dilation=1,
                            bias=False,
                            dimension=dimension,
                        )).append(ME.MinkowskiBatchNorm(output_nc)).append(
                            ME.MinkowskiReLU()))
    # Skip path: 1x1 projection + BN only when channel counts differ.
    if input_nc != output_nc:
        self.downsample = (Seq().append(
            convolution(
                in_channels=input_nc,
                out_channels=output_nc,
                kernel_size=1,
                stride=1,
                dilation=1,
                bias=False,
                dimension=dimension,
            )).append(ME.MinkowskiBatchNorm(output_nc)))
    else:
        self.downsample = None
def __init__(self, in_channels, out_channels, D=3):
    """Small hourglass network: two downs (via one stride-2 conv), one
    stride-2 deconv back up, then a 1x1 projection to `out_channels`.

    Args:
        in_channels: input feature channels.
        out_channels: output feature channels.
        D: sparse tensor dimension.
    """
    nn.Module.__init__(self)
    stages = []
    # Stem at full resolution.
    stages += [ME.MinkowskiConvolution(in_channels, 32, 3, dimension=D),
               ME.MinkowskiBatchNorm(32),
               ME.MinkowskiReLU()]
    # Downsample to half resolution.
    stages += [ME.MinkowskiConvolution(32, 64, 3, stride=2, dimension=D),
               ME.MinkowskiBatchNorm(64),
               ME.MinkowskiReLU()]
    # Upsample back to full resolution.
    stages += [ME.MinkowskiConvolutionTranspose(64, 32, 3, stride=2, dimension=D),
               ME.MinkowskiBatchNorm(32),
               ME.MinkowskiReLU()]
    # Final 1x1 projection.
    stages.append(
        ME.MinkowskiConvolution(32, out_channels, kernel_size=1, dimension=D))
    self.net = nn.Sequential(*stages)
def network_initialization(self, in_channels, config, D):
    """Build a 4-layer conv+BN head ending in a single-channel 1x1 conv
    (a mask/score prediction head).

    Uses `self.mask_feat_size`, `self.config.bn_momentum`, and `self.D`
    set elsewhere on the instance.

    Args:
        in_channels: channels of the input features.
        config: unused here directly; momentum is read from self.config.
        D: unused here directly; self.D is used instead —
            NOTE(review): confirm the parameters are intentionally shadowed.
    """
    self.conv1 = conv(in_channels, self.mask_feat_size, kernel_size=3,
                      stride=1, D=self.D)
    self.bn1 = ME.MinkowskiBatchNorm(self.mask_feat_size,
                                     momentum=self.config.bn_momentum)
    self.conv2 = conv(
        self.mask_feat_size, self.mask_feat_size, kernel_size=3, stride=1,
        D=self.D)
    self.bn2 = ME.MinkowskiBatchNorm(self.mask_feat_size,
                                     momentum=self.config.bn_momentum)
    self.conv3 = conv(
        self.mask_feat_size, self.mask_feat_size, kernel_size=3, stride=1,
        D=self.D)
    self.bn3 = ME.MinkowskiBatchNorm(self.mask_feat_size,
                                     momentum=self.config.bn_momentum)
    self.conv4 = conv(
        self.mask_feat_size, self.mask_feat_size, kernel_size=3, stride=1,
        D=self.D)
    self.bn4 = ME.MinkowskiBatchNorm(self.mask_feat_size,
                                     momentum=self.config.bn_momentum)
    # Single-channel output (per-point mask logit).
    self.final = conv(self.mask_feat_size, 1, kernel_size=1, stride=1,
                      D=self.D)
    self.relu = ME.MinkowskiReLU(inplace=True)
def __init__(self, up_conv_nn=[], kernel_size=3, stride=1, dilation=1,
             has_bias=False, activation=None, bn_momentum=0.01, dimension=-1,
             **kwargs):
    """Upsampling block: transpose conv + batch norm + ResNet BasicBlock,
    followed by an activation.

    Args:
        up_conv_nn: [in_channels, out_channels] for the transpose conv.
            (Kept as a list default for signature compatibility; it is
            only read, never mutated.)
        kernel_size: kernel size of the transpose convolution.
        stride: stride of the transpose convolution.
        dilation: dilation of the transpose convolution.
        has_bias: NOTE(review): accepted but never used — confirm whether
            it should be forwarded to the convolution.
        activation: activation module appended after the block. Defaults to
            a fresh ME.MinkowskiReLU(inplace=True). (Fix: the previous
            default was instantiated once at import time, so every instance
            shared the same module object.)
        bn_momentum: momentum for the batch norm (and the inner block's).
        dimension: sparse tensor dimension.
    """
    super(SimpleBlockUp, self).__init__()
    if activation is None:
        # Create per instance so instances do not share one module object.
        activation = ME.MinkowskiReLU(inplace=True)
    self.conv_tr = ME.MinkowskiConvolutionTranspose(
        up_conv_nn[0], up_conv_nn[1], kernel_size=kernel_size, stride=stride,
        dilation=dilation, dimension=dimension)
    self.bn = ME.MinkowskiBatchNorm(up_conv_nn[1], momentum=bn_momentum)
    self.block = BasicBlock(up_conv_nn[1], up_conv_nn[1],
                            bn_momentum=bn_momentum, dimension=dimension)
    self.activation = activation
def _make_layer(self, block, planes, blocks, stride=1, dilation=1,
                bn_momentum=0.1):
    """Build one residual stage of `blocks` blocks (standard ResNet recipe).

    Mutates `self.inplanes` to planes * block.expansion so subsequent
    stages chain correctly — call order matters.

    Args:
        block: block class with a class attribute `expansion`.
        planes: base width of the stage.
        blocks: number of blocks in the stage.
        stride: stride of the first block (rest are stride 1).
        dilation: dilation for every block.
        bn_momentum: accepted but not forwarded here —
            NOTE(review): confirm whether blocks should receive it.

    Returns:
        nn.Sequential of the constructed blocks.
    """
    downsample = None
    # Projection skip needed when resolution or channel count changes.
    if stride != 1 or self.inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            ME.MinkowskiConvolution(self.inplanes,
                                    planes * block.expansion,
                                    kernel_size=1,
                                    stride=stride,
                                    dimension=self.D),
            ME.MinkowskiBatchNorm(planes * block.expansion))
    layers = []
    # First block carries the stride and the downsample skip.
    layers.append(
        block(self.inplanes,
              planes,
              stride=stride,
              dilation=dilation,
              downsample=downsample,
              dimension=self.D))
    self.inplanes = planes * block.expansion
    # Remaining blocks run at stride 1 with matching channels.
    for i in range(1, blocks):
        layers.append(
            block(self.inplanes,
                  planes,
                  stride=1,
                  dilation=dilation,
                  dimension=self.D))
    return nn.Sequential(*layers)
def post_act_block(in_channels, out_channels, kernel_size=1, stride=1,
                   padding=0, dimension=None):
    """Build a conv -> BN -> ReLU block (post-activation ordering).

    NOTE(review): `padding` and `has_bias` match an older MinkowskiEngine
    convolution API (newer releases use `bias` and drop `padding`) —
    confirm the pinned MinkowskiEngine version.

    Args:
        in_channels: input feature channels.
        out_channels: output feature channels.
        kernel_size: convolution kernel size.
        stride: convolution stride.
        padding: forwarded to the convolution (see note above).
        dimension: sparse tensor dimension.

    Returns:
        nn.Sequential of conv, batch norm, and ReLU.
    """
    m = nn.Sequential(
        ME.MinkowskiConvolution(in_channels,
                                out_channels,
                                kernel_size,
                                padding=padding,
                                stride=stride,
                                dilation=1,
                                has_bias=False,
                                dimension=dimension),
        ME.MinkowskiBatchNorm(out_channels),
        ME.MinkowskiReLU())
    return m
def __init__(self, dims, use_bn=False, use_relu=False, use_dropout=False,
             use_bias=True):
    """MLP head of MinkowskiLinear layers with optional BN/ReLU/dropout.

    Args:
        dims: layer widths; dims[0] is the input size, each subsequent
            entry adds one linear layer.
        use_bn: append a batch norm after every linear (including the last).
        use_relu: append a ReLU after every linear except the last.
        use_dropout: append one dropout at the end of the stack.
        use_bias: bias flag for every linear layer.
    """
    super().__init__()
    layers = []
    last_dim = dims[0]
    counter = 1
    for dim in dims[1:]:
        layers.append(ME.MinkowskiLinear(last_dim, dim, bias=use_bias))
        counter += 1
        if use_bn:
            layers.append(
                ME.MinkowskiBatchNorm(
                    dim,
                    eps=1e-5,
                    momentum=0.1,
                ))
        # counter was already advanced past the layer just added, so the
        # ReLU is skipped for the final linear layer.
        if (counter < len(dims)) and use_relu:
            layers.append(ME.MinkowskiReLU(inplace=True))
        last_dim = dim
    # NOTE(review): a single dropout appended after all layers — confirm
    # this placement (vs. per-layer dropout inside the loop) against the
    # original training configuration.
    if use_dropout:
        layers.append(MinkowskiDropout.Dropout())
    self.clf = nn.Sequential(*layers)
def get_norm(norm_type, n_channels, D, bn_momentum=0.1):
    """Return a normalization layer for the given NormType.

    Args:
        norm_type: NormType.BATCH_NORM or NormType.SPARSE_INSTANCE_NORM.
        n_channels: number of feature channels.
        D: sparse tensor dimension (used by the instance-norm variant).
        bn_momentum: batch-norm momentum.

    Raises:
        ValueError: for an unsupported norm type.
    """
    if norm_type == NormType.BATCH_NORM:
        return ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum)
    if norm_type == NormType.SPARSE_INSTANCE_NORM:
        return ME.MinkowskiInstanceNorm(n_channels, dimension=D)
    raise ValueError(f'Norm type: {norm_type} not supported')
def network_initialization(self, in_channels, out_channels, D):
    """Build stem, four residual stages, and a dropout+GELU head.

    Expects `self.INIT_DIM`, `self.BLOCK`, `self.PLANES`, `self.LAYERS`
    to be set as class-level configuration.

    Args:
        in_channels: channels of the input sparse tensor.
        out_channels: output size of the final linear layer.
        D: sparse tensor dimension.
    """
    self.inplanes = self.INIT_DIM
    # Stem: stride-2 conv + BN + ReLU + stride-2 max pooling.
    self.conv1 = nn.Sequential(
        ME.MinkowskiConvolution(in_channels,
                                self.inplanes,
                                kernel_size=3,
                                stride=2,
                                dimension=D),
        ME.MinkowskiBatchNorm(self.inplanes),
        ME.MinkowskiReLU(inplace=True),
        ME.MinkowskiMaxPooling(kernel_size=2, stride=2, dimension=D),
    )
    # Four residual stages, each at stride 2.
    self.layer1 = self._make_layer(self.BLOCK, self.PLANES[0],
                                   self.LAYERS[0], stride=2)
    self.layer2 = self._make_layer(self.BLOCK, self.PLANES[1],
                                   self.LAYERS[1], stride=2)
    self.layer3 = self._make_layer(self.BLOCK, self.PLANES[2],
                                   self.LAYERS[2], stride=2)
    self.layer4 = self._make_layer(self.BLOCK, self.PLANES[3],
                                   self.LAYERS[3], stride=2)
    # Head: dropout -> stride-3 conv -> BN -> GELU.
    self.conv5 = nn.Sequential(
        ME.MinkowskiDropout(),
        ME.MinkowskiConvolution(self.inplanes,
                                self.inplanes,
                                kernel_size=3,
                                stride=3,
                                dimension=D),
        ME.MinkowskiBatchNorm(self.inplanes),
        ME.MinkowskiGELU(),
    )
    self.glob_pool = ME.MinkowskiGlobalMaxPooling()
    self.final = ME.MinkowskiLinear(self.inplanes, out_channels, bias=True)
def __init__(self, in_feat, out_feat, D):
    """Two stride-2 conv+BN+ReLU stages, global pooling, and a linear head.

    Args:
        in_feat: input feature channels.
        out_feat: output size of the final linear layer.
        D: sparse tensor dimension (passed to the base class).
    """
    super(ExampleNetwork, self).__init__(D)
    stage1 = ME.MinkowskiConvolution(in_channels=in_feat,
                                     out_channels=64,
                                     kernel_size=3,
                                     stride=2,
                                     dilation=1,
                                     has_bias=False,
                                     dimension=D)
    stage2 = ME.MinkowskiConvolution(in_channels=64,
                                     out_channels=128,
                                     kernel_size=3,
                                     stride=2,
                                     dimension=D)
    self.net = nn.Sequential(stage1,
                             ME.MinkowskiBatchNorm(64),
                             ME.MinkowskiReLU(),
                             stage2,
                             ME.MinkowskiBatchNorm(128),
                             ME.MinkowskiReLU(),
                             ME.MinkowskiGlobalPooling(dimension=D),
                             ME.MinkowskiLinear(128, out_feat))
def __init__(self, in_feat, out_feat, D):
    """Same example network with layers held as individual attributes.

    Args:
        in_feat: input feature channels.
        out_feat: output size of the final linear layer.
        D: sparse tensor dimension (passed to the base class).
    """
    super(ExampleNetwork, self).__init__(D)
    # Both convolutions share kernel_size=3, stride=2 at dimension D.
    shared = dict(kernel_size=3, stride=2, dimension=D)
    self.conv1 = ME.MinkowskiConvolution(in_channels=in_feat,
                                         out_channels=64,
                                         dilation=1,
                                         has_bias=False,
                                         **shared)
    self.bn1 = ME.MinkowskiBatchNorm(64)
    self.conv2 = ME.MinkowskiConvolution(in_channels=64,
                                         out_channels=128,
                                         **shared)
    self.bn2 = ME.MinkowskiBatchNorm(128)
    self.pooling = ME.MinkowskiGlobalPooling(dimension=D)
    self.linear = ME.MinkowskiLinear(128, out_feat)
def __init__(self, inc, outc, ks=3, stride=1, D=3):
    """Upsampling block: transpose conv -> BN -> in-place ReLU.

    Args:
        inc: input channels.
        outc: output channels.
        ks: transpose-convolution kernel size.
        stride: upsampling stride.
        D: sparse tensor dimension.
    """
    super(BasicDeconvolutionBlock, self).__init__()
    deconv = ME.MinkowskiConvolutionTranspose(
        inc, outc, kernel_size=ks, stride=stride, dimension=D)
    norm = ME.MinkowskiBatchNorm(outc)
    self.net = nn.Sequential(deconv, norm, ME.MinkowskiReLU(True))
    # Reset the BN affine parameters to identity (weight=1, bias=0).
    nn.init.constant_(norm.bn.weight, 1.0)
    nn.init.constant_(norm.bn.bias, 0.0)
def __init__(self, in_nchannel, out_nchannel, D):
    """Small 3-level UNet with layers held as individual conv/bn attributes.

    Args:
        in_nchannel: input feature channels.
        out_nchannel: channels of the final 1x1 conv.
        D: sparse tensor dimension (passed to the base class).
    """
    super(UNet, self).__init__(D)
    # Encoder: 8 -> 16 -> 32 channels via stride-2 convs.
    self.conv1 = ME.MinkowskiConvolution(
        in_channels=in_nchannel,
        out_channels=8,
        kernel_size=3,
        stride=1,
        dimension=D)
    self.bn1 = ME.MinkowskiBatchNorm(8)
    self.conv2 = ME.MinkowskiConvolution(
        in_channels=8,
        out_channels=16,
        kernel_size=3,
        stride=2,
        dimension=D)
    self.bn2 = ME.MinkowskiBatchNorm(16)
    self.conv3 = ME.MinkowskiConvolution(
        in_channels=16,
        out_channels=32,
        kernel_size=3,
        stride=2,
        dimension=D)
    self.bn3 = ME.MinkowskiBatchNorm(32)
    # Decoder transpose convs. conv5's 32 in-channels presumably come from
    # concatenating a 16-channel skip with conv4's 16-channel output —
    # confirm in forward().
    self.conv4 = ME.MinkowskiConvolutionTranspose(
        in_channels=32,
        out_channels=16,
        kernel_size=3,
        stride=2,
        dimension=D)
    self.bn4 = ME.MinkowskiBatchNorm(16)
    self.conv5 = ME.MinkowskiConvolutionTranspose(
        in_channels=32,
        out_channels=16,
        kernel_size=3,
        stride=2,
        dimension=D)
    self.bn5 = ME.MinkowskiBatchNorm(16)
    # Final 1x1 conv; 24 in-channels presumably = 16 (decoder) + 8 (skip).
    self.conv6 = ME.MinkowskiConvolution(
        in_channels=24,
        out_channels=out_nchannel,
        kernel_size=1,
        stride=1,
        dimension=D)
def network_initialization(self, in_channels, config, D):
    """Build a feature-pyramid decoder: one stride-2 transpose conv per
    level plus a 1x1 feature projection per level.

    NOTE(review): the `D` parameter is never used — every layer hard-codes
    dimension=3. Confirm whether D should be forwarded.

    Args:
        in_channels: sequence of per-level channel counts (coarse to fine).
        config: provides `upsample_feat_size` for the projection convs.
        D: unused (see note above).
    """
    up_kernel_size = 3
    # Upsampling path: each conv_upN doubles resolution and (for N>1) maps
    # level N's channels down to level N-1's. generate_new_coords=True
    # creates output coordinates not present in the input.
    self.conv_up1 = nn.Sequential(
        ME.MinkowskiConvolutionTranspose(
            in_channels[0],
            in_channels[0],
            kernel_size=up_kernel_size,
            stride=2,
            generate_new_coords=True,
            dimension=3), ME.MinkowskiBatchNorm(in_channels[0]),
        ME.MinkowskiELU())
    self.conv_up2 = nn.Sequential(
        ME.MinkowskiConvolutionTranspose(
            in_channels[1],
            in_channels[0],
            kernel_size=up_kernel_size,
            stride=2,
            generate_new_coords=True,
            dimension=3), ME.MinkowskiBatchNorm(in_channels[0]),
        ME.MinkowskiELU())
    self.conv_up3 = nn.Sequential(
        ME.MinkowskiConvolutionTranspose(
            in_channels[2],
            in_channels[1],
            kernel_size=up_kernel_size,
            stride=2,
            generate_new_coords=True,
            dimension=3), ME.MinkowskiBatchNorm(in_channels[1]),
        ME.MinkowskiELU())
    self.conv_up4 = nn.Sequential(
        ME.MinkowskiConvolutionTranspose(
            in_channels[3],
            in_channels[2],
            kernel_size=up_kernel_size,
            stride=2,
            generate_new_coords=True,
            dimension=3), ME.MinkowskiBatchNorm(in_channels[2]),
        ME.MinkowskiELU())
    # 1x1 projections mapping each level to a common feature width.
    self.conv_feat1 = nn.Sequential(
        ME.MinkowskiConvolution(
            in_channels[0],
            config.upsample_feat_size,
            kernel_size=1,
            dimension=3), ME.MinkowskiBatchNorm(config.upsample_feat_size),
        ME.MinkowskiELU())
    self.conv_feat2 = nn.Sequential(
        ME.MinkowskiConvolution(
            in_channels[1],
            config.upsample_feat_size,
            kernel_size=1,
            dimension=3), ME.MinkowskiBatchNorm(config.upsample_feat_size),
        ME.MinkowskiELU())
    self.conv_feat3 = nn.Sequential(
        ME.MinkowskiConvolution(
            in_channels[2],
            config.upsample_feat_size,
            kernel_size=1,
            dimension=3), ME.MinkowskiBatchNorm(config.upsample_feat_size),
        ME.MinkowskiELU())
    self.conv_feat4 = nn.Sequential(
        ME.MinkowskiConvolution(
            in_channels[3],
            config.upsample_feat_size,
            kernel_size=1,
            dimension=3), ME.MinkowskiBatchNorm(config.upsample_feat_size),
        ME.MinkowskiELU())
def get_norm_layer(norm_type, num_feats, bn_momentum=0.05, D=-1):
    """Build a normalization layer from its string code.

    Args:
        norm_type: 'BN' for batch norm or 'IN' for instance norm.
        num_feats: number of feature channels.
        bn_momentum: momentum for the batch-norm variant.
        D: unused; kept for API parity with sibling helpers.

    Raises:
        ValueError: for an unrecognized `norm_type`.
    """
    if norm_type == 'BN':
        return ME.MinkowskiBatchNorm(num_feats, momentum=bn_momentum)
    if norm_type == 'IN':
        return ME.MinkowskiInstanceNorm(num_feats)
    raise ValueError(f'Type {norm_type}, not defined')
def get_conv_block(self, in_channel, out_channel, kernel_size, stride):
    """Return a conv -> BN -> ReLU block at this network's dimension.

    Args:
        in_channel: input feature channels.
        out_channel: output feature channels.
        kernel_size: convolution kernel size.
        stride: convolution stride.
    """
    conv_layer = ME.MinkowskiConvolution(
        in_channel,
        out_channel,
        kernel_size=kernel_size,
        stride=stride,
        dimension=self.D,
    )
    return nn.Sequential(conv_layer,
                         ME.MinkowskiBatchNorm(out_channel),
                         ME.MinkowskiReLU())
def __init__(self, inc, outc, ks=3, stride=1, dilation=1, D=3):
    """Conv -> BN -> ReLU block whose hypercube kernel spans only the first
    three axes (kernel size 1 on the fourth axis).

    Args:
        inc: input channels.
        outc: output channels.
        ks: kernel size on each of the first three axes.
        stride: convolution stride.
        dilation: convolution dilation.
        D: sparse tensor dimension.
    """
    super(BasicConvolutionBlock4d, self).__init__()
    kernel = ME.KernelGenerator(
        kernel_size=(ks, ks, ks, 1),
        dimension=D,
        region_type=ME.RegionType.HYPERCUBE)
    convolution = ME.MinkowskiConvolution(
        inc, outc, dilation=dilation, stride=stride, dimension=D,
        kernel_generator=kernel)
    norm = ME.MinkowskiBatchNorm(outc)
    self.net = nn.Sequential(convolution, norm, ME.MinkowskiReLU(True))
    # Reset the BN affine parameters to identity (weight=1, bias=0).
    nn.init.constant_(norm.bn.weight, 1.0)
    nn.init.constant_(norm.bn.bias, 0.0)