Example #1
    def __init__(self, in_channels, n_filters, stride=1, downsample=None, dilation=1):
        super(residualBlock, self).__init__()

        self.flagReLUInplace = GLOBAL.torch_relu_inplace()

        # For a 3x3 kernel at stride 1, padding equal to the dilation
        # preserves the spatial size.
        if dilation > 1:
            padding = dilation
        else:
            padding = 1
        self.convbnrelu1 = conv2DBatchNormRelu(in_channels, n_filters, 3, stride, padding, bias=False, dilation=dilation)
        self.convbn2 = conv2DBatchNorm(n_filters, n_filters, 3, 1, 1, bias=False)
        self.downsample = downsample
        self.stride = stride
        self.relu = nn.ReLU(inplace=self.flagReLUInplace)
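
The downsample argument is the caller-supplied projection for the skip path. A minimal plain-torch sketch of the same pattern (TinyResidualBlock is a hypothetical stand-in, not part of this repository):

import torch
import torch.nn as nn

# Hypothetical stand-in for residualBlock above, built from plain torch layers.
class TinyResidualBlock(nn.Module):
    def __init__(self, in_ch, out_ch, stride=1):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 3, stride, 1, bias=False),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, 1, 1, bias=False),
            nn.BatchNorm2d(out_ch))
        # The skip path needs a 1x1 projection whenever the shape changes.
        self.downsample = None
        if stride != 1 or in_ch != out_ch:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_ch, out_ch, 1, stride, bias=False),
                nn.BatchNorm2d(out_ch))
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        return self.relu(self.body(x) + identity)

x = torch.randn(1, 16, 32, 32)
print(TinyResidualBlock(16, 32, stride=2)(x).shape)  # torch.Size([1, 32, 16, 16])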
Example #2
    def __init__(self, inCh, outCh, stride=(1,1,1)):
        super(SepConv3DBlock, self).__init__()

        self.flagReLUInplace = GLOBAL.torch_relu_inplace()

        # A projection is needed on the skip path whenever the channel count
        # or the resolution changes.
        if inCh == outCh and stride == (1, 1, 1):
            self.downsample = None
        else:
            self.downsample = ProjFeat3D(inCh, outCh, stride)

        self.conv0 = cm3d.Conv3D( inCh, outCh, s=stride, 
            normLayer=cm3d.FeatureNorm3D(outCh), 
            activation=nn.ReLU(inplace=self.flagReLUInplace) )
        self.conv1 = cm3d.Conv3D_W( outCh, outCh, 
            normLayer=cm3d.FeatureNorm3D(outCh), 
            activation=nn.ReLU(inplace=self.flagReLUInplace) )
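
ProjFeat3D is only instantiated when the identity skip cannot be used. A hypothetical plain-torch stand-in shows the shape contract it has to satisfy (the real ProjFeat3D may be implemented differently):

import torch
import torch.nn as nn

# Hypothetical stand-in for ProjFeat3D: a strided 1x1x1 conv plus norm that
# makes the skip branch match the main branch in channels and resolution.
proj = nn.Sequential(
    nn.Conv3d(16, 32, kernel_size=1, stride=(2, 2, 2), bias=False),
    nn.BatchNorm3d(32))
x = torch.randn(1, 16, 8, 16, 16)
print(proj(x).shape)  # torch.Size([1, 32, 4, 8, 8])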
Example #3
    def __init__(self, 
        nConvs, inCh, interCh, outCh,
        baseStride=(1,1,1), nStrides=1, 
        outputUpSampledFeat=False, pooling=False ):
        super(DecoderBlock, self).__init__()

        # Get the global settings.
        self.flagAlignCorners = GLOBAL.torch_align_corners()
        self.flagReLUInplace  = GLOBAL.torch_relu_inplace()

        # Prepare the list of strides.
        assert( nConvs >= nStrides )
        strideList = [baseStride] * nStrides + [(1,1,1)] * (nConvs - nStrides)

        # Create the convolution layers.
        convs = [ SepConv3DBlock( inCh, interCh, stride=strideList[0] ) ]
        for i in range(1, nConvs):
            convs.append( SepConv3DBlock( interCh, interCh, stride=strideList[i] ) )
        self.entryConvs = WrappedModule( nn.Sequential(*convs) )
        self.append_init_here( self.entryConvs )

        # Classification layer.
        self.classify = WrappedModule(
            nn.Sequential(
                cm3d.Conv3D_W( interCh, interCh, 
                    normLayer=cm3d.FeatureNorm3D(interCh), 
                    activation=nn.ReLU(inplace=self.flagReLUInplace) ), 
                cm3d.Conv3D_W(interCh, outCh, bias=True) ) )
        self.append_init_here(self.classify)

        # Feature up-sample setting.
        self.featUpSampler = None
        if outputUpSampledFeat:
            self.featUpSampler = WrappedModule(
                nn.Sequential(
                    cm3d.Interpolate3D_FixedScale(2),
                    cm3d.Conv3D_W( interCh, interCh//2, 
                        normLayer=cm3d.FeatureNorm3D(interCh//2), 
                        activation=nn.ReLU(inplace=self.flagReLUInplace) ) ) )
            self.append_init_here(self.featUpSampler)

        # Pooling.
        if pooling:
            self.spp = SPP3D( interCh, levels=4 )
            self.append_init_here(self.spp)
        else:
            self.spp = None
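
The stride list front-loads the strided convolutions: the first nStrides blocks down-sample and the remaining nConvs - nStrides run at unit stride. A quick check of the construction (values chosen for illustration):

baseStride, nStrides, nConvs = (2, 2, 2), 1, 3
strideList = [baseStride] * nStrides + [(1, 1, 1)] * (nConvs - nStrides)
print(strideList)  # [(2, 2, 2), (1, 1, 1), (1, 1, 1)]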
Example #4
    def __init__(self,
                 nconvs,
                 inchannelF,
                 channelF,
                 stride=(1, 1, 1),
                 up=False,
                 nstride=1,
                 pool=False):
        super(decoderBlock, self).__init__()

        self.flagAlignCorners = GLOBAL.torch_align_corners()
        self.flagReLUInplace = GLOBAL.torch_relu_inplace()

        self.pool = pool
        stride = [stride] * nstride + [(1, 1, 1)] * (nconvs - nstride)
        self.convs = [sepConv3dBlock(inchannelF, channelF, stride=stride[0])]
        for i in range(1, nconvs):
            self.convs.append(
                sepConv3dBlock(channelF, channelF, stride=stride[i]))
        self.convs = nn.Sequential(*self.convs)

        self.classify = nn.Sequential(
            sepConv3d(channelF, channelF, 3, (1, 1, 1), 1),
            nn.ReLU(inplace=self.flagReLUInplace),
            sepConv3d(channelF, 1, 3, (1, 1, 1), 1, bias=True))

        self.up = False
        if up:
            self.up = nn.Sequential(
                nn.Upsample(scale_factor=(2, 2, 2),
                            mode='trilinear',
                            align_corners=self.flagAlignCorners),
                sepConv3d(channelF, channelF // 2, 3, (1, 1, 1), 1,
                          bias=False), nn.ReLU(inplace=self.flagReLUInplace))

        if pool:
            self.pool_convs = torch.nn.ModuleList([
                sepConv3d(channelF, channelF, 1, (1, 1, 1), 0),
                sepConv3d(channelF, channelF, 1, (1, 1, 1), 0),
                sepConv3d(channelF, channelF, 1, (1, 1, 1), 0),
                sepConv3d(channelF, channelF, 1, (1, 1, 1), 0)
            ])
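
The up branch doubles all three spatial dimensions with trilinear interpolation; a small shape check in plain torch (tensor sizes are illustrative):

import torch
import torch.nn as nn

up = nn.Upsample(scale_factor=(2, 2, 2), mode='trilinear', align_corners=False)
x = torch.randn(1, 8, 4, 16, 16)   # N, C, D, H, W
print(up(x).shape)                 # torch.Size([1, 8, 8, 32, 32])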
Example #5
    def __init__(self, in_channels, pool_sizes, model_name='pspnet', fusion_mode='cat', with_bn=True):
        super(pyramidPooling, self).__init__()

        self.flagAlignCorners = GLOBAL.torch_align_corners()
        self.flagReLUInplace  = GLOBAL.torch_relu_inplace()

        bias = not with_bn

        self.paths = []
        if pool_sizes is None:
            for i in range(4):
                self.paths.append(conv2DBatchNormRelu(in_channels, in_channels, 1, 1, 0, bias=bias, with_bn=with_bn))
        else:
            for i in range(len(pool_sizes)):
                self.paths.append(conv2DBatchNormRelu(in_channels, int(in_channels / len(pool_sizes)), 1, 1, 0, bias=bias, with_bn=with_bn))

        self.path_module_list = nn.ModuleList(self.paths)
        self.pool_sizes = pool_sizes
        self.model_name = model_name
        self.fusion_mode = fusion_mode
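
With pool_sizes given, each path is reduced to in_channels / len(pool_sizes) channels so that 'cat' fusion stays manageable, and bias = not with_bn drops the conv bias whenever BatchNorm follows (BN's affine shift makes it redundant). One pyramid path, sketched with plain torch rather than the repo's conv2DBatchNormRelu:

import torch
import torch.nn as nn
import torch.nn.functional as F

# Pool to a fixed grid, reduce channels with a 1x1 conv, then resize back.
x = torch.randn(1, 128, 32, 32)
reduce_ch = nn.Conv2d(128, 128 // 4, kernel_size=1, bias=False)
pooled = F.adaptive_avg_pool2d(x, output_size=6)
y = F.interpolate(reduce_ch(pooled), size=x.shape[2:], mode='bilinear',
                  align_corners=False)
print(y.shape)  # torch.Size([1, 32, 32, 32])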
Example #6
    def __init__(self, in_channels, n_filters, k_size,  stride, padding, bias=True, dilation=1, with_bn=True):
        super(conv2DBatchNormRelu, self).__init__()

        self.flagTS = GLOBAL.torch_batch_normal_track_stat()
        self.flagReLUInplace = GLOBAL.torch_relu_inplace()

        # dilation == 1 is just a standard convolution, so a single call
        # covers both cases.
        conv_mod = nn.Conv2d(int(in_channels), int(n_filters), kernel_size=k_size,
                             padding=padding, stride=stride, bias=bias, dilation=dilation)

        # Note: despite the class name, the activation is LeakyReLU(0.1).
        if with_bn:
            self.cbr_unit = nn.Sequential(conv_mod,
                                          nn.BatchNorm2d(int(n_filters), track_running_stats=self.flagTS),
                                          nn.LeakyReLU(0.1, inplace=self.flagReLUInplace),)
        else:
            self.cbr_unit = nn.Sequential(conv_mod,
                                          nn.LeakyReLU(0.1, inplace=self.flagReLUInplace),)
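
Both this class and Example #1 set padding equal to the dilation; for a 3x3 kernel at stride 1 that is exactly the padding = dilation * (k - 1) / 2 needed to preserve the spatial size. A quick check:

import torch
import torch.nn as nn

# For a 3x3 kernel at stride 1, padding == dilation preserves H and W.
for d in (1, 2, 4):
    conv = nn.Conv2d(8, 8, kernel_size=3, padding=d, dilation=d)
    print(conv(torch.randn(1, 8, 16, 16)).shape)  # torch.Size([1, 8, 16, 16])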
Example #7
    def __init__(self, nLayers=3, intermediateChannels=8):
        super(HalfSizeExtractor, self).__init__()

        self.flagReLUInplace = GLOBAL.torch_relu_inplace()

        modelList = [ 
            cm.Conv_Half( 3, intermediateChannels, 
                normLayer=cm.FeatureNormalization(intermediateChannels),
                activation=nn.ReLU(inplace=self.flagReLUInplace) ) ]

        for i in range(nLayers):
            # Only the last layer gets feature normalization.
            if i == nLayers - 1:
                modelList.append(
                    cm.Conv_W( intermediateChannels, intermediateChannels, 
                    normLayer=cm.FeatureNormalization(intermediateChannels),
                    activation=nn.ReLU(inplace=self.flagReLUInplace) ) )
            else:
                modelList.append(
                    cm.Conv_W( intermediateChannels, intermediateChannels, 
                    normLayer=None,
                    activation=nn.ReLU(inplace=self.flagReLUInplace) ) )

        self.model = nn.Sequential( *modelList )
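
cm.Conv_Half is presumably a stride-2 convolution, which is what makes this extractor output half-resolution features; a plain-torch stand-in for the shape behavior:

import torch
import torch.nn as nn

# Hypothetical stand-in for cm.Conv_Half: stride 2 halves H and W.
half = nn.Conv2d(3, 8, kernel_size=3, stride=2, padding=1, bias=False)
x = torch.randn(1, 3, 64, 64)
print(half(x).shape)  # torch.Size([1, 8, 32, 32])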
Example #8
def selected_relu(x):
    # return F.selu(x, inplace=False)
    return F.leaky_relu(x, 0.1, inplace=GLOBAL.torch_relu_inplace())
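
The function behaves as an ordinary leaky ReLU with slope 0.1; the check below passes inplace=False directly so it runs without the repo's GLOBAL module:

import torch
import torch.nn.functional as F

x = torch.tensor([-1.0, 0.0, 2.0])
print(F.leaky_relu(x, 0.1, inplace=False))  # tensor([-0.1000, 0.0000, 2.0000])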
Example #9
    def __init__(self, 
        edChannels = [
            [  3, 16, 16, 16, 16 ],
            [ 16, 16, 16, 16, 16 ],
            [ 16, 16, 16, 16, 16 ],
            [ 16, 16, 16, 16, 16 ]
        ],
        freeze=False):
        '''
        edChannels (list of lists): Channel specification for every layer. 
            [ eIn, eOut, dOut, up, out ]
        '''
        # Bind levels locally as well; the original only passed it to super()
        # and then referenced an undefined name.
        levels = [2, 4, 8, 16]
        super(UNetOneHalf, self).__init__(
            levels=levels,
            freeze=freeze)

        N = len(levels)
        assert( N == len(edChannels) ), \
            f'Wrong level and channel specification. levels = {levels}, edChannels = {edChannels}'

        self.flagTS = GLOBAL.torch_batch_normal_track_stat()
        self.flagReLUInplace = GLOBAL.torch_relu_inplace()

        # Encoders.
        self.encoders = nn.ModuleList()
        for i in range(N):
            inCh  = edChannels[i][CH_IDX_E_IN]
            outCh = edChannels[i][CH_IDX_E_OUT]
            self.encoders.append(
                cm.ResidualBlock( inCh, outCh, 
                    stride=2, downsample=
                        cm.Conv( inCh, outCh, k=1, s=2, p=0, 
                            normLayer=cm.FeatureNormalization( outCh ) )
                )
            )

        # Decoders.
        self.decoders = nn.ModuleList()
        for i in range(N-1, -1, -1):
            if ( i == N-1 ):
                inCh = edChannels[i][CH_IDX_E_OUT]
            else:
                # Concatenated with the up-sampled feature from level i+1.
                inCh = edChannels[i][CH_IDX_E_OUT] \
                     + edChannels[i+1][CH_IDX_U_OUT]

            outCh = edChannels[i][CH_IDX_D_OUT]

            self.decoders.append(
                cm.Conv_W( inCh, outCh, 
                    normLayer=cm.FeatureNormalization( outCh ),
                    activation=nn.ReLU(inplace=self.flagReLUInplace) ) )

        self.decoders = self.decoders[::-1]

        # Up-feature layers.
        self.ups = nn.ModuleList()
        for i in range( N-1, 0, -1 ):
            inCh  = edChannels[i][CH_IDX_D_OUT]
            outCh = edChannels[i][CH_IDX_U_OUT]
            # nn.ModuleList.append() takes a single module, so wrap the
            # interpolation and convolution in an nn.Sequential.
            self.ups.append(
                nn.Sequential(
                    cm.Interpolate2D_FixedScale(2),
                    cm.Conv_W( inCh, outCh, 
                        normLayer=cm.FeatureNormalization( outCh ),
                        activation=nn.ReLU(inplace=self.flagReLUInplace) ) ) )

        self.ups = self.ups[::-1]

        # Final layers.
        self.finals = nn.ModuleList()
        for i in range( N-1, -1, -1 ):
            inCh  = edChannels[i][CH_IDX_U_OUT]
            outCh = edChannels[i][CH_IDX_F_OUT]
            self.finals.append( 
                cm.Conv_W( inCh, outCh, 
                    normLayer=cm.FeatureNormalization( outCh ),
                    activation=nn.ReLU(inplace=self.flagReLUInplace) ) )

        self.finals = self.finals[::-1]

        # Middle.
        self.middle = cm.ResidualBlock(
            edChannels[-1][CH_IDX_E_OUT], edChannels[-1][CH_IDX_E_OUT], 
                lastActivation=nn.ReLU(inplace=self.flagReLUInplace) )
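
The CH_IDX_* constants are defined elsewhere in the module. Assuming they follow the docstring order [eIn, eOut, dOut, up, out], they would look like this (hypothetical reconstruction):

# Hypothetical definitions matching the docstring order [eIn, eOut, dOut, up, out].
CH_IDX_E_IN, CH_IDX_E_OUT, CH_IDX_D_OUT, CH_IDX_U_OUT, CH_IDX_F_OUT = range(5)

spec = [3, 16, 16, 16, 16]
print(spec[CH_IDX_E_IN], spec[CH_IDX_F_OUT])  # 3 16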