Example #1
def build_conv_block(dim, norm_layer, use_dropout, use_bias):
    """
    Construct a convolutional block.

    :param dim: the number of channels in the conv layer.
    :type dim: int
    :param norm_layer: normalization layer.
    :type norm_layer: :class:`nn.Module`
    :param use_dropout: whether to use dropout layers.
    :type use_dropout: bool
    :param use_bias: whether the conv layer uses a bias term.
    :type use_bias: bool
    :return: a conv block consisting of conv, normalization, and ReLU non-linearity layers.
    :rtype: :class:`nn.Sequential`
    """
    conv_block = []
    conv_block += [nn.ReplicationPad3d(1)]
    conv_block += [nn.Conv3d(dim, dim, kernel_size=3, bias=use_bias)]
    conv_block += [nn.ReLU(True)]

    if use_dropout:
        conv_block += [nn.Dropout(0.5)]

    conv_block += [nn.ReplicationPad3d(1)]
    conv_block += [nn.Conv3d(dim, dim, kernel_size=3, bias=use_bias)]
    conv_block += [norm_layer(dim)]

    return nn.Sequential(*conv_block)
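
A minimal smoke test for this block (a sketch, assuming the usual import torch; import torch.nn as nn): the replication padding compensates for the kernel_size=3 convolutions, so the spatial size is preserved.

block = build_conv_block(dim=16, norm_layer=nn.InstanceNorm3d,
                         use_dropout=False, use_bias=True)
x = torch.randn(2, 16, 8, 8, 8)  # (N, C, D, H, W)
print(block(x).shape)            # torch.Size([2, 16, 8, 8, 8])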
Example #2
    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        conv_block = []
        p = 0
        if padding_type == 'reflect':
            # no ReflectionPad3d in this PyTorch version, so fall back to replication
            conv_block += [nn.ReplicationPad3d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad3d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)

        conv_block += [nn.Conv3d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                       norm_layer(dim),
                       nn.ReLU(True)]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]

        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReplicationPad3d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad3d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
        conv_block += [nn.Conv3d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                       norm_layer(dim)]

        return nn.Sequential(*conv_block)
Example #3
def restrctuctre3d(nf0, norm, inplace=False):

    tmp = nn.Sequential(
        norm(nf0 * 1, affine=True),
        nn.ReLU(inplace),
        # nn.Dropout3d(0.1, inplace),
        nn.ReplicationPad3d(1),
        nn.Conv3d(nf0 * 1,
                  nf0 * 1,
                  kernel_size=[3, 3, 3],
                  padding=0,
                  stride=[1, 1, 1],
                  bias=False),
        norm(nf0 * 1, affine=True),
        nn.ReLU(inplace),
        # nn.Dropout3d(0.1, inplace),
        nn.ReplicationPad3d(1),
        nn.Conv3d(nf0 * 1,
                  nf0 * 1,
                  kernel_size=[3, 3, 3],
                  padding=0,
                  stride=[1, 1, 1],
                  bias=False),
    )

    return tmp
Example #4
 def __init__(self, nf0, inplace=False):
     super(ResConv3D, self).__init__()
     
     self.tmp = nn.Sequential(
             
             nn.ReplicationPad3d(1),
             nn.Conv3d(nf0 * 1,
                       nf0 * 1,
                       kernel_size=[3, 3, 3],
                       padding=0,
                       stride=[1, 1, 1],
                       bias=True),
             
             nn.LeakyReLU(negative_slope=0.2, inplace=inplace),
             # nn.Dropout3d(0.1, inplace),
             
             nn.ReplicationPad3d(1),
             nn.Conv3d(nf0 * 1,
                       nf0 * 1,
                       kernel_size=[3, 3, 3],
                       padding=0,
                       stride=[1, 1, 1],
                       bias=True),
     )
     
     self.inplace = inplace
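
The class stores self.inplace but the forward pass is not shown; a plausible completion (an assumption, not the original repo's code, and requiring import torch.nn.functional as F) applies a residual connection around the two padded convolutions:

 def forward(self, x):
     # hypothetical skip connection; the pad/conv pairs keep the spatial size
     return F.leaky_relu(x + self.tmp(x), negative_slope=0.2, inplace=self.inplace)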
Example #5
    def __init__(self, c_len_in, c_len_out, opt=None):

        super(Unet3D, self).__init__()

        # (BV,8,32,48,32) | conv3d(k3,s1,i8,o8,nb), BN3d, LeakyReLU(0.2) | (BV,8,32,48,32) ------> skip-0: (BV,8,32,48,32)
        c_len_1 = 8
        self.conv3d_pre_process = nn.Sequential(
            Conv3dSame(c_len_in, c_len_1, kernel_size=3, bias=False),
            nn.BatchNorm3d(c_len_1, affine=True), nn.LeakyReLU(0.2, True))

        # (BV,8,32,48,32) | conv3d(k4,s2,i8,o16,nb), BN3d, LeakyReLU(0.2) | (BV,16,16,24,16) ------> skip-1: (BV,16,16,24,16)
        c_len_2 = 16
        self.conv3d_enc_1 = nn.Sequential(
            nn.ReplicationPad3d(1),
            nn.Conv3d(c_len_1,
                      c_len_2,
                      kernel_size=4,
                      padding=0,
                      stride=2,
                      bias=False), nn.BatchNorm3d(c_len_2, affine=True),
            nn.LeakyReLU(0.2, True))

        # (BV,16,16,24,16) | conv3d(k4,s2,i16,o32,b), LeakyReLU(0.2) | (BV,32,8,12,8)
        c_len_3 = 32
        self.conv3d_enc_2 = nn.Sequential(
            nn.ReplicationPad3d(1),
            nn.Conv3d(c_len_2,
                      c_len_3,
                      kernel_size=4,
                      padding=0,
                      stride=2,
                      bias=True), nn.LeakyReLU(0.2, True))

        # (BV,32,8,12,8) | DeConv3d(k4,s2,i32,o16,b), ReLU | (BV,16,16,24,16)
        self.deconv3d_dec_2 = nn.Sequential(
            nn.ConvTranspose3d(c_len_3,
                               c_len_2,
                               kernel_size=4,
                               stride=2,
                               padding=1,
                               bias=True), nn.ReLU(True))

        # (BV,16+16,16,24,16) | DeConv3d(k4,s2,i32,o8,nb), BN3d, ReLU | (BV,8,32,48,32) <------ skip-1: (BV,16,16,24,16)
        self.deconv3d_dec_1 = nn.Sequential(
            nn.ConvTranspose3d(c_len_2 * 2,
                               c_len_1,
                               kernel_size=4,
                               stride=2,
                               padding=1,
                               bias=False), nn.BatchNorm3d(c_len_1,
                                                           affine=True),
            nn.ReLU(True))

        # (BV,8+8,32,48,32) | Conv3d(k3,s1,i16,o8,nb), BN3d, ReLU | (BV,8,32,48,32) <------ skip-0: (BV,8,32,48,32)
        self.conv3d_final_process = nn.Sequential(
            Conv3dSame(c_len_1 * 2, c_len_out, kernel_size=3, bias=False),
            nn.BatchNorm3d(c_len_out, affine=True), nn.ReLU(True))
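
The inline shape comments spell out the skip wiring; a forward consistent with them (a sketch, not necessarily the original implementation) concatenates each decoder output with its matching encoder feature:

    def forward(self, x):
        skip_0 = self.conv3d_pre_process(x)   # (BV, 8, 32, 48, 32)
        skip_1 = self.conv3d_enc_1(skip_0)    # (BV, 16, 16, 24, 16)
        z = self.conv3d_enc_2(skip_1)         # (BV, 32, 8, 12, 8)
        d2 = self.deconv3d_dec_2(z)           # (BV, 16, 16, 24, 16)
        d1 = self.deconv3d_dec_1(torch.cat([d2, skip_1], dim=1))
        return self.conv3d_final_process(torch.cat([d1, skip_0], dim=1))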
Example #6
 def __init__(self, n_input, n_output):
     super(BlockB, self).__init__()
     # the explicit replication pads supply the k=3 padding, so the convs use padding=0
     self.pad1 = nn.ReplicationPad3d((1, 1, 1, 1, 1, 1))
     self.left1 = nn.Conv3d(n_input, n_output, 3, 1, padding=0)
     self.pad2 = nn.ReplicationPad3d((1, 1, 1, 1, 1, 1))
     self.left2 = nn.Conv3d(n_output, n_output, 3, 1, padding=0)
     self.pad3 = nn.ReplicationPad3d((1, 1, 1, 1, 1, 1))
     self.right = nn.Conv3d(n_input, n_output, 3, 1, padding=0)
     self.relu = nn.ReLU()
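
BlockB's forward is omitted; given the left/right naming, a two-branch residual combination is the likely intent (a hedged guess, not the repo's code):

 def forward(self, x):
     left = self.left2(self.pad2(self.relu(self.left1(self.pad1(x)))))
     right = self.right(self.pad3(x))
     return self.relu(left + right)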
Example #7
    def __init__(self):

        super(subplanar_pdd, self).__init__()
        self.alpha = nn.Parameter(torch.Tensor([1, .1, 1, 1, .1, 5]))  # .cuda()

        self.pad1 = nn.ReplicationPad3d((0, 0, 2, 2, 2, 2))  # .cuda()
        self.avg1 = nn.AvgPool3d((3, 3, 1), stride=1)  # .cuda()
        self.max1 = nn.MaxPool3d((3, 3, 1), stride=1)  # .cuda()
        self.pad2 = nn.ReplicationPad3d((0, 0, 2, 2, 2, 2))  # .cuda()
Example #8
    def __init__(self):

        super(deeds, self).__init__()
        self.alpha = nn.Parameter(torch.Tensor([1, .1, 1, 1, .1, 1]))  #.cuda()

        self.pad1 = nn.ReplicationPad3d(3)  #.cuda()
        self.avg1 = nn.AvgPool3d(3, stride=1)  #.cuda()
        self.max1 = nn.MaxPool3d(3, stride=1)  #.cuda()
        self.pad2 = nn.ReplicationPad3d(2)  # .cuda()
Example #9
    def __init__(self, input_channels):
        super(Kurvature3D, self).__init__()
        dx_0 = kernels3D['dx0']()
        dy_0 = kernels3D['dy0']()
        dz_0 = kernels3D['dz0']()
        # central-difference pads: one replicated cell on both sides of each axis
        self.padx_0 = nn.ReplicationPad3d(list(reversed([1, 1, 0, 0, 0, 0])))
        self.pady_0 = nn.ReplicationPad3d(list(reversed([0, 0, 1, 1, 0, 0])))
        self.padz_0 = nn.ReplicationPad3d(list(reversed([0, 0, 0, 0, 1, 1])))
        self.conv_dx_0 = Convolve(dx_0, input_channels, padding=0)
        self.conv_dy_0 = Convolve(dy_0, input_channels, padding=0)
        self.conv_dz_0 = Convolve(dz_0, input_channels, padding=0)

        dx_bf = kernels3D['dx+-']()
        dy_bf = kernels3D['dy+-']()
        dz_bf = kernels3D['dz+-']()
        # forward-difference pads: one replicated cell on one side of each axis
        self.padx_f = nn.ReplicationPad3d(list(reversed([1, 0, 0, 0, 0, 0])))
        self.pady_f = nn.ReplicationPad3d(list(reversed([0, 0, 1, 0, 0, 0])))
        self.padz_f = nn.ReplicationPad3d(list(reversed([0, 0, 0, 0, 1, 0])))

        # backward-difference pads: the opposite side of each axis
        self.padx_b = nn.ReplicationPad3d(list(reversed([0, 1, 0, 0, 0, 0])))
        self.pady_b = nn.ReplicationPad3d(list(reversed([0, 0, 0, 1, 0, 0])))
        self.padz_b = nn.ReplicationPad3d(list(reversed([0, 0, 0, 0, 0, 1])))
        self.conv_dx_b = Convolve(dx_bf, input_channels, padding=0)
        self.conv_dx_f = self.conv_dx_b
        self.conv_dy_b = Convolve(dy_bf, input_channels, padding=0)
        self.conv_dy_f = self.conv_dy_b
        self.conv_dz_b = Convolve(dz_bf, input_channels, padding=0)
        self.conv_dz_f = self.conv_dz_b
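
A note on the list(reversed([...])) idiom used above: nn.ReplicationPad3d takes its 6-tuple ordered from the last tensor dimension outward, i.e. (W_left, W_right, H_top, H_bottom, D_front, D_back), so a spec written first-dimension-first has to be reversed. A quick check:

import torch
import torch.nn as nn

x = torch.zeros(1, 1, 2, 3, 4)                 # (N, C, D, H, W)
pad = nn.ReplicationPad3d((1, 0, 0, 0, 0, 0))  # pads only the left of W
print(pad(x).shape)                            # torch.Size([1, 1, 2, 3, 5])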
Example #10
 def __init__(self, input_channels, method='0'):  # '0': central, '+': forward, '-': backward difference
     super(Gradient3D, self).__init__()
     if method == '0':
         dx = kernels3D['dx0']()
         dy = kernels3D['dy0']()
         dz = kernels3D['dz0']()
         self.padx = nn.ReplicationPad3d(list(reversed([1, 1, 0, 0, 0, 0])))
         self.pady = nn.ReplicationPad3d(list(reversed([0, 0, 1, 1, 0, 0])))
         self.padz = nn.ReplicationPad3d(list(reversed([0, 0, 0, 0, 1, 1])))
     if method == '+':
         dx = kernels3D['dx+-']()
         dy = kernels3D['dy+-']()
          dz = kernels3D['dz+-']()
         self.padx = nn.ReplicationPad3d(list(reversed([1, 0, 0, 0, 0, 0])))
         self.pady = nn.ReplicationPad3d(list(reversed([0, 0, 1, 0, 0, 0])))
         self.padz = nn.ReplicationPad3d(list(reversed([0, 0, 0, 0, 1, 0])))
     if method == '-':
         dx = kernels3D['dx+-']()
         dy = kernels3D['dy+-']()
          dz = kernels3D['dz+-']()
         self.padx = nn.ReplicationPad3d(list(reversed([0, 1, 0, 0, 0, 0])))
         self.pady = nn.ReplicationPad3d(list(reversed([0, 0, 0, 1, 0, 0])))
         self.padz = nn.ReplicationPad3d(list(reversed([0, 0, 0, 0, 0, 1])))
     self.conv_dx = Convolve(dx, input_channels, padding=0)
     self.conv_dy = Convolve(dy, input_channels, padding=0)
     self.conv_dz = Convolve(dz, input_channels, padding=0)
Example #11
    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm3d, use_dropout=False, n_blocks=9, padding_type='replicate'):
        """Construct a Resnet-based generator
        Parameters:
            input_nc (int)      -- the number of channels in input images
            output_nc (int)     -- the number of channels in output images
            ngf (int)           -- the number of filters in the last conv layer
            norm_layer          -- normalization layer
            use_dropout (bool)  -- if use dropout layers
            n_blocks (int)      -- the number of ResNet blocks
            padding_type (str)  -- the name of padding layer in conv layers: reflect | replicate | zero
        """
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm3d
        else:
            use_bias = norm_layer == nn.InstanceNorm3d

        # model = [nn.ReflectionPad2d(3),
        #          nn.Conv3d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
        #          norm_layer(ngf),
        #          nn.ReLU(True)]

        model = [nn.ReplicationPad3d(3),
                 nn.Conv3d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
                 norm_layer(ngf),
                 nn.ReLU(True)]

        n_downsampling = 2
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2 ** i
            model += [nn.Conv3d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
                      norm_layer(ngf * mult * 2),
                      nn.ReLU(True)]

        mult = 2 ** n_downsampling
        for i in range(n_blocks):       # add ResNet blocks

            model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]

        for i in range(n_downsampling):  # add upsampling layers
            mult = 2 ** (n_downsampling - i)
            model += [nn.ConvTranspose3d(ngf * mult, int(ngf * mult / 2),
                                         kernel_size=3, stride=2,
                                         padding=1, output_padding=1,
                                         bias=use_bias),
                      norm_layer(int(ngf * mult / 2)),
                      nn.ReLU(True)]
        # model += [nn.ReflectionPad2d(3)]
        model += [nn.ReplicationPad3d(3)]
        model += [nn.Conv3d(ngf, output_nc, kernel_size=7, padding=0)]
        model += [nn.Tanh()]


        self.model = nn.Sequential(*model)
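
A quick smoke test for this generator (a sketch; assumes the ResnetBlock from the earlier examples plus import functools, torch, and torch.nn as nn). With two stride-2 downsamplings, input sizes divisible by 4 are reconstructed exactly:

net = ResnetGenerator(input_nc=1, output_nc=1, ngf=32, n_blocks=2)
x = torch.randn(1, 1, 32, 32, 32)  # (N, C, D, H, W)
print(net(x).shape)                # torch.Size([1, 1, 32, 32, 32])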
Example #12
    def __init__(self,
                 input_nc,
                 output_nc,
                 depth,
                 ngf=64,
                 norm_layer=nn.BatchNorm3d,
                 use_naive=False):
        super(SrcnnGenerator3d, self).__init__()

        use_bias = True
        downconv_ab = [
            nn.ReplicationPad3d(1),
            nn.Conv3d(input_nc,
                      ngf,
                      kernel_size=3,
                      stride=1,
                      padding=0,
                      bias=use_bias)
        ]
        downconv_ba = [
            nn.ReplicationPad3d(1),
            nn.Conv3d(input_nc,
                      ngf,
                      kernel_size=3,
                      stride=1,
                      padding=0,
                      bias=use_bias)
        ]

        core = []
        for _ in range(depth):
            core += [RevBlock3d(ngf, use_bias, norm_layer, use_naive)]

        upconv_ab = [
            nn.Conv3d(ngf,
                      output_nc,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=use_bias)
        ]
        upconv_ba = [
            nn.Conv3d(ngf,
                      output_nc,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=use_bias)
        ]

        self.downconv_ab = nn.Sequential(*downconv_ab)
        self.downconv_ba = nn.Sequential(*downconv_ba)
        self.core = nn.ModuleList(core)
        self.upconv_ab = nn.Sequential(*upconv_ab)
        self.upconv_ba = nn.Sequential(*upconv_ba)
Example #13
 def __init__(self, in_channels, out_channels, leaky=False):
     super().__init__()
     self.double_conv = nn.Sequential(
         nn.ReplicationPad3d((1, 1, 1, 1, 1, 1)),
         nn.Conv3d(in_channels, out_channels, kernel_size=3, padding=0),
         nn.BatchNorm3d(out_channels),
         nn.ReLU(inplace=True) if leaky is False else nn.LeakyReLU(negative_slope=0.2, inplace=True),
         nn.ReplicationPad3d((1, 1, 1, 1, 1, 1)),
         nn.Conv3d(out_channels, out_channels, kernel_size=3, padding=0),
         nn.BatchNorm3d(out_channels),
         nn.ReLU(inplace=True) if leaky is False else nn.LeakyReLU(negative_slope=0.2, inplace=True)
     )
Example #14
def MINDSSC(img, radius=2, dilation=2):
    # see http://mpheinrich.de/pub/miccai2013_943_mheinrich.pdf for details on the MIND-SSC descriptor

    # kernel size
    kernel_size = radius * 2 + 1

    # define start and end locations for self-similarity pattern
    six_neighbourhood = torch.Tensor([[0, 1, 1], [1, 1, 0], [1, 0, 1],
                                      [1, 1, 2], [2, 1, 1], [1, 2, 1]]).long()

    # squared distances
    dist = pdist_squared(six_neighbourhood.t().unsqueeze(0)).squeeze(0)

    # define comparison mask
    x, y = torch.meshgrid(torch.arange(6), torch.arange(6))
    mask = ((x > y).view(-1) & (dist == 2).view(-1))

    # build kernel
    idx_shift1 = six_neighbourhood.unsqueeze(1).repeat(1, 6,
                                                       1).view(-1, 3)[mask, :]
    idx_shift2 = six_neighbourhood.unsqueeze(0).repeat(6, 1,
                                                       1).view(-1, 3)[mask, :]
    mshift1 = torch.zeros(12, 1, 3, 3, 3).cuda()
    mshift1.view(-1)[torch.arange(12) * 27 + idx_shift1[:, 0] * 9 +
                     idx_shift1[:, 1] * 3 + idx_shift1[:, 2]] = 1
    mshift2 = torch.zeros(12, 1, 3, 3, 3).cuda()
    mshift2.view(-1)[torch.arange(12) * 27 + idx_shift2[:, 0] * 9 +
                     idx_shift2[:, 1] * 3 + idx_shift2[:, 2]] = 1
    rpad1 = nn.ReplicationPad3d(dilation)
    rpad2 = nn.ReplicationPad3d(radius)

    # compute patch-ssd
    ssd = F.avg_pool3d(rpad2(
        (F.conv3d(rpad1(img), mshift1, dilation=dilation) -
         F.conv3d(rpad1(img), mshift2, dilation=dilation))**2),
                       kernel_size,
                       stride=1)

    # MIND equation
    mind = ssd - torch.min(ssd, 1, keepdim=True)[0]
    mind_var = torch.mean(mind, 1, keepdim=True)
    mind_var_mean = mind_var.mean().cpu().data
    mind_var = torch.clamp(mind_var, mind_var_mean * 0.001,
                           mind_var_mean * 1000)
    mind /= mind_var
    mind = torch.exp(-mind)

    # permute to have the same ordering as the C++ code
    perm = torch.Tensor([6, 8, 1, 11, 2, 10, 0, 7, 9, 4, 5, 3]).long()
    mind = mind[:, perm, :, :, :]

    return mind
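
A minimal call (a sketch; assumes a CUDA device, torch.nn.functional as F, and the pdist_squared helper this function references): each voxel of a single-channel volume gets a 12-channel descriptor of the same spatial size.

img = torch.randn(1, 1, 64, 64, 64).cuda()
mind = MINDSSC(img, radius=2, dilation=2)
print(mind.shape)  # torch.Size([1, 12, 64, 64, 64])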
Example #15
 def __init__(self, dim):
     super(C3dResNetBlock, self).__init__()
     conv_block = []
     conv_block += [
         nn.ReplicationPad3d(1),
         nn.Conv3d(dim, dim, kernel_size=3),
         nn.InstanceNorm3d(dim),
         nn.ReLU(True),
         nn.ReplicationPad3d(1),
         nn.Conv3d(dim, dim, kernel_size=3),
         nn.InstanceNorm3d(dim)
     ]
     self.conv_block = nn.Sequential(*conv_block)
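
The residual forward is not shown; the conventional completion (hedged, but standard for this block shape) adds the block output back to its input:

 def forward(self, x):
     # identity shortcut; the replication pads keep spatial dimensions equal
     return x + self.conv_block(x)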
Example #16
 def __init__(self, ic:int, oc:int, scale_factor:Union[List[int],int]=2, full:bool=False, k:int=1):
     super().__init__()
     if isinstance(scale_factor, int): scale_factor = [scale_factor] * 3
     self.sf = scale_factor
     sf = (scale_factor[0] * scale_factor[1] * scale_factor[2])
     pad = k // 2 if isinstance(k, int) else tuple([ks // 2 for p in zip(reversed(k), reversed(k)) for ks in p])
     if isinstance(k, int): self.pad = None if k < 3 else nn.ReplicationPad3d(pad)
     if isinstance(k, tuple): self.pad = None if all([p == 0 for p in pad]) else nn.ReplicationPad3d(pad)
     self.conv = nn.Conv3d(ic, oc*sf, k, bias=False)
     self.full = full
     if full:
         self.bn = nn.BatchNorm3d(oc)
         self.act = nn.ReLU(inplace=True)
Example #17
    def build_conv_block(self, dim, use_bias, norm_layer):
        conv_block = []
        conv_block += [nn.ReplicationPad3d(1)]
        conv_block += [
            nn.Conv3d(dim, dim, kernel_size=3, padding=0, bias=use_bias)
        ]
        conv_block += [norm_layer(dim)]
        conv_block += [nn.ReLU(True)]
        conv_block += [nn.ReplicationPad3d(1)]
        conv_block += [
            ZeroInit(dim, dim, kernel_size=3, padding=0, bias=use_bias)
        ]

        return nn.Sequential(*conv_block)
Example #18
 def get_padding(self, padding_type):
     """ Takes the input padding type and returns the appropiate padding
     layer and the ammount of padding to use in convolutional layer"""
     p, p_layer = 0, []  # Ammount of padding and type of layer
     if padding_type == 'reflect':
         p_layer = [nn.ReplicationPad3d(1)]
     elif padding_type == 'replicate':
         p_layer = [nn.ReplicationPad3d(1)]
     elif padding_type == 'zero':  # If using 'zero', don't add padding layer
         p = 1
     else:
         raise NotImplementedError(
             f"padding {padding_type} is not implemented")
     return p_layer, p
Example #19
    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout,
                         use_bias):
        """Construct a convolutional block.

        Parameters:
            dim (int)           -- the number of channels in the conv layer.
            padding_type (str)  -- the name of padding layer: reflect | replicate | zero
            norm_layer          -- normalization layer
            use_dropout (bool)  -- whether to use dropout layers.
            use_bias (bool)     -- whether the conv layer uses a bias term

        Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
        """
        conv_block = []
        p = 0
        if padding_type == 'reflect':
            conv_block += [ReflectionPad3d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad3d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)

        conv_block += [
            nn.Conv3d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
            norm_layer(dim),
            nn.ReLU(True)
        ]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]

        p = 0
        if padding_type == 'reflect':
            conv_block += [ReflectionPad3d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad3d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)
        conv_block += [
            nn.Conv3d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
            norm_layer(dim)
        ]

        return nn.Sequential(*conv_block)
Example #20
 def __init__(self, num_classes=157):
     super(InceptionUp3D, self).__init__()
     self.conv1up = BasicConvUp3d(64, 3, (7, 7, 7), (2, 2, 2), padding=(3, 3, 3))
     self.upsample1 = nn.Upsample(scale_factor=(1, 2, 2), mode='trilinear')
     self.conv2up = BasicConvUp3d(64, 64, (1, 1, 1))
     #self.conv3up = BasicConvUp3d(64, 64, (3, 3, 3))
     self.upsample2 = nn.Upsample(scale_factor=(1, 2, 2), mode='trilinear')
     #self.inc1up = InceptionUp([192, 64, 96, 128, 16, 32, 32])
     self.inc1up = InceptionUp([192, 16, 96, 32, 16, 8, 8])
     self.inc2up = InceptionUp([256, 32, 32, 64, 32, 64, 32])
     self.upsample3 = nn.Upsample(scale_factor=(2, 2, 2), mode='trilinear')
     #self.inc3up = InceptionUp([480, 192, 96, 208, 16, 48, 64])
     self.inc3up = InceptionUp([480, 64, 96, 128, 16, 32, 32])
     self.inc4up = InceptionUp([512, 128, 128, 192, 32, 96, 64])
     #self.upsample5 = nn.Upsample(scale_factor=(2, 2, 2), mode='trilinear')
     #self.inc4up = InceptionUp([512, 160, 112, 224, 24, 64, 64])
     self.inc5up = InceptionUp([512, 128, 128, 256, 24, 64, 64])
     self.inc6up = InceptionUp([512, 128, 128, 256, 24, 64, 64])
     #self.inc6up = InceptionUp([512, 112, 144, 288, 32, 64, 64])
     self.inc7up = InceptionUp([528, 128, 128, 256, 24, 64, 64])
     #self.inc7up = InceptionUp([528, 256, 160, 320, 32, 128, 128])
     self.upsample4 = nn.Upsample(scale_factor=(2, 2, 2), mode='trilinear')
     self.inc8up = InceptionUp([832, 112, 144, 288, 32, 64, 64])
     #self.inc8up = InceptionUp([832, 256, 160, 320, 32, 128, 128])
     self.inc9up = InceptionUp([832, 256, 160, 320, 32, 128, 128])
     self.inc10up = InceptionUp([1024, 256, 160, 320, 32, 128, 128])
     self.padding = nn.ReplicationPad3d((1, 1, 1, 1, 0, 0))
     self.aapadding = nn.ReplicationPad3d((1, 1, 1, 1, 1, 1))
     #self.inc9up = InceptionUp([832, 384, 192, 384, 48, 128, 128])
     self.tpadding = nn.ReplicationPad3d((0, 0, 0, 0, 0, 1))
     self.spadding = nn.ReplicationPad3d((0, 1, 0, 1, 0, 0))
     self.apadding = nn.ReplicationPad3d((0, 1, 0, 1, 0, 1))
     self.attnout1 = nn.Conv3d(1024, 1, (3, 3, 3), padding=(1, 1, 1))
     self.attnout2 = nn.Conv3d(832, 1, (3, 3, 3), padding=(1, 1, 1))
     self.attnout3 = nn.Conv3d(512, 1, (3, 3, 3), padding=(1, 1, 1))
     self.attnout4 = nn.Conv3d(256, 1, (3, 3, 3), padding=(1, 1, 1))
     self.attnout5 = nn.Conv3d(64, 1, (3, 3, 3), padding=(1, 1, 1))
     self.refine1 = FactoredRefinement(832)
     self.refine2 = FactoredRefinement(832)
     self.refine3 = FactoredRefinement(528)
     self.refine4 = FactoredRefinement(512)
     self.refine5 = FactoredRefinement(512)
     self.refine6 = FactoredRefinement(512)
     self.refine7 = FactoredRefinement(480)
     self.refine8 = FactoredRefinement(256)
     self.refine9 = FactoredRefinement(192)
     self.refine10 = FactoredRefinement(64)
     self.refine11 = FactoredRefinement(64)
Example #21
def conv3d_norm_act(in_planes, out_planes, kernel_size=(3, 3, 3), stride=1,
                    dilation=(1, 1, 1), padding=(1, 1, 1), bias=True,
                    pad_mode='rep', norm_mode='', act_mode='', return_list=False):

    if isinstance(padding, int):
        pad_mode = pad_mode if padding != 0 else 'zeros'
    else:
        pad_mode = pad_mode if max(padding) != 0 else 'zeros'

    if pad_mode in ['zeros','circular']:
        layers = [nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size,
                     stride=stride, padding=padding, padding_mode=pad_mode, dilation=dilation, bias=bias)] 
    elif pad_mode=='rep':
        # the size of the padding should be a 6-tuple        
        padding = tuple([x for x in padding for _ in range(2)][::-1])
        layers = [nn.ReplicationPad3d(padding),
                  nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size,
                     stride=stride, padding=0, dilation=dilation, bias=bias)]
    else:
        raise ValueError('Unknown padding option {}'.format(pad_mode))

    layers += get_layer_norm(out_planes, norm_mode, 3)
    layers += get_layer_act(act_mode)
    if return_list:
        return layers
    else:
        return nn.Sequential(*layers)
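
With pad_mode='rep', the per-axis padding is expanded into the 6-tuple nn.ReplicationPad3d expects and the convolution itself runs unpadded. A hedged usage sketch (assuming get_layer_norm and get_layer_act return empty lists for the '' defaults):

block = conv3d_norm_act(8, 16, pad_mode='rep')
# -> Sequential(ReplicationPad3d((1, 1, 1, 1, 1, 1)), Conv3d(8, 16, kernel_size=3, padding=0))
x = torch.randn(1, 8, 16, 16, 16)
print(block(x).shape)  # torch.Size([1, 16, 16, 16, 16])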
Example #22
    def __init__(self):
        super(Conv3d_convLSTM, self).__init__()
        self.pad = nn.ReplicationPad3d((1,1,1,1,0,0))

        self.conv_time = nn.Conv3d(1, 32, (6, 1, 1))
        self.conv_spat = nn.Conv3d(32, 32, (1, 3, 3))
        self.pool_time = nn.AvgPool3d(kernel_size=(3, 1, 1), stride=(3, 1, 1))
        self.batchnorm = nn.BatchNorm3d(32)       

        self.conv_time2 = nn.Conv3d(32, 64, (6, 1, 1))
        self.conv_spat2 = nn.Conv3d(64, 64, (1, 3, 3))
        self.pool_time2 = nn.AvgPool3d(kernel_size=(3, 1, 1), stride=(3, 1, 1))
        self.batchnorm2 = nn.BatchNorm3d(64)       

        self.conv_time3 = nn.Conv3d(64, 128, (6, 1, 1))
        self.conv_spat3 = nn.Conv3d(128, 128, (1, 3, 3))
        self.pool_time3 = nn.AvgPool3d(kernel_size=(3, 1, 1), stride=(3, 1, 1))       
        self.batchnorm3 = nn.BatchNorm3d(128)       

        self.convlstm = NlayersSeqConvLSTM(input_channels=128,
                                hidden_channels=[256],
                                kernel_sizes=[3])

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(0.5)
        self.linear = nn.Linear(256, 4)
Example #23
    def __init__(self,
                 dim,
                 norm_layer='batch',
                 kernel=3,
                 use_bias=False,
                 act=Mish()):
        super(ResnetBlock, self).__init__()
        conv_block = []
        p = 0

        conv_block += [
            nn.Conv3d(dim, 64, kernel_size=1, padding=p, bias=use_bias),
            select_norm(64, norm_layer), act
        ]
        if kernel == 3:
            conv_block += [nn.ReplicationPad3d(1)]

        conv_block += [
            nn.Conv3d(64, 64, kernel_size=kernel, padding=p, bias=use_bias),
            select_norm(64, norm_layer), act
        ]

        conv_block += [
            nn.Conv3d(64, dim, kernel_size=1, padding=p, bias=use_bias),
            select_norm(dim, norm_layer)
        ]

        self.conv_block = nn.Sequential(*conv_block)
Example #24
    def __init__(self, in_features):
        super(ResidualBlock, self).__init__()

        conv_block = [  nn.ReplicationPad3d(1),
                        nn.Conv3d(in_features, in_features, 3),
                        # nn.BatchNorm3d(in_features),
                        nn.InstanceNorm3d(in_features),
                        # nn.BatchNorm3d(in_features),
                        nn.ReLU(inplace=True),
                        nn.ReplicationPad3d(1),
                        nn.Conv3d(in_features, in_features, 3),
                        # nn.BatchNorm3d(in_features)
                        nn.InstanceNorm3d(in_features)  ]
                        # nn.BatchNorm3d(in_features)  ]

        self.conv_block = nn.Sequential(*conv_block)
Example #25
def conv(layers,
         c_in,
         c_out,
         k_size,
         stride=1,
         pad=0,
         padding='zero',
         lrelu=True,
         batch_norm=False,
         w_norm=False,
         d_gdrop=False,
         pixel_norm=False,
         only=False):

    if padding == 'replication':
        layers.append(nn.ReplicationPad3d(pad))
        pad = 0
    if d_gdrop: layers.append(GeneralizedDropOut(mode='prop', strength=0.0))
    if w_norm:
        layers.append(
            EqualizedConv3d(c_in,
                            c_out,
                            k_size,
                            stride,
                            pad,
                            initializer='kaiming'))
    else:
        layers.append(nn.Conv3d(c_in, c_out, k_size, stride, pad))
    if not only:
        if lrelu: layers.append(nn.LeakyReLU(0.2))
        else: layers.append(nn.ReLU())
        if batch_norm: layers.append(nn.BatchNorm3d(c_out))
        if pixel_norm: layers.append(PixelwiseNormLayer())
    return layers
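
These builders mutate and return a plain layer list; a hedged usage sketch (default flags, so no EqualizedConv3d or GeneralizedDropOut involved) chains two replication-padded convs into a Sequential:

layers = []
layers = conv(layers, 1, 32, k_size=3, pad=1, padding='replication')
layers = conv(layers, 32, 64, k_size=3, pad=1, padding='replication')
model = nn.Sequential(*layers)  # each Conv3d is preceded by ReplicationPad3d(1)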
Example #26
def deconv_t(layers,
             c_in,
             c_out,
             k_size,
             stride=1,
             pad=0,
             padding='zero',
             lrelu=True,
             batch_norm=False,
             w_norm=False,
             pixel_norm=False,
             only=False):

    if padding == 'replication':
        layers.append(nn.ReplicationPad3d(pad))
        pad = 0
    if w_norm:
        layers.append(
            EqualizedConvTranspose3d(c_in, c_out, k_size, stride, pad))
    else:
        layers.append(nn.ConvTranspose3d(c_in, c_out, k_size, stride, pad))
    if not only:
        if lrelu: layers.append(nn.LeakyReLU(0.2))
        else: layers.append(nn.ReLU())
        if batch_norm: layers.append(nn.BatchNorm3d(c_out))
        if pixel_norm: layers.append(PixelwiseNormLayer())
    return layers
Example #27
 def _conv(self,
           in_c: int,
           out_c: int,
           kernel_sz: Optional[Tuple[int]] = None,
           bias: bool = False,
           seq: bool = True,
           stride: Tuple[int] = None):
     ksz = kernel_sz or self.kernel_sz
     bias = False if self.norm != 'none' and not bias else True
     stride = stride or tuple([1 for _ in ksz])
     if not self.separable or all([ks == 1 for ks in ksz]):
         layers = [nn.Conv3d(in_c, out_c, ksz, bias=bias, stride=stride)] if self.dim == 3 else \
                  [nn.Conv2d(in_c, out_c, ksz, bias=bias, stride=stride)] if self.dim == 2 else \
                  [nn.Conv1d(in_c, out_c, ksz, bias=bias, stride=stride)]
     else:
         layers = [SeparableConv3d(in_c, out_c, ksz, bias=bias, stride=stride)] if self.dim == 3 else \
                  [SeparableConv2d(in_c, out_c, ksz, bias=bias, stride=stride)] if self.dim == 2 else \
                  [SeparableConv1d(in_c, out_c, ksz, bias=bias, stride=stride)]
     if any([ks > 1 for ks in ksz]):
         rp = tuple([
             ks // 2 for p in zip(reversed(ksz), reversed(ksz)) for ks in p
         ])
         layers = [nn.ReplicationPad3d(rp)] + layers if self.dim == 3 else \
                  [nn.ReflectionPad2d(rp)] + layers  if self.dim == 2 else \
                  [nn.ReflectionPad1d(rp)] + layers
     if seq and len(layers) > 1:
         c = nn.Sequential(*layers)
     else:
         c = layers if len(layers) > 1 else layers[0]
     return c
Example #28
 def __init__(self, num_classes=157):
     super(Inception3D, self).__init__()
     self.conv1 = BasicConv3d(3,
                              64, (7, 7, 7), (2, 2, 2),
                              padding=(3, 3, 3))
     self.maxpool1 = nn.MaxPool3d((1, 3, 3), (1, 2, 2), padding=(0, 1, 1))
     self.conv2 = BasicConv3d(64, 64, (1, 1, 1))
     self.conv3 = BasicConv3d(64, 192, (3, 3, 3), padding=(1, 1, 1))
     self.maxpool2 = nn.MaxPool3d((1, 3, 3), (1, 2, 2), padding=(0, 1, 1))
     self.inc1 = Inception([192, 64, 96, 128, 16, 32, 32])
     self.inc2 = Inception([256, 128, 128, 192, 32, 96, 64])
     self.maxpool3 = nn.MaxPool3d((3, 3, 3), (2, 2, 2), padding=(1, 1, 1))
     self.inc3 = Inception([480, 192, 96, 208, 16, 48, 64])
     #self.inc3 = Inception([480, 192, 96, 204, 16, 48, 64])
     self.inc4 = Inception([512, 160, 112, 224, 24, 64, 64])
     #self.inc4 = Inception([508, 160, 112, 224, 24, 64, 64])
     self.inc5 = Inception([512, 128, 128, 256, 24, 64, 64])
     self.inc6 = Inception([512, 112, 144, 288, 32, 64, 64])
     self.inc7 = Inception([528, 256, 160, 320, 32, 128, 128])
     self.maxpool4 = nn.MaxPool3d((2, 2, 2), (2, 2, 2), padding=(0, 0, 0))
     self.inc8 = Inception([832, 256, 160, 320, 32, 128, 128])
     #self.inc8 = Inception([832, 256, 160, 320, 48, 128, 128])
     self.inc9 = Inception([832, 384, 192, 384, 48, 128, 128])
     self.padding = nn.ReplicationPad3d((1, 0, 1, 0, 0, 0))
     self.avgpool = nn.AvgPool3d((2, 7, 7), stride=(1, 1, 1))
     self.conv4 = nn.Conv3d(
         1024, 157, kernel_size=(1, 1, 1),
         stride=(1, 1, 1))  #BasicConv3d(1024, num_classes, (1, 1, 1))
Example #29
    def __init__(self, input_ch=1, output_ch=95):  # output_ch = number of classes
        super(AttU_Net3D, self).__init__()
        
        self.Maxpool3D = nn.MaxPool3d(kernel_size=2,stride=2)

        self.pad = nn.ReplicationPad3d(1)
        self.sig = nn.Sigmoid()
        self.Conv3D_1 = conv_block3D(ch_in=input_ch,ch_out=64)
        self.Conv3D_2 = conv_block3D(ch_in=64,ch_out=128)
        self.Conv3D_3 = conv_block3D(ch_in=128,ch_out=256)
        self.Conv3D_4 = conv_block3D(ch_in=256,ch_out=512)
        self.Conv3D_5 = conv_block3D(ch_in=512,ch_out=1024)

        self.Up5 = up_conv3D(ch_in=1024,ch_out=512)
        self.Att3D_5 = Attention3D(F_g=512,F_l=512,F_int=256)
        self.Up3D_conv5 = conv_block3D(ch_in=1024, ch_out=512)

        self.Up4 = up_conv3D(ch_in=512,ch_out=256)
        self.Att3D_4 = Attention3D(F_g=256,F_l=256,F_int=128)
        self.Up3D_conv4 = conv_block3D(ch_in=512, ch_out=256)
        
        self.Up3 = up_conv3D(ch_in=256,ch_out=128)
        self.Att3D_3 = Attention3D(F_g=128,F_l=128,F_int=64)
        self.Up3D_conv3 = conv_block3D(ch_in=256, ch_out=128)
        
        self.Up2 = up_conv3D(ch_in=128,ch_out=64)
        self.Att3D_2 = Attention3D(F_g=64,F_l=64,F_int=32)
        self.Up3D_conv2 = conv_block3D2(ch_in=128, ch_out=64)

        self.Conv3D_1x1 = nn.Conv3d(64,output_ch,kernel_size=1,stride=1,padding=0)
Example #30
    def __init__(self,
                 n_downsamples=5,
                 in_channels=1,
                 filters=24,
                 activation=nn.LeakyReLU(0.1),
                 maxfeatures=256):
        super().__init__()

        self.maxfeatures = maxfeatures
        self.n_downsamples = n_downsamples
        self.filters = filters
        self.intro = nn.Sequential(
            nn.ReplicationPad3d(3),
            nn.Conv3d(in_channels, filters, kernel_size=7, padding=0),
            nn.BatchNorm3d(filters), activation)
        model = []
        for i in range(1, self.n_downsamples):
            inp = self.filters * i
            inp = min(inp, self.maxfeatures)
            out = self.filters * (i + 1)
            out = min(out, self.maxfeatures)
            model += [
                nn.Conv3d(inp, out, kernel_size=4, stride=2, padding=1),
                nn.BatchNorm3d(out), activation
            ]
            print(f'{inp} -> {out}')
        self.model = nn.Sequential(*model)
        self.dense = nn.Sequential(nn.Linear(120 * 4 * 4 * 3, 512), activation,
                                   nn.Dropout(), nn.Linear(512, 2))
        print()