Example #1
    def __init__(self, config):
        super(VoxResNet, self).__init__()

        self.seq1 = nn.Sequential(Conv3d(1, 32, 3, padding=1), BatchNorm3d(32),
                                  ReLU(),
                                  Conv3d(32, 32, (1, 3, 3), padding=(0, 1, 1)))

        self.seq2 = nn.Sequential(
            BatchNorm3d(32),
            ReLU(),
            Conv3d(32, 64, 3, padding=1, stride=2),
            #MaxPool3d(2),
            VoxResNet_ResBlock(),
            VoxResNet_ResBlock())

        self.seq3 = nn.Sequential(
            BatchNorm3d(64),
            ReLU(),
            Conv3d(64, 64, 3, padding=1, stride=2),
            #MaxPool3d(2, padding=(1,0,0)),
            VoxResNet_ResBlock(),
            VoxResNet_ResBlock())

        self.seq4 = nn.Sequential(
            BatchNorm3d(64),
            ReLU(),
            Conv3d(64, 64, 3, padding=1, stride=2),
            #MaxPool3d(2, padding=(1,0,0)),
            VoxResNet_ResBlock(),
            VoxResNet_ResBlock())
        """
        # For Leiden dataset, 16 slices
        self.transposed1 = ConvTranspose3d(32, 2, 3, padding=1)
        self.transposed2 = ConvTranspose3d(64, 2, 3, stride=2, padding=1,
                output_padding=1)
        self.transposed3 = ConvTranspose3d(64, 2, 3, stride=4, padding=1,
                output_padding=3)
        self.transposed4 = ConvTranspose3d(64, 2, 3, stride=8, padding=1,
                output_padding=7)
        """
        # For CR dataset, 18 slices
        self.transposed1 = ConvTranspose3d(32, 2, 3, padding=1)
        self.transposed2 = ConvTranspose3d(64,
                                           2,
                                           3,
                                           stride=2,
                                           padding=1,
                                           output_padding=1)
        self.transposed3 = ConvTranspose3d(64,
                                           2,
                                           3,
                                           stride=4,
                                           padding=1,
                                           output_padding=(1, 3, 3))
        self.transposed4 = ConvTranspose3d(64,
                                           2,
                                           3,
                                           stride=8,
                                           padding=1,
                                           output_padding=(1, 7, 7))
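
A note on the arithmetic: for ConvTranspose3d with dilation 1, the output length per dimension is (in - 1) * stride - 2 * padding + kernel_size + output_padding. A minimal sketch (plain Python, no torch needed) checking that the three upsampling paths above restore the 18-slice depth of the CR dataset:

def deconv_len(n, k, s, p, op=0):
    # ConvTranspose3d output length per dimension, dilation = 1
    return (n - 1) * s - 2 * p + k + op

d1 = 18                          # CR dataset: 18 slices
d2 = (d1 + 2 * 1 - 3) // 2 + 1   # after the stride-2 Conv3d in seq2 -> 9
d3 = (d2 + 2 * 1 - 3) // 2 + 1   # after seq3 -> 5
d4 = (d3 + 2 * 1 - 3) // 2 + 1   # after seq4 -> 3

assert deconv_len(d2, 3, 2, 1, op=1) == 18   # transposed2
assert deconv_len(d3, 3, 4, 1, op=1) == 18   # transposed3 (depth output_padding = 1)
assert deconv_len(d4, 3, 8, 1, op=1) == 18   # transposed4 (depth output_padding = 1)
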
Example #2
    def __init__(self):
        print("\ninitializing \"decoder\"")
        super(VoxelDecoder, self).__init__()
        self.n_deconvfilter = [128, 128, 128, 64, 32, 2]

        # 3D transposed conv 1 (stride-2 upsampling)
        conv1_kernel_size = 3
        self.conv1 = ConvTranspose3d(in_channels=self.n_deconvfilter[0],
                                     out_channels=self.n_deconvfilter[1],
                                     kernel_size=conv1_kernel_size, stride=2,
                                     padding=(conv1_kernel_size - 1) // 2,
                                     output_padding=(conv1_kernel_size - 1) // 2)

        # 3D transposed conv 2 (stride-2 upsampling)
        conv2_kernel_size = 3
        self.conv2 = ConvTranspose3d(in_channels=self.n_deconvfilter[1],
                                     out_channels=self.n_deconvfilter[2],
                                     kernel_size=conv2_kernel_size, stride=2,
                                     padding=(conv2_kernel_size - 1) // 2,
                                     output_padding=(conv2_kernel_size - 1) // 2)

        # 3D transposed conv 3 (stride-2 upsampling)
        conv3_kernel_size = 3
        self.conv3 = ConvTranspose3d(in_channels=self.n_deconvfilter[2],
                                     out_channels=self.n_deconvfilter[3],
                                     kernel_size=conv3_kernel_size, stride=2,
                                     padding=(conv3_kernel_size - 1) // 2,
                                     output_padding=(conv3_kernel_size - 1) // 2)

        # 3D transposed conv 4 (stride-2 upsampling)
        conv4_kernel_size = 3
        self.conv4 = ConvTranspose3d(in_channels=self.n_deconvfilter[3],
                                     out_channels=self.n_deconvfilter[4],
                                     kernel_size=conv4_kernel_size, stride=2,
                                     padding=(conv4_kernel_size - 1) // 2,
                                     output_padding=(conv4_kernel_size - 1) // 2)

        # final 3D conv producing the 2-channel output
        conv5_kernel_size = 3
        self.conv5 = Conv3d(in_channels=self.n_deconvfilter[4],
                            out_channels=self.n_deconvfilter[5],
                            kernel_size=conv5_kernel_size,
                            padding=(conv5_kernel_size - 1) // 2)

        #nonlinearities of the network
        self.leaky_relu = LeakyReLU(negative_slope=0.01)

        self.softmax = nn.Softmax(dim=1)
        self.bn1 = nn.BatchNorm3d(128)
        self.bn2 = nn.BatchNorm3d(128)
        self.bn3 = nn.BatchNorm3d(64)
        self.bn4 = nn.BatchNorm3d(32)
        self.bn5 = nn.BatchNorm3d(2)
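
With kernel_size 3, padding 1 and output_padding 1, every stride-2 ConvTranspose3d above exactly doubles each spatial dimension, so conv1 through conv4 give a 16x upsampling while conv5 keeps the size. A minimal check of one such block (the input size here is illustrative; the snippet does not fix it):

import torch
from torch.nn import ConvTranspose3d

# one stride-2 block as configured above: kernel 3, padding 1, output_padding 1
up = ConvTranspose3d(128, 128, kernel_size=3, stride=2, padding=1, output_padding=1)
x = torch.randn(1, 128, 2, 2, 2)   # illustrative input; (in - 1) * 2 - 2 + 3 + 1 = 2 * in
print(up(x).shape)                 # torch.Size([1, 128, 4, 4, 4])
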
Example #3
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, dilation=1,
                 activation=ReLU(inplace=True)):
        super(ConvTransposeBlock3D, self).__init__()
        self.convt = ConvTranspose3d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
                                     stride=stride, padding=padding, output_padding=output_padding, dilation=dilation)

        self.activation = activation
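
The forward pass is not shown in this snippet; assuming it simply applies self.activation(self.convt(x)), the block is equivalent to the plain pair below (channel and input sizes are illustrative):

import torch
from torch.nn import ConvTranspose3d, ReLU

convt = ConvTranspose3d(16, 8, kernel_size=3, stride=2, padding=1, output_padding=1)
act = ReLU(inplace=True)

x = torch.randn(1, 16, 8, 8, 8)
y = act(convt(x))
print(y.shape)   # torch.Size([1, 8, 16, 16, 16]) -- stride 2 doubles each dimension
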
Example #4
    def __init__(self, inp_feat, out_feat, kernel=3, stride=2, padding=1):
        super(Deconv3D_Block, self).__init__()

        self.deconv = Sequential(
            ConvTranspose3d(inp_feat, out_feat, kernel_size=(kernel, kernel, kernel),
                            stride=(stride, stride, stride), padding=(padding, padding, padding), output_padding=1, bias=True),
            ReLU())
Example #5
    def __init__(self, inp_feat, out_feat, kernel=4, stride=2, padding=1):

        super(Deconv3D_Block, self).__init__()

        self.deconv = Sequential(
            ConvTranspose3d(inp_feat, out_feat, kernel_size=kernel,
                            stride=stride, padding=padding, output_padding=0, bias=True),
            BatchNorm3d(out_feat),
            ReLU())
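
The two variants reach the same 2x upsampling differently: kernel 3 with output_padding=1 versus kernel 4 with output_padding=0 (an even kernel divides evenly by stride 2, which is often preferred to reduce checkerboard artifacts). A quick comparison with an illustrative 8^3 input:

import torch
from torch.nn import ConvTranspose3d

x = torch.randn(1, 16, 8, 8, 8)   # illustrative input
a = ConvTranspose3d(16, 8, kernel_size=3, stride=2, padding=1, output_padding=1)
b = ConvTranspose3d(16, 8, kernel_size=4, stride=2, padding=1, output_padding=0)
print(a(x).shape, b(x).shape)     # both torch.Size([1, 8, 16, 16, 16])
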
Example #6
File: network.py  Project: PCIHD/3D-Net
    def __init__(self):
        super(CNN_Autoencoder, self).__init__()
        self.encoder = nn.Sequential(Conv2d(1, 16, kernel_size=3, stride=1),
                                     nn.ReLU(inplace=True), MaxPool2d(2, 2),
                                     Conv2d(16, 8, kernel_size=3, stride=1),
                                     nn.ReLU(inplace=True), MaxPool2d(2, 2),
                                     Conv2d(8, 8, kernel_size=3, stride=1))

        self.decoder = nn.Sequential(
            ConvTranspose3d(8, 12, kernel_size=3, stride=1),
            nn.Upsample(scale_factor=(1, 1, 1)),
            nn.ReLU(inplace=True),
            ConvTranspose3d(12, 16, kernel_size=3, stride=1),
            nn.Upsample(scale_factor=(1, 1, 1)),
            nn.ReLU(inplace=True),
            ConvTranspose3d(16, 22, kernel_size=3, stride=1),
            nn.Upsample(scale_factor=(1.6, 2, 1.6)),
            nn.ReLU(inplace=True),
            ConvTranspose3d(22, 1, kernel_size=3, stride=1),
            #nn.ReLU(inplace=True),
        )
Example #7
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 stride=1,
                 padding=0,
                 output_padding=0,
                 groups=1,
                 bias=True,
                 dilation=1,
                 padding_mode='zeros'):
        super(ComplexConvTranspose3d, self).__init__()
        # Separate transposed convolutions for the real and the imaginary part.
        self.conv_tran_r = ConvTranspose3d(in_channels, out_channels,
                                           kernel_size, stride, padding,
                                           output_padding, groups, bias,
                                           dilation, padding_mode)
        self.conv_tran_i = ConvTranspose3d(in_channels, out_channels,
                                           kernel_size, stride, padding,
                                           output_padding, groups, bias,
                                           dilation, padding_mode)
        # Inverse-DFT-style initial weights along the last kernel axis.
        # Note that kernel_size is indexed below, so it must be a 3-element
        # tuple here even though the default is the scalar 3.
        IF_R = torch.Tensor([[[[[
            np.cos(2 * (j) * (i) * np.pi / kernel_size[2]) / kernel_size[2]
            for j in range(kernel_size[2])
        ] for i in range(kernel_size[1])] for i in range(kernel_size[0])]
                              for i in range(out_channels)]
                             for i in range(in_channels)]).double()
        IF_IMAG = torch.Tensor([[[[[
            np.sin(2 * (j) * (i) * np.pi / kernel_size[2]) / kernel_size[2]
            for j in range(kernel_size[2])
        ] for i in range(kernel_size[1])] for i in range(kernel_size[0])]
                                for i in range(out_channels)]
                               for i in range(in_channels)]).double()
        print(IF_R.shape)
        # IFB = torch.Tensor([0 for i in range(out_channels)]).double()
        with torch.no_grad():
            self.conv_tran_r.weight = torch.nn.Parameter(IF_R)
            self.conv_tran_i.weight = torch.nn.Parameter(IF_IMAG)
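
For reference, ConvTranspose3d stores its weight as (in_channels, out_channels // groups, kT, kH, kW), which is exactly the nesting order of IF_R and IF_IMAG above, so the direct Parameter assignment keeps the expected shape. A small check with arbitrary sizes:

import torch
from torch.nn import ConvTranspose3d

ct = ConvTranspose3d(in_channels=2, out_channels=4, kernel_size=(3, 5, 7))
print(ct.weight.shape)   # torch.Size([2, 4, 3, 5, 7])
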
Example #8
def test_conv_transpose3d(batch, length,
                          in_channels, out_channels,
                          kernel_size, stride, padding, output_padding, dilation, groups, bias, padding_mode):

    x = torch.randn(batch, in_channels, *length,
                    requires_grad=True, device=device)
    conv = ConvTranspose3d(in_channels, out_channels, kernel_size, stride,
                           padding, output_padding, groups, bias, dilation, padding_mode).to(device)
    fft_conv = FFTConvTranspose3d(in_channels, out_channels, kernel_size,
                                  stride, padding, output_padding, groups, bias, dilation, padding_mode).to(device)
    fft_conv.load_state_dict(conv.state_dict())

    y1 = conv(x)
    y2 = fft_conv(x)
    assert torch.allclose(
        y1, y2, atol=1e-5, rtol=1e-5), torch.abs(y1 - y2).max().item()
    y2.sum().backward()
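
The test relies on a module-level device and an FFTConvTranspose3d implementation mirroring ConvTranspose3d's constructor (both outside this snippet), and would normally be driven by pytest parametrization. A self-contained shape check for one illustrative parameter set exercising the reference path (note ConvTranspose3d only accepts padding_mode='zeros'):

import torch
from torch.nn import ConvTranspose3d

x = torch.randn(2, 3, 8, 9, 10)
conv = ConvTranspose3d(3, 5, kernel_size=3, stride=2, padding=1,
                       output_padding=1, dilation=1, groups=1, bias=True)
print(conv(x).shape)   # torch.Size([2, 5, 16, 18, 20]); each dim: (n - 1) * 2 - 2 + 3 + 1
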
Example #9
    def __init__(self, embedding_dim):
        super(Decoder, self).__init__()
        self.embedding_dim = embedding_dim
        self.mlp = Sequential(
            Linear(self.embedding_dim, 1024), BatchNorm1d(1024), LeakyReLU(),
            Linear(1024, 2048), BatchNorm1d(2048), LeakyReLU(),
            Linear(2048, 4096), BatchNorm1d(4096), LeakyReLU())
        self.deconv = Sequential(
            ConvTranspose3d(64, 64, kernel_size=3, stride=1), BatchNorm3d(64), LeakyReLU(),
            ConvTranspose3d(64, 64, kernel_size=3, stride=1), BatchNorm3d(64), LeakyReLU(),
            ConvTranspose3d(64, 32, kernel_size=3, stride=3), BatchNorm3d(32), LeakyReLU(),
            ConvTranspose3d(32, 16, kernel_size=3, stride=1), BatchNorm3d(16), LeakyReLU(),
            ConvTranspose3d(16, 8, kernel_size=3, stride=1), BatchNorm3d(8), LeakyReLU(),
            ConvTranspose3d(8, 4, kernel_size=3, stride=1), BatchNorm3d(4), LeakyReLU(),
            ConvTranspose3d(4, 2, kernel_size=3, stride=1), BatchNorm3d(2), LeakyReLU(),
            ConvTranspose3d(2, 1, kernel_size=3, stride=1), BatchNorm3d(1), LeakyReLU(),
            ConvTranspose3d(1, 1, kernel_size=3, stride=1), BatchNorm3d(1), LeakyReLU(),
            ConvTranspose3d(1, 1, kernel_size=3, stride=1), BatchNorm3d(1), LeakyReLU(),
            ConvTranspose3d(1, 1, kernel_size=3, stride=1), BatchNorm3d(1), Tanh())
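
The forward pass is not shown; since 64 * 4 * 4 * 4 = 4096, the MLP output is presumably reshaped to (N, 64, 4, 4, 4) before entering self.deconv (an assumption, not stated in the snippet). Under that assumption, each unpadded kernel-3, stride-1 layer adds 2 to every spatial dimension and the single stride-3 layer roughly triples it, ending at a (N, 1, 40, 40, 40) volume:

def deconv_len(n, k=3, s=1):
    # ConvTranspose3d length with padding=0, output_padding=0, dilation=1
    return (n - 1) * s + k

side = 4                                            # assumed reshape: (N, 64, 4, 4, 4)
for stride in (1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1):    # strides of the 11 layers above
    side = deconv_len(side, s=stride)
print(side)                                         # 40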