Example 1
 def __init__(self, input_channel, output_channel, ID):
     super(UNet_up_block2, self).__init__()
     kernel_size = 3
     self.ID = ID
     # kernel 4, stride 2, padding 1: doubles the spatial resolution
     self.up_sampling = torch.nn.ConvTranspose2d(input_channel,
                                                 input_channel,
                                                 4,
                                                 stride=(2, 2),
                                                 padding=(1, 1))
     self.conv1 = torch.nn.Conv2d(input_channel,
                                  output_channel,
                                  kernel_size,
                                  stride=(1, 1),
                                  padding=(1, 1),
                                  bias=False)
     self.bn1 = Norm(output_channel)
     self.conv2 = torch.nn.Conv2d(output_channel,
                                  output_channel,
                                  kernel_size,
                                  stride=(1, 1),
                                  padding=(1, 1),
                                  bias=False)
     self.bn2 = Norm(output_channel)
     self.conv3 = torch.nn.Conv2d(output_channel,
                                  output_channel,
                                  kernel_size,
                                  stride=(1, 1),
                                  padding=(1, 1),
                                  bias=False)
     self.bn3 = Norm(output_channel)
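Only the constructor is shown here; `Norm` and the block's `forward` are defined elsewhere in the original file. Below is a minimal sketch of how these layers might be chained, assuming `Norm` is simply `torch.nn.BatchNorm2d` and ignoring whatever skip-connection handling the real `forward` performs:

import torch
import torch.nn.functional as F

def Norm(channels):
    # Stand-in for the undefined Norm helper: assumed here to be a plain BatchNorm2d.
    return torch.nn.BatchNorm2d(channels)

class UNet_up_block2(torch.nn.Module):
    # __init__ as in the example above

    def forward(self, x):
        # Hypothetical forward pass; the original is not shown in the snippet.
        x = F.relu(self.up_sampling(x))        # transposed conv doubles H and W
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        return x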
Example 2
 def __init__(self, input_channel, output_channel, down_sample):
     super(UNet_down_block, self).__init__()
     kernel_size = 3
     self.conv1 = torch.nn.Conv2d(input_channel,
                                  output_channel,
                                  kernel_size,
                                  stride=(1, 1),
                                  padding=(1, 1),
                                  bias=False)
     self.bn1 = Norm(output_channel)
     self.conv2 = torch.nn.Conv2d(output_channel,
                                  output_channel,
                                  kernel_size,
                                  stride=(1, 1),
                                  padding=(1, 1),
                                  bias=False)
     self.bn2 = Norm(output_channel)
     self.conv3 = torch.nn.Conv2d(output_channel,
                                  output_channel,
                                  kernel_size,
                                  stride=(1, 1),
                                  padding=(1, 1),
                                  bias=False)
     self.bn3 = Norm(output_channel)
     # stride-2 convolution: halves the spatial resolution when down_sample is set
     self.down_sampling = torch.nn.Conv2d(input_channel,
                                          input_channel,
                                          kernel_size,
                                          stride=(2, 2),
                                          padding=(1, 1),
                                          bias=False)
     self.down_sample = down_sample
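As with the up block, only the constructor appears. One plausible `forward` to slot into `UNet_down_block` (using `torch.nn.functional as F` and the `Norm` stand-in from the previous sketch), applying the optional stride-2 downsampling first and a ReLU after each normalized convolution:

    def forward(self, x):
        # Hypothetical forward for UNet_down_block; the original is not shown.
        if self.down_sample:
            x = F.relu(self.down_sampling(x))  # stride 2: halves H and W
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        return x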
Example 3
    def __init__(self):
        super(UNet, self).__init__()

        # self.opts = opts
        input_channel_number = 160
        output_channel_number = 160
        # kernel_size = opts.kernel_size # we could change this later
        kernel_size = 3

        # self.linear1 = torch.nn.Sequential(*self.lin_tan_drop(400 * 520, 400 * 520, 64))
        # self.linear2 = torch.nn.Sequential(*self.lin_tan_drop(200 * 260, 200 * 260, 64))
        #
        # self.linear1 = torch.nn.Sequential(*self.lin_tan_drop(220 * 220, 229 * 220, 64))
        # self.linear2 = torch.nn.Sequential(*self.lin_tan_drop(110 * 110, 110 * 110, 64))
        # self.linear1 = torch.nn.Sequential(*self.lin_tan_drop(400 * 520, 400 * 520, 64))
        # self.linear2 = torch.nn.Sequential(*self.lin_tan_drop(200 * 260, 200 * 260, 64))
        # self.linear3 = torch.nn.Sequential(*self.lin_tan_drop(100 * 130, 100 * 130, 64))

        # Encoder network
        self.down_block1 = UNet_down_block(input_channel_number, 64, False) # 64*520
        self.down_block2 = UNet_down_block(64, 128, True) # 64*520
        self.down_block3 = UNet_down_block(128, 256, True) # 64*260


        # bottom convolution
        self.mid_conv1 = torch.nn.Conv2d(256, 256, kernel_size, padding=(1, 1), bias=False)  # 64*260
        self.bn1 = Norm(256)
        self.mid_conv2 = torch.nn.Conv2d(256, 256, kernel_size, padding=(1, 1), bias=False)  # 64*260
        self.bn2 = Norm(256)
        self.mid_conv3 = torch.nn.Conv2d(256, 256, kernel_size, padding=(1, 1), bias=False)  #, dilation=4 # 64*260
        self.bn3 = Norm(256)
        self.mid_conv4 = torch.nn.Conv2d(256, 256, kernel_size, padding=(1, 1), bias=False)  # 64*260
        self.bn4 = Norm(256)
        self.mid_conv5 = torch.nn.Conv2d(256, 256, kernel_size, padding=(1, 1), bias=False)  # 64*260
        self.bn5 = Norm(256)

        # Decoder network
        # self.up_block2 = UNet_up_block(128, 256, 128, 1)  # 64*520
        # self.up_block3 = UNet_up_block(64, 128, 64, 1)  # 64*520
        #
        self.up_block21 = UNet_up_block2(128, 128, 256, 128, 1)  # 64*520
        self.up_block31 = UNet_up_block2(64, 64, 128, 64, 1)  # 64*520
        # Final output
        self.last_conv1 = torch.nn.Conv2d(64, 64, 3, padding=(1, 1), bias=False)  # 64*520
        self.last_bn = Norm(64)
        self.last_conv2 = torch.nn.Conv2d(64, output_channel_number, 3, padding=(1, 1))  # 64*520
        self.last_bn2 = Norm(output_channel_number)  # 64*520

        self.softplus = torch.nn.Softplus(beta=5, threshold=100)
        self.relu = torch.nn.ReLU()
        self.tanhshrink = torch.nn.Tanhshrink()
        self.tanh = torch.nn.Tanh()
Example 4
    def __init__(self):
        super(UNet, self).__init__()

        # self.opts = opts
        input_channel_number = 1
        output_channel_number = 1
        kernel_size = 3
        # Encoder network
        self.down_block1 = UNet_down_block(input_channel_number, 64,
                                           False)  # 64*520
        self.down_block2 = UNet_down_block(64, 128, True)  # 64*520
        self.down_block3 = UNet_down_block(128, 256, True)  # 64*260
        self.down_block4 = UNet_down_block(256, 512, True)  # 64*260

        # bottom convolution
        self.mid_conv1 = torch.nn.Conv2d(512,
                                         512,
                                         kernel_size,
                                         padding=(1, 1),
                                         bias=False)  # 64*260
        self.bn1 = Norm(512)
        self.mid_conv2 = torch.nn.Conv2d(512,
                                         512,
                                         kernel_size,
                                         padding=(1, 1),
                                         bias=False)  # 64*260
        self.bn2 = Norm(512)
        self.mid_conv3 = torch.nn.Conv2d(512,
                                         512,
                                         kernel_size,
                                         padding=(1, 1),
                                         bias=False)  #, dilation=4 # 64*260
        self.bn3 = Norm(512)
        self.mid_conv4 = torch.nn.Conv2d(512,
                                         512,
                                         kernel_size,
                                         padding=(1, 1),
                                         bias=False)  # 64*260
        self.bn4 = Norm(512)
        self.mid_conv5 = torch.nn.Conv2d(512,
                                         512,
                                         kernel_size,
                                         padding=(1, 1),
                                         bias=False)  # 64*260
        self.bn5 = Norm(512)

        # Decoder network
        self.up_block2 = UNet_up_block2(512, 256, 1)  # 64*520
        self.up_block3 = UNet_up_block2(256, 64, 1)  # 64*520
        # self.up_block4 = UNet_up_block2(128, 64, 1)# 64*520

        # Final output
        self.last_conv1 = torch.nn.Conv2d(64,
                                          64,
                                          3,
                                          padding=(1, 1),
                                          bias=False)  # 64*520
        self.last_bn = Norm(64)  # normalizes the 64-channel output of last_conv1
        self.last_conv2 = torch.nn.Conv2d(64,
                                          output_channel_number,
                                          3,
                                          padding=(1, 1))  # 64*520
        self.last_bn2 = Norm(output_channel_number)  # 64*520

        self.softplus = torch.nn.Softplus(beta=5, threshold=100)
        self.relu = torch.nn.ReLU()
        self.tanhshrink = torch.nn.Tanhshrink()
        self.tanh = torch.nn.Tanh()
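Given the `UNet_down_block` and `UNet_up_block2` constructors from the earlier examples and a `Norm` stand-in such as the BatchNorm2d alias sketched under Example 1, a quick smoke test for this single-channel variant could just build the module and count its parameters (the snippet does not show a `forward`, so no data is pushed through):

if __name__ == "__main__":
    # Hypothetical smoke test: assumes UNet, UNet_down_block, UNet_up_block2 and Norm
    # are all defined in the same module; only construction is exercised here.
    net = UNet()
    n_params = sum(p.numel() for p in net.parameters())
    print(f"UNet builds with {n_params:,} trainable parameters")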