Code example #1
File: Models.py  Project: hsl323/cartoonGAN_pytorch
    def __init__(self, init_weights_path=None):
        super(Discriminator, self).__init__()

        # Activation Functions
        self.LeakyReLU = nn.LeakyReLU(0.2, True)
        self.Sigmoid = nn.Sigmoid()

        ## 1st Block
        self.conv_01_1 = nn.Conv2d(3, 32, 3, 1, 1)
        # LeakyReLU

        ## 2nd Block
        self.conv_02_1 = nn.Conv2d(32, 64, 3, 2, 1)
        # LeakyReLU
        self.conv_02_2 = nn.Conv2d(64, 128, 3, 1, 1)
        self.in_norm_02_1 = InstanceNormalization(128)
        # LeakyReLU

        ## 3rd Block
        self.conv_03_1 = nn.Conv2d(128, 128, 3, 2, 1)
        # LeakyReLU
        self.conv_03_2 = nn.Conv2d(128, 256, 3, 1, 1)
        self.in_norm_03_1 = InstanceNormalization(256)
        # LeakyReLU

        ## 4th Block
        self.conv_04_1 = nn.Conv2d(256, 256, 3, 1, 1)
        self.in_norm_04_1 = InstanceNormalization(256)
        # LeakyReLU

        ## Final Block
        self.conv_05_1 = nn.Conv2d(256, 1, 3, 1, 1)
        # Sigmoid

        # Initialize weights
        if init_weights_path is None:
            init_weights(self)
        else:
            self.load_state_dict(
                torch.load(init_weights_path)["model_state_dict"])
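This excerpt only shows the discriminator's constructor; it relies on project-level definitions (nn, torch, InstanceNormalization, init_weights) and the forward pass is not included. The inline comments mark where each activation is applied, so a forward method might look like the following. This is a minimal sketch reconstructed from the layer and comment order above, not the project's actual code.

    # Hypothetical forward pass; layer order inferred from the comments above.
    def forward(self, x):
        # 1st block: conv + LeakyReLU
        x = self.LeakyReLU(self.conv_01_1(x))
        # 2nd block: strided conv, then conv + instance norm, LeakyReLU after each
        x = self.LeakyReLU(self.conv_02_1(x))
        x = self.LeakyReLU(self.in_norm_02_1(self.conv_02_2(x)))
        # 3rd block: same pattern with more channels
        x = self.LeakyReLU(self.conv_03_1(x))
        x = self.LeakyReLU(self.in_norm_03_1(self.conv_03_2(x)))
        # 4th block: conv + instance norm + LeakyReLU
        x = self.LeakyReLU(self.in_norm_04_1(self.conv_04_1(x)))
        # Final block: 1-channel patch output squashed with Sigmoid
        return self.Sigmoid(self.conv_05_1(x))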
Code example #2
File: Models.py  Project: hsl323/cartoonGAN_pytorch
    def __init__(self, init_weights_path=None):
        super(Inception_v3, self).__init__()

        self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2)
        self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
        self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2)
        self.Mixed_5b = InceptionA(192, pool_features=32)
        self.Mixed_5c = InceptionA(256, pool_features=64)
        self.Mixed_5d = InceptionA(288, pool_features=64)
        self.Mixed_6a = InceptionB(288)
        self.Mixed_6b = InceptionC(768, channels_7x7=128)
        self.Mixed_6c = InceptionC(768, channels_7x7=160)
        self.Mixed_6d = InceptionC(768, channels_7x7=160)
        self.Mixed_6e = InceptionC(768, channels_7x7=192)

        # Initialize weights
        if init_weights_path is None:
            init_weights(self)
        else:
            self.load_state_dict(torch.load(init_weights_path))
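The module names mirror torchvision's Inception v3 but stop at Mixed_6e, so this class is presumably used as a truncated feature extractor. The forward method is not part of this excerpt; the sketch below simply chains the modules in the order they are defined, which matches the standard Inception v3 layout. It is an assumption, not the project's code.

    # Hypothetical forward pass chaining the modules in definition order.
    def forward(self, x):
        x = self.Conv2d_1a_3x3(x)
        x = self.Conv2d_2a_3x3(x)
        x = self.Conv2d_2b_3x3(x)
        x = self.maxpool1(x)
        x = self.Conv2d_3b_1x1(x)
        x = self.Conv2d_4a_3x3(x)
        x = self.maxpool2(x)
        x = self.Mixed_5b(x)
        x = self.Mixed_5c(x)
        x = self.Mixed_5d(x)
        x = self.Mixed_6a(x)
        x = self.Mixed_6b(x)
        x = self.Mixed_6c(x)
        x = self.Mixed_6d(x)
        # Stop at Mixed_6e: the remaining Inception v3 stages are not defined here.
        return self.Mixed_6e(x)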
Code example #3
File: Models.py  Project: hsl323/cartoonGAN_pytorch
    def __init__(self, init_weights_path=None):
        super(Generator, self).__init__()

        # Activation Functions
        self.ReLU = nn.ReLU()
        self.Tanh = nn.Tanh()

        ## First Block
        self.pad_01_1 = nn.ReflectionPad2d(3)
        self.conv_01_1 = nn.Conv2d(3, 64, 7)
        self.in_norm_01_1 = InstanceNormalization(64)
        # ReLU

        ## Down Convolution Block 1
        self.conv_02_1 = nn.Conv2d(64, 128, 3, 2, 1)
        self.conv_02_2 = nn.Conv2d(128, 128, 3, 1, 1)
        self.in_norm_02_1 = InstanceNormalization(128)
        # ReLU

        ## Down Convolution Block 2
        self.conv_03_1 = nn.Conv2d(128, 256, 3, 2, 1)
        self.conv_03_2 = nn.Conv2d(256, 256, 3, 1, 1)
        self.in_norm_03_1 = InstanceNormalization(256)
        # ReLU

        ## Residual Block 1
        self.pad_04_1 = nn.ReflectionPad2d(1)
        self.conv_04_1 = nn.Conv2d(256, 256, 3)
        self.in_norm_04_1 = InstanceNormalization(256)
        # ReLU
        self.pad_04_2 = nn.ReflectionPad2d(1)
        self.conv_04_2 = nn.Conv2d(256, 256, 3)
        self.in_norm_04_2 = InstanceNormalization(256)
        # Elementwise Sum

        ## Residual Block 2
        self.pad_05_1 = nn.ReflectionPad2d(1)
        self.conv_05_1 = nn.Conv2d(256, 256, 3)
        self.in_norm_05_1 = InstanceNormalization(256)
        # ReLU
        self.pad_05_2 = nn.ReflectionPad2d(1)
        self.conv_05_2 = nn.Conv2d(256, 256, 3)
        self.in_norm_05_2 = InstanceNormalization(256)
        # Elementwise Sum

        ## Residual Block 3
        self.pad_06_1 = nn.ReflectionPad2d(1)
        self.conv_06_1 = nn.Conv2d(256, 256, 3)
        self.in_norm_06_1 = InstanceNormalization(256)
        # ReLU
        self.pad_06_2 = nn.ReflectionPad2d(1)
        self.conv_06_2 = nn.Conv2d(256, 256, 3)
        self.in_norm_06_2 = InstanceNormalization(256)
        # Elementwise Sum

        ## Residual Block 4
        self.pad_07_1 = nn.ReflectionPad2d(1)
        self.conv_07_1 = nn.Conv2d(256, 256, 3)
        self.in_norm_07_1 = InstanceNormalization(256)
        # ReLU
        self.pad_07_2 = nn.ReflectionPad2d(1)
        self.conv_07_2 = nn.Conv2d(256, 256, 3)
        self.in_norm_07_2 = InstanceNormalization(256)
        # Elementwise Sum

        ## Residual Block 5
        self.pad_08_1 = nn.ReflectionPad2d(1)
        self.conv_08_1 = nn.Conv2d(256, 256, 3)
        self.in_norm_08_1 = InstanceNormalization(256)
        # ReLU
        self.pad_08_2 = nn.ReflectionPad2d(1)
        self.conv_08_2 = nn.Conv2d(256, 256, 3)
        self.in_norm_08_2 = InstanceNormalization(256)
        # Elementwise Sum

        ## Residual Block 6
        self.pad_09_1 = nn.ReflectionPad2d(1)
        self.conv_09_1 = nn.Conv2d(256, 256, 3)
        self.in_norm_09_1 = InstanceNormalization(256)
        # ReLU
        self.pad_09_2 = nn.ReflectionPad2d(1)
        self.conv_09_2 = nn.Conv2d(256, 256, 3)
        self.in_norm_09_2 = InstanceNormalization(256)
        # Elementwise Sum

        ## Residual Block 7
        self.pad_10_1 = nn.ReflectionPad2d(1)
        self.conv_10_1 = nn.Conv2d(256, 256, 3)
        self.in_norm_10_1 = InstanceNormalization(256)
        # ReLU
        self.pad_10_2 = nn.ReflectionPad2d(1)
        self.conv_10_2 = nn.Conv2d(256, 256, 3)
        self.in_norm_10_2 = InstanceNormalization(256)
        # Elementwise Sum

        ## Residual Block 8
        self.pad_11_1 = nn.ReflectionPad2d(1)
        self.conv_11_1 = nn.Conv2d(256, 256, 3)
        self.in_norm_11_1 = InstanceNormalization(256)
        # ReLU
        self.pad_11_2 = nn.ReflectionPad2d(1)
        self.conv_11_2 = nn.Conv2d(256, 256, 3)
        self.in_norm_11_2 = InstanceNormalization(256)
        # Elementwise Sum

        ## Up Convolution Block 1
        self.deconv_12_1 = nn.ConvTranspose2d(256, 128, 3, 2, 1, 1)
        self.deconv_12_2 = nn.Conv2d(128, 128, 3, 1, 1)
        self.in_norm_12_1 = InstanceNormalization(128)
        # ReLU

        ## Up Convolution Block 2
        self.deconv_13_1 = nn.ConvTranspose2d(128, 64, 3, 2, 1, 1)
        self.deconv_13_2 = nn.Conv2d(64, 64, 3, 1, 1)
        self.in_norm_13_1 = InstanceNormalization(64)
        # ReLU

        ## Final Block
        self.pad_14_1 = nn.ReflectionPad2d(3)
        self.conv_14_1 = nn.Conv2d(64, 3, 7)
        # tanh

        # Initialize weights
        if init_weights_path is None:
            init_weights(self)
        else:
            self.load_state_dict(
                torch.load(init_weights_path)["model_state_dict"])
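The generator follows the usual CartoonGAN layout: a 7x7 entry block, two downsampling blocks, eight residual blocks, two upsampling blocks, and a 7x7 output block with Tanh. The forward pass is not shown in this excerpt; below is a compact sketch of how these layers would likely be wired, following the inline comments. Only residual block 1 is written out, since blocks 2 through 8 repeat the same pattern. This is an illustrative reconstruction, not the project's code.

    # Hypothetical forward pass; structure inferred from the comments above.
    def forward(self, x):
        # First block: reflection pad + 7x7 conv + instance norm + ReLU
        x = self.ReLU(self.in_norm_01_1(self.conv_01_1(self.pad_01_1(x))))
        # Down convolution blocks 1 and 2
        x = self.ReLU(self.in_norm_02_1(self.conv_02_2(self.conv_02_1(x))))
        x = self.ReLU(self.in_norm_03_1(self.conv_03_2(self.conv_03_1(x))))
        # Residual block 1 (blocks 2-8, layers *_05_* through *_11_*, are identical in structure)
        res = self.ReLU(self.in_norm_04_1(self.conv_04_1(self.pad_04_1(x))))
        x = x + self.in_norm_04_2(self.conv_04_2(self.pad_04_2(res)))  # elementwise sum
        # ... residual blocks 2-8 ...
        # Up convolution blocks 1 and 2
        x = self.ReLU(self.in_norm_12_1(self.deconv_12_2(self.deconv_12_1(x))))
        x = self.ReLU(self.in_norm_13_1(self.deconv_13_2(self.deconv_13_1(x))))
        # Final block: reflection pad + 7x7 conv + Tanh
        return self.Tanh(self.conv_14_1(self.pad_14_1(x)))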
Code example #4
File: Models.py  Project: hsl323/cartoonGAN_pytorch
    def __init__(self, init_weights_path=None):
        super(VGG19, self).__init__()
        self.features = nn.Sequential(
            # Block 1
            nn.Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1),
                      padding=(1, 1)),
            nn.ReLU(inplace=True),
            nn.Conv2d(64,
                      64,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=(1, 1)),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2,
                         stride=2,
                         padding=0,
                         dilation=1,
                         ceil_mode=False),
            # Block 2
            nn.Conv2d(64,
                      128,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=(1, 1)),
            nn.ReLU(inplace=True),
            nn.Conv2d(128,
                      128,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=(1, 1)),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2,
                         stride=2,
                         padding=0,
                         dilation=1,
                         ceil_mode=False),
            # Block 3
            nn.Conv2d(128,
                      256,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=(1, 1)),
            nn.ReLU(inplace=True),
            nn.Conv2d(256,
                      256,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=(1, 1)),
            nn.ReLU(inplace=True),
            nn.Conv2d(256,
                      256,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=(1, 1)),
            nn.ReLU(inplace=True),
            nn.Conv2d(256,
                      256,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=(1, 1)),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2,
                         stride=2,
                         padding=0,
                         dilation=1,
                         ceil_mode=False),
            # Block 4
            nn.Conv2d(256,
                      512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=(1, 1)),
            nn.ReLU(inplace=True),
            nn.Conv2d(512,
                      512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=(1, 1)),
            nn.ReLU(inplace=True),
            nn.Conv2d(512,
                      512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=(1, 1)),
            nn.ReLU(inplace=True),
            nn.Conv2d(512,
                      512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=(1, 1)),
            nn.ReLU(inplace=True),
        )

        # Initialize weights
        if init_weights_path is None:
            init_weights(self)
        else:
            self.load_state_dict(
                torch.load(init_weights_path)["model_state_dict"])
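This VGG19 is truncated to the convolutional features (no classifier), which is how CartoonGAN-style training typically computes a content loss between the input photo and the generated image. The usage sketch below shows that idea under stated assumptions: the checkpoint path "vgg_weights.pth" and the tensors photo and generated are placeholders, and the checkpoint is assumed to contain a "model_state_dict" key as required by the constructor above.

    # Illustrative usage only: a frozen VGG19 feature extractor for a content loss.
    import torch
    import torch.nn.functional as F

    vgg = VGG19(init_weights_path="vgg_weights.pth")  # hypothetical checkpoint path
    vgg.eval()
    for p in vgg.parameters():
        p.requires_grad = False  # keep the feature extractor fixed during training

    # photo and generated are placeholder (N, 3, H, W) tensors; generated is the
    # generator's output, so gradients must flow through its feature branch.
    target_feat = vgg.features(photo).detach()
    generated_feat = vgg.features(generated)
    content_loss = F.l1_loss(generated_feat, target_feat)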