Example #1
    def __init__(self, in_size=3, ndf=64):
        super(Discriminator_WITHOUT_FC_x64, self).__init__()
        self.in_size = in_size
        self.ndf = ndf

        self.main = nn.Sequential(
            # input size is in_size x 64 x 64
            SpectralNorm(nn.Conv2d(self.in_size, self.ndf, 4, 2, 1)),
            nn.LeakyReLU(0.2, inplace=True),
            # state size: ndf x 32 x 32
            Self_Attention(self.ndf),
            SpectralNorm(nn.Conv2d(self.ndf, self.ndf * 2, 4, 2, 1)),
            nn.InstanceNorm2d(self.ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size: (ndf * 2) x 16 x 16
            SpectralNorm(nn.Conv2d(self.ndf * 2, self.ndf * 4, 4, 2, 1)),
            nn.InstanceNorm2d(self.ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size: (ndf * 4) x 8 x 8
            SpectralNorm(nn.Conv2d(self.ndf * 4, self.ndf * 8, 4, 2, 1)),
            nn.InstanceNorm2d(self.ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size: (ndf * 8) x 4 x 4
        )

        self.last = SpectralNorm(nn.Conv2d(self.ndf * 8, 1, 4, 1, 0))
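
These snippets show only `__init__` and rely on a custom SpectralNorm wrapper defined elsewhere in the project. A minimal stand-in, assuming the wrapper does nothing beyond applying PyTorch's built-in spectral normalization to the wrapped module (the real wrapper is evidently richer, since Example #11 passes it an extra w_init flag):

import torch.nn as nn
from torch.nn.utils import spectral_norm

def SpectralNorm(module):
    # Minimal stand-in for the project's wrapper: spectrally normalize
    # the wrapped module's default weight parameter.
    return spectral_norm(module)

On that assumption, a plausible (hypothetical, not shown in the snippet) forward pass for this discriminator reduces the 64 x 64 input to one logit per sample:

    def forward(self, x):
        # (N, in_size, 64, 64) -> (N, ndf * 8, 4, 4) -> (N, 1, 1, 1)
        h = self.main(x)
        return self.last(h).view(-1)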
Example #2
    def __init__(self, in_size=6, ndf=64):
        super(Discriminator_WITHOUT_FC_x64_video, self).__init__()
        self.in_size = in_size
        self.ndf = ndf

        self.layer1 = nn.Sequential(
            # input size is in_size x H x W (the sizes noted below assume
            # 64 x 64, but the 3 x 6 kernel in self.last implies a
            # non-square input, e.g. 48 x 96)
            SpectralNorm(nn.Conv2d(self.in_size, self.ndf, 4, 2, 1)),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.layer2 = nn.Sequential(
            # state size: ndf x 32 x 32
            SpectralNorm(nn.Conv2d(self.ndf, self.ndf * 2, 4, 2, 1)),
            nn.InstanceNorm2d(self.ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.layer3 = nn.Sequential(
            # state size: (ndf * 2) x 16 x 16
            SpectralNorm(nn.Conv2d(self.ndf * 2, self.ndf * 4, 4, 2, 1)),
            nn.InstanceNorm2d(self.ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.layer4 = nn.Sequential(
            # state size: (ndf * 4) x 8 x 8
            # Self_Attention(self.ndf * 4, 'relu'),
            SpectralNorm(nn.Conv2d(self.ndf * 4, self.ndf * 8, 4, 2, 1)),
            nn.InstanceNorm2d(self.ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
        )

        self.last = SpectralNorm(nn.Conv2d(self.ndf * 8, 1, [3, 6], 1, 0))
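
Only the constructor is shown; splitting the network into layer1 through layer4 instead of a single nn.Sequential suggests the forward pass exposes intermediate activations (e.g. for feature matching). A hypothetical sketch:

    def forward(self, x):
        # Chain the four blocks, keeping each intermediate feature map.
        h1 = self.layer1(x)
        h2 = self.layer2(h1)
        h3 = self.layer3(h2)
        h4 = self.layer4(h3)
        return self.last(h4).view(-1), [h1, h2, h3, h4]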
Example #3
 def __init__(self,
              num_inputs,
              hidden_size=(128, 128),
              encode_size=64,
              activation='tanh',
              normalize=False,
              dropout=False,
              slope=0.1,
              dprob=0.2):
     super().__init__()
     if activation == 'tanh':
         self.activation = nn.Tanh()  #torch.tanh
     elif activation == 'relu':
         self.activation = nn.ReLU()  #torch.relu: function, nn.ReLU: layer
     elif activation == 'sigmoid':
         self.activation = nn.Sigmoid()  #torch.sigmoid
     elif activation == 'leakyrelu':
         self.activation = nn.LeakyReLU(slope)
     self.encode_size = encode_size
     self.encoder = nn.Sequential()
     last_dim = num_inputs
     for ih, nh in enumerate(hidden_size):
         self.encoder.add_module('enc_lin' + str(ih),
                                 SpectralNorm(nn.Linear(last_dim, nh)))
         if normalize:
             self.encoder.add_module('enc_norm' + str(ih),
                                     nn.BatchNorm1d(nh))
         self.encoder.add_module('enc_act' + str(ih), self.activation)
         if dropout:
             self.encoder.add_module('enc_dro' + str(ih),
                                     nn.Dropout(p=dprob))
         last_dim = nh
     self.encoder.add_module('encoder_out',
                             SpectralNorm(nn.Linear(last_dim, encode_size)))
     self.decoder = nn.Sequential()
     last_dim = encode_size
     for ih, nh in enumerate(hidden_size):
         self.decoder.add_module('dec_lin' + str(ih),
                                 SpectralNorm(nn.Linear(last_dim, nh)))
         if normalize:
             self.decoder.add_module('dec_norm' + str(ih),
                                     nn.BatchNorm1d(nh))
         self.decoder.add_module('dec_act' + str(ih), self.activation)
         if dropout:
             self.decoder.add_module('dec_dro' + str(ih),
                                     nn.Dropout(p=dprob))
         last_dim = nh
     self.logic = nn.Linear(last_dim, num_inputs)
     #self.logic.weight.data.mul_(0.1)
     #self.logic.bias.data.mul_(0.0)
     self.decoder.add_module('decoder_out', SpectralNorm(self.logic))
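
A plausible forward pass for this spectrally normalized autoencoder simply composes the two halves; this is a sketch, since only the constructor appears here:

 def forward(self, x):
     # Compress to encode_size dimensions, then reconstruct num_inputs.
     z = self.encoder(x)
     return self.decoder(z)

Reusing the single self.activation instance across both sub-networks is safe because the activation layers are stateless.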
Example #4
    def __init__(self, in_dim):
        super(Self_Attention, self).__init__()
        self.channel_in = in_dim

        # Note: the query/key projections keep the full channel count
        # (in_dim // 1); SAGAN-style blocks often reduce to in_dim // 8.
        self.query_conv = SpectralNorm(
            nn.Conv2d(in_channels=in_dim,
                      out_channels=in_dim // 1,
                      kernel_size=1))
        self.key_conv = SpectralNorm(
            nn.Conv2d(in_channels=in_dim,
                      out_channels=in_dim // 1,
                      kernel_size=1))
        self.value_conv = SpectralNorm(
            nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1))
        # Learnable residual scale, initialized to zero so the block
        # starts out as the identity mapping.
        self.gamma = nn.Parameter(torch.zeros(1))

        self.softmax = nn.Softmax(dim=-1)  # attention weights over key positions
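
The constructor matches a SAGAN-style self-attention block; on that assumption, a sketch of the usual forward pass (not shown in the snippet):

    def forward(self, x):
        # x: (B, C, H, W); flatten the spatial grid to N = H * W positions.
        B, C, H, W = x.size()
        q = self.query_conv(x).view(B, -1, H * W).permute(0, 2, 1)  # B x N x C
        k = self.key_conv(x).view(B, -1, H * W)                     # B x C x N
        attn = self.softmax(torch.bmm(q, k))                        # B x N x N
        v = self.value_conv(x).view(B, -1, H * W)                   # B x C x N
        out = torch.bmm(v, attn.permute(0, 2, 1)).view(B, C, H, W)
        # gamma starts at zero, so the block is the identity at init.
        return self.gamma * out + x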
Example #5
    def __init__(self, in_size=3, ndf=64):
        super(Discriminator_WITHOUT_FC_x64_BIG, self).__init__()
        self.in_size = in_size
        self.ndf = ndf

        self.main = nn.Sequential(
            # 256 X 256
            SpectralNorm(nn.Conv2d(self.in_size, self.ndf, 4, 2, 1)),
            nn.LeakyReLU(0.2, inplace=True),
            # 128 X 128
            SpectralNorm(nn.Conv2d(self.ndf, self.ndf, 4, 2, 1)),
            nn.InstanceNorm2d(self.ndf),
            nn.LeakyReLU(0.2, inplace=True),
            # 64 X 64
            Self_Attention(self.ndf),
            SpectralNorm(nn.Conv2d(self.ndf, self.ndf * 2, 4, 2, 1)),
            nn.InstanceNorm2d(self.ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # 32 X 32
            SpectralNorm(nn.Conv2d(self.ndf * 2, self.ndf * 4, 4, 2, 1)),
            nn.InstanceNorm2d(self.ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # 16 X 16
            SpectralNorm(nn.Conv2d(self.ndf * 4, self.ndf * 8, 4, 2, 1)),
            nn.InstanceNorm2d(self.ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # 8 X 8
            SpectralNorm(nn.Conv2d(self.ndf * 8, self.ndf * 16, 4, 2, 1)),
            nn.InstanceNorm2d(self.ndf * 16),
            nn.LeakyReLU(0.2, inplace=True),
        )
        # 4 X 4
        self.last = SpectralNorm(nn.Conv2d(self.ndf * 16, 1, 4, 1, 0))
Example #6
 def __init__(self, config):
     super(Discriminator, self).__init__()
     self.ngpu = int(config['ngpu'])
     ndf = int(config['ndf'])
     nc = int(config['nc'])
     label_nc = int(config['label_nc'])
     self.main = nn.Sequential(
          # The original first conv on the (nc) x 128 x 128 input is factored
          # out into input_conv / label_conv below; main consumes their
          # concatenated (ndf) x 64 x 64 features.
         SpectralNorm(nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False)),
         # nn.BatchNorm2d(ndf * 2),
         nn.LeakyReLU(0.2, inplace=True),
         # state size. (ndf*2) x 32 x 32
         SpectralNorm(nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False)),
         # nn.BatchNorm2d(ndf * 4),
         nn.LeakyReLU(0.2, inplace=True),
         # state size. (ndf*4) x 16 x 16
         SpectralNorm(nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False)),
         # nn.BatchNorm2d(ndf * 8),
         nn.LeakyReLU(0.2, inplace=True),
         # state size. (ndf*8) x 8 x 8
         SpectralNorm(nn.Conv2d(ndf * 8, ndf * 16, 4, 2, 1, bias=False)),
         # nn.BatchNorm2d(ndf * 16),
         nn.LeakyReLU(0.2, inplace=True),
         # state size. (ndf*16) x 4 x 4
         SpectralNorm(nn.Conv2d(ndf * 16, 1, 4, 1, 0, bias=False)),
         nn.Sigmoid()
         # output: 1x1x1
     )
     self.input_conv = nn.Sequential(
         # input is (nc) x 128 x 128
         nn.Conv2d(nc, ndf // 2, 4, 2, 1, bias=False),
         nn.LeakyReLU(0.2, inplace=True)
         # state size. (ndf/2) x 64 x 64
     )
     self.label_conv = nn.Sequential(
         # input is Z, going into a convolution
         nn.Conv2d(label_nc, ndf // 2, 4, 2, 1, bias=False),
         nn.LeakyReLU(0.2, inplace=True)
         # state size. (ndf/2) x 64 x 64
     )
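
Only the constructor appears, but the input_conv / label_conv split points at a conditional discriminator that fuses image and label embeddings before main. A hypothetical forward under that reading:

 def forward(self, input, label):
     # Embed image and label map separately, then concatenate along
     # channels to (ndf) x 64 x 64, matching the first conv in self.main.
     x = self.input_conv(input)
     y = self.label_conv(label)
     return self.main(torch.cat([x, y], dim=1)).view(-1)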
Example #7
 def __init__(self, d=64):
     super(discriminator_snIns, self).__init__()
     #self.conv1 = nn.Conv2d(6, d, 4, 2, 1)
     self.conv1 = SpectralNorm(nn.Conv2d(3, d, 4, 2, 1))
     self.conv2 = SpectralNorm(nn.Conv2d(d, d, 4, 2, 1))
     self.conv2_in = nn.InstanceNorm2d(d)
     self.conv3 = SpectralNorm(nn.Conv2d(d, d * 2, 3, 1, 1))
     self.conv3_in = nn.InstanceNorm2d(d * 2)
     self.conv4 = SpectralNorm(nn.Conv2d(d * 2, d * 2, 4, 1, 1))
     self.conv4_in = nn.InstanceNorm2d(d * 2)
     self.conv5 = SpectralNorm(nn.Conv2d(d * 2, d * 4, 3, 1, 1))
     self.conv5_in = nn.InstanceNorm2d(d * 4)
     self.conv6 = SpectralNorm(nn.Conv2d(d * 4, d * 4, 2, 1, 1))
     self.conv6_in = nn.InstanceNorm2d(d * 4)
     self.conv7 = SpectralNorm(nn.Conv2d(d * 4, d * 4, 3, 1, 1))
     self.conv7_in = nn.InstanceNorm2d(d * 4)
     self.conv8 = SpectralNorm(nn.Conv2d(d * 4, d * 8, 4, 2, 1))
     self.fc = SpectralNorm(nn.Linear(4 * 4 * d * 8, 1))
Example #8
 def __init__(self,
              num_inputs,
              hidden_size=(128, 128),
              encode_size=64,
              activation='tanh',
              dropout=False,
              slope=0.1,
              dprob=0.2,
              sn=True):
     super().__init__()
     if activation == 'tanh':
         self.activation = nn.Tanh()  #torch.tanh
     elif activation == 'relu':
         self.activation = nn.ReLU()  #torch.relu: function, nn.ReLU: layer
     elif activation == 'sigmoid':
         self.activation = nn.Sigmoid()  #torch.sigmoid
     elif activation == 'leakyrelu':
         self.activation = nn.LeakyReLU(slope)
     self.encode_size = encode_size
     self.encoder = nn.Sequential()
     last_dim = num_inputs
     for ih, nh in enumerate(hidden_size):
         if sn:
             self.encoder.add_module('enc_lin' + str(ih),
                                     SpectralNorm(nn.Linear(last_dim, nh)))
         else:
             self.encoder.add_module('enc_lin' + str(ih),
                                     nn.Linear(last_dim, nh))
         self.encoder.add_module('enc_act' + str(ih), self.activation)
         if dropout:
             self.encoder.add_module('enc_dro' + str(ih),
                                     nn.Dropout(p=dprob))
         last_dim = nh
     if sn:
         self.encoder.add_module(
             'encoder_out', SpectralNorm(nn.Linear(last_dim, encode_size)))
     else:
         self.encoder.add_module('encoder_out',
                                 nn.Linear(last_dim, encode_size))
Example #9
    def __init__(self):
        super(Discriminator, self).__init__()

        self.conv1 = SpectralNorm(
            nn.Conv2d(channels, 64, 3, stride=1, padding=(1, 1)))
        self.conv2 = SpectralNorm(
            nn.Conv2d(64, 64, 4, stride=2, padding=(1, 1)))
        self.conv3 = SpectralNorm(
            nn.Conv2d(64, 128, 3, stride=1, padding=(1, 1)))
        self.conv4 = SpectralNorm(
            nn.Conv2d(128, 128, 4, stride=2, padding=(1, 1)))
        self.conv5 = SpectralNorm(
            nn.Conv2d(128, 256, 3, stride=1, padding=(1, 1)))
        self.conv6 = SpectralNorm(
            nn.Conv2d(256, 256, 4, stride=2, padding=(1, 1)))
        self.conv7 = SpectralNorm(
            nn.Conv2d(256, 256, 3, stride=1, padding=(1, 1)))
        self.conv8 = SpectralNorm(
            nn.Conv2d(256, 512, 4, stride=2, padding=(1, 1)))
        self.fc = SpectralNorm(nn.Linear(w_g * w_g * 512, 1))
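
Here channels and w_g are assumed to be module-level constants from the source project: the input channel count and the final feature-map size (the four stride-2 convs divide the resolution by 16, so w_g = 4 for 64 x 64 inputs). A hypothetical forward pass under those assumptions:

    def forward(self, x):
        leak = 0.1  # assumed slope; not shown in the snippet
        h = F.leaky_relu(self.conv1(x), leak)
        h = F.leaky_relu(self.conv2(h), leak)
        h = F.leaky_relu(self.conv3(h), leak)
        h = F.leaky_relu(self.conv4(h), leak)
        h = F.leaky_relu(self.conv5(h), leak)
        h = F.leaky_relu(self.conv6(h), leak)
        h = F.leaky_relu(self.conv7(h), leak)
        h = F.leaky_relu(self.conv8(h), leak)
        return self.fc(h.view(-1, w_g * w_g * 512))

(F is torch.nn.functional.)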
Example #10
    def __init__(self, in_size=6, ndf=64):
        super(Discriminator_WITHOUT_FC_x64_video_BIG, self).__init__()
        self.in_size = in_size
        self.ndf = ndf

        self.layer1 = nn.Sequential(
            SpectralNorm(nn.Conv2d(self.in_size, self.ndf, 4, 2, 1)),
            nn.LeakyReLU(0.2, inplace=True))
        self.layer2 = nn.Sequential(
            SpectralNorm(nn.Conv2d(self.ndf, self.ndf, 4, 2, 1)),
            nn.InstanceNorm2d(self.ndf),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.attention = Self_Attention(self.ndf)
        self.layer3 = nn.Sequential(
            SpectralNorm(nn.Conv2d(self.ndf, self.ndf * 2, 4, 2, 1)),
            nn.InstanceNorm2d(self.ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.layer4 = nn.Sequential(
            SpectralNorm(nn.Conv2d(self.ndf * 2, self.ndf * 4, 4, 2, 1)),
            nn.InstanceNorm2d(self.ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.layer5 = nn.Sequential(
            SpectralNorm(nn.Conv2d(self.ndf * 4, self.ndf * 8, 4, 2, 1)),
            nn.InstanceNorm2d(self.ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.layer6 = nn.Sequential(
            SpectralNorm(nn.Conv2d(self.ndf * 8, self.ndf * 16, 4, 2, 1)),
            nn.InstanceNorm2d(self.ndf * 16),
            nn.LeakyReLU(0.2, inplace=True),
        )

        self.last = SpectralNorm(nn.Conv2d(self.ndf * 16, 1, [3, 6], 1, 0))
Example #11
    def __init__(self,
                 num_inputs,
                 num_outputs,
                 sigmoid_out=True,
                 sn=False,
                 w_init=False,
                 test=True,
                 hidden_size_enc=(64, ),
                 hidden_size_dec=(64, ),
                 encode_size=64,
                 activation='tanh',
                 dropout=False,
                 slope=0.1,
                 dprob=0.2):
        super().__init__()
        if activation == 'tanh':
            self.activation = nn.Tanh()  #torch.tanh
        elif activation == 'relu':
            self.activation = nn.ReLU()  #torch.relu: function, nn.ReLU: layer
        elif activation == 'sigmoid':
            self.activation = nn.Sigmoid()  #torch.sigmoid
        elif activation == 'leakyrelu':
            self.activation = nn.LeakyReLU(slope)
        self.encode_size = encode_size
        self.sigmoid_out = sigmoid_out
        """
        class Q(nn.Module):
            def __init__(self):
                super(Q,self).__init__()
            def forward(self,x):
        """
        self.encoder = nn.Sequential()
        last_dim = num_inputs
        for ih, nh in enumerate(hidden_size_enc):
            if sn:
                self.encoder.add_module(
                    'enc_lin' + str(ih),
                    SpectralNorm(nn.Linear(last_dim, nh), w_init))
            else:
                self.encoder.add_module('enc_lin' + str(ih),
                                        nn.Linear(last_dim, nh))
            self.encoder.add_module('enc_act' + str(ih), self.activation)
            if dropout:
                self.encoder.add_module('enc_dro' + str(ih),
                                        nn.Dropout(p=dprob))
            last_dim = nh
        if sn:
            self.encoder.add_module(
                'encoder_out',
                SpectralNorm(nn.Linear(last_dim, encode_size * 2), w_init))
        else:
            self.encoder.add_module('encoder_out',
                                    nn.Linear(last_dim, encode_size * 2))

        self.decoder = nn.Sequential()
        last_dim = encode_size

        # to be deleted
        if test:
            self.decoder.add_module('dec_act', self.activation)
            if dropout:
                self.decoder.add_module('dec_dro', nn.Dropout(p=dprob))

        for ih, nh in enumerate(hidden_size_dec):
            if sn:
                self.decoder.add_module(
                    'dec_lin' + str(ih),
                    SpectralNorm(nn.Linear(last_dim, nh), w_init))
            else:
                self.decoder.add_module('dec_lin' + str(ih),
                                        nn.Linear(last_dim, nh))
            self.decoder.add_module('dec_act' + str(ih), self.activation)
            if dropout:
                self.decoder.add_module('dec_dro' + str(ih),
                                        nn.Dropout(p=dprob))
            last_dim = nh
        self.logic = nn.Linear(last_dim, num_outputs)
        if sn:
            self.decoder.add_module('decoder_out',
                                    SpectralNorm(self.logic, w_init))
        else:
            self.decoder.add_module('decoder_out', self.logic)
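
The encoder ends in 2 * encode_size units while the decoder starts from encode_size, which suggests a mean / log-variance split with the reparameterization trick. A sketch under that assumption (the actual forward is not shown):

    def forward(self, x):
        # Split the encoder output into mean and log-variance, sample a
        # latent with the reparameterization trick, then decode.
        mu, logvar = self.encoder(x).chunk(2, dim=-1)
        z = mu + torch.randn_like(mu) * (0.5 * logvar).exp()
        out = self.decoder(z)
        if self.sigmoid_out:
            out = torch.sigmoid(out)
        return out, mu, logvar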