Example #1
    def __init__(self, embedding, dropout=0, padding=False):
        super().__init__()
        self.odim = odim = len(embedding)
        self.padding = padding

        self.attV = LastDim(
                nn.Linear(idim, hdim),
                nn.Dropout(dropout),
                )

        self.attH = LastDim(
                nn.Linear(hdim, hdim),
                nn.Dropout(dropout),
                )

        self.attOut = LastDim(
                nn.Linear(hdim, 1),
                nn.Dropout(dropout),
                )

        self.ctx = LastDim(
                nn.Linear(hdim, hdim),
                nn.Dropout(dropout),
                nn.SELU(),
                )

        self.emb = nn.Embedding(odim, edim)
        self.emb.weight.data = torch.from_numpy(embedding).float()
        self.emb.weight.requires_grad = False

        self.encoder = nn.Sequential(
                nn.BatchNorm1d(idim),
                Stack(2, lambda i: (
                    nn.Linear(idim, idim),
                    nn.BatchNorm1d(idim),
                    nn.Dropout(dropout),
                    nn.SELU(),
                    )),
                nn.Linear(idim, encopt),
                nn.BatchNorm1d(encopt),
                nn.Dropout(dropout),
                nn.Tanh(),
                )

        self.decoder = nn.LSTM(edim, hdim, nrnn, dropout=dropout,
                bidirectional=False)

        opt = hdim
        self.out = LastDim(
                nn.BatchNorm1d(opt),
                Stack(3, lambda i: (
                    nn.Linear(hdim if i else opt, hdim),
                    nn.BatchNorm1d(hdim),
                    nn.Dropout(dropout),
                    nn.SELU(),
                    )),
                nn.Linear(hdim, odim),
                nn.BatchNorm1d(odim),
                )
        self.cnt = 0
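
These snippets lean on two helpers, Stack and LastDim, whose definitions are not part of this listing. The sketch below is one plausible reading, assuming Stack(n, f) flattens n copies of the module tuple returned by f(i) into one sequential pipeline, and LastDim applies 1-D layers (nn.Linear, nn.BatchNorm1d, nn.Dropout) over the last dimension of a higher-rank tensor; the real implementations may differ.

import torch.nn as nn

class Stack(nn.Sequential):
    # Hypothetical: repeat a layer-building function n times; f(i) may
    # return a single module or a tuple of modules, all flattened into
    # one sequential pipeline.
    def __init__(self, n, f):
        layers = []
        for i in range(n):
            out = f(i)
            layers.extend(out if isinstance(out, tuple) else (out,))
        super().__init__(*layers)

class LastDim(nn.Sequential):
    # Hypothetical: collapse all leading dimensions so the wrapped 1-D
    # layers see a (N, features) matrix, then restore the leading shape.
    def forward(self, x):
        lead = x.shape[:-1]
        y = super().forward(x.reshape(-1, x.shape[-1]))
        return y.reshape(*lead, y.shape[-1])
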
Example #2
    def __init__(self, dropout=0):
        super().__init__()

        self.conv = nn.Sequential(
            Stack(
                len(channel) - 2,
                lambda i: (
                    nn.Conv2d(channel[i], channel[i + 1], kernel[i + 1],
                              stride[i + 1], padding[i + 1]),
                    #  nn.Dropout(dropout),
                    nn.LeakyReLU(),
                    Stack(
                        2,
                        lambda _: (
                            Residual(
                                nn.Conv2d(channel[i + 1],
                                          channel[i + 1],
                                          3,
                                          padding=1),
                                #  nn.Dropout(dropout),
                                nn.LeakyReLU(),
                                nn.Conv2d(channel[i + 1],
                                          channel[i + 1],
                                          3,
                                          padding=1),
                                #  nn.Dropout(dropout),
                            ),
                            nn.LeakyReLU(),
                        )),
                )),
            nn.Conv2d(channel[-2],
                      channel[-1],
                      kernel[-1],
                      stride[-1],
                      padding=kernel[-1] // 2),
            #  nn.Dropout(dropout),
            nn.LeakyReLU(),
        )

        self.isReal = nn.Sequential(
            #  nn.Linear(cdim, cdim),
            #  nn.Dropout(dropout),
            #  nn.LeakyReLU(),
            #  nn.Linear(cdim, cdim),
            #  nn.Dropout(dropout),
            #  nn.LeakyReLU(),
            nn.Linear(cdim, 1),
        )

        self.illum = nn.Sequential(
            #  nn.Linear(cdim, cdim),
            #  #  nn.Dropout(dropout),
            #  nn.LeakyReLU(),
            #  nn.Linear(cdim, cdim),
            #  #  nn.Dropout(dropout),
            #  nn.LeakyReLU(),
            nn.Linear(cdim, odim),
        )

        for m in self.modules():
            if isinstance(m, (nn.Linear, nn.Conv2d)):
                m.weight.data.normal_(0.0, 0.02)
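
Residual is another helper assumed but not defined here. A minimal sketch, assuming it wraps its layers as y = x + f(x) (the 3x3 convolutions above use padding=1 precisely so the shapes match):

import torch.nn as nn

class Residual(nn.Sequential):
    # Hypothetical: residual wrapper, y = x + f(x); the wrapped layers
    # must preserve the input shape.
    def forward(self, x):
        return x + super().forward(x)
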
Example #3
    def __init__(self, dropout=0):
        super().__init__()

        self.conv = nn.Sequential(
            Stack(
                5, lambda i: (
                    nn.Conv2d(channel[i],
                              channel[i + 1],
                              kernel[i + 1],
                              2,
                              padding=1),
                    nn.LeakyReLU(),
                    Stack(
                        2, lambda _: (
                            Residual(
                                nn.Conv2d(channel[i + 1],
                                          channel[i + 1],
                                          3,
                                          padding=1),
                                nn.LeakyReLU(),
                                nn.Conv2d(channel[i + 1],
                                          channel[i + 1],
                                          3,
                                          padding=1),
                            ),
                            nn.LeakyReLU(),
                        )),
                )),
            nn.Conv2d(512, 1024, 3, 2, padding=1),
            nn.LeakyReLU(),
        )

        self.validity = nn.Linear(1024 * 2 * 2, 1)
        self.illum = nn.Linear(1024 * 2 * 2, 1)
        self.weights_init()
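
Example #3 calls self.weights_init(), which is not shown. Given the explicit init loops in Examples #2, #4, and #9, it plausibly looks like this sketch (an assumption, not the confirmed definition):

    def weights_init(self):
        # Hypothetical: DCGAN-style N(0, 0.02) init on conv/linear
        # weights, mirroring the loops written out in the other examples.
        for m in self.modules():
            if isinstance(m, (nn.Linear, nn.Conv2d)):
                m.weight.data.normal_(0.0, 0.02)
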
Example #4
    def __init__(self, dropout=0):
        super().__init__()

        #  self.noise = nn.Sequential(
        #  nn.Linear(128, 48 * hdim * hdim)
        #  )

        #  self.hair = nn.Embedding(12, 8 * hdim * hdim)
        #  self.eyes = nn.Embedding(11, 8 * hdim * hdim)
        self.inp = nn.Sequential(nn.Linear(noiseDim + 1, 64 * hdim * hdim))

        self.conv = nn.Sequential(
            #  nn.BatchNorm2d(64),
            nn.ReLU(),
            Stack(
                8,
                lambda i: (
                    Residual(
                        nn.Conv2d(64, 64, 3, padding=1),
                        #  ForceDropout(dropout if i % 2 == 1 else 0),
                        #  nn.BatchNorm2d(64),
                        nn.ReLU(),
                        nn.Conv2d(64, 64, 3, padding=1),
                        #  nn.BatchNorm2d(64),
                        #  ForceDropout(dropout),
                    ), )),
            Stack(
                len(up),
                lambda i: (
                    nn.Conv2d(64, 64, 3, padding=1),
                    #  ForceDropout(dropout),
                    #  nn.ReLU(),
                    #  nn.BatchNorm2d(64),
                    nn.ReLU(),
                    nn.Upsample(scale_factor=up[i], mode='nearest'),
                    nn.Conv2d(64, 64, 3, padding=1),
                    #  nn.BatchNorm2d(64),
                    #  ForceDropout(dropout),
                    #  nn.ReLU(),
                    nn.ReLU(),
                )),
            nn.Conv2d(64, 64, 3, padding=1),
            #  nn.BatchNorm2d(64),
            #  ForceDropout(dropout),
            #  nn.ReLU(),
            nn.ReLU(),
            nn.Conv2d(64, 1, 1),
            #  nn.Sigmoid(),
            #  nn.Tanh(),
        )

        #  for p in self.inp.parameters():
        #  p.requires_grad = False

        for m in self.modules():
            if isinstance(m, (nn.Linear, nn.Conv2d)):
                m.weight.data.normal_(0.0, 0.02)
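
Example #4's input head maps a concatenated (noise, condition) vector to a 64 x hdim x hdim feature map, so the forward pass (not shown) presumably unflattens before the conv stack. A hedged sketch; the argument names and the (B, 1) condition shape are assumptions:

    def forward(self, z, cond):
        # z: (B, noiseDim) noise, cond: (B, 1) conditioning value -- assumed
        x = self.inp(torch.cat([z, cond], dim=1))
        x = x.view(-1, 64, hdim, hdim)  # unflatten to a 2-D feature map
        return self.conv(x)
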
Example #5
    def __init__(self, odim, dropout=0, padding=False):
        super().__init__()
        self.padding = padding

        self.encoder = nn.LSTM(idim, hdim, nrnn, dropout=dropout,
                bidirectional=bi)
        self.emb = nn.Embedding(odim, edim)
        self.att = nn.Linear(hdim, hdim)
        self.decoder = nn.LSTM(edim, hdim, nrnn, dropout=dropout,
                bidirectional=bi)
        #  self.bridge = nn.Sequential(
                #  nn.BatchNorm1d(encopt),
                #  Stack(2, lambda i: (
                    #  nn.Linear(nrnn * hdim if i else encopt, nrnn * hdim),
                    #  nn.BatchNorm1d(nrnn * hdim),
                    #  nn.SELU(),
                    #  )),
                #  )
        self.out = nn.Sequential(
                nn.BatchNorm1d(rnnopt),
                Stack(2, lambda i: (
                    nn.Linear(hdim if i else rnnopt, hdim),
                    nn.BatchNorm1d(hdim),
                    nn.SELU(),
                    )),
                nn.Linear(hdim, odim),
                nn.BatchNorm1d(odim),
                )
Example #6
    def __init__(self, embedding, dropout=0, padding=False):
        odim = len(embedding)
        super().__init__()
        self.padding = padding

        self.emb = nn.Embedding(odim, edim)
        self.emb.weight.data = torch.from_numpy(embedding).float()
        self.emb.weight.requires_grad = False

        self.lmrnn = nn.LSTM(edim,
                             hdim,
                             nrnn,
                             dropout=dropout,
                             bidirectional=False)

        self.out = LastDim(
            #  nn.BatchNorm1d(rnnopt),
            Stack(
                3, lambda i: (
                    nn.Linear(hdim if i else rnnopt, hdim),
                    nn.BatchNorm1d(hdim),
                    nn.Dropout(dropout),
                    nn.SELU(),
                )),
            nn.Linear(hdim, odim),
            #  nn.BatchNorm1d(odim),
        )
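
Examples #1 and #6 freeze a pretrained embedding by overwriting .weight.data and clearing requires_grad by hand; nn.Embedding.from_pretrained does the same in one call, as a drop-in replacement for those three lines:

        # Equivalent idiom for the frozen-embedding pattern above:
        self.emb = nn.Embedding.from_pretrained(
            torch.from_numpy(embedding).float(), freeze=True)
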
Example #7
    def __init__(self, dropout=0, padding=False):
        super().__init__()
        self.padding = padding
        self.conv = nn.Sequential(
            nn.Conv1d(idim, hdim, ksz, padding=ksz // 2),
            nn.SELU(),
            nn.BatchNorm1d(hdim),
            Stack(9, lambda i: ResNetCell(ksz, hdim, dropout)),
            nn.Conv1d(hdim, odim, ksz, padding=ksz // 2),
        )
        self.emb = nn.Linear(odim, odim)
        self.fixlayers = nn.ModuleList()
        for i in range(3):
            fix = nn.Sequential(
                Stack(3, lambda _: ResNetCell(ksz, odim, dropout)),
            )
            self.fixlayers.append(fix)
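
ResNetCell is referenced but not defined in this listing. A minimal sketch consistent with its call sites ResNetCell(ksz, dim, dropout), assuming a shape-preserving 1-D residual cell in the same SELU/BatchNorm style as the surrounding code:

import torch.nn as nn

class ResNetCell(nn.Module):
    # Hypothetical: 1-D residual cell; padding=ksz // 2 keeps the
    # sequence length, equal in/out channels keep the residual add valid.
    def __init__(self, ksz, dim, dropout=0):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv1d(dim, dim, ksz, padding=ksz // 2),
            nn.SELU(),
            nn.BatchNorm1d(dim),
            nn.Dropout(dropout),
            nn.Conv1d(dim, dim, ksz, padding=ksz // 2),
        )

    def forward(self, x):
        return x + self.body(x)
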
Example #8
    def __init__(self, padding=False, dropout=0):
        super().__init__()
        self.padding = padding
        self.emb = nn.Embedding(edim, edim)
        self.conv = nn.Sequential(
            nn.Conv1d(idim, odim, ksz, padding=ksz // 2),
            nn.SELU(),
            nn.BatchNorm1d(odim),
            Stack(9, lambda i: GLU(ksz, odim, dropout=dropout)),
        )
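
GLU is likewise undefined here. Judging by the call GLU(ksz, odim, dropout=dropout), it is plausibly a gated 1-D convolution block in the style of Dauphin et al.'s gated convnets; a sketch under that assumption:

import torch
import torch.nn as nn

class GLU(nn.Module):
    # Hypothetical: conv to 2*dim channels, split into value/gate halves,
    # sigmoid-gate, then add the residual input.
    def __init__(self, ksz, dim, dropout=0):
        super().__init__()
        self.conv = nn.Conv1d(dim, 2 * dim, ksz, padding=ksz // 2)
        self.drop = nn.Dropout(dropout)

    def forward(self, x):
        v, g = self.conv(self.drop(x)).chunk(2, dim=1)
        return x + v * torch.sigmoid(g)
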
Example #9
    def __init__(self, dropout=0, tag='hair'):
        super().__init__()
        self.tag = tag

        self.conv = nn.Sequential(
            Stack(
                len(channel) - 2,
                lambda i: (
                    nn.Conv2d(channel[i], channel[i + 1], kernel[i + 1],
                              stride[i + 1], padding[i + 1]),
                    nn.MaxPool2d(2),
                    nn.Dropout(dropout),
                    nn.LeakyReLU(),
                    #  Stack(2, lambda _: (
                    #  Residual(
                    #  nn.Conv2d(channel[i+1], channel[i+1], 3, padding=1),
                    #  nn.Dropout(dropout),
                    #  nn.LeakyReLU(),
                    #  nn.Conv2d(channel[i+1], channel[i+1], 3, padding=1),
                    #  nn.Dropout(dropout),
                    #  ),
                    #  nn.LeakyReLU(),
                    #  )),
                )),
            nn.Conv2d(channel[-2],
                      channel[-1],
                      kernel[-1],
                      stride[-1],
                      padding=kernel[-1] // 2),
            nn.AdaptiveMaxPool2d(3),
            nn.Dropout(dropout),
            nn.LeakyReLU(),
        )

        self.out = nn.Sequential(
            nn.Linear(cdim, cdim),
            nn.Dropout(dropout),
            nn.LeakyReLU(),
            nn.Linear(cdim, cdim),
            nn.Dropout(dropout),
            nn.LeakyReLU(),
            nn.Linear(cdim, odim),
        )

        for m in self.modules():
            if isinstance(m, (nn.Linear, nn.Conv2d)):
                m.weight.data.normal_(0.0, 0.02)
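
The init loops in Examples #2, #4, and #9 mutate .data in place; the torch.nn.init equivalent is the more common spelling today:

        for m in self.modules():
            if isinstance(m, (nn.Linear, nn.Conv2d)):
                nn.init.normal_(m.weight, mean=0.0, std=0.02)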