def __init__(self, ngpu, ndf, nc, k):
    super(Discriminator, self).__init__()
    self.ngpu = ngpu
    layers = []

    layers.append(nn.Conv2d(nc, ndf, kernel_size, stride=stride, padding=padding, bias=False))
    layers.append(nn.LeakyReLU(0.2, inplace=True))
    # state size. (ndf) x 64 x 64

    #--------------------------------------------
    for i in range(k):
        layers.append(ll.DisLayerSN_d(ndf, i))
    #--------------------------------------------

    d_out = 2**k
    layers.append(sa.Self_Attn(ndf * d_out, "relu"))
    layers.append(sa.Self_Attn(ndf * d_out, "relu"))

    layers.append(nn.Conv2d(ndf * d_out, 1, kernel_size, stride=1, padding=0, bias=False))
    layers.append(nn.Sigmoid())
    # state size. 1

    self.main = nn.ModuleList(layers)
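# NOTE (sketch, not part of the original source): kernel_size, stride and
# padding are assumed to be module-level DCGAN-style constants, e.g.
# kernel_size, stride, padding = 4, 2, 1. Since nn.ModuleList has no forward()
# of its own, the Discriminator also needs a forward that walks self.main;
# the Self_Attn blocks are assumed to return (out, attention_map) as in SAGAN,
# so only the first element is propagated.
def forward(self, x):
    for layer in self.main:
        out = layer(x)
        x = out[0] if isinstance(out, tuple) else out
    return x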
def __init__(self, ngpu, nz, ngf, nc, k):
    super(Generator, self).__init__()
    self.ngpu = ngpu
    layers = []

    d_in = 2**k
    layers.append(nn.ConvTranspose2d(nz, ngf * d_in, kernel_size, 1, 0, bias=False))
    layers.append(nn.BatchNorm2d(ngf * d_in))
    layers.append(nn.ReLU(True))
    # state size. (ngf*16) x 4 x 4

    #------------------------------------------
    for i in range(k):
        n = k - i
        layers.append(ll.GenLayerSN(ngf, n))
    #------------------------------------------

    layers.append(sa.Self_Attn(ngf, "relu"))
    layers.append(sa.Self_Attn(ngf, "relu"))

    layers.append(nn.ConvTranspose2d(ngf, nc, kernel_size, stride, padding, bias=False))
    layers.append(nn.Tanh())
    # state size. (nc) x 128 x 128

    self.main = nn.ModuleList(layers)
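# Usage sketch with hypothetical sizes (nz=100, ngf=64, nc=3 are assumptions,
# not taken from the source). Assuming kernel_size=4, stride=2, padding=1 and
# that each GenLayerSN doubles the spatial size: the first transposed conv maps
# the nz x 1 x 1 noise to 4 x 4, the k GenLayerSN blocks reach 4 * 2**k, and
# the final transposed conv doubles once more, so k = 4 gives 128 x 128 output.
import torch

netG = Generator(ngpu=1, nz=100, ngf=64, nc=3, k=4)
z = torch.randn(8, 100, 1, 1)          # a batch of 8 latent vectors
# fake = netG(z)                       # expected shape: (8, 3, 128, 128),
#                                      # given a forward() that iterates self.main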
def __init__(self, ngpu, nz, ngf, nc, k):
    super(Generator, self).__init__()
    self.ngpu = ngpu
    layers = []

    d_in = stride**2
    layers.append(nn.ConvTranspose2d(nz, ngf * d_in, kernel_size, stride, padding, bias=False))
    layers.append(nn.BatchNorm2d(ngf * d_in))
    layers.append(nn.ReLU(True))

    #------------------------------------------
    layers.append(GenLayerSN(ngf, 2))
    layers.append(GenLayerSN(ngf, 1))
    #------------------------------------------

    layers.append(sa.Self_Attn(ngf, "relu"))

    layers.append(nn.ConvTranspose2d(ngf, nc, kernel_size, stride, padding, bias=False))
    layers.append(nn.Tanh())

    self.main = nn.ModuleList(layers)
def __init__(self, ngpu, ndf, nc, k):
    super(Discriminator, self).__init__()
    self.ngpu = ngpu
    layers = []

    layers.append(nn.Conv2d(nc, ndf, kernel_size, stride=stride, padding=padding, bias=False))
    layers.append(nn.LeakyReLU(0.2, inplace=True))
    # state size. (ndf) x 64 x 64

    k = 4
    # -------------------------------------------
    # for 64 x 64 images you need 3 layers
    # and k = 3
    #
    # for 128 x 128 images you need 4 layers
    # and k = 4
    # -------------------------------------------
    layers.append(DisLayerSN_d(ndf, 0))
    layers.append(DisLayerSN_d(ndf, 1))
    layers.append(sa.Self_Attn(ndf * (2**2), "relu"))
    layers.append(DisLayerSN_d(ndf, 2))
    layers.append(DisLayerSN_d(ndf, 3))
    # -------------------------------------------

    d_out = 2**k
    layers.append(sa.Self_Attn(ndf * d_out, "relu"))

    layers.append(nn.Conv2d(ndf * d_out, 1, kernel_size, stride=1, padding=0, bias=False))
    layers.append(nn.Sigmoid())
    # state size. 1

    self.main = nn.ModuleList(layers)
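# Size-check sketch (assumes kernel_size=4, stride=2, padding=1 and that every
# DisLayerSN_d halves the spatial size; these values are assumptions, not taken
# from the source). A 128 x 128 input shrinks as 128 -> 64 (first conv) ->
# 32 -> 16 -> 8 -> 4 over the four DisLayerSN_d blocks, and the final 4 x 4
# convolution with stride 1 and padding 0 brings it down to 1 x 1.
def conv_out(size, k=4, s=2, p=1):
    # standard convolution output-size formula: floor((size + 2p - k) / s) + 1
    return (size + 2 * p - k) // s + 1

sizes = [128]
for _ in range(5):                      # first conv + four DisLayerSN_d blocks
    sizes.append(conv_out(sizes[-1]))
print(sizes)                            # [128, 64, 32, 16, 8, 4]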
def __init__(self, ngpu, nz, ngf, nc, k):
    super(Generator, self).__init__()
    self.ngpu = ngpu
    layers = []

    k = 4
    d_in = 2**k
    layers.append(nn.ConvTranspose2d(nz, ngf * d_in, kernel_size, 1, 0, bias=False))
    layers.append(nn.BatchNorm2d(ngf * d_in))
    layers.append(nn.ReLU(True))
    # state size. (ngf*16) x 4 x 4

    # -------------------------------------------
    # for 64 x 64 images you need 3 layers
    # and k = 3
    #
    # for 128 x 128 images you need 4 layers
    # and k = 4
    # -------------------------------------------
    layers.append(GenLayerSN(ngf, 4))
    layers.append(GenLayerSN(ngf, 3))
    layers.append(GenLayerSN(ngf, 2))
    layers.append(sa.Self_Attn(ngf * (2**1), "relu"))
    layers.append(GenLayerSN(ngf, 1))
    # -------------------------------------------

    layers.append(sa.Self_Attn(ngf, "relu"))

    layers.append(nn.ConvTranspose2d(ngf, nc, kernel_size, stride, padding, bias=False))
    layers.append(nn.Tanh())
    # state size. (nc) x 128 x 128

    self.main = nn.ModuleList(layers)
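# Resolution/width trace sketch. It assumes each GenLayerSN(ngf, n) halves the
# channel count from ngf*2**n to ngf*2**(n-1) while doubling the spatial size;
# this is an assumption about ll.GenLayerSN, not something stated in the source.
# Under it, the two Self_Attn blocks sit at 32 x 32 (ngf*2) and 64 x 64 (ngf).
size, width = 4, 16                     # 4 x 4 @ ngf*16 after the first block
for n in range(4, 0, -1):
    size, width = size * 2, width // 2
    print(f"GenLayerSN(ngf, {n}): {size} x {size} @ ngf*{width}")
print(f"output: {size * 2} x {size * 2} @ nc")   # 128 x 128 after the final conv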