def define_module(self):
    ngf = self.gf_dim
    self.att = ATT_NET(ngf, self.ef_dim)
    # self.adain1 = ADAIN_NORM(ngf * 2)
    self.adain2 = ADAIN_NORM(ngf)
    self.residual = self._make_layer(ResBlock, ngf * 2)
    self.upsample = upBlock(ngf * 2, ngf)

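# A minimal forward sketch for the AdaIN variant above (an assumption, not
# taken from the source): the word-level attention context is concatenated
# with the image features, refined, upsampled, and then re-normalized with
# AdaIN conditioned on the sentence embedding. `sent_emb` and the
# two-argument ADAIN_NORM call are hypothetical.
def forward(self, h_code, word_embs, sent_emb, mask):
    self.att.applyMask(mask)
    c_code, att = self.att(h_code, word_embs)   # word context: (B, ngf, H, W)
    h_c_code = torch.cat((h_code, c_code), 1)   # (B, ngf*2, H, W)
    out_code = self.residual(h_c_code)          # refine at ngf*2 channels
    out_code = self.upsample(out_code)          # ngf*2 -> ngf, 2x spatial
    out_code = self.adain2(out_code, sent_emb)  # sentence-conditioned AdaIN
    return out_code, att
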
def define_module(self):
    ngf = self.gf_dim
    self.att = ATT_NET(ngf, self.ef_dim)
    self.jointConv = Block3x3_relu(ngf + 100, ngf)  # FIXME del
    self.residual = self._make_layer(ResBlock, ngf)  # FIXME ngf * 2
    # self.upsample = upBlock(ngf * 2, ngf)  # FIXME
    self.upsample = upBlock(ngf, ngf)

def define_module(self):
    ngf = self.gf_dim
    self.att = ATT_NET(ngf, self.ef_dim)
    self.att_sent = ATT_NET_sent(ngf, self.ef_dim)
    self.residual1 = ResBlock(ngf)
    self.residual2 = ResBlock(ngf)
    self.residual3 = ResBlock(ngf)
    self.upsample = upBlock(ngf, ngf)

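# Hypothetical forward for the sentence-attention variant (assumed): since
# both the residual blocks and the upsampler run at ngf channels, the word
# and sentence contexts are fused additively here rather than concatenated.
# The ATT_NET_sent call signature mirrors ATT_NET's and is an assumption.
def forward(self, h_code, word_embs, sent_emb, mask):
    self.att.applyMask(mask)
    c_word, att = self.att(h_code, word_embs)           # (B, ngf, H, W)
    c_sent, att_sent = self.att_sent(h_code, sent_emb)  # (B, ngf, H, W)
    out_code = h_code + c_word + c_sent  # additive fusion keeps ngf channels
    out_code = self.residual1(out_code)
    out_code = self.residual2(out_code)
    out_code = self.residual3(out_code)
    return self.upsample(out_code), att, att_sent
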
def define_module(self):
    ngf = self.gf_dim
    nef = self.ef_dim
    nef2 = self.ef_dim2
    self.att = ATT_NET(ngf, nef)
    self.bt_att = BT_ATT_NET(ngf, nef)
    self.residual = self._make_layer(HmapResBlock, ngf * 3 + nef2)
    self.upsample = upBlock(ngf * 3 + nef2, ngf)

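# Hypothetical forward inferred from the layer widths above (ngf*3 + nef2 is
# consistent with concatenating the image features, the word context, the
# bottom-up attention context, and an nef2-channel heat-map code).
# `hmap_code` and the BT_ATT_NET signature are assumptions.
def forward(self, h_code, word_embs, bt_embs, hmap_code, mask):
    self.att.applyMask(mask)
    c_word, att = self.att(h_code, word_embs)    # (B, ngf, H, W)
    c_bt, bt_att = self.bt_att(h_code, bt_embs)  # (B, ngf, H, W)
    h_c_code = torch.cat((h_code, c_word, c_bt, hmap_code), 1)  # ngf*3 + nef2
    out_code = self.residual(h_c_code)
    return self.upsample(out_code), att, bt_att
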
def define_module(self):
    self.att = ATT_NET(self.gen_feat_dim, self.text_emb_dim)
    self.residual = self._make_layer(ResBlock, self.gen_feat_dim * 2)
    self.upsample = upBlock(self.gen_feat_dim * 3, self.gen_feat_dim)
    # local pathway
    label_input_dim = cfg.GAN.TEXT_CONDITION_DIM + cfg.TEXT.CLASSES_NUM  # no noise anymore
    self.label = nn.Sequential(
        nn.Linear(label_input_dim, self.label_dim, bias=False),
        nn.BatchNorm1d(self.label_dim),
        nn.ReLU(True))
    self.local1 = upBlock(self.label_dim + self.gen_feat_dim, self.gen_feat_dim * 2)
    self.local2 = upBlock(self.gen_feat_dim * 2, self.gen_feat_dim)

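# Hypothetical forward for the local-pathway variant (assumed; in the style
# of object-pathway generators such as OP-GAN). `label_onehot` and
# `local_feats` are placeholders; `local_feats` is assumed to hold
# gen_feat_dim-channel features at one quarter of h_code's resolution, so
# that two upBlocks bring the local branch back to h_code's size before the
# final concatenation.
def forward(self, h_code, word_embs, label_onehot, local_feats, mask):
    self.att.applyMask(mask)
    c_code, att = self.att(h_code, word_embs)
    out_code = self.residual(torch.cat((h_code, c_code), 1))  # gen_feat_dim*2

    lbl = self.label(label_onehot)  # (B, label_dim)
    lbl = lbl[:, :, None, None].expand(-1, -1, *local_feats.shape[2:])
    local = self.local1(torch.cat((lbl, local_feats), 1))  # -> gen_feat_dim*2, 2x
    local = self.local2(local)                             # -> gen_feat_dim, 4x

    out_code = torch.cat((out_code, local), 1)  # gen_feat_dim*3 channels
    return self.upsample(out_code), att
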
def define_module(self):
    ngf = self.gf_dim
    self.att = ATT_NET(ngf, self.ef_dim)
    self.residual = self._make_layer(ResBlock, ngf * 2)
    self.upsample = upBlock(ngf * 3, ngf)
    # local pathway
    linput = cfg.GAN.Z_DIM + 81
    self.label = nn.Sequential(
        nn.Linear(linput, self.ef_dim // 2, bias=False),
        nn.BatchNorm1d(self.ef_dim // 2),
        nn.ReLU(True))
    self.local1 = upBlock(self.ef_dim // 2 + ngf, ngf * 2)
    self.local2 = upBlock(ngf * 2, ngf)

def define_module(self):
    ngf = self.gf_dim
    self.att = ATT_NET(ngf, self.ef_dim)
    self.residual = self._make_layer(ResBlock, ngf * 2)

def define_module(self):
    ngf = self.gf_dim
    self.att = ATT_NET(ngf, self.ef_dim)  # GlobalAttentionGeneral module; its context is used as input below
    self.residual = self._make_layer(ResBlock, ngf * 2)  # two residual blocks
    self.upsample = upBlock(ngf * 2, ngf)  # upsampling

def __init__(self):
    super(generator, self).__init__()
    self.image_size = 64
    self.num_channels = 3
    self.noise_dim = 100
    self.embed_dim = 2400  # compatible with skip-thought vectors (2400-dim)
    self.projected_embed_dim = 128
    self.latent_dim = self.noise_dim + self.projected_embed_dim
    self.ngf = 64

    self.projection = nn.Sequential(
        nn.Linear(in_features=self.embed_dim, out_features=self.projected_embed_dim),
        nn.BatchNorm1d(num_features=self.projected_embed_dim),
        nn.LeakyReLU(negative_slope=0.2, inplace=True))

    # NOTE: self.ef_dim was never defined in the original __init__; using the
    # projected embedding width here is an assumption.
    self.ef_dim = self.projected_embed_dim
    self.att = ATT_NET(self.ngf, self.ef_dim)

    # based on: https://github.com/pytorch/examples/blob/master/dcgan/main.py
    self.netG = nn.Sequential(
        nn.ConvTranspose2d(self.latent_dim, self.ngf * 8, 4, 1, 0, bias=True),
        nn.BatchNorm2d(self.ngf * 8),
        nn.ReLU(True),
        # extra convs keep the output at (ngf*8) x 4 x 4
        nn.Conv2d(self.ngf * 8, self.ngf * 2, 1, 1, 0),
        nn.BatchNorm2d(self.ngf * 2),
        nn.ReLU(True),
        nn.Conv2d(self.ngf * 2, self.ngf * 2, 3, 1, 1),
        nn.BatchNorm2d(self.ngf * 2),
        nn.ReLU(True),
        nn.Conv2d(self.ngf * 2, self.ngf * 8, 3, 1, 1),
        nn.BatchNorm2d(self.ngf * 8),
        nn.ReLU(True),
        # state size. (ngf*8) x 4 x 4
        nn.ConvTranspose2d(self.ngf * 8, self.ngf * 4, 4, 2, 1, bias=True),
        nn.BatchNorm2d(self.ngf * 4),
        nn.ReLU(True),
        # extra convs keep the output at (ngf*4) x 8 x 8
        nn.Conv2d(self.ngf * 4, self.ngf, 1, 1, 0),
        nn.BatchNorm2d(self.ngf),
        nn.ReLU(True),
        nn.Conv2d(self.ngf, self.ngf, 3, 1, 1),
        nn.BatchNorm2d(self.ngf),
        nn.ReLU(True),
        nn.Conv2d(self.ngf, self.ngf * 4, 3, 1, 1),
        nn.BatchNorm2d(self.ngf * 4),
        nn.ReLU(True),
        # state size. (ngf*4) x 8 x 8
        nn.ConvTranspose2d(self.ngf * 4, self.ngf * 2, 4, 2, 1, bias=True),
        nn.BatchNorm2d(self.ngf * 2),
        nn.ReLU(True),
        # state size. (ngf*2) x 16 x 16
        nn.ConvTranspose2d(self.ngf * 2, self.ngf, 4, 2, 1, bias=True),
        nn.BatchNorm2d(self.ngf),
        nn.ReLU(True),
        # state size. (ngf) x 32 x 32
        nn.ConvTranspose2d(self.ngf, self.num_channels, 4, 2, 1, bias=True),
        nn.Tanh()
        # state size. (num_channels) x 64 x 64
    )

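# Hypothetical usage sketch (assumed; requires torch and an importable
# ATT_NET for the constructor to run): project the sentence embedding,
# append noise, and reshape to a 1x1 spatial map for the transposed-conv
# stack.
import torch

g = generator()
embed = torch.randn(4, g.embed_dim)                 # batch of text embeddings
z = torch.randn(4, g.noise_dim)
proj = g.projection(embed)                          # (4, projected_embed_dim)
latent = torch.cat((proj, z), 1)[:, :, None, None]  # (4, latent_dim, 1, 1)
fake = g.netG(latent)                               # (4, 3, 64, 64)
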
def define_module(self):
    ngf = self.gf_dim
    self.att = ATT_NET(ngf, self.ef_dim, nhw=self.hw_size)
    ngf_counts = 3
    self.residual = self._make_layer(ResBlock, ngf * ngf_counts)
    self.upsample = upBlock(ngf * ngf_counts, ngf)

def define_module(self):
    ngf = self.gf_dim
    self.att = ATT_NET(ngf, self.ef_dim)
    self.ca_net = CA_NET()
    self.residual = self._make_layer(ResBlock, ngf * 2)
    self.upsample = upBlock(ngf * 2, ngf)

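# For reference, a forward pass matching these layer widths; this mirrors
# AttnGAN's published NEXT_STAGE_G (h_code at ngf channels, word context at
# ngf, concatenated to ngf*2 for the residual stack, then upsampled back to
# ngf). Where self.ca_net fits is not shown in the source; in AttnGAN it
# produces the conditioning code upstream of this call.
def forward(self, h_code, c_code, word_embs, mask):
    self.att.applyMask(mask)
    c_code, att = self.att(h_code, word_embs)
    h_c_code = torch.cat((h_code, c_code), 1)
    out_code = self.residual(h_c_code)
    out_code = self.upsample(out_code)
    return out_code, att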