def __init__(self, pretrained=False, checkpoint_path=None, freeze_nlayers=0):
    """Build the sensor-to-dryspot deconv/conv network.

    Args:
        pretrained: when True, load weights via ``self.load_model``.
        checkpoint_path: checkpoint handed to ``self.load_model``.
        freeze_nlayers: number of leading child modules whose parameters get
            ``requires_grad = False``; 0 freezes nothing.
    """
    super(SensorDeconvToDryspot2, self).__init__()
    # Transposed convs upsample the 1-channel sensor input while growing channels.
    self.ct1 = ConvTranspose2d(1, 16, 3, stride=2, padding=0)
    self.ct2 = ConvTranspose2d(16, 32, 7, stride=2, padding=0)
    self.ct3 = ConvTranspose2d(32, 64, 15, stride=2, padding=0)
    self.ct4 = ConvTranspose2d(64, 128, 17, stride=2, padding=0)
    # Strided convs shrink the feature map back down.
    self.shaper0 = Conv2d(128, 64, 17, stride=2, padding=0)
    self.shaper = Conv2d(64, 32, 15, stride=2, padding=0)
    self.med = Conv2d(32, 32, 7, padding=0)
    self.maxpool = nn.MaxPool2d(2, 2)
    # Classifier head ending in a single output unit.
    self.linear2 = Linear(1024, 512)
    self.linear3 = Linear(512, 256)
    self.linear4 = Linear(256, 1)
    if pretrained:
        self.load_model(checkpoint_path)
    if freeze_nlayers == 0:
        return
    # Fix: create the logger once instead of once per frozen child, and use
    # lazy %-style arguments instead of an f-string.
    logger = logging.getLogger(__name__)
    for i, c in enumerate(self.children()):
        logger.info('Freezing: %s', c)
        for param in c.parameters():
            param.requires_grad = False
        if i == freeze_nlayers - 1:
            break
def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int]],
        stride: Union[int, Tuple[int]] = 1,
        padding: Union[int, Tuple[int]] = 0,
        output_padding: Union[int, Tuple[int]] = 0,
        dilation: Union[int, Tuple[int]] = 1,
        groups: int = 1,
        bias: bool = True,
        weight_quant: Optional[WeightQuantType] = Int8WeightPerTensorFloat,
        bias_quant: Optional[BiasQuantType] = None,
        input_quant: Optional[ActQuantType] = None,
        output_quant: Optional[ActQuantType] = None,
        return_quant_tensor: bool = False,
        **kwargs) -> None:
    """Quantized transposed-conv init: set up the conv, then the quant wrappers."""
    # Cooperative init of both bases. NOTE(review): explicit base-class
    # __init__ calls (rather than super()) appear deliberate — each base
    # receives only its own arguments; extra **kwargs go to the quant side.
    ConvTranspose2d.__init__(
        self,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        output_padding=output_padding,
        dilation=dilation,
        groups=groups,
        bias=bias)
    QuantWBIOL.__init__(
        self,
        weight_quant=weight_quant,
        bias_quant=bias_quant,
        input_quant=input_quant,
        output_quant=output_quant,
        return_quant_tensor=return_quant_tensor,
        **kwargs)
    # Cached target spatial size for the transposed conv; None until a caller sets it.
    self._output_size = None
def __init__(self):
    """Decoder: dense projection to the conv shape, then four up-conv stages."""
    super(Decoder1, self).__init__()
    k = 3
    s = 2
    p = self.same_padding(k)
    # Latent vector -> flat features -> original conv-shaped tensor.
    self.dense1 = Sequential(
        Linear(parameter.latent_dim, flat_dim),
        BatchNorm1d(flat_dim),
        ReLU(),
        Reshape(*orig_dim),
    )
    # Three stride-2 transposed convs double the spatial size each time.
    self.conv1 = Sequential(
        ConvTranspose2d(orig_dim[0], 32, kernel_size=k, stride=s, padding=p, output_padding=p),
        ReLU(),
    )
    self.conv2 = Sequential(
        ConvTranspose2d(32, 16, kernel_size=k, stride=s, padding=p, output_padding=p),
        ReLU(),
    )
    self.conv3 = Sequential(
        ConvTranspose2d(16, 8, kernel_size=k, stride=s, padding=p, output_padding=p),
        BatchNorm2d(8),
        ReLU(),
    )
    # 1x1 conv to the image channels; sigmoid keeps outputs in [0, 1].
    self.conv4 = Sequential(
        ConvTranspose2d(8, colors_dim, kernel_size=1, stride=1),
        Sigmoid(),
    )
    self.set_optimizer(parameter.optimizer, lr=parameter.learning_rate, betas=parameter.betas)
def __init__(self):
    """Small conv autoencoder: 3 -> 10 channel bottleneck and back."""
    super(NetWork, self).__init__()
    # Encoder: three stride-2 convs shrink the image while mixing channels.
    enc = [
        Conv2d(3, 16, kernel_size=4, stride=2),
        LeakyReLU(),
        Conv2d(16, 32, kernel_size=3, stride=2),
        LeakyReLU(),
        Conv2d(32, 10, kernel_size=3, stride=2),
        LeakyReLU(),
    ]
    self.encoder = Sequential(*enc)
    # Decoder: two stride-3 transposed convs, then two plain convs to RGB.
    dec = [
        ConvTranspose2d(10, 32, kernel_size=5, stride=3, padding=1),
        LeakyReLU(),
        ConvTranspose2d(32, 16, kernel_size=5, stride=3),
        LeakyReLU(),
        Conv2d(16, 8, kernel_size=2, stride=1),
        LeakyReLU(),
        Conv2d(8, 3, kernel_size=1, stride=1),
    ]
    self.decoder = Sequential(*dec)
def __init__(self, ngf=32):
    """RDCB upsampling pyramid plus per-resolution RGB heads for progressive training."""
    super(ConcatGenerator, self).__init__()
    # Channel counts fall through the pyramid: 64x -> 16x -> 8x -> 4x -> 2x -> 1x ngf.
    self.rdcb4 = RDCBBlock(in_channels=ngf * 64, out_channels=ngf * 16)
    self.rdcb3 = RDCBBlock(in_channels=ngf * 16, out_channels=ngf * 8)
    self.rdcb2 = RDCBBlock(in_channels=ngf * 8, out_channels=ngf * 4)
    self.rdcb1 = RDCBBlock(in_channels=ngf * 4, out_channels=ngf * 2)
    self.rdcb0 = RDCBBlock(in_channels=ngf * 2, out_channels=ngf)
    # RGB output heads, one per progressive-training stage (3x3, stride 1).
    self.conv2 = ConvTranspose2d(ngf * 4, 3, kernel_size=3, stride=1, padding=1)  # 64x64 stage
    self.conv1 = ConvTranspose2d(ngf * 2, 3, kernel_size=3, stride=1, padding=1)  # 128x128 stage
    self.conv0 = ConvTranspose2d(ngf, 3, kernel_size=3, stride=1, padding=1)      # 256x256 stage
    self.tanh = Tanh()
def __init__(self, ngf=32, n_layers=5):
    """Glyph generator: gated-conv encoder, residual middle, deconv decoder."""
    super(GlyphGenerator, self).__init__()
    # Encoder: replication-padded 9x9 entry conv, then two stride-2 gated convs.
    encoder = [
        ReplicationPad2d(padding=4),
        Conv2d(in_channels=3, out_channels=ngf, kernel_size=9, padding=0),
        LeakyReLU(0.2),
        myGConv(ngf * 2, 2, ngf),
        myGConv(ngf * 4, 2, ngf * 2),
    ]
    # Middle: n_layers combine blocks total; the two central ones use dropout
    # (p=0.5) to make the model more robust.
    half = int(n_layers / 2)
    transformer = [myGCombineBlock(ngf * 4, p=0.0) for _ in range(half - 1)]
    transformer += [myGCombineBlock(ngf * 4, p=0.5), myGCombineBlock(ngf * 4, p=0.5)]
    transformer += [myGCombineBlock(ngf * 4, p=0.0) for _ in range(half + 1, n_layers)]
    # Decoder: two stride-2 transposed convs, then a 9x9 conv back to RGB.
    decoder = [
        ConvTranspose2d(in_channels=ngf * 4, out_channels=ngf * 2, kernel_size=4, stride=2, padding=0),
        BatchNorm2d(num_features=ngf * 2, track_running_stats=True),
        LeakyReLU(0.2),
        ConvTranspose2d(in_channels=ngf * 2, out_channels=ngf, kernel_size=4, stride=2, padding=0),
        BatchNorm2d(num_features=ngf, track_running_stats=True),
        LeakyReLU(0.2),
        ReplicationPad2d(padding=1),
        Conv2d(in_channels=ngf, out_channels=3, kernel_size=9, padding=0),
        Tanh(),
    ]
    self.encoder = nn.Sequential(*encoder)
    self.transformer = nn.Sequential(*transformer)
    self.decoder = nn.Sequential(*decoder)
def __init__(self):
    """Autoencoder: two pooled conv stages, a bottleneck, and an upsampling decoder."""
    super(Auto_encoder, self).__init__()
    # Encoder stage 1: 3 -> 10 channels, resolution halved by max-pool.
    self.conv1 = nn.Sequential(
        Conv2d(3, 10, kernel_size=3, stride=1, padding=1),
        nn.LeakyReLU(0.5),
        MaxPool2d(kernel_size=2))
    # Encoder stage 2: 10 -> 20 channels, resolution halved again.
    self.conv2 = nn.Sequential(
        Conv2d(10, 20, kernel_size=3, stride=1, padding=1),
        nn.LeakyReLU(0.5),
        MaxPool2d(kernel_size=2))
    # Bottleneck: 20 -> 40 -> 160 channels, no activation in between.
    self.conv3 = nn.Sequential(
        Conv2d(20, 40, kernel_size=3, stride=1, padding=1),
        Conv2d(40, 160, kernel_size=3, stride=1, padding=1))
    # Decoder: bilinear upsampling interleaved with transposed convs back to RGB.
    self.deconv1 = nn.Sequential(
        Upsample(scale_factor=2, mode="bilinear", align_corners=True),
        nn.LeakyReLU(0.5),
        ConvTranspose2d(160, 40, kernel_size=3, stride=1, padding=1),
        ConvTranspose2d(40, 20, kernel_size=3, stride=1, padding=2, dilation=2, output_padding=1),
        nn.LeakyReLU(0.5),
        Upsample(scale_factor=2, mode="bilinear", align_corners=True),
        ConvTranspose2d(20, 10, kernel_size=3, stride=1, padding=1),
        ConvTranspose2d(10, 3, kernel_size=3, stride=1, padding=1))
def __init__(self, noise_dim, output_channels=3):
    """DCGAN generator: (noise_dim, 1, 1) latent -> (output_channels, 64, 64) image."""
    super(Generator, self).__init__()
    self.noise_dim = noise_dim
    # 1x1 -> 4x4: project the latent into 1024 feature maps.
    self.hidden0 = Sequential(
        ConvTranspose2d(noise_dim, 1024, kernel_size=4, stride=1, padding=0, bias=False),
        BatchNorm2d(1024),
        ReLU(True))
    # 4x4 -> 8x8 (each stride-2 k=4 p=1 transposed conv doubles the size).
    self.hidden1 = Sequential(
        ConvTranspose2d(1024, 512, kernel_size=4, stride=2, padding=1, bias=False),
        BatchNorm2d(512),
        ReLU(True))
    # 8x8 -> 16x16
    self.hidden2 = Sequential(
        ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1, bias=False),
        BatchNorm2d(256),
        ReLU(True))
    # 16x16 -> 32x32
    self.hidden3 = Sequential(
        ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, bias=False),
        BatchNorm2d(128),
        ReLU(True))
    # 32x32 -> 64x64, tanh squashes to [-1, 1].
    self.out = Sequential(
        ConvTranspose2d(128, output_channels, kernel_size=4, stride=2, padding=1, bias=False),
        Tanh())
def __init__(self):
    """Three-stage deconv head: 100-dim latent -> 18x18 map -> two 2x upsamplings."""
    super().__init__()
    self.conv_transpose1 = ConvTranspose2d(100, 64, kernel_size=18)
    # Channel counts chained from the previous layers (64 -> 32 -> 3).
    self.conv_transpose2 = ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1)
    self.conv_transpose3 = ConvTranspose2d(32, 3, kernel_size=4, stride=2, padding=1)
def __init__(self):
    """InfoGAN-style generator head: two FC layers, then three 2x up-convs."""
    super(Generator, self).__init__()
    # Latent input = noise plus the three auxiliary codes.
    self.fc1 = Linear(z_len + c1_len + c2_len + c3_len, 1024)
    self.fc2 = Linear(1024, 4 * 4 * 256)
    # Each transposed conv doubles the spatial size: 4 -> 8 -> 16 -> 32.
    self.convt1 = ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1)
    self.convt2 = ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1)
    self.convt3 = ConvTranspose2d(64, IMAGE_CHANNEL, kernel_size=4, stride=2, padding=1)
    # Normalization layers paired with the FC / conv stages above.
    self.bn1 = BatchNorm1d(1024)
    self.bn2 = BatchNorm1d(4 * 4 * 256)
    self.bn3 = BatchNorm2d(128)
    self.bn4 = BatchNorm2d(64)
def __init__(self, block1, block2, layers, num_input_channels, num_latent_dims,
             zero_init_residual=False):
    """DCGAN-style decoder: latent vector -> num_input_channels feature map.

    NOTE(review): block1, block2, layers and zero_init_residual are not used
    anywhere in this constructor — possibly leftovers from a ResNet-style
    template. Confirm before removing; callers may pass them positionally.
    """
    super().__init__()
    ngf = 64  # base feature width of the decoder
    nc = num_input_channels
    # Project the latent, then upsample step by step down to nc channels.
    self.conv1 = ConvTranspose2d(num_latent_dims, ngf * 8, 4, 2, 0, bias=False)
    self.bn1 = BatchNorm2d(ngf * 8)
    self.conv2 = ConvTranspose2d(ngf * 8, ngf * 4, 3, 3, 1, bias=False)
    self.bn2 = BatchNorm2d(ngf * 4)
    self.conv3 = ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False)
    self.bn3 = BatchNorm2d(ngf * 2)
    self.conv4 = ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False)
    self.bn4 = BatchNorm2d(ngf)
    # Final stride-1 conv maps to the output channel count; no norm after it.
    self.conv5 = ConvTranspose2d(ngf, nc, 3, 1, 1, bias=False)
def __init__(self, ngf=32, n_layers=5):
    """Texture generator: big-kernel entry conv, residual middle, deconv tail."""
    super(TextureGenerator, self).__init__()
    modules = [
        ReplicationPad2d(padding=4),
        Conv2d(in_channels=3, out_channels=ngf, kernel_size=9, padding=0),
        ReLU(),
        myTConv(ngf * 2, 2, ngf),
        myTConv(ngf * 4, 2, ngf * 2),
    ]
    # Residual middle: one dropout block (p=0.5) among plain ones makes the
    # model more robust.
    half = int(n_layers / 2)
    modules += [myTBlock(ngf * 4, p=0.0) for _ in range(half)]
    modules.append(myTBlock(ngf * 4, p=0.5))
    modules += [myTBlock(ngf * 4, p=0.0) for _ in range(half + 1, n_layers)]
    # Decoder tail: two stride-2 transposed convs, then a 9x9 conv to RGB.
    modules += [
        ConvTranspose2d(in_channels=ngf * 4, out_channels=ngf * 2, kernel_size=4, stride=2, padding=0),
        BatchNorm2d(num_features=ngf * 2, track_running_stats=True),
        ReLU(),
        ConvTranspose2d(in_channels=ngf * 2, out_channels=ngf, kernel_size=4, stride=2, padding=0),
        BatchNorm2d(num_features=ngf, track_running_stats=True),
        ReLU(),
        ReplicationPad2d(padding=1),
        Conv2d(in_channels=ngf, out_channels=3, kernel_size=9, padding=0),
        Tanh(),
    ]
    self.model = nn.Sequential(*modules)
def __init__(self, in_channels=4, ngf=32, n_layers=5):
    """Sketch generator: shared encoder/transformer plus a three-stage decoder."""
    super(SketchGenerator, self).__init__()
    encoder = [
        Conv2d(in_channels, ngf, kernel_size=9, padding=4),
        ReLU(),
        mySConv(ngf * 2, 2, ngf),
        mySConv(ngf * 4, 2, ngf * 2),
    ]
    # NOTE(review): the +1 / +2 channel offsets below presumably match extra
    # feature maps concatenated in forward() — confirm against the caller.
    transformer = [mySBlock(ngf * 4 + 1) for _ in range(n_layers)]
    decoder1 = [
        ConvTranspose2d(ngf * 4 + 2, ngf * 2, kernel_size=4, stride=2, padding=0),
        InstanceNorm2d(num_features=ngf * 2),
        ReLU(),
    ]
    decoder2 = [
        ConvTranspose2d(ngf * 2 + 1, ngf, kernel_size=4, stride=2, padding=0),
        InstanceNorm2d(num_features=ngf),
        ReLU(),
    ]
    decoder3 = [
        Conv2d(ngf + 1, 3, kernel_size=9, padding=1),
        Tanh(),
    ]
    self.encoder = nn.Sequential(*encoder)
    self.transformer = nn.Sequential(*transformer)
    self.decoder1 = nn.Sequential(*decoder1)
    self.decoder2 = nn.Sequential(*decoder2)
    self.decoder3 = nn.Sequential(*decoder3)
def __init__(self):
    """DCGAN generator: (NOISE_Z, 1, 1) noise -> (3, 96, 96) image."""
    super(GeneratorNet, self).__init__()
    ngf = config.GENERATOR_FEATURES_NUM  # local alias for readability
    self.mainNetwork = Sequential(
        # (NOISE_Z,1,1) -> (ngf*8,4,4)
        ConvTranspose2d(config.NOISE_Z, ngf * 8, kernel_size=4, stride=1, padding=0, bias=False),
        BatchNorm2d(ngf * 8),
        ReLU(True),
        # (ngf*8,4,4) -> (ngf*4,8,8)
        ConvTranspose2d(ngf * 8, ngf * 4, kernel_size=4, stride=2, padding=1, bias=False),
        BatchNorm2d(ngf * 4),
        ReLU(True),
        # (ngf*4,8,8) -> (ngf*2,16,16)
        ConvTranspose2d(ngf * 4, ngf * 2, kernel_size=4, stride=2, padding=1, bias=False),
        BatchNorm2d(ngf * 2),
        ReLU(True),
        # (ngf*2,16,16) -> (ngf,32,32)
        ConvTranspose2d(ngf * 2, ngf, kernel_size=4, stride=2, padding=1, bias=False),
        BatchNorm2d(ngf),
        ReLU(True),
        # (ngf,32,32) -> (3,96,96): 5x5 kernel with stride 3
        ConvTranspose2d(ngf, 3, kernel_size=5, stride=3, padding=1, bias=False),
        Tanh(),
    )
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
             output_padding=0, groups=1, bias=True, dilation=1, padding_mode='zeros'):
    """Complex-valued transposed conv: one kernel for the real part, one for the imaginary."""
    super(ComplexConvTranspose2d, self).__init__()
    # Both branches share the exact same hyperparameters.
    shared = dict(stride=stride, padding=padding, output_padding=output_padding,
                  groups=groups, bias=bias, dilation=dilation, padding_mode=padding_mode)
    self.conv_tran_r = ConvTranspose2d(in_channels, out_channels, kernel_size, **shared)
    self.conv_tran_i = ConvTranspose2d(in_channels, out_channels, kernel_size, **shared)
def __init__(self, pretrained="", checkpoint_path=None, freeze_nlayers=0):
    """Sensor-to-dryspot classifier with optional partial weight loading.

    Args:
        pretrained: "" (train from scratch), "deconv_weights" (load only the
            deconv/shaper front end) or "all_weights" (load every listed layer).
        checkpoint_path: checkpoint handed to ``load_model_layers_from_path``.
        freeze_nlayers: number of leading child modules to freeze (0 = none).
    """
    super(SensorDeconvToDryspotEfficient2, self).__init__()
    # Deconv front end: upsample the 1-channel sensor map.
    self.ct1 = ConvTranspose2d(1, 128, 3, stride=2, padding=0)
    self.ct2 = ConvTranspose2d(128, 64, 7, stride=2, padding=0)
    self.ct3 = ConvTranspose2d(64, 32, 15, stride=2, padding=0)
    self.ct4 = ConvTranspose2d(32, 8, 17, stride=2, padding=0)
    self.shaper0 = Conv2d(8, 16, 17, stride=2, padding=0)
    self.shaper = Conv2d(16, 32, 15, stride=2, padding=0)
    self.med = Conv2d(32, 32, 7, padding=0)
    self.details = Conv2d(32, 32, 3)
    # Classification back end: deeper conv stack into two linear layers.
    self.details2 = Conv2d(32, 64, 13, padding=0)
    self.details3 = Conv2d(64, 128, 7, padding=0)
    self.details4 = Conv2d(128, 256, 5, padding=0)
    self.details5 = Conv2d(256, 512, 3, padding=0)
    self.details6 = Conv2d(512, 512, 3, padding=0)
    self.maxpool = nn.MaxPool2d(2, 2)
    self.linear2 = Linear(1536, 1024)
    self.linear3 = Linear(1024, 1)
    self.bn32 = nn.BatchNorm2d(32)
    self.bn512 = nn.BatchNorm2d(512)
    self.dropout = nn.Dropout(0.3)
    if pretrained == "deconv_weights":
        # Load only the deconv/shaper front end from the checkpoint.
        weights = load_model_layers_from_path(
            path=checkpoint_path,
            layer_names={"ct1", "ct2", "ct3", "ct4", "shaper0", "shaper", "med", "details"})
        self.load_state_dict(weights, strict=False)
    elif pretrained == "all_weights":
        # Load every named layer, including the classifier head and norms.
        weights = load_model_layers_from_path(
            path=checkpoint_path,
            layer_names={"ct1", "ct2", "ct3", "ct4",
                         "shaper0", "shaper", "med", "details",
                         "details2", "details3", "details4", "details5", "details6",
                         "linear2", "linear3", "bn32", "bn512"})
        self.load_state_dict(weights, strict=False)
    if freeze_nlayers == 0:
        return
    # Fix: create the logger once instead of once per frozen child, and use
    # lazy %-style arguments instead of an f-string.
    logger = logging.getLogger(__name__)
    for i, c in enumerate(self.children()):
        logger.info('Freezing: %s', c)
        for param in c.parameters():
            param.requires_grad = False
        if i == freeze_nlayers - 1:
            break
def __init__(self, in_channels=4, ngf=32, n_layers=5):
    """
    Generator.
    :param in_channels: number of input channels (4 is passed in)
    :param ngf: feature count of the generator's first layer (32 is passed in)
    :param n_layers: number of layers in the generator network (6 is passed in)
    """
    super(SketchGenerator, self).__init__()
    encoder = []
    encoder.append(
        Conv2d(in_channels=in_channels,
               out_channels=ngf,
               kernel_size=9,
               padding=4))
    encoder.append(ReLU())
    encoder.append(mySConv(ngf * 2, 2, ngf))
    encoder.append(mySConv(ngf * 4, 2, ngf * 2))
    transformer = []
    for n in range(n_layers):  # n_layers: number of layers in the generator
        transformer.append(mySBlock(ngf * 4 + 1))  # TODO: why the extra +1? presumably an appended feature map — confirm
    decoder1 = []
    decoder2 = []
    decoder3 = []
    decoder1.append(
        ConvTranspose2d(out_channels=ngf * 2,
                        kernel_size=4,
                        stride=2,
                        padding=0,
                        in_channels=ngf * 4 + 2))  # why +2? presumably two concatenated maps — confirm
    decoder1.append(InstanceNorm2d(num_features=ngf * 2))
    decoder1.append(ReLU())
    decoder2.append(
        ConvTranspose2d(out_channels=ngf,
                        kernel_size=4,
                        stride=2,
                        padding=0,
                        in_channels=ngf * 2 + 1))  # why +1? same concatenation pattern — confirm
    decoder2.append(InstanceNorm2d(num_features=ngf))
    decoder2.append(ReLU())
    decoder3.append(
        Conv2d(out_channels=3, kernel_size=9, padding=1, in_channels=ngf + 1))
    decoder3.append(Tanh())
    self.encoder = nn.Sequential(*encoder)
    self.transformer = nn.Sequential(*transformer)
    self.decoder1 = nn.Sequential(*decoder1)
    self.decoder2 = nn.Sequential(*decoder2)
    self.decoder3 = nn.Sequential(*decoder3)
def create_generator():
    """Build a DCGAN generator mapping an NLAT-dim latent to a NUM_CHANNELS image."""
    blocks = [
        # Latent projection to DIM*4 feature maps.
        ConvTranspose2d(NLAT, DIM * 4, 4, 1, 0, bias=False),
        BatchNorm2d(DIM * 4),
        ReLU(inplace=True),
        # Three 2x upsampling stages, channels halving each time.
        ConvTranspose2d(DIM * 4, DIM * 2, 4, 2, 1, bias=False),
        BatchNorm2d(DIM * 2),
        ReLU(inplace=True),
        ConvTranspose2d(DIM * 2, DIM, 4, 2, 1, bias=False),
        BatchNorm2d(DIM),
        ReLU(inplace=True),
        ConvTranspose2d(DIM, NUM_CHANNELS, 4, 2, 1, bias=False),
        Tanh(),
    ]
    return DeterministicConditional(nn.Sequential(*blocks))
def determine_layers(side, random_dim, num_channels):
    """Build discriminator / generator / classifier layer lists for a
    side x side input (4 <= side <= 32).

    Returns three plain lists of torch.nn modules: (layers_D, layers_G, layers_C).
    """
    assert 4 <= side <= 32
    # Halve the spatial size and double the channels, at most 4 levels deep.
    layer_dims = [(1, side), (num_channels, side // 2)]
    while layer_dims[-1][1] > 3 and len(layer_dims) < 4:
        channels, size = layer_dims[-1]
        layer_dims.append((channels * 2, size // 2))

    pairs = list(zip(layer_dims, layer_dims[1:]))

    # Discriminator: strided conv blocks, then a conv collapsing to one sigmoid unit.
    layers_D = []
    for (c_in, _), (c_out, _) in pairs:
        layers_D += [
            Conv2d(c_in, c_out, 4, 2, 1, bias=False),
            BatchNorm2d(c_out),
            LeakyReLU(0.2, inplace=True),
        ]
    layers_D += [Conv2d(layer_dims[-1][0], 1, layer_dims[-1][1], 1, 0), Sigmoid()]

    # Generator: mirror of the discriminator built from transposed convs.
    layers_G = [
        ConvTranspose2d(random_dim, layer_dims[-1][0], layer_dims[-1][1], 1, 0,
                        output_padding=0, bias=False)
    ]
    for (prev_ch, _), (next_ch, _) in zip(reversed(layer_dims), reversed(layer_dims[:-1])):
        layers_G += [
            BatchNorm2d(prev_ch),
            ReLU(True),
            ConvTranspose2d(prev_ch, next_ch, 4, 2, 1, output_padding=0, bias=True),
        ]
    layers_G += [Tanh()]

    # Classifier: same trunk as D but ends in a raw (un-sigmoided) conv.
    layers_C = []
    for (c_in, _), (c_out, _) in pairs:
        layers_C += [
            Conv2d(c_in, c_out, 4, 2, 1, bias=False),
            BatchNorm2d(c_out),
            LeakyReLU(0.2, inplace=True),
        ]
    layers_C += [Conv2d(layer_dims[-1][0], 1, layer_dims[-1][1], 1, 0)]
    return layers_D, layers_G, layers_C
def __init__(self):
    """InfoGAN generator for 28x28 single-channel output: two FC layers, two up-convs."""
    super(Generator, self).__init__()
    # Latent input = noise plus the three auxiliary codes.
    self.fc1 = Linear(z_len + c1_len + c2_len + c3_len, 1024)
    self.fc2 = Linear(1024, 7 * 7 * 128)
    # 7x7 -> 14x14 -> 28x28 via two stride-2 transposed convs.
    self.convt1 = ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1)
    self.convt2 = ConvTranspose2d(64, 1, kernel_size=4, stride=2, padding=1)
    # Normalization paired with each stage above.
    self.bn1 = BatchNorm1d(1024)
    self.bn2 = BatchNorm1d(7 * 7 * 128)
    self.bn3 = BatchNorm2d(64)
def __init__(self):
    """MNIST decoder: 15-dim code -> 1x32x32 image via three 2x up-convs."""
    super(MNISTConvDecoder, self).__init__()
    self.fc1 = Linear(15, 256)
    self.fc2 = Linear(256, 64 * 4 * 4)
    # 4x4 -> 8x8 -> 16x16 -> 32x32.
    self.deconv1 = ConvTranspose2d(64, 32, 4, stride=2, padding=1)
    self.deconv2 = ConvTranspose2d(32, 32, 4, stride=2, padding=1)
    self.deconv3 = ConvTranspose2d(32, 1, 4, stride=2, padding=1)
    self.sig = Sigmoid()
def __init__(self):
    """80-sensor deconv model: upsample the sensor map, then conv back to one channel."""
    super(S80DeconvModelEfficient, self).__init__()
    # Expansion: 1 -> 128 -> 64 -> 32 -> 8 channels with growing kernels.
    self.ct1 = ConvTranspose2d(1, 128, 3, stride=2, padding=0)
    self.ct3 = ConvTranspose2d(128, 64, 7, stride=2, padding=0)
    self.ct5 = ConvTranspose2d(64, 32, 15, stride=2, padding=0)
    self.ct6 = ConvTranspose2d(32, 8, 17, stride=2, padding=0)
    # Reduction back to a single-channel output map.
    self.c1 = Conv2d(8, 32, 11, stride=2)
    self.ck = Conv2d(32, 32, 3, padding=0)
    self.cj = Conv2d(32, 1, 3, padding=0)
def __init__(self, demo_mode=False):
    """20-sensor deconv model.

    ``demo_mode`` is only stored here; presumably consumed in forward()/plotting — confirm.
    """
    super(S20DeconvModelEfficient, self).__init__()
    # Expansion: 1 -> 256 -> 128 -> 64 -> 16 channels with growing kernels.
    self.ct1 = ConvTranspose2d(1, 256, 3, stride=2, padding=0)
    self.ct2 = ConvTranspose2d(256, 128, 5, stride=2, padding=0)
    self.ct3 = ConvTranspose2d(128, 64, 10, stride=2, padding=0)
    self.ct4 = ConvTranspose2d(64, 16, 17, stride=2, padding=0)
    # Refinement convs down to a single-channel map.
    self.details = Conv2d(16, 8, 5)
    self.details2 = Conv2d(8, 1, 3, padding=0)
    self.demo_mode = demo_mode
def create_generator():
    """Build a DCGAN generator: NLAT-dim latent -> NUM_CHANNELS image."""
    blocks = [
        # Latent projection to FEATURES_GEN*16 feature maps.
        ConvTranspose2d(NLAT, FEATURES_GEN * 16, 4, 1, 0),
        BatchNorm2d(FEATURES_GEN * 16),
        ReLU(),
        # Four 2x upsampling stages, channels halving each time.
        ConvTranspose2d(FEATURES_GEN * 16, FEATURES_GEN * 8, 4, 2, 1),
        BatchNorm2d(FEATURES_GEN * 8),
        ReLU(),
        ConvTranspose2d(FEATURES_GEN * 8, FEATURES_GEN * 4, 4, 2, 1),
        BatchNorm2d(FEATURES_GEN * 4),
        ReLU(),
        ConvTranspose2d(FEATURES_GEN * 4, FEATURES_GEN * 2, 4, 2, 1),
        BatchNorm2d(FEATURES_GEN * 2),
        ReLU(),
        ConvTranspose2d(FEATURES_GEN * 2, NUM_CHANNELS, 4, 2, 1),
        nn.Tanh(),
    ]
    return DeterministicConditional(nn.Sequential(*blocks))
def __init__(self):
    """DCGAN generator: (NOISE_DIM, 1, 1) noise -> (3, 96, 96) image."""
    super(GeneratorNet, self).__init__()
    ngf = CONFIG["NGF"]  # local alias for readability
    self.mainNetwork = Sequential(
        # (NOISE_DIM,1,1) -> (ngf*8,4,4)
        ConvTranspose2d(CONFIG["NOISE_DIM"], ngf * 8, kernel_size=4, stride=1, padding=0, bias=False),
        BatchNorm2d(ngf * 8),
        ReLU(True),
        # (ngf*8,4,4) -> (ngf*4,8,8)
        ConvTranspose2d(ngf * 8, ngf * 4, kernel_size=4, stride=2, padding=1, bias=False),
        BatchNorm2d(ngf * 4),
        ReLU(True),
        # (ngf*4,8,8) -> (ngf*2,16,16)
        ConvTranspose2d(ngf * 4, ngf * 2, kernel_size=4, stride=2, padding=1, bias=False),
        BatchNorm2d(ngf * 2),
        ReLU(True),
        # (ngf*2,16,16) -> (ngf,32,32)
        ConvTranspose2d(ngf * 2, ngf, kernel_size=4, stride=2, padding=1, bias=False),
        BatchNorm2d(ngf),
        ReLU(True),
        # (ngf,32,32) -> (3,96,96): 5x5 kernel with stride 3
        ConvTranspose2d(ngf, 3, kernel_size=5, stride=3, padding=1, bias=False),
        Tanh(),
    )
def __init__(self):
    """Symmetric conv autoencoder with max-pool / nearest-upsample resizing."""
    super(BasicAutoEncoder, self).__init__()
    self.pool = MaxPool2d(kernel_size=2, stride=2)
    self.poolt = UpsamplingNearest2d(scale_factor=2)
    # Encoder: 1 -> 64 -> 32 -> 16 channels.
    self.cv1 = Conv2d(1, 64, kernel_size=4, stride=1)
    self.cv2 = Conv2d(64, 32, kernel_size=3, stride=1)
    self.cv3 = Conv2d(32, 16, kernel_size=3, stride=1)
    # Decoder mirrors the encoder kernel sizes in reverse order.
    self.cv1t = ConvTranspose2d(16, 32, kernel_size=3, stride=1)
    self.cv2t = ConvTranspose2d(32, 64, kernel_size=3, stride=1)
    self.cv3t = ConvTranspose2d(64, 1, kernel_size=4, stride=1)
def __init__(self):
    """Conditional generator: (noise + 10-way condition) -> 3-channel image."""
    super(Generator, self).__init__()
    self.gc_full = Linear(NOISE_DIM + 10, 512)
    # Four stride-2 transposed convs; the last one adds output_padding=1.
    self.dconv1 = ConvTranspose2d(512, 256, kernel_size=3, stride=2)
    self.dconv2 = ConvTranspose2d(256, 128, kernel_size=3, stride=2)
    self.dconv3 = ConvTranspose2d(128, 64, kernel_size=3, stride=2)
    self.dconv4 = ConvTranspose2d(64, 3, kernel_size=3, stride=2, output_padding=1)
    # Batch norms per stage, declared deepest-first as in the original layout.
    self.bn4 = BatchNorm2d(64)
    self.bn3 = BatchNorm2d(128)
    self.bn2 = BatchNorm2d(256)
    self.bn1 = BatchNorm2d(512)
def __init__(self, config):
    """DCGAN-style generator built dynamically from a config object.

    Relies on parse_config(config) setting: g_feature_size, g_layers, img_h,
    img_c, c_input, kernel_size, stride, g_input_pad, g_output_pad.
    """
    super(Generator, self).__init__()
    self.parse_config(config)
    self.generator = Sequential()
    # First layer: project and reshape the noise vector into a
    # c_latent x latent_hw x latent_hw feature map.
    self.c_latent = self.g_feature_size * (2**(self.g_layers - 1))
    self.latent_hw = int(self.img_h / (2**self.g_layers))
    self.generator.add_module(
        'TConvLatent',
        ConvTranspose2d(self.c_input, self.c_latent, self.latent_hw, bias=False))
    self.generator.add_module('BNLatent', BatchNorm2d(self.c_latent))
    self.generator.add_module('ReLULatent', ReLU(inplace=True))
    c_input = self.c_latent
    layer_number = 1
    # Intermediate upsampling layers: channel count halves at each step.
    for i in range(self.g_layers - 2, -1, -1):
        c_layer = int(self.g_feature_size * (2**i))
        self.generator.add_module(
            'TConv' + str(layer_number),
            ConvTranspose2d(c_input, c_layer, self.kernel_size, self.stride,
                            self.g_input_pad,
                            output_padding=self.g_output_pad, bias=False))
        self.generator.add_module('BN' + str(layer_number), BatchNorm2d(c_layer))
        self.generator.add_module('ReLU' + str(layer_number), ReLU(inplace=True))
        c_input = copy(c_layer)  # NOTE(review): copy() of an int is a no-op; plain assignment would do
        layer_number += 1
    # Final image layer; no batch norm in the output layer per the DCGAN paper.
    self.generator.add_module(
        'F_TConv1',
        ConvTranspose2d(c_input, self.img_c, self.kernel_size, self.stride,
                        self.g_input_pad,
                        output_padding=self.g_output_pad, bias=False))
    self.generator.add_module('F_Tanh', Tanh())
def __init__(self, z_feautures: int):
    """Small GAN generator; the final transposed conv is spectral-normalized."""
    super(Generator, self).__init__()
    self.z_feautures = z_feautures
    self.generator = nn.Sequential(
        # Latent projection (assumes a 1x1 latent input — confirm with the caller).
        ConvTranspose2d(z_feautures, 128, kernel_size=4, stride=1, padding=0, bias=False),
        BatchNorm2d(128),
        LeakyReLU(0.2, True),
        ConvTranspose2d(128, 64, kernel_size=4, stride=1, padding=0, bias=False),
        BatchNorm2d(64),
        LeakyReLU(0.2, True),
        ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1, bias=False),
        BatchNorm2d(32),
        LeakyReLU(0.2, True),
        # Spectral norm on the output layer; tanh squashes to [-1, 1].
        spectral_norm(
            ConvTranspose2d(32, 1, kernel_size=4, stride=2, padding=1, bias=False),
            dim=None),
        Tanh(),
    )
    self.apply(weights_init)
def __init__(self, input_dim=1140):
    """2x deconv model.

    NOTE(review): input_dim is accepted but unused in this constructor —
    presumably kept for interface compatibility; confirm.
    """
    super(DeconvModel2x, self).__init__()
    # Upsampling tower: 1 -> 16 -> 32 -> 32 -> 64 -> 128 channels.
    self.ct1 = ConvTranspose2d(1, 16, 3, stride=2, padding=0)
    self.ct2 = ConvTranspose2d(16, 32, 5, stride=2, padding=2)
    self.ct3 = ConvTranspose2d(32, 32, 7, stride=2, padding=3)
    self.ct4 = ConvTranspose2d(32, 64, 15, stride=2, padding=0)
    self.ct5 = ConvTranspose2d(64, 128, 17, stride=2, padding=0)
    # Conv tower shrinking back to a single-channel map.
    self.shaper0 = Conv2d(128, 64, 17, stride=2, padding=0)
    self.shaper = Conv2d(64, 32, 15, stride=2, padding=0)
    self.med = Conv2d(32, 32, 7, padding=0)
    self.details = Conv2d(32, 32, 3)
    self.details2 = Conv2d(32, 1, 3, padding=0)