def __init__(self, dim_in, dim_out, mode=None):
    """Residual block: two (3x3 conv -> InstanceNorm) stages with a ReLU between.

    Args:
        dim_in (int): number of input channels.
        dim_out (int): number of output channels.
        mode (str|None): 't' disables the InstanceNorm affine parameters
            (weight_attr=False, bias_attr=False); 'p' or None keeps the
            framework defaults (weight_attr=None, bias_attr=None).

    Raises:
        ValueError: if ``mode`` is not 't', 'p' or None.
    """
    super(ResidualBlock, self).__init__()
    if mode == 't':
        weight_attr = False
        bias_attr = False
    elif mode == 'p' or (mode is None):
        weight_attr = None
        bias_attr = None
    else:
        # Previously an unrecognized mode left weight_attr/bias_attr unbound
        # and raised a confusing UnboundLocalError below; fail fast instead.
        raise ValueError("mode must be 't', 'p' or None, got %r" % (mode,))
    self.main = nn.Sequential(
        nn.Conv2D(dim_in, dim_out, kernel_size=3, stride=1, padding=1,
                  bias_attr=False),
        nn.InstanceNorm2D(dim_out, weight_attr=weight_attr,
                          bias_attr=bias_attr),
        nn.ReLU(),
        nn.Conv2D(dim_out, dim_out, kernel_size=3, stride=1, padding=1,
                  bias_attr=False),
        nn.InstanceNorm2D(dim_out, weight_attr=weight_attr,
                          bias_attr=bias_attr))
def _build_weights(self, dim_in, dim_out):
    """Create the block's convolution, normalization and shortcut sub-layers.

    Args:
        dim_in (int): input channel count.
        dim_out (int): output channel count (produced by conv2).
    """
    self.conv1 = nn.Conv2D(dim_in, dim_in, kernel_size=3, stride=1, padding=1)
    self.conv2 = nn.Conv2D(dim_in, dim_out, kernel_size=3, stride=1, padding=1)
    if self.normalize:
        # Both norms act on dim_in channels: normalization is applied
        # before each conv, and conv1 keeps the channel count at dim_in.
        self.norm1 = nn.InstanceNorm2D(dim_in, weight_attr=True, bias_attr=True)
        self.norm2 = nn.InstanceNorm2D(dim_in, weight_attr=True, bias_attr=True)
    if self.learned_sc:
        # 1x1 projection for the skip path when channel counts differ.
        self.conv1x1 = nn.Conv2D(dim_in, dim_out, kernel_size=1, stride=1,
                                 padding=0, bias_attr=False)
def __init__(self):
    """Build a two-stage OFA (once-for-all) convolutional supernet.

    Stage 1 searches kernel size and per-layer channel counts; a fixed
    SuperSeparableConv2D block bridges to stage 2, which searches kernel
    size and expand ratio. NOTE(review): exact search semantics depend on
    paddleslim's `supernet`/`convert` — confirm against its docs.
    """
    super(ModelConv, self).__init__()
    # Stage 1: search over kernel size and explicit channel choices.
    with supernet(kernel_size=(3, 5, 7),
                  channel=((4, 8, 12), (8, 12, 16), (8, 12, 16),
                           (8, 12, 16))) as ofa_super:
        models = []
        models += [nn.Conv2D(3, 4, 3, padding=1)]
        models += [nn.InstanceNorm2D(4)]
        models += [ReLU()]
        # Depthwise conv (groups == channels).
        models += [nn.Conv2D(4, 4, 3, groups=4)]
        models += [nn.InstanceNorm2D(4)]
        models += [ReLU()]
        models += [nn.Conv2DTranspose(4, 4, 3, groups=4, padding=1)]
        models += [nn.BatchNorm2D(4)]
        models += [ReLU()]
        models += [nn.Conv2D(4, 3, 3)]
        models += [ReLU()]
        # Rewrite the plain layers into their searchable "super" versions.
        models = ofa_super.convert(models)
    # Fixed (non-searchable) separable block between the two supernets.
    models += [
        Block(SuperSeparableConv2D(3, 6, 1, padding=1,
                                   candidate_config={'channel': (3, 6)}),
              fixed=True)
    ]
    # Stage 2: search over kernel size and expand ratio instead of channels.
    with supernet(kernel_size=(3, 5, 7),
                  expand_ratio=(1, 2, 4)) as ofa_super:
        models1 = []
        models1 += [nn.Conv2D(6, 4, 3)]
        models1 += [nn.BatchNorm2D(4)]
        models1 += [ReLU()]
        models1 += [nn.Conv2D(4, 4, 3, groups=2)]
        models1 += [nn.InstanceNorm2D(4)]
        models1 += [ReLU()]
        models1 += [nn.Conv2DTranspose(4, 4, 3, groups=2)]
        models1 += [nn.BatchNorm2D(4)]
        models1 += [ReLU()]
        models1 += [nn.Conv2DTranspose(4, 4, 3)]
        models1 += [nn.BatchNorm2D(4)]
        models1 += [ReLU()]
        models1 += [nn.Conv2DTranspose(4, 4, 1)]
        models1 += [nn.BatchNorm2D(4)]
        models1 += [ReLU()]
        models1 = ofa_super.convert(models1)
    models += models1
    self.models = paddle.nn.Sequential(*models)
def __init__(self, in_channels):
    """Split normalization: BatchNorm on the first half of the channels,
    InstanceNorm on the remainder.

    Args:
        in_channels (int): total number of input channels to split.
    """
    super().__init__()
    half = in_channels // 2
    self.bnorm_channels = half
    # Remainder goes to instance norm (handles odd channel counts).
    self.inorm_channels = in_channels - half
    self.bnorm = nn.BatchNorm2D(self.bnorm_channels)
    self.inorm = nn.InstanceNorm2D(self.inorm_channels)
def __convblock(dim_in, dim_out):
    """Pre-activation conv unit: InstanceNorm -> ReLU -> reflect pad -> 3x3 conv.

    Args:
        dim_in (int): input channel count.
        dim_out (int): output channel count.

    Returns:
        nn.Sequential: the assembled unit.
    """
    stages = [
        nn.InstanceNorm2D(dim_in, weight_attr=False, bias_attr=False),
        nn.ReLU(),
        # Reflect-pad by 1 on each side so the 3x3 conv keeps spatial size.
        nn.Pad2D([1, 1, 1, 1], 'reflect'),
        nn.Conv2D(dim_in, dim_out, kernel_size=3, stride=1, bias_attr=False),
    ]
    return nn.Sequential(*stages)
def __init__(self, dim, use_bias=False):
    """Residual block of two reflect-padded 3x3 convs with InstanceNorm.

    Args:
        dim (int): channel count (input and output are the same width).
        use_bias (bool): whether the convolutions carry a bias term.
    """
    super(ResnetBlock, self).__init__()
    # Second conv stage has no trailing ReLU (applied after the residual add
    # by the caller, if at all).
    self.conv_block = nn.Sequential(
        nn.Pad2D([1, 1, 1, 1], 'reflect'),
        nn.Conv2D(dim, dim, kernel_size=3, stride=1, bias_attr=use_bias),
        nn.InstanceNorm2D(dim, weight_attr=False, bias_attr=False),
        nn.ReLU(),
        nn.Pad2D([1, 1, 1, 1], 'reflect'),
        nn.Conv2D(dim, dim, kernel_size=3, stride=1, bias_attr=use_bias),
        nn.InstanceNorm2D(dim, weight_attr=False, bias_attr=False))
def __init__(self, conv_dim=64, repeat_num=3, w=0.01):
    """Makeup-transfer network: TNetDown encoder, spade-style matrix head,
    residual bottlenecks, and two transposed-conv upsampling stages.

    Args:
        conv_dim (int): base channel width of the encoder.
        repeat_num (int): number of bottleneck ResidualBlocks.
        w (float): scalar weight stored on the module (used elsewhere;
            purpose not visible in this block).
    """
    super(MANet, self).__init__()
    self.encoder = TNetDown(conv_dim=conv_dim, repeat_num=repeat_num)
    # Encoder downsamples twice, so features are conv_dim * 4 wide.
    curr_dim = conv_dim * 4
    self.w = w
    self.beta = nn.Conv2D(curr_dim, curr_dim, kernel_size=3, padding=1)
    self.gamma = nn.Conv2D(curr_dim, curr_dim, kernel_size=3, padding=1)
    self.simple_spade = GetMatrix(curr_dim, 1)  # get the makeup matrix
    self.repeat_num = repeat_num
    # Bottleneck blocks registered by name so forward() can getattr them.
    for i in range(repeat_num):
        setattr(self, "bottlenecks_" + str(i),
                ResidualBlock(dim_in=curr_dim, dim_out=curr_dim, mode='t'))
    # Up-Sampling
    # NOTE(review): these list attributes are never filled here — the real
    # sublayers are registered via setattr below; confirm the lists are
    # not read elsewhere before removing.
    self.upsamplers = []
    self.up_betas = []
    self.up_gammas = []
    self.up_acts = []
    y_dim = curr_dim
    for i in range(2):
        layers = []
        layers.append(
            nn.Conv2DTranspose(curr_dim, curr_dim // 2, kernel_size=4,
                               stride=2, padding=1, bias_attr=False))
        layers.append(
            nn.InstanceNorm2D(curr_dim // 2, weight_attr=False,
                              bias_attr=False))
        setattr(self, "up_acts_" + str(i), nn.ReLU())
        # beta/gamma branches upsample from the original bottleneck width.
        setattr(
            self, "up_betas_" + str(i),
            nn.Conv2DTranspose(y_dim, curr_dim // 2, kernel_size=4,
                               stride=2, padding=1))
        setattr(
            self, "up_gammas_" + str(i),
            nn.Conv2DTranspose(y_dim, curr_dim // 2, kernel_size=4,
                               stride=2, padding=1))
        setattr(self, "up_samplers_" + str(i), nn.Sequential(*layers))
        curr_dim = curr_dim // 2
    # Final 7x7 conv mapping features to a 3-channel image.
    self.img_reg = [
        nn.Conv2D(curr_dim, 3, kernel_size=7, stride=1, padding=3,
                  bias_attr=False)
    ]
    self.img_reg = nn.Sequential(*self.img_reg)
def __init__(self, conv_dim=64, repeat_num=3):
    """Downsampling encoder: 7x7 stem, two stride-2 conv stages, then
    ``repeat_num`` residual bottleneck blocks.

    Args:
        conv_dim (int): channel width after the stem convolution.
        repeat_num (int): number of ResidualBlocks in the bottleneck.
    """
    super(TNetDown, self).__init__()
    # Stem: 7x7 conv keeps spatial size (padding=3).
    blocks = [
        nn.Conv2D(3, conv_dim, kernel_size=7, stride=1, padding=3,
                  bias_attr=False),
        nn.InstanceNorm2D(conv_dim, weight_attr=False, bias_attr=False),
        nn.ReLU(),
    ]
    # Down-Sampling: two stride-2 stages, doubling channels each time.
    curr_dim = conv_dim
    for _ in range(2):
        blocks.extend([
            nn.Conv2D(curr_dim, curr_dim * 2, kernel_size=4, stride=2,
                      padding=1, bias_attr=False),
            nn.InstanceNorm2D(curr_dim * 2, weight_attr=False,
                              bias_attr=False),
            nn.ReLU(),
        ])
        curr_dim *= 2
    # Bottleneck
    for _ in range(repeat_num):
        blocks.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim,
                                    mode='t'))
    self.main = nn.Sequential(*blocks)
def __init__(self, dim_in, dim_out):
    """Three-stage conv block whose outputs (dim_out/2, dim_out/4, dim_out/4)
    are presumably concatenated back to dim_out by the caller, plus an
    optional 1x1 skip projection when channel counts differ.

    Args:
        dim_in (int): input channel count.
        dim_out (int): output channel count.
    """
    super(ConvBlock, self).__init__()
    self.dim_in = dim_in
    self.dim_out = dim_out
    half = dim_out // 2
    quarter = dim_out // 4
    self.conv_block1 = self.__convblock(dim_in, half)
    self.conv_block2 = self.__convblock(half, quarter)
    self.conv_block3 = self.__convblock(quarter, quarter)
    if self.dim_in != self.dim_out:
        # Project the skip path so it matches the block's output width.
        self.conv_skip = nn.Sequential(
            nn.InstanceNorm2D(dim_in, weight_attr=False, bias_attr=False),
            nn.ReLU(),
            nn.Conv2D(dim_in, dim_out, kernel_size=1, stride=1,
                      bias_attr=False),
        )
def __init__(self, dim_in, dim_out, use_res=True):
    """Hourglass module: HourGlassBlock + ConvBlock + 1x1 conv head, with
    optional residual re-projection convs.

    Args:
        dim_in (int): input channel count for the hourglass body.
        dim_out (int): working/output channel count.
        use_res (bool): when True, also build Conv2/Conv3 used for the
            residual path.
    """
    super(HourGlass, self).__init__()
    self.use_res = use_res
    body = [
        HourGlassBlock(dim_in),
        ConvBlock(dim_out, dim_out),
        nn.Conv2D(dim_out, dim_out, kernel_size=1, stride=1,
                  bias_attr=False),
        nn.InstanceNorm2D(dim_out, weight_attr=False, bias_attr=False),
        nn.ReLU(),
    ]
    self.HG = nn.Sequential(*body)
    # Maps features to a 3-channel map.
    self.Conv1 = nn.Conv2D(dim_out, 3, kernel_size=1, stride=1)
    if self.use_res:
        self.Conv2 = nn.Conv2D(dim_out, dim_out, kernel_size=1, stride=1)
        # Lifts the 3-channel map back to feature width.
        self.Conv3 = nn.Conv2D(3, dim_out, kernel_size=1, stride=1)
def __init__(self, img_size=256, style_dim=64, max_conv_dim=512, w_hpf=1):
    """Encoder/decoder generator with mirrored ResBlk / AdainResBlk stacks.

    Args:
        img_size (int): input resolution; also fixes the starting width
            (2**14 // img_size) and the number of down/up stages.
        style_dim (int): style-code dimensionality fed to AdainResBlk.
        max_conv_dim (int): channel cap when doubling per stage.
        w_hpf (float): high-pass weight; > 0 adds one extra stage and a
            HighPass module.
    """
    super().__init__()
    dim_in = 2**14 // img_size
    self.img_size = img_size
    self.from_rgb = nn.Conv2D(3, dim_in, 3, 1, 1)
    self.encode = nn.LayerList()
    self.decode = nn.LayerList()
    self.to_rgb = nn.Sequential(
        nn.InstanceNorm2D(dim_in, weight_attr=True, bias_attr=True),
        nn.LeakyReLU(0.2),
        nn.Conv2D(dim_in, 3, 1, 1, 0))
    # down/up-sampling blocks
    repeat_num = int(np.log2(img_size)) - 4
    if w_hpf > 0:
        repeat_num += 1
    for _ in range(repeat_num):
        dim_out = min(dim_in * 2, max_conv_dim)
        self.encode.append(
            ResBlk(dim_in, dim_out, normalize=True, downsample=True))
        up_block = AdainResBlk(dim_out, dim_in, style_dim, w_hpf=w_hpf,
                               upsample=True)
        # Prepend so decode mirrors encode (stack-like); the first block
        # must be appended since insert(0, ...) needs a non-empty list.
        if len(self.decode) == 0:
            self.decode.append(up_block)
        else:
            self.decode.insert(0, up_block)
        dim_in = dim_out
    # bottleneck blocks (no resolution change)
    for _ in range(2):
        self.encode.append(ResBlk(dim_out, dim_out, normalize=True))
        self.decode.insert(
            0, AdainResBlk(dim_out, dim_out, style_dim, w_hpf=w_hpf))
    if w_hpf > 0:
        self.hpf = HighPass(w_hpf)
def __init__(self, in_features, out_features, norm=False, kernel_size=4,
             pool=False, sn=False):
    """Conv block with optional spectral norm, instance norm and pooling flag.

    Args:
        in_features (int): input channel count.
        out_features (int): output channel count.
        norm (bool): attach an InstanceNorm2D when True.
        kernel_size (int): convolution kernel size.
        pool (bool): stored flag; pooling itself is presumably applied in
            forward() — not visible in this block.
        sn (bool): attach a SpectralNorm over the conv weight when True.
    """
    super(DownBlock2d, self).__init__()
    self.conv = nn.Conv2D(in_features, out_features, kernel_size=kernel_size)
    self.sn = nn.SpectralNorm(self.conv.weight.shape, dim=0) if sn else None
    self.norm = (nn.InstanceNorm2D(num_features=out_features, epsilon=1e-05)
                 if norm else None)
    self.pool = pool
def __init__(self, ngf=32, img_size=256, n_blocks=4, light=True):
    """U-GAT-IT-style generator: hourglass-augmented down path, CAM heads,
    soft-AdaLIN decoder bottleneck, and an LIN-normalized up path.

    Args:
        ngf (int): base channel width.
        img_size (int): input resolution (used for FC input size when
            ``light`` is False).
        n_blocks (int): number of encoder/decoder bottleneck blocks.
        light (bool): light variant — FC operates on pooled (ngf*mult)
            features instead of the flattened feature map.
    """
    super(ResnetGenerator, self).__init__()
    self.light = light
    self.n_blocks = n_blocks
    DownBlock = []
    # Stem: reflect pad + 7x7 conv keeps spatial size.
    DownBlock += [
        nn.Pad2D([3, 3, 3, 3], 'reflect'),
        nn.Conv2D(3, ngf, kernel_size=7, stride=1, bias_attr=False),
        nn.InstanceNorm2D(ngf, weight_attr=False, bias_attr=False),
        nn.ReLU()
    ]
    DownBlock += [HourGlass(ngf, ngf), HourGlass(ngf, ngf)]
    # Down-Sampling
    n_downsampling = 2
    for i in range(n_downsampling):
        mult = 2**i
        DownBlock += [
            nn.Pad2D([1, 1, 1, 1], 'reflect'),
            nn.Conv2D(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2,
                      bias_attr=False),
            nn.InstanceNorm2D(ngf * mult * 2, weight_attr=False,
                              bias_attr=False),
            nn.ReLU()
        ]
    # Encoder Bottleneck (blocks fetched by name in forward()).
    mult = 2**n_downsampling
    for i in range(n_blocks):
        setattr(self, 'EncodeBlock' + str(i + 1), ResnetBlock(ngf * mult))
    # Class Activation Map
    self.gap_fc = nn.Linear(ngf * mult, 1, bias_attr=False)
    self.gmp_fc = nn.Linear(ngf * mult, 1, bias_attr=False)
    # 1x1 conv fuses the concatenated GAP/GMP attention features.
    self.conv1x1 = nn.Conv2D(ngf * mult * 2, ngf * mult, kernel_size=1,
                             stride=1)
    self.relu = nn.ReLU()
    # Gamma, Beta block
    FC = []
    if self.light:
        FC += [
            nn.Linear(ngf * mult, ngf * mult, bias_attr=False),
            nn.ReLU(),
            nn.Linear(ngf * mult, ngf * mult, bias_attr=False),
            nn.ReLU()
        ]
    else:
        # Full variant consumes the flattened feature map.
        FC += [
            nn.Linear(img_size // mult * img_size // mult * ngf * mult,
                      ngf * mult, bias_attr=False),
            nn.ReLU(),
            nn.Linear(ngf * mult, ngf * mult, bias_attr=False),
            nn.ReLU()
        ]
    # Decoder Bottleneck
    mult = 2**n_downsampling
    for i in range(n_blocks):
        setattr(self, 'DecodeBlock' + str(i + 1),
                ResnetSoftAdaLINBlock(ngf * mult))
    # Up-Sampling
    UpBlock = []
    for i in range(n_downsampling):
        mult = 2**(n_downsampling - i)
        UpBlock += [
            nn.Upsample(scale_factor=2),
            nn.Pad2D([1, 1, 1, 1], 'reflect'),
            nn.Conv2D(ngf * mult, ngf * mult // 2, kernel_size=3, stride=1,
                      bias_attr=False),
            LIN(ngf * mult // 2),
            nn.ReLU()
        ]
    UpBlock += [HourGlass(ngf, ngf), HourGlass(ngf, ngf, False)]
    # Output head: 7x7 conv to RGB with tanh.
    UpBlock += [
        nn.Pad2D([3, 3, 3, 3], 'reflect'),
        nn.Conv2D(3, 3, kernel_size=7, stride=1, bias_attr=False),
        nn.Tanh()
    ]
    self.DownBlock = nn.Sequential(*DownBlock)
    self.FC = nn.Sequential(*FC)
    self.UpBlock = nn.Sequential(*UpBlock)
def func_test_layer_str(self):
    """Verify the ``str()`` output of paddle.nn layers.

    Pins the exact repr of activation, linear, upsampling, dropout, padding,
    similarity, embedding, conv, distance, normalization, pooling, RNN-cell,
    pixel-shuffle, multi-layer RNN and (nested) Sequential layers.
    """
    # --- activation layers ---
    module = nn.ELU(0.2)
    self.assertEqual(str(module), 'ELU(alpha=0.2)')
    module = nn.CELU(0.2)
    self.assertEqual(str(module), 'CELU(alpha=0.2)')
    module = nn.GELU(True)
    self.assertEqual(str(module), 'GELU(approximate=True)')
    module = nn.Hardshrink()
    self.assertEqual(str(module), 'Hardshrink(threshold=0.5)')
    module = nn.Hardswish(name="Hardswish")
    self.assertEqual(str(module), 'Hardswish(name=Hardswish)')
    module = nn.Tanh(name="Tanh")
    self.assertEqual(str(module), 'Tanh(name=Tanh)')
    module = nn.Hardtanh(name="Hardtanh")
    self.assertEqual(str(module),
                     'Hardtanh(min=-1.0, max=1.0, name=Hardtanh)')
    module = nn.PReLU(1, 0.25, name="PReLU", data_format="NCHW")
    self.assertEqual(
        str(module),
        'PReLU(num_parameters=1, data_format=NCHW, init=0.25, dtype=float32, name=PReLU)'
    )
    module = nn.ReLU()
    self.assertEqual(str(module), 'ReLU()')
    module = nn.ReLU6()
    self.assertEqual(str(module), 'ReLU6()')
    module = nn.SELU()
    self.assertEqual(
        str(module),
        'SELU(scale=1.0507009873554805, alpha=1.6732632423543772)')
    module = nn.LeakyReLU()
    self.assertEqual(str(module), 'LeakyReLU(negative_slope=0.01)')
    module = nn.Sigmoid()
    self.assertEqual(str(module), 'Sigmoid()')
    module = nn.Hardsigmoid()
    self.assertEqual(str(module), 'Hardsigmoid()')
    module = nn.Softplus()
    self.assertEqual(str(module), 'Softplus(beta=1, threshold=20)')
    module = nn.Softshrink()
    self.assertEqual(str(module), 'Softshrink(threshold=0.5)')
    module = nn.Softsign()
    self.assertEqual(str(module), 'Softsign()')
    module = nn.Swish()
    self.assertEqual(str(module), 'Swish()')
    module = nn.Tanhshrink()
    self.assertEqual(str(module), 'Tanhshrink()')
    module = nn.ThresholdedReLU()
    self.assertEqual(str(module), 'ThresholdedReLU(threshold=1.0)')
    module = nn.LogSigmoid()
    self.assertEqual(str(module), 'LogSigmoid()')
    module = nn.Softmax()
    self.assertEqual(str(module), 'Softmax(axis=-1)')
    module = nn.LogSoftmax()
    self.assertEqual(str(module), 'LogSoftmax(axis=-1)')
    module = nn.Maxout(groups=2)
    self.assertEqual(str(module), 'Maxout(groups=2, axis=1)')
    # --- linear / upsample / bilinear ---
    module = nn.Linear(2, 4, name='linear')
    self.assertEqual(
        str(module),
        'Linear(in_features=2, out_features=4, dtype=float32, name=linear)'
    )
    module = nn.Upsample(size=[12, 12])
    self.assertEqual(
        str(module),
        'Upsample(size=[12, 12], mode=nearest, align_corners=False, align_mode=0, data_format=NCHW)'
    )
    module = nn.UpsamplingNearest2D(size=[12, 12])
    self.assertEqual(
        str(module), 'UpsamplingNearest2D(size=[12, 12], data_format=NCHW)')
    module = nn.UpsamplingBilinear2D(size=[12, 12])
    self.assertEqual(
        str(module), 'UpsamplingBilinear2D(size=[12, 12], data_format=NCHW)')
    module = nn.Bilinear(in1_features=5, in2_features=4, out_features=1000)
    self.assertEqual(
        str(module),
        'Bilinear(in1_features=5, in2_features=4, out_features=1000, dtype=float32)'
    )
    # --- dropout variants ---
    module = nn.Dropout(p=0.5)
    self.assertEqual(str(module),
                     'Dropout(p=0.5, axis=None, mode=upscale_in_train)')
    module = nn.Dropout2D(p=0.5)
    self.assertEqual(str(module), 'Dropout2D(p=0.5, data_format=NCHW)')
    module = nn.Dropout3D(p=0.5)
    self.assertEqual(str(module), 'Dropout3D(p=0.5, data_format=NCDHW)')
    module = nn.AlphaDropout(p=0.5)
    self.assertEqual(str(module), 'AlphaDropout(p=0.5)')
    # --- padding layers ---
    module = nn.Pad1D(padding=[1, 2], mode='constant')
    self.assertEqual(
        str(module),
        'Pad1D(padding=[1, 2], mode=constant, value=0.0, data_format=NCL)')
    module = nn.Pad2D(padding=[1, 0, 1, 2], mode='constant')
    self.assertEqual(
        str(module),
        'Pad2D(padding=[1, 0, 1, 2], mode=constant, value=0.0, data_format=NCHW)'
    )
    module = nn.ZeroPad2D(padding=[1, 0, 1, 2])
    self.assertEqual(str(module),
                     'ZeroPad2D(padding=[1, 0, 1, 2], data_format=NCHW)')
    module = nn.Pad3D(padding=[1, 0, 1, 2, 0, 0], mode='constant')
    self.assertEqual(
        str(module),
        'Pad3D(padding=[1, 0, 1, 2, 0, 0], mode=constant, value=0.0, data_format=NCDHW)'
    )
    # --- similarity / embedding ---
    module = nn.CosineSimilarity(axis=0)
    self.assertEqual(str(module), 'CosineSimilarity(axis=0, eps=1e-08)')
    module = nn.Embedding(10, 3, sparse=True)
    self.assertEqual(str(module), 'Embedding(10, 3, sparse=True)')
    # --- convolution layers ---
    module = nn.Conv1D(3, 2, 3)
    self.assertEqual(str(module),
                     'Conv1D(3, 2, kernel_size=[3], data_format=NCL)')
    module = nn.Conv1DTranspose(2, 1, 2)
    self.assertEqual(
        str(module),
        'Conv1DTranspose(2, 1, kernel_size=[2], data_format=NCL)')
    module = nn.Conv2D(4, 6, (3, 3))
    self.assertEqual(str(module),
                     'Conv2D(4, 6, kernel_size=[3, 3], data_format=NCHW)')
    module = nn.Conv2DTranspose(4, 6, (3, 3))
    self.assertEqual(
        str(module),
        'Conv2DTranspose(4, 6, kernel_size=[3, 3], data_format=NCHW)')
    module = nn.Conv3D(4, 6, (3, 3, 3))
    self.assertEqual(
        str(module),
        'Conv3D(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)')
    module = nn.Conv3DTranspose(4, 6, (3, 3, 3))
    self.assertEqual(
        str(module),
        'Conv3DTranspose(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)')
    # --- distance / normalization layers ---
    module = nn.PairwiseDistance()
    self.assertEqual(str(module), 'PairwiseDistance(p=2.0)')
    module = nn.InstanceNorm1D(2)
    self.assertEqual(str(module),
                     'InstanceNorm1D(num_features=2, epsilon=1e-05)')
    module = nn.InstanceNorm2D(2)
    self.assertEqual(str(module),
                     'InstanceNorm2D(num_features=2, epsilon=1e-05)')
    module = nn.InstanceNorm3D(2)
    self.assertEqual(str(module),
                     'InstanceNorm3D(num_features=2, epsilon=1e-05)')
    module = nn.GroupNorm(num_channels=6, num_groups=6)
    self.assertEqual(
        str(module),
        'GroupNorm(num_groups=6, num_channels=6, epsilon=1e-05)')
    module = nn.LayerNorm([2, 2, 3])
    self.assertEqual(
        str(module),
        'LayerNorm(normalized_shape=[2, 2, 3], epsilon=1e-05)')
    module = nn.BatchNorm1D(1)
    self.assertEqual(
        str(module),
        'BatchNorm1D(num_features=1, momentum=0.9, epsilon=1e-05, data_format=NCL)'
    )
    module = nn.BatchNorm2D(1)
    self.assertEqual(
        str(module),
        'BatchNorm2D(num_features=1, momentum=0.9, epsilon=1e-05)')
    module = nn.BatchNorm3D(1)
    self.assertEqual(
        str(module),
        'BatchNorm3D(num_features=1, momentum=0.9, epsilon=1e-05, data_format=NCDHW)'
    )
    module = nn.SyncBatchNorm(2)
    self.assertEqual(
        str(module),
        'SyncBatchNorm(num_features=2, momentum=0.9, epsilon=1e-05)')
    module = nn.LocalResponseNorm(size=5)
    self.assertEqual(
        str(module),
        'LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=1.0)')
    # --- pooling layers ---
    module = nn.AvgPool1D(kernel_size=2, stride=2, padding=0)
    self.assertEqual(str(module),
                     'AvgPool1D(kernel_size=2, stride=2, padding=0)')
    module = nn.AvgPool2D(kernel_size=2, stride=2, padding=0)
    self.assertEqual(str(module),
                     'AvgPool2D(kernel_size=2, stride=2, padding=0)')
    module = nn.AvgPool3D(kernel_size=2, stride=2, padding=0)
    self.assertEqual(str(module),
                     'AvgPool3D(kernel_size=2, stride=2, padding=0)')
    module = nn.MaxPool1D(kernel_size=2, stride=2, padding=0)
    self.assertEqual(str(module),
                     'MaxPool1D(kernel_size=2, stride=2, padding=0)')
    module = nn.MaxPool2D(kernel_size=2, stride=2, padding=0)
    self.assertEqual(str(module),
                     'MaxPool2D(kernel_size=2, stride=2, padding=0)')
    module = nn.MaxPool3D(kernel_size=2, stride=2, padding=0)
    self.assertEqual(str(module),
                     'MaxPool3D(kernel_size=2, stride=2, padding=0)')
    module = nn.AdaptiveAvgPool1D(output_size=16)
    self.assertEqual(str(module), 'AdaptiveAvgPool1D(output_size=16)')
    module = nn.AdaptiveAvgPool2D(output_size=3)
    self.assertEqual(str(module), 'AdaptiveAvgPool2D(output_size=3)')
    module = nn.AdaptiveAvgPool3D(output_size=3)
    self.assertEqual(str(module), 'AdaptiveAvgPool3D(output_size=3)')
    module = nn.AdaptiveMaxPool1D(output_size=16, return_mask=True)
    self.assertEqual(
        str(module), 'AdaptiveMaxPool1D(output_size=16, return_mask=True)')
    module = nn.AdaptiveMaxPool2D(output_size=3, return_mask=True)
    self.assertEqual(str(module),
                     'AdaptiveMaxPool2D(output_size=3, return_mask=True)')
    module = nn.AdaptiveMaxPool3D(output_size=3, return_mask=True)
    self.assertEqual(str(module),
                     'AdaptiveMaxPool3D(output_size=3, return_mask=True)')
    # --- RNN cells / pixel shuffle ---
    module = nn.SimpleRNNCell(16, 32)
    self.assertEqual(str(module), 'SimpleRNNCell(16, 32)')
    module = nn.LSTMCell(16, 32)
    self.assertEqual(str(module), 'LSTMCell(16, 32)')
    module = nn.GRUCell(16, 32)
    self.assertEqual(str(module), 'GRUCell(16, 32)')
    module = nn.PixelShuffle(3)
    self.assertEqual(str(module), 'PixelShuffle(upscale_factor=3)')
    # --- multi-layer RNNs (repr spans multiple lines) ---
    module = nn.SimpleRNN(16, 32, 2)
    self.assertEqual(
        str(module),
        'SimpleRNN(16, 32, num_layers=2\n (0): RNN(\n (cell): SimpleRNNCell(16, 32)\n )\n (1): RNN(\n (cell): SimpleRNNCell(32, 32)\n )\n)'
    )
    module = nn.LSTM(16, 32, 2)
    self.assertEqual(
        str(module),
        'LSTM(16, 32, num_layers=2\n (0): RNN(\n (cell): LSTMCell(16, 32)\n )\n (1): RNN(\n (cell): LSTMCell(32, 32)\n )\n)'
    )
    module = nn.GRU(16, 32, 2)
    self.assertEqual(
        str(module),
        'GRU(16, 32, num_layers=2\n (0): RNN(\n (cell): GRUCell(16, 32)\n )\n (1): RNN(\n (cell): GRUCell(32, 32)\n )\n)'
    )
    # --- Sequential containers, including a nested named Sequential ---
    module1 = nn.Sequential(
        ('conv1', nn.Conv2D(1, 20, 5)), ('relu1', nn.ReLU()),
        ('conv2', nn.Conv2D(20, 64, 5)), ('relu2', nn.ReLU()))
    self.assertEqual(
        str(module1),
        'Sequential(\n '
        '(conv1): Conv2D(1, 20, kernel_size=[5, 5], data_format=NCHW)\n '
        '(relu1): ReLU()\n '
        '(conv2): Conv2D(20, 64, kernel_size=[5, 5], data_format=NCHW)\n '
        '(relu2): ReLU()\n)'
    )
    module2 = nn.Sequential(
        nn.Conv3DTranspose(4, 6, (3, 3, 3)),
        nn.AvgPool3D(kernel_size=2, stride=2, padding=0),
        nn.Tanh(name="Tanh"), module1, nn.Conv3D(4, 6, (3, 3, 3)),
        nn.MaxPool3D(kernel_size=2, stride=2, padding=0), nn.GELU(True))
    self.assertEqual(
        str(module2),
        'Sequential(\n '
        '(0): Conv3DTranspose(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)\n '
        '(1): AvgPool3D(kernel_size=2, stride=2, padding=0)\n '
        '(2): Tanh(name=Tanh)\n '
        '(3): Sequential(\n (conv1): Conv2D(1, 20, kernel_size=[5, 5], data_format=NCHW)\n (relu1): ReLU()\n'
        ' (conv2): Conv2D(20, 64, kernel_size=[5, 5], data_format=NCHW)\n (relu2): ReLU()\n )\n '
        '(4): Conv3D(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)\n '
        '(5): MaxPool3D(kernel_size=2, stride=2, padding=0)\n '
        '(6): GELU(approximate=True)\n)'
    )
def __init__(self, style_dim, num_features):
    """Adaptive instance norm: affine-free InstanceNorm whose scale/shift
    are produced from a style code by a linear layer.

    Args:
        style_dim (int): dimensionality of the style code.
        num_features (int): channel count to normalize; the fc layer emits
            2 * num_features values (scale and shift).
    """
    super().__init__()
    # No learned affine parameters — the style code supplies them.
    self.norm = nn.InstanceNorm2D(num_features,
                                  weight_attr=False,
                                  bias_attr=False)
    self.fc = nn.Linear(style_dim, num_features * 2)