def __init__(self, n_classes=40):
    super(DGCNN, self).__init__()
    self.k = 20
    self.knn = KNN(self.k)
    self.bn1 = nn.BatchNorm(64)
    self.bn2 = nn.BatchNorm(64)
    self.bn3 = nn.BatchNorm(128)
    self.bn4 = nn.BatchNorm(256)
    self.bn5 = nn.BatchNorm1d(1024)
    self.conv1 = nn.Sequential(nn.Conv(6, 64, kernel_size=1, bias=False),
                               self.bn1, nn.LeakyReLU(scale=0.2))
    self.conv2 = nn.Sequential(nn.Conv(64 * 2, 64, kernel_size=1, bias=False),
                               self.bn2, nn.LeakyReLU(scale=0.2))
    self.conv3 = nn.Sequential(nn.Conv(64 * 2, 128, kernel_size=1, bias=False),
                               self.bn3, nn.LeakyReLU(scale=0.2))
    self.conv4 = nn.Sequential(nn.Conv(128 * 2, 256, kernel_size=1, bias=False),
                               self.bn4, nn.LeakyReLU(scale=0.2))
    self.conv5 = nn.Sequential(nn.Conv1d(512, 1024, kernel_size=1, bias=False),
                               self.bn5, nn.LeakyReLU(scale=0.2))
    self.linear1 = nn.Linear(1024 * 2, 512, bias=False)
    self.bn6 = nn.BatchNorm1d(512)
    self.dp1 = nn.Dropout(p=0.5)
    self.linear2 = nn.Linear(512, 256)
    self.bn7 = nn.BatchNorm1d(256)
    self.dp2 = nn.Dropout(p=0.5)
    self.linear3 = nn.Linear(256, n_classes)

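# Hedged note on the activation used throughout these blocks: Jittor's
# nn.LeakyReLU takes `scale` where PyTorch takes `negative_slope`; both are
# the same slope coefficient for negative inputs. Minimal check:
import jittor as jt
from jittor import nn

act = nn.LeakyReLU(scale=0.2)
x = jt.array([-1.0, 0.0, 2.0])
print(act(x))  # negatives scaled by 0.2 -> [-0.2, 0.0, 2.0]
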
def __init__(self, part_num=50):
    super(Point_Transformer_partseg, self).__init__()
    self.part_num = part_num
    self.conv1 = nn.Conv1d(3, 128, kernel_size=1, bias=False)
    self.conv2 = nn.Conv1d(128, 128, kernel_size=1, bias=False)
    self.bn1 = nn.BatchNorm1d(128)
    self.bn2 = nn.BatchNorm1d(128)
    self.sa1 = SA_Layer(128)
    self.sa2 = SA_Layer(128)
    self.sa3 = SA_Layer(128)
    self.sa4 = SA_Layer(128)
    self.conv_fuse = nn.Sequential(nn.Conv1d(512, 1024, kernel_size=1, bias=False),
                                   nn.BatchNorm1d(1024),
                                   nn.LeakyReLU(scale=0.2))
    self.label_conv = nn.Sequential(nn.Conv1d(16, 64, kernel_size=1, bias=False),
                                    nn.BatchNorm1d(64),
                                    nn.LeakyReLU(scale=0.2))
    self.convs1 = nn.Conv1d(1024 * 3 + 64, 512, 1)
    self.dp1 = nn.Dropout(0.5)
    self.convs2 = nn.Conv1d(512, 256, 1)
    self.convs3 = nn.Conv1d(256, self.part_num, 1)
    self.bns1 = nn.BatchNorm1d(512)
    self.bns2 = nn.BatchNorm1d(256)
    self.relu = nn.ReLU()

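# A minimal sketch (not the repo's execute(); assumes SA_Layer preserves the
# 128-channel width) of why conv_fuse expects 512 input channels: the four
# self-attention outputs are concatenated along the channel axis before fusion.
import jittor as jt

def fuse_sa(net, x):                        # x: (B, 128, N)
    x1 = net.sa1(x)
    x2 = net.sa2(x1)
    x3 = net.sa3(x2)
    x4 = net.sa4(x3)
    x = jt.concat([x1, x2, x3, x4], dim=1)  # (B, 4 * 128 = 512, N)
    return net.conv_fuse(x)                 # (B, 1024, N)
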
def __init__(self, in_channel, out_channel, kernel_size=3, padding=1,
             style_dim=512, initial=False, upsample=False, fused=False):
    super(StyledConvBlock, self).__init__()
    if initial:
        self.conv1 = ConstantInput(in_channel)
    else:
        if upsample:
            if fused:
                self.conv1 = nn.Sequential(
                    # FusedUpsample(in_channel, out_channel, kernel_size, padding=padding),
                    # Blur(out_channel),
                    nn.Upsample(scale_factor=2, mode='nearest'),
                    nn.Conv2d(in_channel, out_channel, kernel_size, padding=padding),  # todo: equal
                )
            else:
                self.conv1 = nn.Sequential(
                    nn.Upsample(scale_factor=2, mode='nearest'),
                    nn.Conv2d(in_channel, out_channel, kernel_size, padding=padding),  # todo: equal
                    Blur(out_channel),
                )
        else:
            # Assumed completion: the non-upsample branch was missing in the
            # flattened source; without it conv1 is undefined for
            # initial=False, upsample=False. Mirrors the surrounding convs.
            self.conv1 = nn.Conv2d(in_channel, out_channel, kernel_size, padding=padding)  # todo: equal
    self.noise1 = NoiseInjection(out_channel)
    self.adain1 = AdaptiveInstanceNorm(out_channel, style_dim)
    self.lrelu1 = nn.LeakyReLU(0.2)
    self.conv2 = nn.Conv2d(out_channel, out_channel, kernel_size, padding=padding)
    self.noise2 = NoiseInjection(out_channel)
    self.adain2 = AdaptiveInstanceNorm(out_channel, style_dim)
    self.lrelu2 = nn.LeakyReLU(0.2)

def __init__(self):
    super(Discriminator, self).__init__()
    self.model = nn.Sequential(nn.Linear(int(np.prod(img_shape)), 512),
                               nn.LeakyReLU(scale=0.2),
                               nn.Linear(512, 256),
                               nn.LeakyReLU(scale=0.2),
                               nn.Linear(256, 1),
                               nn.Sigmoid())

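# Hedged usage sketch: the MLP scores flattened images, so callers reshape
# first. `img_shape` is assumed to be a module-level tuple such as (1, 28, 28).
import jittor as jt

d = Discriminator()
imgs = jt.random((16,) + tuple(img_shape))
validity = d.model(imgs.reshape((imgs.shape[0], -1)))  # (16, 1), values in [0, 1]
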
def __init__(self, in_channel, out_channel, kernel_size, padding,
             downsample=False, fused=False):
    super(ConvBlock, self).__init__()
    self.conv1 = nn.Sequential(
        nn.Conv2d(in_channel, out_channel, kernel_size, padding=padding),
        nn.LeakyReLU(0.2),
    )
    if downsample:
        # Note: in this port the fused and non-fused downsample paths are
        # identical; a fused downsample op is not used.
        if fused:
            self.conv2 = nn.Sequential(
                nn.Conv2d(out_channel, out_channel, kernel_size, padding=padding),
                nn.Pool(2),
                nn.LeakyReLU(0.2),
            )
        else:
            self.conv2 = nn.Sequential(
                nn.Conv2d(out_channel, out_channel, kernel_size, padding=padding),
                nn.Pool(2),
                nn.LeakyReLU(0.2),
            )
    else:
        self.conv2 = nn.Sequential(
            nn.Conv2d(out_channel, out_channel, kernel_size, padding=padding),
            nn.LeakyReLU(0.2),
        )

def __init__(self, channels, filters=64, num_res_blocks=16, num_upsample=2):
    super(GeneratorRRDB, self).__init__()
    self.conv1 = nn.Conv(channels, filters, 3, stride=1, padding=1)
    self.res_blocks = nn.Sequential(*[
        ResidualInResidualDenseBlock(filters) for _ in range(num_res_blocks)
    ])
    self.conv2 = nn.Conv(filters, filters, 3, stride=1, padding=1)
    upsample_layers = []
    for _ in range(num_upsample):
        upsample_layers += [
            nn.Conv(filters, filters * 4, 3, stride=1, padding=1),
            nn.LeakyReLU(),
            nn.PixelShuffle(upscale_factor=2),
        ]
    self.upsampling = nn.Sequential(*upsample_layers)
    self.conv3 = nn.Sequential(
        nn.Conv(filters, filters, 3, stride=1, padding=1),
        nn.LeakyReLU(),
        nn.Conv(filters, channels, 3, stride=1, padding=1),
    )
    for m in self.modules():
        weights_init_normal(m)

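# Why the conv before PixelShuffle widens to filters * 4: pixel shuffle trades
# channels for space, dividing channels by r**2 and multiplying H and W by r,
# so the channel count returns to `filters` after each upsample stage.
import jittor as jt
from jittor import nn

x = jt.random((1, 64 * 4, 24, 24))
y = nn.PixelShuffle(upscale_factor=2)(x)
print(y.shape)  # [1, 64, 48, 48]
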
def __init__(self):
    super(Discriminator, self).__init__()
    self.label_embedding = nn.Embedding(n_classes, n_classes)
    self.model = nn.Sequential(
        nn.Linear(n_classes + int(np.prod(img_shape)), 512),
        nn.LeakyReLU(0.2),
        nn.Linear(512, 512),
        nn.Dropout(0.4),
        nn.LeakyReLU(0.2),
        nn.Linear(512, 512),
        nn.Dropout(0.4),
        nn.LeakyReLU(0.2),
        nn.Linear(512, 1),
    )

def discriminator_block(in_filters, out_filters, first_block=False):
    layers = []
    layers.append(nn.Conv(in_filters, out_filters, 3, stride=1, padding=1))
    if not first_block:
        layers.append(nn.BatchNorm(out_filters))
    layers.append(nn.LeakyReLU(scale=0.2))
    layers.append(nn.Conv(out_filters, out_filters, 3, stride=2, padding=1))
    layers.append(nn.BatchNorm(out_filters))
    layers.append(nn.LeakyReLU(scale=0.2))
    return layers

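# Hedged usage sketch (channel sizes and 3-channel input are illustrative, in
# the style of an SRGAN-like discriminator): each block halves the spatial
# resolution via its stride-2 conv.
from jittor import nn

layers, in_f = [], 3
for i, out_f in enumerate([64, 128, 256, 512]):
    layers.extend(discriminator_block(in_f, out_f, first_block=(i == 0)))
    in_f = out_f
model = nn.Sequential(*layers)
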
def test_relu(self):
    # ***************************************************************
    # Test ReLU Layer
    # ***************************************************************
    arr = np.random.randn(16, 10, 224, 224)
    check_equal(arr, jnn.ReLU(), tnn.ReLU())

    # ***************************************************************
    # Test PReLU Layer
    # ***************************************************************
    arr = np.random.randn(16, 10, 224, 224)
    check_equal(arr, jnn.PReLU(), tnn.PReLU())
    check_equal(arr, jnn.PReLU(10, 99.9), tnn.PReLU(10, 99.9))
    check_equal(arr, jnn.PReLU(10, 2), tnn.PReLU(10, 2))
    check_equal(arr, jnn.PReLU(10, -0.2), tnn.PReLU(10, -0.2))

    # ***************************************************************
    # Test ReLU6 Layer
    # ***************************************************************
    arr = np.random.randn(16, 10, 224, 224)
    check_equal(arr, jnn.ReLU6(), tnn.ReLU6())

    # ***************************************************************
    # Test LeakyReLU Layer
    # ***************************************************************
    arr = np.random.randn(16, 10, 224, 224)
    check_equal(arr, jnn.LeakyReLU(), tnn.LeakyReLU())
    check_equal(arr, jnn.LeakyReLU(2), tnn.LeakyReLU(2))
    check_equal(arr, jnn.LeakyReLU(99.9), tnn.LeakyReLU(99.9))

    # ***************************************************************
    # Test ELU Layer
    # ***************************************************************
    arr = np.random.randn(16, 10, 224, 224)
    check_equal(arr, jnn.ELU(), tnn.ELU())
    check_equal(arr, jnn.ELU(0.3), tnn.ELU(0.3))
    check_equal(arr, jnn.ELU(2), tnn.ELU(2))
    check_equal(arr, jnn.ELU(99.9), tnn.ELU(99.9))

    # ***************************************************************
    # Test GELU Layer
    # ***************************************************************
    if hasattr(tnn, "GELU"):
        arr = np.random.randn(16, 10, 224, 224)
        check_equal(arr, jnn.GELU(), tnn.GELU())

    # ***************************************************************
    # Test Softplus Layer
    # ***************************************************************
    arr = np.random.randn(16, 10, 224, 224)
    check_equal(arr, jnn.Softplus(), tnn.Softplus())
    check_equal(arr, jnn.Softplus(2), tnn.Softplus(2))
    check_equal(arr, jnn.Softplus(2, 99.9), tnn.Softplus(2, 99.9))

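# The tests above assume a check_equal helper. A plausible minimal sketch
# (an assumption, not the repo's exact implementation): run the same input
# through the Jittor layer (jnn) and the PyTorch layer (tnn) and compare.
import numpy as np
import torch
import jittor as jt

def check_equal(arr, j_layer, t_layer, atol=1e-5):
    j_out = j_layer(jt.array(arr.astype(np.float32))).numpy()
    t_out = t_layer(torch.tensor(arr, dtype=torch.float32)).detach().numpy()
    assert np.allclose(j_out, t_out, atol=atol)
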
def __init__(self, cin, cout, zdim=128, nf=64):
    super(ConfNet, self).__init__()
    network = [
        nn.Conv(cin, nf, 4, stride=2, padding=1, bias=False),
        nn.GroupNorm(16, nf),
        nn.LeakyReLU(scale=0.2),
        nn.Conv(nf, nf * 2, 4, stride=2, padding=1, bias=False),
        nn.GroupNorm(16 * 2, nf * 2),
        nn.LeakyReLU(scale=0.2),
        nn.Conv(nf * 2, nf * 4, 4, stride=2, padding=1, bias=False),
        nn.GroupNorm(16 * 4, nf * 4),
        nn.LeakyReLU(scale=0.2),
        nn.Conv(nf * 4, nf * 8, 4, stride=2, padding=1, bias=False),
        nn.LeakyReLU(scale=0.2),
        nn.Conv(nf * 8, zdim, 4, stride=1, padding=0, bias=False),
        nn.ReLU(),
    ]
    network += [
        nn.ConvTranspose(zdim, nf * 8, 4, padding=0, bias=False),
        nn.ReLU(),
        nn.ConvTranspose(nf * 8, nf * 4, 4, stride=2, padding=1, bias=False),
        nn.GroupNorm(16 * 4, nf * 4),
        nn.ReLU(),
        nn.ConvTranspose(nf * 4, nf * 2, 4, stride=2, padding=1, bias=False),
        nn.GroupNorm(16 * 2, nf * 2),
        nn.ReLU(),
    ]
    self.network = nn.Sequential(*network)
    out_net1 = [
        nn.ConvTranspose(nf * 2, nf, 4, stride=2, padding=1, bias=False),
        nn.GroupNorm(16, nf),
        nn.ReLU(),
        nn.ConvTranspose(nf, nf, 4, stride=2, padding=1, bias=False),
        nn.GroupNorm(16, nf),
        nn.ReLU(),
        nn.Conv(nf, 2, 5, stride=1, padding=2, bias=False),
        nn.Softplus(),
    ]
    self.out_net1 = nn.Sequential(*out_net1)
    out_net2 = [
        nn.Conv(nf * 2, 2, 3, stride=1, padding=1, bias=False),
        nn.Softplus(),
    ]
    self.out_net2 = nn.Sequential(*out_net2)

def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.InstanceNorm2d,
             use_sigmoid=False, getIntermFeat=False):
    super(NLayerDiscriminator, self).__init__()
    self.getIntermFeat = getIntermFeat
    self.n_layers = n_layers
    kw = 4
    padw = int(np.ceil((kw - 1.0) / 2))
    padw = 1  # note: overrides the computed value above (2) with 1
    sequence = [[
        nn.Conv(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
        nn.LeakyReLU(0.2),
    ]]
    nf = ndf
    for n in range(1, n_layers):
        nf_prev = nf
        nf = min(nf * 2, 512)
        sequence += [[
            nn.Conv(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),
            # norm_layer(nf),
            nn.LeakyReLU(0.2),
        ]]
    nf_prev = nf
    nf = min(nf * 2, 512)
    sequence += [[
        nn.Conv(nf_prev, nf, kernel_size=kw, stride=1, padding=1),
        # norm_layer(nf),
        nn.LeakyReLU(0.2),
    ]]
    sequence += [[nn.Conv(nf, 1, kernel_size=kw, stride=1, padding=2)]]
    if use_sigmoid:
        sequence += [[nn.Sigmoid()]]
    if getIntermFeat:
        for n in range(len(sequence)):
            setattr(self, 'model' + str(n), nn.Sequential(*sequence[n]))
    else:
        sequence_stream = []
        for n in range(len(sequence)):
            sequence_stream += sequence[n]
        self.model = nn.Sequential(*sequence_stream)

def __init__(self, part_num):
    super(DGCNN_partseg, self).__init__()
    self.seg_num_all = part_num
    self.k = 40
    self.knn = KNN(self.k)
    self.bn1 = nn.BatchNorm2d(64)
    self.bn2 = nn.BatchNorm2d(64)
    self.bn3 = nn.BatchNorm2d(64)
    self.bn4 = nn.BatchNorm2d(64)
    self.bn5 = nn.BatchNorm2d(64)
    self.bn6 = nn.BatchNorm1d(1024)
    self.bn7 = nn.BatchNorm1d(64)
    self.bn8 = nn.BatchNorm1d(256)
    self.bn9 = nn.BatchNorm1d(256)
    self.bn10 = nn.BatchNorm1d(128)
    self.conv1 = nn.Sequential(nn.Conv2d(6, 64, kernel_size=1, bias=False),
                               self.bn1, nn.LeakyReLU(scale=0.2))
    self.conv2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=False),
                               self.bn2, nn.LeakyReLU(scale=0.2))
    self.conv3 = nn.Sequential(nn.Conv2d(64 * 2, 64, kernel_size=1, bias=False),
                               self.bn3, nn.LeakyReLU(scale=0.2))
    self.conv4 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=False),
                               self.bn4, nn.LeakyReLU(scale=0.2))
    self.conv5 = nn.Sequential(nn.Conv2d(64 * 2, 64, kernel_size=1, bias=False),
                               self.bn5, nn.LeakyReLU(scale=0.2))
    self.conv6 = nn.Sequential(nn.Conv1d(192, 1024, kernel_size=1, bias=False),
                               self.bn6, nn.LeakyReLU(scale=0.2))
    self.conv7 = nn.Sequential(nn.Conv1d(16, 64, kernel_size=1, bias=False),
                               self.bn7, nn.LeakyReLU(scale=0.2))
    self.conv8 = nn.Sequential(nn.Conv1d(1280, 256, kernel_size=1, bias=False),
                               self.bn8, nn.LeakyReLU(scale=0.2))
    self.dp1 = nn.Dropout(p=0.5)
    self.conv9 = nn.Sequential(nn.Conv1d(256, 256, kernel_size=1, bias=False),
                               self.bn9, nn.LeakyReLU(scale=0.2))
    self.dp2 = nn.Dropout(p=0.5)
    self.conv10 = nn.Sequential(nn.Conv1d(256, 128, kernel_size=1, bias=False),
                                self.bn10, nn.LeakyReLU(scale=0.2))
    self.conv11 = nn.Conv1d(128, self.seg_num_all, kernel_size=1, bias=False)

def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
    # ch_in, ch_out, kernel, stride, padding, groups
    assert isinstance(self.use_v3, bool), \
        "You need to decide whether use_yolov3 is True or False"
    super(Conv, self).__init__()
    if isinstance(k, list):
        assert len(k) <= 2 and k[0] == k[-1]
        k = k[0]
    if isinstance(s, list):
        assert len(s) <= 2 and s[0] == s[-1]
        s = s[0]
    self.conv = nn.Conv(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
    self.bn = nn.BatchNorm(c2)
    self.act = (nn.LeakyReLU(0.1) if self.use_v3 else SiLU()) if act is True \
        else (act if isinstance(act, nn.Module) else nn.Identity())

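# The autopad helper referenced above is assumed to live elsewhere in the
# repo; a common definition (hedged, matching the usual YOLO-style 'same'
# padding for odd kernels) would be:
def autopad(k, p=None):
    if p is None:
        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]
    return p
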
def discriminator_block(in_filters, out_filters, stride=2, normalization=True):
    """Returns downsampling layers of each discriminator block"""
    layers = [nn.Conv(in_filters, out_filters, 4, stride=stride, padding=1)]
    if normalization:
        layers.append(nn.BatchNorm2d(out_filters))
    layers.append(nn.LeakyReLU(scale=0.2))
    return layers

def __init__(self, output_channels=40):
    super(Point_Transformer, self).__init__()
    self.conv1 = nn.Conv1d(3, 128, kernel_size=1, bias=False)
    self.conv2 = nn.Conv1d(128, 128, kernel_size=1, bias=False)
    self.bn1 = nn.BatchNorm1d(128)
    self.bn2 = nn.BatchNorm1d(128)
    self.sa1 = SA_Layer(128)
    self.sa2 = SA_Layer(128)
    self.sa3 = SA_Layer(128)
    self.sa4 = SA_Layer(128)
    self.conv_fuse = nn.Sequential(nn.Conv1d(512, 1024, kernel_size=1, bias=False),
                                   nn.BatchNorm1d(1024),
                                   nn.LeakyReLU(scale=0.2))
    self.linear1 = nn.Linear(1024, 512, bias=False)
    self.bn6 = nn.BatchNorm1d(512)
    self.dp1 = nn.Dropout(p=0.5)
    self.linear2 = nn.Linear(512, 256)
    self.bn7 = nn.BatchNorm1d(256)
    self.dp2 = nn.Dropout(p=0.5)
    self.linear3 = nn.Linear(256, output_channels)
    self.relu = nn.ReLU()

def discriminator_block(in_filters, out_filters, bn=True):
    block = [nn.Conv(in_filters, out_filters, 3, stride=2, padding=1),
             nn.LeakyReLU(scale=0.2),
             nn.Dropout(p=0.25)]
    if bn:
        block.append(nn.BatchNorm(out_filters, eps=0.8))
    for m in block:
        weights_init_normal(m)
    return block

def __init__(self, input_nc, num_plane, num_quat, biasTerms,
             activation=nn.LeakyReLU(scale=0.2)):
    super(symPred, self).__init__()
    self.num_quat = num_quat
    for i in range(self.num_quat):
        quatLayer = [
            nn.Linear(input_nc, int(input_nc / 2)),
            activation,
            nn.Linear(int(input_nc / 2), int(input_nc / 4)),
            activation,
        ]
        last = nn.Linear(int(input_nc / 4), 4)
        last.bias.data = jt.transform.to_tensor(
            jt.array(biasTerms['quat' + str(i + 1)]))
        quatLayer += [last]
        setattr(self, 'quatLayer' + str(i + 1), nn.Sequential(*quatLayer))
    self.num_plane = num_plane
    for i in range(self.num_plane):
        planeLayer = [
            nn.Linear(int(input_nc), int(input_nc / 2)),
            activation,
            nn.Linear(int(input_nc / 2), int(input_nc / 4)),
            activation,
        ]
        last = nn.Linear(int(input_nc / 4), 4)
        last.weight.data = jt.zeros((4, int(input_nc / 4)))
        last.bias.data = jt.transform.to_tensor(
            jt.array(biasTerms['plane' + str(i + 1)])).float()
        planeLayer += [last]
        setattr(self, 'planeLayer' + str(i + 1), nn.Sequential(*planeLayer))

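# Hedged sketch of the biasTerms layout this constructor expects: keys
# 'quat1'..'quatN' and 'plane1'..'planeN', each a length-4 vector (the final
# nn.Linear of every branch has 4 outputs). Values here are illustrative only.
biasTerms = {
    'quat1': [1.0, 0.0, 0.0, 0.0],   # e.g. an identity quaternion
    'plane1': [1.0, 0.0, 0.0, 0.0],  # e.g. a plane parameter vector
}
net = symPred(input_nc=128, num_plane=1, num_quat=1, biasTerms=biasTerms)
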
def __init__(self):
    super(Generator, self).__init__()
    self.init_size = opt.img_size // 4
    self.l1 = nn.Sequential(nn.Linear(opt.latent_dim, 128 * self.init_size ** 2))
    self.conv_blocks = nn.Sequential(
        nn.BatchNorm(128),
        nn.Upsample(scale_factor=2),
        nn.Conv(128, 128, 3, stride=1, padding=1),
        nn.BatchNorm(128, eps=0.8),
        nn.LeakyReLU(scale=0.2),
        nn.Upsample(scale_factor=2),
        nn.Conv(128, 64, 3, stride=1, padding=1),
        nn.BatchNorm(64, eps=0.8),
        nn.LeakyReLU(scale=0.2),
        nn.Conv(64, opt.channels, 3, stride=1, padding=1),
        nn.Tanh(),
    )
    for m in self.conv_blocks:
        weights_init_normal(m)

def discriminator_block(in_filters, out_filters, normalize=True):
    """Returns downsampling layers of each discriminator block"""
    layers = [nn.Conv(in_filters, out_filters, 4, stride=2, padding=1)]
    if normalize:
        layers.append(nn.InstanceNorm2d(out_filters, affine=None))
    layers.append(nn.LeakyReLU(scale=0.2))
    return layers

def __init__(self, in_size, out_size, inner_nc, dropout=0.0,
             innermost=False, outermost=False, submodule=None):
    super(UnetBlock, self).__init__()
    self.outermost = outermost
    downconv = nn.Conv(in_size, inner_nc, 4, stride=2, padding=1, bias=False)
    downnorm = nn.BatchNorm2d(inner_nc)
    downrelu = nn.LeakyReLU(0.2)
    upnorm = nn.BatchNorm2d(out_size)
    uprelu = nn.ReLU()
    if outermost:
        upconv = nn.ConvTranspose(2 * inner_nc, out_size, 4, stride=2, padding=1)
        down = [downconv]
        up = [uprelu, upconv, nn.Tanh()]
        model = down + [submodule] + up
    elif innermost:
        upconv = nn.ConvTranspose(inner_nc, out_size, 4, stride=2, padding=1, bias=False)
        down = [downrelu, downconv]
        up = [uprelu, upconv, upnorm]
        model = down + up
    else:
        upconv = nn.ConvTranspose(2 * inner_nc, out_size, 4, stride=2, padding=1, bias=False)
        down = [downrelu, downconv, downnorm]
        up = [uprelu, upconv, upnorm]
        if dropout:
            model = down + [submodule] + up + [nn.Dropout(dropout)]
        else:
            model = down + [submodule] + up
    self.model = nn.Sequential(*model)
    for m in self.modules():
        weights_init_normal(m)

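# Hedged sketch of the usual recursive U-Net assembly (channel sizes are
# illustrative; assumes, as in pix2pix-style blocks, that forward concatenates
# each block's input with its output, which is why upconv takes 2 * inner_nc):
block = UnetBlock(512, 512, 512, innermost=True)
block = UnetBlock(512, 512, 512, submodule=block, dropout=0.5)
block = UnetBlock(256, 256, 512, submodule=block)
block = UnetBlock(128, 128, 256, submodule=block)
block = UnetBlock(64, 64, 128, submodule=block)
unet = UnetBlock(3, 3, 64, submodule=block, outermost=True)
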
def __init__(self, channel_in, channel_out, kernel_size=4, padding=1, stride=2,
             output_padding=0, norelu=False):
    super(DecoderBlock, self).__init__()
    layers_list = []
    layers_list.append(nn.ConvTranspose(channel_in, channel_out, kernel_size,
                                        padding=padding, stride=stride,
                                        output_padding=output_padding))
    layers_list.append(nn.BatchNorm(channel_out, momentum=0.9))
    if not norelu:
        # note: a slope of 1 makes this LeakyReLU the identity map
        layers_list.append(nn.LeakyReLU(1))
    self.conv = nn.Sequential(*layers_list)

def discriminator_block(in_filters, out_filters):
    """Returns downsampling layers of each discriminator block"""
    layers = [nn.Conv(in_filters, out_filters, 4, stride=2, padding=1),
              nn.LeakyReLU(scale=0.01)]
    for m in layers:
        weights_init_normal(m)
    return layers

def __init__(self, input_nc, ngf=64):
    super(Regressor4, self).__init__()
    use_bias = True
    sequence = [
        nn.Conv(input_nc, ngf, 3, stride=1, padding=1, bias=use_bias),     # 11 -> 11
        nn.LeakyReLU(0.2),
        nn.Conv(ngf, ngf * 2, 3, stride=1, padding=1, bias=use_bias),      # 11 -> 11
        nn.LeakyReLU(0.2),
        nn.Conv(ngf * 2, ngf * 4, 3, stride=1, padding=1, bias=use_bias),  # 11 -> 11
        nn.LeakyReLU(0.2),
        nn.Conv(ngf * 4, 1, 11, stride=1, padding=0, bias=use_bias),       # 11 -> 1
    ]
    self.model = nn.Sequential(*sequence)
    for m in self.modules():
        weights_init_normal(m)

def block(in_features, out_features, normalization=True):
    """Classifier block"""
    layers = [nn.Conv(in_features, out_features, 3, stride=2, padding=1),
              nn.LeakyReLU(scale=0.2)]
    if normalization:
        layers.append(nn.InstanceNorm2d(out_features, affine=None))
    return layers

def discriminator_block(in_filters, out_filters, bn=True):
    """Returns layers of each discriminator block"""
    block = [nn.Conv(in_filters, out_filters, 3, stride=2, padding=1),
             nn.LeakyReLU(scale=0.2),
             nn.Dropout(p=0.25)]
    if bn:
        block.append(nn.BatchNorm(out_filters, eps=0.8))
    return block

def __init__(self, code_dim=512, n_mlp=8):
    super(StyledGenerator, self).__init__()
    self.generator = Generator(code_dim)
    layers = [PixelNorm()]
    for i in range(n_mlp):
        layers.append(nn.Linear(code_dim, code_dim))
        layers.append(nn.LeakyReLU(0.2))
    self.style = nn.Sequential(*layers)

def __init__(self, in_size, out_size, normalize=True, dropout=0.0):
    super(UNetDown, self).__init__()
    layers = [nn.Conv(in_size, out_size, 4, stride=2, padding=1, bias=False)]
    if normalize:
        layers.append(nn.BatchNorm2d(out_size))
    layers.append(nn.LeakyReLU(scale=0.2))
    if dropout:
        layers.append(nn.Dropout(dropout))
    self.model = nn.Sequential(*layers)

def darknetconvlayer(in_channels, out_channels, *args, **kwdargs):
    """
    Implements a conv, batch norm, then activation.
    Arguments are passed into the conv layer.
    """
    return nn.Sequential(
        nn.Conv(in_channels, out_channels, *args, **kwdargs, bias=False),
        nn.BatchNorm(out_channels),
        # Darknet uses 0.1 here.
        # See https://github.com/pjreddie/darknet/blob/680d3bde1924c8ee2d1c1dea54d3e56a05ca9a26/src/activations.h#L39
        nn.LeakyReLU(0.1),
    )

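# Example usage (channel and image sizes are illustrative): a 3x3, stride-2
# darknet layer halves the spatial resolution.
import jittor as jt

layer = darknetconvlayer(32, 64, kernel_size=3, stride=2, padding=1)
y = layer(jt.random((1, 32, 128, 128)))
print(y.shape)  # [1, 64, 64, 64]
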
def block(in_features, non_linearity=True):
    # `filters` is taken from the enclosing scope.
    layers = [nn.Conv(in_features, filters, 3, stride=1, padding=1, bias=True)]
    if non_linearity:
        layers += [nn.LeakyReLU()]
    return nn.Sequential(*layers)