Example #1
    def __init__(self, latent_dim, output_nc, size=64, ngf=64):
        """Construct a Deep Convolutional generator
        Args:
            latent_dim (int)    -- the number of latent dimensions
            output_nc (int)     -- the number of channels in output images
            size (int)          -- size of output tensor
            ngf (int)           -- the number of filters in the last conv layer

        Refer to https://arxiv.org/abs/1511.06434
        """
        super(DeepConvGenerator, self).__init__()

        self.latent_dim = latent_dim
        self.ngf = ngf
        self.init_size = size // 4
        self.l1 = nn.Sequential(
            nn.Linear(latent_dim, ngf * 2 * self.init_size**2))

        self.conv_blocks = nn.Sequential(
            nn.BatchNorm2D(ngf * 2),
            nn.Upsample(scale_factor=2),
            nn.Conv2D(ngf * 2, ngf * 2, 3, stride=1, padding=1),
            nn.BatchNorm2D(ngf * 2, 0.2),
            nn.LeakyReLU(0.2),
            nn.Upsample(scale_factor=2),
            nn.Conv2D(ngf * 2, ngf, 3, stride=1, padding=1),
            nn.BatchNorm2D(ngf, 0.2),
            nn.LeakyReLU(0.2),
            nn.Conv2D(ngf, output_nc, 3, stride=1, padding=1),
            nn.Tanh(),
        )
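The excerpt stops at the constructor. A plausible forward pass, assuming the usual DCGAN wiring (project with l1, reshape to init_size feature maps, then run the conv stack; the method itself is not shown):

    def forward(self, z):
        # Hypothetical forward; only __init__ appears in the excerpt.
        out = self.l1(z)                # [N, ngf*2*init_size^2]
        out = out.reshape(
            [-1, self.ngf * 2, self.init_size, self.init_size])
        return self.conv_blocks(out)    # [N, output_nc, size, size]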
Example #2
 def __init__(self, name_scope='VoxNet_', num_classes=10):
     super(VoxNet, self).__init__()
     self.backbone = nn.Sequential(nn.Conv3D(1, 32, 5, 2), nn.BatchNorm(32),
                                   nn.LeakyReLU(), nn.Conv3D(32, 32, 3, 1),
                                   nn.MaxPool3D(2, 2, 0))
     self.head = nn.Sequential(nn.Linear(32 * 6 * 6 * 6, 128),
                               nn.LeakyReLU(), nn.Dropout(0.2),
                               nn.Linear(128, num_classes))
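The 32 * 6 * 6 * 6 input to the head implies 32x32x32 voxel grids: 32 -> conv(5, stride 2) -> 14 -> conv(3, stride 1) -> 12 -> pool(2) -> 6. A quick shape check, as a sketch assuming the class above is defined:

import paddle

net = VoxNet()
x = paddle.randn([1, 1, 32, 32, 32])    # single-channel voxel grid
feat = net.backbone(x)                  # [1, 32, 6, 6, 6]
logits = net.head(feat.flatten(1))      # [1, num_classes]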
Example #3
 def __init__(self):
     super(Discriminator, self).__init__()
     self.dis = nn.Sequential(
         nn.Conv2D(1, 64, 4, 2, 1, bias_attr=False), nn.LeakyReLU(0.2),
         nn.Conv2D(64, 64 * 2, 4, 2, 1, bias_attr=False),
         nn.BatchNorm2D(64 * 2), nn.LeakyReLU(0.2),
         nn.Conv2D(64 * 2, 64 * 4, 4, 2, 1, bias_attr=False),
         nn.BatchNorm2D(64 * 4), nn.LeakyReLU(0.2),
         nn.Conv2D(64 * 4, 1, 4, 1, 0, bias_attr=False), nn.Sigmoid())
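With these strides a 32x32 single-channel input collapses to a 1x1 score map (32 -> 16 -> 8 -> 4 -> 1 via the final 4x4 valid conv), so the network is sized for MNIST-like images. A hedged usage sketch, assuming the class above is defined:

import paddle

d = Discriminator()
score = d.dis(paddle.randn([8, 1, 32, 32]))  # [8, 1, 1, 1], values in (0, 1)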
Example #4
    def __init__(self, input_nc, ndf=64, n_layers=3, norm_type='instance'):
        """Construct a PatchGAN discriminator

        Args:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            n_layers (int)  -- the number of conv layers in the discriminator
            norm_type (str)      -- normalization layer type
        """
        super(NLayerDiscriminator, self).__init__()
        norm_layer = build_norm_layer(norm_type)
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2D
        else:
            use_bias = norm_layer == nn.InstanceNorm2D

        kw = 4
        padw = 1
        sequence = [
            nn.Conv2D(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2)
        ]
        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            sequence += [
                nn.Conv2D(ndf * nf_mult_prev,
                          ndf * nf_mult,
                          kernel_size=kw,
                          stride=2,
                          padding=padw,
                          bias_attr=use_bias),
                norm_layer(ndf * nf_mult),
                nn.LeakyReLU(0.2)
            ]

        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)
        sequence += [
            nn.Conv2D(ndf * nf_mult_prev,
                      ndf * nf_mult,
                      kernel_size=kw,
                      stride=1,
                      padding=padw,
                      bias_attr=use_bias),
            norm_layer(ndf * nf_mult),
            nn.LeakyReLU(0.2)
        ]

        sequence += [
            nn.Conv2D(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)
        ]
        self.model = nn.Sequential(*sequence)
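build_norm_layer is external to the excerpt; a minimal stand-in consistent with how the constructor uses it (a layer class for batch norm, a functools.partial for instance norm) might look like the sketch below. With the default n_layers=3 this is the familiar 70x70 PatchGAN from pix2pix.

import functools
import paddle.nn as nn

def build_norm_layer(norm_type='instance'):
    # Hypothetical stand-in; the real helper lives elsewhere in the project.
    if norm_type == 'batch':
        return nn.BatchNorm2D
    elif norm_type == 'instance':
        return functools.partial(nn.InstanceNorm2D,
                                 weight_attr=False,
                                 bias_attr=False)
    raise NotImplementedError('norm type [%s] is not found' % norm_type)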
Example #5
    def __init__(self, input_nc, ndf=64, n_layers=5):
        super(Discriminator, self).__init__()
        model = [
            nn.Pad2D([1, 1, 1, 1], 'reflect'),
            spectral_norm(
                nn.Conv2D(input_nc,
                          ndf,
                          kernel_size=4,
                          stride=2,
                          bias_attr=True)),
            nn.LeakyReLU(0.2)
        ]

        for i in range(1, n_layers - 2):
            mult = 2**(i - 1)
            model += [
                nn.Pad2D([1, 1, 1, 1], 'reflect'),
                spectral_norm(
                    nn.Conv2D(ndf * mult,
                              ndf * mult * 2,
                              kernel_size=4,
                              stride=2,
                              bias_attr=True)),
                nn.LeakyReLU(0.2)
            ]

        mult = 2**(n_layers - 2 - 1)
        model += [
            nn.Pad2D([1, 1, 1, 1], 'reflect'),
            spectral_norm(
                nn.Conv2D(ndf * mult,
                          ndf * mult * 2,
                          kernel_size=4,
                          stride=1,
                          bias_attr=True)),
            nn.LeakyReLU(0.2)
        ]

        # Class Activation Map
        mult = 2**(n_layers - 2)
        self.gap_fc = spectral_norm(nn.Linear(ndf * mult, 1, bias_attr=False))
        self.gmp_fc = spectral_norm(nn.Linear(ndf * mult, 1, bias_attr=False))
        self.conv1x1 = nn.Conv2D(ndf * mult * 2,
                                 ndf * mult,
                                 kernel_size=1,
                                 stride=1,
                                 bias_attr=True)
        self.leaky_relu = nn.LeakyReLU(0.2)

        self.pad = nn.Pad2D([1, 1, 1, 1], 'reflect')
        self.conv = spectral_norm(
            nn.Conv2D(ndf * mult, 1, kernel_size=4, stride=1, bias_attr=False))

        self.model = nn.Sequential(*model)
Example #6
    def __init__(self):
        super(Discriminator, self).__init__()

        self.model = nn.Sequential(
            nn.Linear(int(np.prod(img_shape)), 512),
            nn.LeakyReLU(0.2),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2),
            nn.Linear(256, 1),
            nn.Sigmoid(),
        )
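The constructor reads a module-level img_shape that the excerpt does not show. A usage sketch, assuming MNIST-shaped images flattened to vectors:

import numpy as np
import paddle

img_shape = (1, 28, 28)   # assumed global; not part of the excerpt
d = Discriminator()
flat = paddle.randn([16, int(np.prod(img_shape))])
validity = d.model(flat)  # [16, 1], squashed to (0, 1) by the Sigmoid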
Example #7
    def __init__(self, channel: int = 64, nblocks: int = 3) -> None:
        super().__init__()
        channel = channel // 2
        last_channel = channel
        f = [
            spectral_norm(
                nn.Conv2D(3, channel, 3, stride=1, padding=1,
                          bias_attr=False)),
            nn.LeakyReLU(0.2)
        ]
        in_h = 256
        for i in range(1, nblocks):
            f.extend([
                spectral_norm(
                    nn.Conv2D(last_channel,
                              channel * 2,
                              3,
                              stride=2,
                              padding=1,
                              bias_attr=False)),
                nn.LeakyReLU(0.2),
                spectral_norm(
                    nn.Conv2D(channel * 2,
                              channel * 4,
                              3,
                              stride=1,
                              padding=1,
                              bias_attr=False)),
                nn.GroupNorm(1, channel * 4),
                nn.LeakyReLU(0.2)
            ])
            last_channel = channel * 4
            channel = channel * 2
            in_h = in_h // 2

        self.body = nn.Sequential(*f)

        self.head = nn.Sequential(*[
            spectral_norm(
                nn.Conv2D(last_channel,
                          channel * 2,
                          3,
                          stride=1,
                          padding=1,
                          bias_attr=False)),
            nn.GroupNorm(1, channel * 2),
            nn.LeakyReLU(0.2),
            spectral_norm(
                nn.Conv2D(
                    channel * 2, 1, 3, stride=1, padding=1, bias_attr=False))
        ])
Example #8
    def __init__(self,
                 negval,
                 n_feats,
                 n_colors,
                 scale,
                 nFeat=None,
                 in_channels=None,
                 out_channels=None):
        super(DownBlock, self).__init__()

        if nFeat is None:
            nFeat = n_feats

        if in_channels is None:
            in_channels = n_colors

        if out_channels is None:
            out_channels = n_colors

        dual_block = [
            nn.Sequential(
                nn.Conv2D(in_channels,
                          nFeat,
                          kernel_size=3,
                          stride=2,
                          padding=1,
                          bias_attr=False),
                nn.LeakyReLU(negative_slope=negval))
        ]

        for _ in range(1, int(math.log2(scale))):
            dual_block.append(
                nn.Sequential(
                    nn.Conv2D(nFeat,
                              nFeat,
                              kernel_size=3,
                              stride=2,
                              padding=1,
                              bias_attr=False),
                    nn.LeakyReLU(negative_slope=negval)))

        dual_block.append(
            nn.Conv2D(nFeat,
                      out_channels,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias_attr=False))

        self.dual_module = nn.Sequential(*dual_block)
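The loop contributes one stride-2 conv per factor of two in scale beyond the first, so the block downsamples by scale overall. For example, a sketch assuming paddle and math are imported alongside the class:

import paddle

down = DownBlock(negval=0.2, n_feats=16, n_colors=3, scale=4)
y = down.dual_module(paddle.randn([1, 3, 64, 64]))  # [1, 3, 16, 16]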
Example #9
 def __init__(self, in_c, out_c, spatial):
     super(GradualStyleBlock, self).__init__()
     self.out_c = out_c
     self.spatial = spatial
     num_pools = int(np.log2(spatial))
     modules = []
     modules += [nn.Conv2D(in_c, out_c, kernel_size=3, stride=2, padding=1),
                 nn.LeakyReLU()]
     for i in range(num_pools - 1):
         modules += [
             nn.Conv2D(out_c, out_c, kernel_size=3, stride=2, padding=1),
             nn.LeakyReLU()
         ]
     self.convs = nn.Sequential(*modules)
     self.linear = EqualLinear(out_c, out_c, lr_mul=1)
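num_pools stride-2 convs reduce a spatial x spatial map to 1x1, so a plausible forward (omitted from the excerpt) flattens and applies EqualLinear:

 def forward(self, x):
     # Hypothetical forward; the excerpt only shows __init__.
     x = self.convs(x)                 # [N, out_c, 1, 1]
     x = x.reshape([-1, self.out_c])
     return self.linear(x)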
Example #10
File: conv.py Project: WenjinW/PGL
    def __init__(self,
                 input_size,
                 hidden_size,
                 feat_drop=0.6,
                 attn_drop=0.6,
                 num_heads=1,
                 concat=True,
                 activation=None):
        super(GATConv, self).__init__()
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.feat_drop = feat_drop
        self.attn_drop = attn_drop
        self.concat = concat

        self.linear = nn.Linear(input_size, num_heads * hidden_size)
        self.weight_src = self.create_parameter(shape=[num_heads, hidden_size])
        self.weight_dst = self.create_parameter(shape=[num_heads, hidden_size])

        self.feat_dropout = nn.Dropout(p=feat_drop)
        self.attn_dropout = nn.Dropout(p=attn_drop)
        self.leaky_relu = nn.LeakyReLU(negative_slope=0.2)
        if isinstance(activation, str):
            activation = getattr(F, activation)
        self.activation = activation
Example #11
    def __init__(self, in_features, hidden_features, n_layer=3, top_k=-1,
                 edge_dim=2, batch_norm=False, dropout=0.0, adj_type='dist', activation='softmax'):
        super(EdgeUpdateNetwork, self).__init__()
        self.top_k = top_k
        self.adj_type = adj_type
        self.edge_dim = edge_dim
        self.activation = activation

        num_dims_list = [hidden_features] * n_layer  # [num_features * r for r in ratio]
        if n_layer > 1:
            num_dims_list[0] = 2 * hidden_features
        if n_layer > 3:
            num_dims_list[1] = 2 * hidden_features
        # layers
        layer_list = OrderedDict()
        for l in range(len(num_dims_list)):
            # set layer
            layer_list['conv{}'.format(l)] = nn.Conv2D(in_channels=num_dims_list[l - 1] if l > 0 else in_features,
                                                       out_channels=num_dims_list[l],
                                                       kernel_size=1,
                                                       bias_attr=False)
            if batch_norm:
                layer_list['norm{}'.format(l)] = nn.BatchNorm2D(num_features=num_dims_list[l])
            layer_list['relu{}'.format(l)] = nn.LeakyReLU()

            if dropout > 0:
                layer_list['drop{}'.format(l)] = nn.Dropout2D(p=dropout)

        layer_list['conv_out'] = nn.Conv2D(in_channels=num_dims_list[-1],
                                           out_channels=1,
                                           kernel_size=1)
        self.sim_network = nn.Sequential()
        for i in layer_list:
            self.sim_network.add_sublayer(i, layer_list[i])
Example #12
    def __init__(self,
                 input_size,
                 hidden_size,
                 feat_drop=0.6,
                 attn_drop=0.6,
                 num_heads=1,
                 concat=True,
                 activation=None):
        super(GATConv, self).__init__()
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.feat_drop = feat_drop
        self.attn_drop = attn_drop
        self.concat = concat

        self.linear = linear_init(input_size,
                                  num_heads * hidden_size,
                                  init_type='gcn')

        fc_w_attr = paddle.ParamAttr(initializer=nn.initializer.XavierNormal())
        self.weight_src = self.create_parameter(
            shape=[1, num_heads, hidden_size], attr=fc_w_attr)

        fc_w_attr = paddle.ParamAttr(initializer=nn.initializer.XavierNormal())
        self.weight_dst = self.create_parameter(
            shape=[1, num_heads, hidden_size], attr=fc_w_attr)

        self.feat_dropout = nn.Dropout(p=feat_drop)
        self.attn_dropout = nn.Dropout(p=attn_drop)
        self.leaky_relu = nn.LeakyReLU(negative_slope=0.2)
        if isinstance(activation, str):
            activation = getattr(F, activation)
        self.activation = activation
Example #13
    def __init__(self, num_classes=10):
        super(ImperativeLenet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2D(
                in_channels=1,
                out_channels=6,
                kernel_size=3,
                stride=1,
                padding=1,
                bias_attr=False),
            nn.BatchNorm2D(6),
            nn.ReLU(),
            nn.MaxPool2D(
                kernel_size=2, stride=2),
            nn.Conv2D(
                in_channels=6,
                out_channels=16,
                kernel_size=5,
                stride=1,
                padding=0),
            nn.BatchNorm2D(16),
            nn.PReLU(),
            nn.MaxPool2D(
                kernel_size=2, stride=2))

        self.fc = nn.Sequential(
            nn.Linear(
                in_features=400, out_features=120),
            nn.LeakyReLU(),
            nn.Linear(
                in_features=120, out_features=84),
            nn.Sigmoid(),
            nn.Linear(
                in_features=84, out_features=num_classes),
            nn.Softmax())
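The in_features=400 of the first Linear follows from 28x28 inputs: 28 -> conv(3, pad 1) -> 28 -> pool -> 14 -> conv(5, pad 0) -> 10 -> pool -> 5, and 16 * 5 * 5 = 400. A quick check, assuming the class above is defined:

import paddle

net = ImperativeLenet()
feat = net.features(paddle.randn([1, 1, 28, 28]))  # [1, 16, 5, 5]
scores = net.fc(feat.flatten(1))                   # [1, 10]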
Example #14
    def __init__(self, inp_dim, out_dim, n_layer=2, edge_dim=2, batch_norm=False, dropout=0.0):
        super(NodeUpdateNetwork, self).__init__()
        # set size
        self.edge_dim = edge_dim
        num_dims_list = [out_dim] * n_layer  # [num_features * r for r in ratio]
        if n_layer > 1:
            num_dims_list[0] = 2 * out_dim

        # layers
        layer_list = OrderedDict()
        for l in range(len(num_dims_list)):
            layer_list['conv{}'.format(l)] = nn.Conv2D(
                in_channels=num_dims_list[l - 1] if l > 0 else (self.edge_dim + 1) * inp_dim,
                out_channels=num_dims_list[l],
                kernel_size=1,
                bias_attr=False)
            if batch_norm:
                layer_list['norm{}'.format(l)] = nn.BatchNorm2D(num_features=num_dims_list[l])
            layer_list['relu{}'.format(l)] = nn.LeakyReLU()

            if dropout > 0 and l == (len(num_dims_list) - 1):
                layer_list['drop{}'.format(l)] = nn.Dropout2D(p=dropout)

        self.network = nn.Sequential()
        for i in layer_list:
            self.network.add_sublayer(i, layer_list[i])
Example #15
 def __init__(self,
              in_channels: int,
              expansion: float,
              out_channels: int,
              bias_attr=False):
     super().__init__()
     self.in_channels = in_channels
     self.expansion = expansion
     self.out_channels = out_channels
     self.bottle_channels = round(self.expansion * self.in_channels)
     self.body = nn.Sequential(
         # pw
         Conv2DNormLReLU(self.in_channels,
                         self.bottle_channels,
                         kernel_size=1,
                         bias_attr=bias_attr),
         # dw
         nn.Conv2D(self.bottle_channels,
                   self.bottle_channels,
                   kernel_size=3,
                   stride=1,
                   padding=0,
                   groups=self.bottle_channels,
                   bias_attr=True),
         nn.GroupNorm(1, self.bottle_channels),
         nn.LeakyReLU(0.2),
         # pw & linear
         nn.Conv2D(self.bottle_channels,
                   self.out_channels,
                   kernel_size=1,
                   padding=0,
                   bias_attr=False),
         nn.GroupNorm(1, self.out_channels),
     )
Example #16
    def __init__(self, channels_img, features_d):
        super(Discriminator, self).__init__()

        # Input : N x C x 256 x 256
        self.disc = nn.Sequential(
            nn.Conv2D(  # 128 x 128
                channels_img,
                features_d,
                kernel_size=4,
                stride=2,
                padding=1,
                weight_attr=paddle.ParamAttr(initializer=conv_initializer())),
            nn.LeakyReLU(0.2),
            self._block(features_d, features_d * 2, 4, 2, 1),  # 64 x 64 
            self._block(features_d * 2, features_d * 4, 4, 2, 1),  # 32 x 32
            self._block(features_d * 4, features_d * 8, 4, 2, 1),  # 16 x 16
            self._block(features_d * 8, features_d * 16, 4, 2, 1),  # 8 x 8
            self._block(features_d * 16, features_d * 32, 4, 2, 1),  # 4 x 4
            nn.Conv2D(
                features_d * 32,
                1,
                kernel_size=4,
                stride=2,
                padding=0,  # 1 x 1 
                weight_attr=paddle.ParamAttr(initializer=conv_initializer())),
            nn.Sigmoid(),
        )
Example #17
 def __init__(self,
              inp_dim,
              hidden_dim,
              num_layers,
              batch_norm=False,
              dropout=0.):
     super(MLP, self).__init__()
     layer_list = OrderedDict()
     in_dim = inp_dim
     for l in range(num_layers):
         layer_list['fc{}'.format(l)] = nn.Linear(in_dim, hidden_dim)
         if l < num_layers - 1:
             if batch_norm:
                 layer_list['norm{}'.format(l)] = nn.BatchNorm1D(
                     num_features=hidden_dim)
             layer_list['relu{}'.format(l)] = nn.LeakyReLU()
             if dropout > 0:
                 layer_list['drop{}'.format(l)] = nn.Dropout(p=dropout)
         in_dim = hidden_dim
     if num_layers > 0:
         self.network = nn.Sequential()
         for i in layer_list:
             self.network.add_sublayer(i, layer_list[i])
     else:
         self.network = nn.Identity()
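A hedged usage sketch; since the excerpt omits forward, this calls the built network directly:

import paddle

mlp = MLP(inp_dim=16, hidden_dim=32, num_layers=2)
out = mlp.network(paddle.randn([4, 16]))  # [4, 32]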
Example #18
 def __init__(self, dim_in, dim_out, style_dim=64, w_hpf=0,
              actv=nn.LeakyReLU(0.2), upsample=False):
     super().__init__()
     self.w_hpf = w_hpf
     self.actv = actv
     self.upsample = upsample
     self.learned_sc = dim_in != dim_out
     self._build_weights(dim_in, dim_out, style_dim)
Example #19
    def __init__(self, in_dim, out_dim, kernel_size=4, stride=2, padding=1):
        super(Downsample, self).__init__()

        self.layers = nn.Sequential(
            nn.LeakyReLU(0.2),
            nn.Conv2D(in_dim, out_dim, kernel_size, stride, padding, bias_attr=False),
            nn.BatchNorm2D(out_dim)
        )
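Each Downsample halves the spatial resolution via the 4x4 stride-2 conv. A small sketch, assuming the class above is defined:

import paddle

down = Downsample(64, 128)
y = down.layers(paddle.randn([2, 64, 32, 32]))  # [2, 128, 16, 16]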
Example #20
 def __init__(self, act_type, **params):
     super(Activation, self).__init__()
     if act_type == 'relu':
         self.act = nn.ReLU()
     elif act_type == 'leaky_relu':
         self.act = nn.LeakyReLU(**params)
     else:
         raise ValueError(act_type)
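Extra keyword arguments are forwarded to the layer, so e.g. the leaky slope can be set at construction. A usage sketch that calls the stored layer directly (forward is not shown in the excerpt):

import paddle

act = Activation('leaky_relu', negative_slope=0.1)
y = act.act(paddle.to_tensor([-1.0, 2.0]))  # [-0.1, 2.0]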
Example #21
 def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2),
              normalize=False, downsample=False):
     super().__init__()
     self.actv = actv
     self.normalize = normalize
     self.downsample = downsample
     self.learned_sc = dim_in != dim_out
     self._build_weights(dim_in, dim_out)
Example #22
 def __init__(self, in_dim, dropout):
     super(AttentivePooling, self).__init__()
     self.compute_logits = nn.Sequential(nn.Linear(2 * in_dim, 1),
                                         nn.LeakyReLU())
     self.project_nodes = nn.Sequential(nn.Dropout(dropout),
                                        nn.Linear(in_dim, in_dim))
     self.pool = pgl.nn.GraphPool(pool_type='sum')
     self.gru = nn.GRUCell(in_dim, in_dim)
Example #23
 def _block(self, in_channels, out_channels, kernel_size, stride, padding):
     return nn.Sequential(
         nn.Conv2D(
             in_channels, out_channels, kernel_size, stride, padding, bias_attr=False, 
             weight_attr=paddle.ParamAttr(initializer=conv_initializer() ) 
         ),
         nn.LeakyReLU(0.2),
     )
Example #24
    def __init__(self, in_dim, out_dim, kernel_size=4, stride=2, padding=1):
        super(ConvBlock, self).__init__()

        self.layers = nn.Sequential(
            nn.Conv2D(in_dim, out_dim, kernel_size, stride, padding, bias_attr=False),                    # Conv2D
            nn.BatchNorm2D(out_dim),                    # BatchNorm2D
            nn.LeakyReLU(0.2)                           # LeakyReLU, leaky=0.2
        )
Example #25
    def __init__(self, img_size=256, num_domains=2, max_conv_dim=512):
        super().__init__()
        dim_in = 2**14 // img_size
        blocks = []
        blocks += [nn.Conv2D(3, dim_in, 3, 1, 1)]

        repeat_num = int(np.log2(img_size)) - 2
        for _ in range(repeat_num):
            dim_out = min(dim_in * 2, max_conv_dim)
            blocks += [ResBlk(dim_in, dim_out, downsample=True)]
            dim_in = dim_out

        blocks += [nn.LeakyReLU(0.2)]
        blocks += [nn.Conv2D(dim_out, dim_out, 4, 1, 0)]
        blocks += [nn.LeakyReLU(0.2)]
        blocks += [nn.Conv2D(dim_out, num_domains, 1, 1, 0)]
        self.main = nn.Sequential(*blocks)
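For img_size=256 this gives dim_in = 2**14 // 256 = 64 and repeat_num = log2(256) - 2 = 6 downsampling blocks, taking 256 -> 4 before the 4x4 valid conv reduces to 1x1. A sketch, assuming the enclosing class is named Discriminator and ResBlk is defined elsewhere in the project:

import paddle

d = Discriminator()
out = d.main(paddle.randn([2, 3, 256, 256]))  # [2, num_domains, 1, 1]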
Example #26
    def __init__(self, n_in, n_out):
        super(MLP, self).__init__()

        self.linear = nn.Linear(
            n_in,
            n_out,
            weight_attr=nn.initializer.XavierNormal(),
        )
        self.leaky_relu = nn.LeakyReLU(negative_slope=0.1)
Example #27
    def __init__(self, input_nc=6, ndf=64):
        super(NLayerDiscriminator, self).__init__()

        self.layers = nn.Sequential(
            nn.Conv2D(input_nc, ndf, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.2), ConvBlock(ndf, ndf * 2),
            ConvBlock(ndf * 2, ndf * 4), ConvBlock(ndf * 4, ndf * 8, stride=1),
            nn.Conv2D(ndf * 8, 1, kernel_size=4, stride=1, padding=1),
            nn.Sigmoid())
Example #28
 def __init__(self, nf=64, gc=32, bias=True):
     super(ResidualDenseBlock_5C, self).__init__()
     # gc: growth channel, i.e. intermediate channels
     self.conv1 = nn.Conv2D(nf, gc, 3, 1, 1, bias_attr=bias)
     self.conv2 = nn.Conv2D(nf + gc, gc, 3, 1, 1, bias_attr=bias)
     self.conv3 = nn.Conv2D(nf + 2 * gc, gc, 3, 1, 1, bias_attr=bias)
     self.conv4 = nn.Conv2D(nf + 3 * gc, gc, 3, 1, 1, bias_attr=bias)
     self.conv5 = nn.Conv2D(nf + 4 * gc, nf, 3, 1, 1, bias_attr=bias)
     self.lrelu = nn.LeakyReLU(negative_slope=0.2)
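The excerpt omits forward; the standard ESRGAN dense-block wiring, in which each conv sees the concatenation of all earlier outputs and the result is residually scaled, would be (an assumption, not shown in the source):

 def forward(self, x):
     x1 = self.lrelu(self.conv1(x))
     x2 = self.lrelu(self.conv2(paddle.concat((x, x1), 1)))
     x3 = self.lrelu(self.conv3(paddle.concat((x, x1, x2), 1)))
     x4 = self.lrelu(self.conv4(paddle.concat((x, x1, x2, x3), 1)))
     x5 = self.conv5(paddle.concat((x, x1, x2, x3, x4), 1))
     return x5 * 0.2 + x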
Example #29
 def __init__(self, in_channels=3, out_channels=3, kernel_size=3, *args):
     super(Block, self).__init__()
     self.nn = nn.Sequential(
         nn.Conv1D(in_channels=in_channels,
                   out_channels=out_channels,
                   kernel_size=kernel_size,
                   padding=0,
                   bias_attr=False),
         nn.BatchNorm1D(num_features=out_channels), nn.LeakyReLU(.2),
         nn.Dropout(p=.2))
Example #30
def get_activation(name="silu", inplace=True):
    if name == "silu":
        module = nn.Silu()
    elif name == "relu":
        module = nn.ReLU()
    elif name == "lrelu":
        module = nn.LeakyReLU(0.1)
    else:
        raise AttributeError("Unsupported act type: {}".format(name))
    return module
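Note that the inplace argument is accepted but never used; Paddle activation layers have no in-place flag. Usage:

import paddle

act = get_activation("lrelu")             # nn.LeakyReLU(0.1)
y = act(paddle.to_tensor([-2.0, 3.0]))    # [-0.2, 3.0]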