Example #1
def __init__(self, in_features, kernel_size, padding, **kwargs):
    super(ResBlock2d, self).__init__(**kwargs)
    self.conv1 = dygraph.Conv2D(num_channels=in_features,
                                num_filters=in_features,
                                filter_size=kernel_size,
                                padding=padding)
    self.conv2 = dygraph.Conv2D(num_channels=in_features,
                                num_filters=in_features,
                                filter_size=kernel_size,
                                padding=padding)
    self.norm1 = dygraph.BatchNorm(num_channels=in_features, momentum=0.1)
    self.norm2 = dygraph.BatchNorm(num_channels=in_features, momentum=0.1)
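Only the constructor is shown; a minimal sketch of the forward pass, assuming the usual pre-activation residual ordering (norm -> ReLU -> conv, twice, plus the identity shortcut), could look like this:

def forward(self, x):
    # Assumed ordering: BN -> ReLU -> conv, applied twice.
    out = self.norm1(x)
    out = fluid.layers.relu(out)
    out = self.conv1(out)
    out = self.norm2(out)
    out = fluid.layers.relu(out)
    out = self.conv2(out)
    # Identity shortcut: shapes match since num_filters == num_channels.
    return x + out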
Example #2
def get_norm(norm_type, channels_num):
    if norm_type == "AN":
        return AffineNormalization(channels_num)
    elif norm_type == "BN":
        return dg.BatchNorm(channels_num)
    else:
        raise NotImplementedError()
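A hypothetical call, assuming AffineNormalization is defined elsewhere in the module:

with dg.guard():
    norm = get_norm("BN", channels_num=64)   # -> dg.BatchNorm(64)
    # Any other norm_type, e.g. get_norm("GN", 64), raises NotImplementedError.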
Example #3
def __init__(self, name=None, num=None):
    super(TSNResNet, self).__init__()
    self.convbn = convbn(3, 16)
    self.convpools = dygraph.Sequential(convpool(16, 32, pooling=4),
                                        convpool(32, 64, pooling=4),
                                        convpool(64, 128))
    self.fcs = dygraph.Sequential(
        dygraph.Linear(7 * 7 * 128, 1024, act='relu'),
        dygraph.BatchNorm(1024), dygraph.Dropout(0.5),
        dygraph.Linear(1024, 101, act='softmax'))
    self.seg_num = 32
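A sketch of a TSN-style forward, assuming 224x224 input frames (which is what the 7 * 7 * 128 FC input implies after the three pooling stages):

def forward(self, x):
    # x: (N, seg_num, 3, 224, 224); fold segments into the batch axis.
    x = fluid.layers.reshape(x, [-1, 3, 224, 224])
    x = self.convbn(x)
    x = self.convpools(x)          # -> (N * seg_num, 128, 7, 7)
    x = fluid.layers.reshape(x, [-1, 7 * 7 * 128])
    x = self.fcs(x)                # per-segment class scores
    # Average the softmax scores over the 32 segments.
    x = fluid.layers.reshape(x, [-1, self.seg_num, 101])
    return fluid.layers.reduce_mean(x, dim=1)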
Example #4
def get_activation_norm_layer(num_features, norm_type, input_dim, **norm_params):
    """
    Return an activation normalization layer.
    """
    input_dim = max(input_dim, 1)
    assert input_dim in (1, 2), 'Only 1D and 2D inputs are supported currently'

    if norm_type == 'none' or norm_type == '':
        norm_layer = None
    elif norm_type == 'batch':
        norm_layer = dg.BatchNorm(num_features, **norm_params)
    elif norm_type == 'instance':
        affine = norm_params.pop('affine', True)
        if not affine:
            norm_params['param_attr'] = False
            norm_params['bias_attr'] = False
        # "affine" is emulated above by disabling param_attr/bias_attr.
        norm_layer = dg.InstanceNorm(num_features, **norm_params)
    elif norm_type == 'sync_batch':
        affine = norm_params.pop('affine', True)
        norm_layer = dg.BatchNorm(num_features, **norm_params)
        # Note: a BuildStrategy only takes effect once it is passed to a
        # CompiledProgram; setting the flag on a throwaway instance here
        # does not enable sync BN by itself.
        F.BuildStrategy().sync_batch_norm = True
    elif norm_type == 'layer':
        norm_layer = dg.LayerNorm(num_features, **norm_params)
    elif norm_type == 'layer_2d':
        raise NotImplementedError()
    elif norm_type == 'adaptive':
        norm_layer = AdaptiveNorm(num_features, **norm_params)
    elif norm_type == 'spatially_adaptive':
        if input_dim != 2:
            raise ValueError("Spatially adaptive normalization layers only supports 2D input")
        norm_layer = SpatiallyAdaptiveNorm(num_features, **norm_params)
    elif norm_type == 'hyper_spatially_adaptive':
        if input_dim != 2:
            raise ValueError("Spatially adaptive normalization layers only supports 2D input")
        norm_layer = HyperSpatiallyAdaptiveNorm(num_features, **norm_params)
    else:
        raise ValueError("Activation norm layer %s is not recognized" % norm_type)

    return norm_layer
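A hypothetical usage, exercising a few of the branches above:

with dg.guard():
    bn = get_activation_norm_layer(64, 'batch', input_dim=2, momentum=0.9)
    # affine=False is routed into param_attr=False / bias_attr=False.
    inorm = get_activation_norm_layer(64, 'instance', input_dim=2, affine=False)
    assert get_activation_norm_layer(64, 'none', input_dim=2) is None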
Example #5
def __init__(self,
             in_features,
             out_features,
             groups=1,
             kernel_size=3,
             padding=1):
    super(SameBlock2d, self).__init__()
    self.conv = dygraph.Conv2D(num_channels=in_features,
                               num_filters=out_features,
                               filter_size=kernel_size,
                               padding=padding,
                               groups=groups)
    self.norm = dygraph.BatchNorm(out_features)
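A plausible forward pass, assuming the common conv -> BN -> ReLU ordering for this block:

def forward(self, x):
    # Spatial size is preserved (kernel 3, padding 1), hence "SameBlock".
    out = self.conv(x)
    out = self.norm(out)
    return fluid.layers.relu(out)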
Example #6
def convbn(in_channel,
           out_channel,
           padding=None,
           stride=1,
           kernel=3,
           act=None):
    if padding is None:
        padding = int((kernel - 1) / 2)
    return fluid.dygraph.Sequential(
        dygraph.Conv2D(in_channel,
                       out_channel,
                       kernel,
                       stride=stride,
                       padding=padding,
                       act=act), dygraph.BatchNorm(out_channel))
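A hypothetical usage of convbn, running a random batch through the block in dygraph mode:

import numpy as np
import paddle.fluid as fluid
import paddle.fluid.dygraph as dygraph

with fluid.dygraph.guard():
    block = convbn(3, 16, act='relu')
    x = dygraph.to_variable(np.random.randn(2, 3, 32, 32).astype('float32'))
    y = block(x)   # shape (2, 16, 32, 32): stride 1 with "same" padding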
Example #7
    def __init__(self):
        super(HarFcn, self).__init__()

        self.cnn1 = dy.Sequential(
            dy.Conv2D(num_channels=1,
                      num_filters=128,
                      filter_size=3,
                      stride=1,
                      padding=1),
            dy.BatchNorm(num_channels=128),
            dy.Dropout(p=.2),
        )
        self.cnn2 = dy.Sequential(
            dy.Conv2D(num_channels=128,
                      num_filters=128,
                      filter_size=3,
                      stride=1,
                      padding=1),
            dy.BatchNorm(num_channels=128),
            dy.Dropout(p=.2),
        )
        self.cnn3 = dy.Sequential(
            dy.Conv2D(num_channels=128,
                      num_filters=128,
                      filter_size=3,
                      stride=1,
                      padding=1),
            dy.BatchNorm(num_channels=128),
            dy.Dropout(p=.2),
        )

        self.cls = dy.Sequential(
            dy.Linear(input_dim=384, output_dim=128),
            dy.Dropout(p=.2),
            dy.Linear(input_dim=128, output_dim=5),
        )
Example #8
def __init__(self,
             in_channels,
             out_channels,
             kernel_size=3,
             stride=1,
             padding=1,
             act='relu'):
    super().__init__()
    self.conv = dg.Conv2D(num_channels=in_channels,
                          num_filters=out_channels,
                          filter_size=kernel_size,
                          stride=stride,
                          padding=padding,
                          bias_attr=False)
    self.bn = dg.BatchNorm(num_channels=out_channels, act=act)
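The forward is presumably a straight composition, since the activation is already fused into BatchNorm via its act argument:

def forward(self, x):
    # Bias-free conv followed by BN (which also applies `act`).
    return self.bn(self.conv(x))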
Example #9
def __init__(self,
             in_features,
             out_features,
             kernel_size=3,
             padding=1,
             groups=1):
    super(DownBlock2d, self).__init__()
    self.conv = dygraph.Conv2D(num_channels=in_features,
                               num_filters=out_features,
                               filter_size=kernel_size,
                               padding=padding,
                               groups=groups)
    self.norm = dygraph.BatchNorm(num_channels=out_features, momentum=0.1)
    self.pool = dygraph.Pool2D(pool_size=(2, 2),
                               pool_type='avg',
                               pool_stride=2)
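A sketch of the forward, assuming conv -> BN -> ReLU -> average pooling, which halves the spatial resolution (hence "DownBlock"):

def forward(self, x):
    out = self.conv(x)
    out = self.norm(out)
    out = fluid.layers.relu(out)
    return self.pool(out)   # 2x2 average pool, stride 2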
Example #10
def convpool(in_channel,
             out_channel,
             padding=None,
             pooling=2,
             kernel=3,
             act='relu'):
    if padding is None:
        padding = int((kernel - 1) / 2)
    layers = [
        dygraph.Conv2D(in_channel,
                       out_channel,
                       kernel,
                       padding=padding,
                       act=act),
        dygraph.BatchNorm(out_channel)
    ]
    if pooling > 1:
        layers.append(dygraph.Pool2D(pooling, pool_stride=pooling))

    return fluid.dygraph.Sequential(*layers)
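A hypothetical usage, reusing the imports from the convbn sketch above:

with fluid.dygraph.guard():
    block = convpool(16, 32, pooling=4)
    x = dygraph.to_variable(np.random.randn(2, 16, 112, 112).astype('float32'))
    y = block(x)   # shape (2, 32, 28, 28): 4x4 max pool, stride 4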
Example #11
def __init__(self, n):
    super(FrozenBatchNorm2d, self).__init__()
    self.module = dg.BatchNorm(n,
                               use_global_stats=True,
                               param_attr=F.ParamAttr(learning_rate=0))
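The forward presumably just delegates to the wrapped layer:

def forward(self, x):
    # use_global_stats=True makes BN use the stored running statistics,
    # and learning_rate=0 on the scale keeps it fixed during training.
    return self.module(x)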
Example #12
    def __init__(self, embedding_size, num_hidden, use_cudnn=True):
        """ Encoder prenet layer of TransformerTTS.

        Args:
            embedding_size (int): the size of the embedding.
            num_hidden (int): the size of the hidden layers in the network.
            use_cudnn (bool, optional): whether to use cudnn. Defaults to True.
        """
        super(EncoderPrenet, self).__init__()
        self.embedding_size = embedding_size
        self.num_hidden = num_hidden
        self.use_cudnn = use_cudnn
        self.embedding = dg.Embedding(
            size=[len(symbols), embedding_size],
            padding_idx=0,
            param_attr=fluid.initializer.Normal(
                loc=0.0, scale=1.0))
        self.conv_list = []
        k = math.sqrt(1.0 / embedding_size)
        self.conv_list.append(
            Conv1D(
                num_channels=embedding_size,
                num_filters=num_hidden,
                filter_size=5,
                padding=int(np.floor(5 / 2)),
                param_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.XavierInitializer()),
                bias_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Uniform(
                        low=-k, high=k)),
                use_cudnn=use_cudnn))
        k = math.sqrt(1.0 / num_hidden)
        for _ in range(2):
            self.conv_list.append(
                Conv1D(
                    num_channels=num_hidden,
                    num_filters=num_hidden,
                    filter_size=5,
                    padding=int(np.floor(5 / 2)),
                    param_attr=fluid.ParamAttr(
                        initializer=fluid.initializer.XavierInitializer()),
                    bias_attr=fluid.ParamAttr(
                        initializer=fluid.initializer.Uniform(
                            low=-k, high=k)),
                    use_cudnn=use_cudnn))

        for i, layer in enumerate(self.conv_list):
            self.add_sublayer("conv_list_{}".format(i), layer)

        self.batch_norm_list = [
            dg.BatchNorm(
                num_hidden, data_layout='NCHW') for _ in range(3)
        ]

        for i, layer in enumerate(self.batch_norm_list):
            self.add_sublayer("batch_norm_list_{}".format(i), layer)

        k = math.sqrt(1.0 / num_hidden)
        self.projection = dg.Linear(
            num_hidden,
            num_hidden,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.XavierInitializer()),
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                low=-k, high=k)))
Example #13
    def __init__(self,
                 n_mels=80,
                 num_hidden=512,
                 filter_size=5,
                 padding=0,
                 num_conv=5,
                 outputs_per_step=1,
                 use_cudnn=True,
                 dropout=0.1,
                 batchnorm_last=False):
        """Decocder post conv net of TransformerTTS.

        Args:
            n_mels (int, optional): the number of mel bands used when computing mel spectrograms. Defaults to 80.
            num_hidden (int, optional): the size of the hidden layers in the network. Defaults to 512.
            filter_size (int, optional): the filter size of the Conv layers. Defaults to 5.
            padding (int, optional): the padding size of the Conv layers. Defaults to 0.
            num_conv (int, optional): the number of Conv layers in the network. Defaults to 5.
            outputs_per_step (int, optional): the number of output frames per step. Defaults to 1.
            use_cudnn (bool, optional): whether to use cudnn in Conv. Defaults to True.
            dropout (float, optional): dropout probability. Defaults to 0.1.
            batchnorm_last (bool, optional): whether to apply batch norm after the last layer. Defaults to False.
        """
        super(PostConvNet, self).__init__()

        self.dropout = dropout
        self.num_conv = num_conv
        self.batchnorm_last = batchnorm_last
        self.conv_list = []
        k = math.sqrt(1.0 / (n_mels * outputs_per_step))
        self.conv_list.append(
            Conv1D(num_channels=n_mels * outputs_per_step,
                   num_filters=num_hidden,
                   filter_size=filter_size,
                   padding=padding,
                   param_attr=fluid.ParamAttr(
                       initializer=fluid.initializer.XavierInitializer()),
                   bias_attr=fluid.ParamAttr(
                       initializer=fluid.initializer.Uniform(low=-k, high=k)),
                   use_cudnn=use_cudnn))

        k = math.sqrt(1.0 / num_hidden)
        for _ in range(1, num_conv - 1):
            self.conv_list.append(
                Conv1D(
                    num_channels=num_hidden,
                    num_filters=num_hidden,
                    filter_size=filter_size,
                    padding=padding,
                    param_attr=fluid.ParamAttr(
                        initializer=fluid.initializer.XavierInitializer()),
                    bias_attr=fluid.ParamAttr(
                        initializer=fluid.initializer.Uniform(low=-k, high=k)),
                    use_cudnn=use_cudnn))

        self.conv_list.append(
            Conv1D(num_channels=num_hidden,
                   num_filters=n_mels * outputs_per_step,
                   filter_size=filter_size,
                   padding=padding,
                   param_attr=fluid.ParamAttr(
                       initializer=fluid.initializer.XavierInitializer()),
                   bias_attr=fluid.ParamAttr(
                       initializer=fluid.initializer.Uniform(low=-k, high=k)),
                   use_cudnn=use_cudnn))

        for i, layer in enumerate(self.conv_list):
            self.add_sublayer("conv_list_{}".format(i), layer)

        self.batch_norm_list = [
            dg.BatchNorm(num_hidden, data_layout='NCHW')
            for _ in range(num_conv - 1)
        ]
        if self.batchnorm_last:
            self.batch_norm_list.append(
                dg.BatchNorm(n_mels * outputs_per_step, data_layout='NCHW'))
        for i, layer in enumerate(self.batch_norm_list):
            self.add_sublayer("batch_norm_list_{}".format(i), layer)
Example #14
    def __init__(self,
                 hidden_size,
                 batch_size,
                 K=16,
                 projection_size=256,
                 num_gru_layers=2,
                 max_pool_kernel_size=2,
                 is_post=False):
        """CBHG Module

        Args:
            hidden_size (int): dimension of hidden unit.
            batch_size (int): batch size of input.
            K (int, optional): number of convolution banks. Defaults to 16.
            projection_size (int, optional): dimension of the projection unit. Defaults to 256.
            num_gru_layers (int, optional): number of GRU layers. Defaults to 2.
            max_pool_kernel_size (int, optional): max pooling kernel size. Defaults to 2.
            is_post (bool, optional): whether this is the post-processing net. Defaults to False.
        """
        super(CBHG, self).__init__()

        self.hidden_size = hidden_size
        self.projection_size = projection_size
        self.conv_list = []
        k = math.sqrt(1.0 / projection_size)
        self.conv_list.append(
            Conv1D(
                num_channels=projection_size,
                num_filters=hidden_size,
                filter_size=1,
                padding=int(np.floor(1 / 2)),
                param_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.XavierInitializer()),
                bias_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Uniform(
                        low=-k, high=k))))
        k = math.sqrt(1.0 / hidden_size)
        for i in range(2, K + 1):
            self.conv_list.append(
                Conv1D(
                    num_channels=hidden_size,
                    num_filters=hidden_size,
                    filter_size=i,
                    padding=int(np.floor(i / 2)),
                    param_attr=fluid.ParamAttr(
                        initializer=fluid.initializer.XavierInitializer()),
                    bias_attr=fluid.ParamAttr(
                        initializer=fluid.initializer.Uniform(
                            low=-k, high=k))))

        for i, layer in enumerate(self.conv_list):
            self.add_sublayer("conv_list_{}".format(i), layer)

        self.batchnorm_list = []
        for i in range(K):
            self.batchnorm_list.append(
                dg.BatchNorm(
                    hidden_size, data_layout='NCHW'))

        for i, layer in enumerate(self.batchnorm_list):
            self.add_sublayer("batchnorm_list_{}".format(i), layer)

        conv_outdim = hidden_size * K

        k = math.sqrt(1.0 / conv_outdim)
        self.conv_projection_1 = Conv1D(
            num_channels=conv_outdim,
            num_filters=hidden_size,
            filter_size=3,
            padding=int(np.floor(3 / 2)),
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.XavierInitializer()),
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                low=-k, high=k)))

        k = math.sqrt(1.0 / hidden_size)
        self.conv_projection_2 = Conv1D(
            num_channels=hidden_size,
            num_filters=projection_size,
            filter_size=3,
            padding=int(np.floor(3 / 2)),
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.XavierInitializer()),
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                low=-k, high=k)))

        self.batchnorm_proj_1 = dg.BatchNorm(hidden_size, data_layout='NCHW')
        self.batchnorm_proj_2 = dg.BatchNorm(
            projection_size, data_layout='NCHW')
        self.max_pool = Pool1D(
            pool_size=max_pool_kernel_size,
            pool_type='max',
            pool_stride=1,
            pool_padding=1,
            data_format="NCT")
        self.highway = Highwaynet(self.projection_size)

        h_0 = np.zeros((batch_size, hidden_size // 2), dtype="float32")
        h_0 = dg.to_variable(h_0)
        k = math.sqrt(1.0 / hidden_size)
        self.fc_forward1 = dg.Linear(
            hidden_size,
            hidden_size // 2 * 3,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.XavierInitializer()),
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                low=-k, high=k)))
        self.fc_reverse1 = dg.Linear(
            hidden_size,
            hidden_size // 2 * 3,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.XavierInitializer()),
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                low=-k, high=k)))
        self.gru_forward1 = DynamicGRU(
            size=self.hidden_size // 2,
            is_reverse=False,
            origin_mode=True,
            h_0=h_0)
        self.gru_reverse1 = DynamicGRU(
            size=self.hidden_size // 2,
            is_reverse=True,
            origin_mode=True,
            h_0=h_0)

        self.fc_forward2 = dg.Linear(
            hidden_size,
            hidden_size // 2 * 3,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.XavierInitializer()),
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                low=-k, high=k)))
        self.fc_reverse2 = dg.Linear(
            hidden_size,
            hidden_size // 2 * 3,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.XavierInitializer()),
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                low=-k, high=k)))
        self.gru_forward2 = DynamicGRU(
            size=self.hidden_size // 2,
            is_reverse=False,
            origin_mode=True,
            h_0=h_0)
        self.gru_reverse2 = DynamicGRU(
            size=self.hidden_size // 2,
            is_reverse=True,
            origin_mode=True,
            h_0=h_0)