Example #1
import tensorflow as tf

import layers  # assumed: the project's own module providing PositionalEncoding and transformer_decoder_layer


def decoder(vocab_size, num_layers, units, d_model, num_heads, dropout, name="decoder"):
    """
    transformer的decoder,使用函数式API进行编写,实现了
    模型层内部的一系列操作,相关的一些变量的时候基本和上面
    的encoder差不多,这里不多说
    :param vocab_size:token大小
    :param num_layers:编码解码的层数量
    :param units:单元大小
    :param d_model:深度
    :param num_heads:多头注意力的头部层数量
    :param dropout:dropout的权重
    :param name:
    :return:
    """
    inputs = tf.keras.Input(shape=(None,), name="inputs")
    enc_outputs = tf.keras.Input(shape=(None, d_model), name="encoder_outputs")
    look_ahead_mask = tf.keras.Input(shape=(1, None, None), name="look_ahead_mask")
    padding_mask = tf.keras.Input(shape=(1, 1, None), name='padding_mask')

    embeddings = tf.keras.layers.Embedding(vocab_size, d_model)(inputs)
    embeddings *= tf.math.sqrt(tf.cast(d_model, tf.float32))
    embeddings = layers.PositionalEncoding(vocab_size, d_model)(embeddings)

    outputs = tf.keras.layers.Dropout(rate=dropout)(embeddings)

    for i in range(num_layers):
        outputs = layers.transformer_decoder_layer(
            units=units, d_model=d_model, num_heads=num_heads,
            dropout=dropout, name="transformer_decoder_layer_{}".format(i),
        )(inputs=[outputs, enc_outputs, look_ahead_mask, padding_mask])

    return tf.keras.Model(inputs=[inputs, enc_outputs, look_ahead_mask, padding_mask],
                          outputs=outputs, name=name)
Example #2
import tensorflow as tf

import layers  # assumed: the project's own module providing PositionalEncoding and transformer_encoder_layer


def encoder(vocab_size,
            num_layers,
            units,
            d_model,
            num_heads,
            dropout,
            name="encoder"):
    """
    transformer的encoder,使用函数式API进行编写,实现了
    模型层内部的一系列操作,num_layers决定了使用多少个
    encoder_layer层,更具Transformer架构里面的描述,可以根据
    效果进行调整,在encoder中还进行了位置编码,具体原理自行翻阅
    资料,就是实现公式的问题,这里就不多做注释了
    :param vocab_size:token大小
    :param num_layers:编码解码的数量
    :param units:单元大小
    :param d_model:深度
    :param num_heads:多头注意力的头部层数量
    :param dropout:dropout的权重
    :param name:
    :return: Model(inputs=[inputs, padding_mask], outputs=outputs)
    """
    inputs = tf.keras.Input(shape=(None, ), name="inputs")
    padding_mask = tf.keras.Input(shape=(1, 1, None), name="padding_mask")
    embeddings = tf.keras.layers.Embedding(vocab_size, d_model)(inputs)
    embeddings *= tf.math.sqrt(tf.cast(d_model, tf.float32))
    embeddings = layers.PositionalEncoding(vocab_size, d_model)(embeddings)

    outputs = tf.keras.layers.Dropout(rate=dropout)(embeddings)

    # The layer names only make printed/debug output easier to read; they can be omitted.
    for i in range(num_layers):
        outputs = layers.transformer_encoder_layer(
            units=units,
            d_model=d_model,
            num_heads=num_heads,
            dropout=dropout,
            name="transformer_encoder_layer_{}".format(i),
        )([outputs, padding_mask])

    return tf.keras.Model(inputs=[inputs, padding_mask],
                          outputs=outputs,
                          name=name)
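
Both examples call layers.PositionalEncoding(vocab_size, d_model), whose implementation is not shown on this page. A self-contained sketch of the standard sinusoidal encoding follows; the class body here is an assumption based on the usual formula, not code from the source project.

import numpy as np
import tensorflow as tf

class PositionalEncoding(tf.keras.layers.Layer):
    # PE(pos, 2i) = sin(pos / 10000^(2i/d_model)), PE(pos, 2i+1) = cos(pos / 10000^(2i/d_model))
    def __init__(self, position, d_model):
        super().__init__()
        angles = np.arange(position)[:, np.newaxis] / np.power(
            10000.0, (2 * (np.arange(d_model)[np.newaxis, :] // 2)) / np.float32(d_model))
        angles[:, 0::2] = np.sin(angles[:, 0::2])  # even dimensions get sine
        angles[:, 1::2] = np.cos(angles[:, 1::2])  # odd dimensions get cosine
        self.pos_encoding = tf.cast(angles[np.newaxis, ...], tf.float32)

    def call(self, inputs):
        # Add the encoding for the first seq_len positions to the scaled embeddings
        return inputs + self.pos_encoding[:, :tf.shape(inputs)[1], :]

With these pieces in place, a call such as encoder(vocab_size=8000, num_layers=2, units=512, d_model=128, num_heads=4, dropout=0.1) would build the model; the hyperparameter values are arbitrary placeholders.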