Code example #1: feed-forward block
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import BatchNormalization, Dropout, Lambda
# LayerDropout is a custom layer (stochastic depth); a sketch follows after this example.

def feed_forward_block(FeedForward_layers, x, dropout=0.0, l=1., L=1.):
    residual = x                      # keep the input for the residual connection
    x = BatchNormalization()(x)       # normalize before the sub-layer (pre-norm)
    x = Dropout(dropout)(x)
    x = FeedForward_layers[0](x)      # first position-wise layer
    x = FeedForward_layers[1](x)      # second position-wise layer
    x = LayerDropout(dropout * (l / L))([x, residual])  # stochastic-depth residual add
    return x
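The blocks in this section rely on LayerDropout, a custom layer that is not shown here. Below is a minimal sketch of what such a layer could look like, assuming it implements the layer-dropout (stochastic depth) scheme used in QANet: during training the whole sub-layer output is dropped with the given probability and only the residual is passed through, while at inference the usual residual sum is returned. The class body and the `rate` argument name are assumptions for illustration, not the original implementation.

import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer

class LayerDropout(Layer):
    """Stochastic-depth residual: drop the sub-layer output with probability `rate`."""
    def __init__(self, rate=0.0, **kwargs):
        super().__init__(**kwargs)
        self.rate = rate

    def call(self, inputs, training=None):
        x, residual = inputs

        def train_output():
            # With probability `rate`, skip the sub-layer entirely during training.
            drop = tf.random.uniform([]) < self.rate
            return tf.cond(drop, lambda: residual, lambda: x + residual)

        def test_output():
            return x + residual

        return K.in_train_phase(train_output, test_output, training=training)

Because the block functions above pass `dropout * (l / L)` as the rate, the probability of dropping a sub-layer grows with its depth, as in stochastic depth.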
Code example #2: self-attention block
def attention_block(attention_layer, x, seq_len, dropout=0.0, l=1., L=1.):
    residual = x                      # keep the input for the residual connection
    x = BatchNormalization()(x)       # normalize before the attention sub-layer
    x = Dropout(dropout)(x)
    x1 = attention_layer[0](x)        # first projection of the normalized input
    x2 = attention_layer[1](x)        # second projection of the normalized input
    x = attention_layer[2]([x1, x2, seq_len])  # attention over the projections, masked by seq_len
    x = LayerDropout(dropout * (l / L))([x, residual])  # stochastic-depth residual add
    return x
Code example #3: convolution block
def conv_block(conv_layers, x, num_conv=4, dropout=0.0, l=1., L=1.):
    # Insert a dummy axis (presumably so 2-D / separable convolution layers can
    # be applied to the sequence); it is removed again after the conv sub-layers.
    x = Lambda(lambda v: K.expand_dims(v, axis=2))(x)
    for i in range(num_conv):
        residual = x                  # keep the input for the residual connection
        x = BatchNormalization()(x)   # normalize before each convolution sub-layer
        x = Dropout(dropout)(x)
        x = conv_layers[i][0](x)      # first stage of the i-th convolution
        x = conv_layers[i][1](x)      # second stage of the i-th convolution
        x = LayerDropout(dropout * (l / L))([x, residual])  # stochastic-depth residual add
    x = Lambda(lambda v: tf.squeeze(v, axis=2))(x)
    return x
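For context, here is a hedged sketch of how these three blocks might be chained into one QANet-style encoder block (convolutions, then self-attention, then the feed-forward sub-layer). The function name `encoder_block`, the pre-built layer lists, and the way the sub-layer counter `l` is advanced are illustrative assumptions, not composition code from the original article.

def encoder_block(conv_layers, attention_layer, FeedForward_layers,
                  x, seq_len, num_conv=4, dropout=0.1, l=1., L=7.):
    # Convolution sub-layers, then self-attention, then feed-forward;
    # l is advanced so later sub-layers get a larger LayerDropout
    # probability (dropout * l / L).
    x = conv_block(conv_layers, x, num_conv=num_conv, dropout=dropout, l=l, L=L)
    l += num_conv
    x = attention_block(attention_layer, x, seq_len, dropout=dropout, l=l, L=L)
    l += 1
    x = feed_forward_block(FeedForward_layers, x, dropout=dropout, l=l, L=L)
    return x

Passing the layer lists in as arguments (rather than creating layers inside the block functions) lets the same weights be reused when a block is applied more than once in the model.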