Example No. 1
    def test_google_weights(self):
        albert_model_name = "albert_base"
        albert_dir = bert.fetch_tfhub_albert_model(albert_model_name,
                                                   ".models")

        albert_params = bert.albert_params(albert_model_name)
        l_bert = bert.BertModelLayer.from_params(albert_params, name="albert")

        l_input_ids = keras.layers.Input(shape=(128, ),
                                         dtype='int32',
                                         name="input_ids")
        l_token_type_ids = keras.layers.Input(shape=(128, ),
                                              dtype='int32',
                                              name="token_type_ids")
        output = l_bert([l_input_ids, l_token_type_ids])            # [batch_size, 128, hidden_size]
        output = keras.layers.Lambda(lambda x: x[:, 0, :])(output)  # take the [CLS] token output
        output = keras.layers.Dense(2)(output)                      # 2-class classification head
        model = keras.Model(inputs=[l_input_ids, l_token_type_ids],
                            outputs=output)

        model.build(input_shape=(None, 128))
        model.compile(
            optimizer=keras.optimizers.Adam(),
            loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
            metrics=[keras.metrics.SparseCategoricalAccuracy(name="acc")])

        for weight in l_bert.weights:
            print(weight.name)

        bert.load_albert_weights(l_bert, albert_dir)  # load pre-trained weights (after model.build())

        model.summary()
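Note: these test snippets use bert, keras, and tf without showing their imports. A minimal sketch of the scaffolding they assume (the test-class name below is hypothetical):

import unittest

import bert                      # bert-for-tf2 package
import tensorflow as tf
from tensorflow import keras


class AlbertLoadingTest(unittest.TestCase):
    # the test_* methods shown in these examples are defined on a class like this
    pass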
Example No. 2
    def test_albert_load_base_google_weights(self):  # for coverage mainly
        albert_model_name = "albert_base"
        albert_dir = bert.fetch_tfhub_albert_model(albert_model_name,
                                                   ".models")
        model_params = bert.albert_params(albert_model_name)

        l_bert = bert.BertModelLayer.from_params(model_params, name="albert")

        model = keras.models.Sequential([
            keras.layers.InputLayer(input_shape=(8, ),
                                    dtype=tf.int32,
                                    name="input_ids"),
            l_bert,
            keras.layers.Lambda(lambda x: x[:, 0, :]),  # take the [CLS] token output
            keras.layers.Dense(2),
        ])
        model.build(input_shape=(None, 8))
        model.compile(
            optimizer=keras.optimizers.Adam(),
            loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
            metrics=[keras.metrics.SparseCategoricalAccuracy(name="acc")])

        bert.load_albert_weights(l_bert, albert_dir)

        model.summary()
Example No. 3
    def test_albert_google_weights(self):
        albert_model_name = "albert_base"
        albert_dir = bert.fetch_tfhub_albert_model(albert_model_name, ".models")

        albert_params = bert.albert_params(albert_model_name)
        model, l_bert = self.build_model(albert_params)

        skipped_weight_value_tuples = bert.load_albert_weights(l_bert, albert_dir)
        self.assertEqual(0, len(skipped_weight_value_tuples))
        model.summary()
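Example No. 3 relies on a build_model helper that is not shown here. A plausible sketch, assuming it mirrors the functional-API construction from Example No. 1 (the helper itself is hypothetical):

    def build_model(self, bert_params, max_seq_len=128):
        # hypothetical helper: returns (keras model, bert layer), mirroring Example No. 1
        l_bert = bert.BertModelLayer.from_params(bert_params, name="albert")
        l_input_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name="input_ids")
        l_token_type_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name="token_type_ids")
        output = l_bert([l_input_ids, l_token_type_ids])
        output = keras.layers.Lambda(lambda x: x[:, 0, :])(output)  # [CLS] token output
        output = keras.layers.Dense(2)(output)
        model = keras.Model(inputs=[l_input_ids, l_token_type_ids], outputs=output)
        model.build(input_shape=(None, max_seq_len))
        return model, l_bert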
Example No. 4
    def test_albert_params(self):
        albert_model_name = "albert_base"
        albert_dir = bert.fetch_tfhub_albert_model(albert_model_name,
                                                   ".models")
        dir_params = bert.albert_params(albert_dir)
        dir_params.attention_dropout = 0.1  # diff between README and assets/albert_config.json
        dir_params.hidden_dropout = 0.1
        name_params = bert.albert_params(albert_model_name)
        self.assertEqual(name_params, dir_params)

        # coverage
        model_params = dir_params
        model_params.vocab_size = model_params.vocab_size + 2
        model_params.adapter_size = 1
        l_bert = bert.BertModelLayer.from_params(model_params, name="albert")
        l_bert(tf.zeros((1, 128)))
        bert.load_albert_weights(l_bert, albert_dir)
Example No. 5
def Albert_model(max_seq_len):
    model_name = "albert_large"
    model_dir = bert.fetch_tfhub_albert_model(model_name, ".models")
    model_params = bert.albert_params(model_name)
    model_params.shared_layer = True
    model_params.embedding_size = 1024

    l_bert = bert.BertModelLayer.from_params(model_params, name="albert")

    l_input_ids = keras.layers.Input(shape=(max_seq_len, ), dtype='int32')

    # using the default token_type/segment id 0
    output = l_bert(l_input_ids)                              # output: [batch_size, max_seq_len, hidden_size]
    output = keras.layers.GlobalAveragePooling1D()(output)
    model = keras.Model(inputs=l_input_ids, outputs=output)
    model.build(input_shape=(None, max_seq_len))
    # load the pre-trained Google ALBERT weights; must be called after model.build()
    bert.load_albert_weights(l_bert, model_dir)
    return model, model_dir
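A short usage sketch for Albert_model (the dummy input below is purely illustrative):

import numpy as np

model, model_dir = Albert_model(max_seq_len=128)
dummy_token_ids = np.zeros((1, 128), dtype=np.int32)  # illustrative dummy token ids
sentence_embedding = model.predict(dummy_token_ids)   # shape: (1, hidden_size)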
Example No. 6
def load_bert_model(name_model, max_seq_len, trainable=False):
    """
    Supported model names are the same as for tf-2.0-bert (bert-for-tf2).
    """
    model_name = name_model
    model_dir = bert.fetch_tfhub_albert_model(model_name, ".models")
    model_params = bert.albert_params(model_name)

    l_bert = bert.BertModelLayer.from_params(model_params, name=name_model)

    l_input_ids = tf.keras.layers.Input(shape=(max_seq_len, ), dtype='int32')

    output = l_bert(
        l_input_ids)  # output: [batch_size, max_seq_len, hidden_size]

    model = tf.keras.Model(inputs=l_input_ids, outputs=output)
    model.build(input_shape=(None, max_seq_len))

    # load google albert original weights after the build
    bert.load_albert_weights(l_bert, model_dir)
    model.trainable = trainable

    return model
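A short usage sketch for load_bert_model as a frozen feature extractor (the model name and dummy input are illustrative):

encoder = load_bert_model("albert_base", max_seq_len=128, trainable=False)
token_ids = tf.zeros((1, 128), dtype=tf.int32)  # illustrative dummy input
sequence_output = encoder(token_ids)            # [1, 128, hidden_size]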
Example No. 7
def build_transformer(transformer, max_seq_length=None, num_labels=None, tagging=True, tokenizer_only=False):
    spm_model_file = None
    if transformer in zh_albert_models_google:
        from bert.tokenization.albert_tokenization import FullTokenizer
        model_url = zh_albert_models_google[transformer]
        albert = True
    elif transformer in albert_models_tfhub:
        from edparser.layers.transformers.albert_tokenization import FullTokenizer
        with stdout_redirected(to=os.devnull):
            model_url = fetch_tfhub_albert_model(transformer,
                                                 os.path.join(hanlp_home(), 'thirdparty', 'tfhub.dev', 'google',
                                                              transformer))
        albert = True
        spm_model_file = glob.glob(os.path.join(model_url, 'assets', '*.model'))
        assert len(spm_model_file) == 1, 'No SentencePiece model found or multiple models found'
        spm_model_file = spm_model_file[0]
    elif transformer in bert_models_google:
        from bert.tokenization.bert_tokenization import FullTokenizer
        model_url = bert_models_google[transformer]
        albert = False
    else:
        raise ValueError(
            f'Unknown model {transformer}, available ones: {list(bert_models_google.keys()) + list(zh_albert_models_google.keys()) + list(albert_models_tfhub.keys())}')
    bert_dir = get_resource(model_url)
    if spm_model_file:
        vocab = glob.glob(os.path.join(bert_dir, 'assets', '*.vocab'))
    else:
        vocab = glob.glob(os.path.join(bert_dir, '*vocab*.txt'))
    assert len(vocab) == 1, 'No vocab found or multiple (ambiguous) vocabs found'
    vocab = vocab[0]
    lower_case = any(key in transformer for key in ['uncased', 'multilingual', 'chinese', 'albert'])
    if spm_model_file:
        # noinspection PyTypeChecker
        tokenizer = FullTokenizer(vocab_file=vocab, spm_model_file=spm_model_file, do_lower_case=lower_case)
    else:
        tokenizer = FullTokenizer(vocab_file=vocab, do_lower_case=lower_case)
    if tokenizer_only:
        return tokenizer
    if spm_model_file:
        bert_params = albert_params(bert_dir)
    else:
        bert_params = bert.params_from_pretrained_ckpt(bert_dir)
    l_bert = bert.BertModelLayer.from_params(bert_params, name='albert' if albert else "bert")
    if not max_seq_length:
        return l_bert, tokenizer, bert_dir
    l_input_ids = tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32', name="input_ids")
    l_mask_ids = tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32', name="mask_ids")
    l_token_type_ids = tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32', name="token_type_ids")
    output = l_bert([l_input_ids, l_token_type_ids], mask=l_mask_ids)
    if not tagging:
        output = tf.keras.layers.Lambda(lambda seq: seq[:, 0, :])(output)
    if bert_params.hidden_dropout:
        output = tf.keras.layers.Dropout(bert_params.hidden_dropout, name='hidden_dropout')(output)
    logits = tf.keras.layers.Dense(num_labels, kernel_initializer=tf.keras.initializers.TruncatedNormal(
        stddev=bert_params.initializer_range))(output)
    model = tf.keras.Model(inputs=[l_input_ids, l_mask_ids, l_token_type_ids], outputs=logits)
    model.build(input_shape=(None, max_seq_length))
    if not spm_model_file:
        ckpt = glob.glob(os.path.join(bert_dir, '*.index'))
        assert ckpt, f'No checkpoint found under {bert_dir}'
        ckpt, _ = os.path.splitext(ckpt[0])
    with stdout_redirected(to=os.devnull):
        if albert:
            if spm_model_file:
                skipped_weight_value_tuples = bert.load_albert_weights(l_bert, bert_dir)
            else:
                # noinspection PyUnboundLocalVariable
                skipped_weight_value_tuples = load_stock_weights(l_bert, ckpt)
        else:
            # noinspection PyUnboundLocalVariable
            skipped_weight_value_tuples = bert.load_bert_weights(l_bert, ckpt)
    assert 0 == len(skipped_weight_value_tuples), f'failed to load pretrained {transformer}'
    return model, tokenizer
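A sketch of how build_transformer might be called (the model name and label count are illustrative):

# tokenizer only:
tokenizer = build_transformer('albert_base', tokenizer_only=True)

# full classification model (tagging=False pools the [CLS] token before the Dense head):
model, tokenizer = build_transformer('albert_base', max_seq_length=128,
                                     num_labels=2, tagging=False)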
Example No. 8
from datetime import datetime

import bert
from bert.tokenization import FullTokenizer
from tensorflow import keras

from helper import create_learn_rate_scheduler, f1_score

MAX_SEQ_LEN = 128
ADAPTER_SIZE = None  # Use None for Fine-Tuning
MODEL_NAME = "albert_base"
MODEL_URL = 'https://tfhub.dev/google/albert_base/2?tf-hub-format=compressed'
CHECKPOINT_DIR = 'checkpoints'
MODEL_DIR = bert.fetch_tfhub_albert_model(MODEL_NAME, CHECKPOINT_DIR)

LOG_DIR = ".log/" + datetime.now().strftime("%Y%m%d-%H%M%s")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=LOG_DIR)


def flatten_layers(root_layer):
    if isinstance(root_layer, keras.layers.Layer):
        yield root_layer
    for layer in root_layer._layers:
        yield from flatten_layers(layer)


def freeze_layers(root_layer, exclude=None):
    exclude = [] if exclude is None else exclude
    root_layer.trainable = False
    for layer in flatten_layers(root_layer):
        if layer.name in exclude: