Example #1
import os

import tensorflow as tf
from transformers import TFBertModel

# Assumed source of `modeling`: the BigBird reference implementation
# (google-research/bigbird).
from bigbird.core import modeling


def get_encoder(
    model_name,
    args,
    trainable: bool = True,
    prefix=None,
):
    MODEL_PATH = model_name
    if prefix is not None:
        MODEL_PATH = os.path.join(prefix, model_name)
    if model_name in {'bert-base-uncased', 'NlpHUST/vibert4news-base-cased'}:
        encoder = TFBertModel.from_pretrained(MODEL_PATH, trainable=trainable)
    elif 'bigbird' in model_name:
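        # BigBird base configuration with block-sparse attention; the encoder
        # length is taken from args.max_context_length.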
        encoder = modeling.BertModel({
            "attention_probs_dropout_prob": 0.1,
            "hidden_act": "gelu",
            "hidden_dropout_prob": 0.1,
            "hidden_size": 768,
            "initializer_range": 0.02,
            "intermediate_size": 3072,
            "max_position_embeddings": 4096,
            "max_encoder_length": args.max_context_length,
            "num_attention_heads": 12,
            "num_hidden_layers": 12,
            "type_vocab_size": 2,
            "use_bias": True,
            "rescale_embedding": False,
            "use_gradient_checkpointing": False,
            "scope": "bert",
            "attention_type": "block_sparse",
            "norm_type": "postnorm",
            "block_size": 16,
            "num_rand_blocks": 3,
            "vocab_size": 50358
        })
        checkpoint_path = 'gs://bigbird-transformer/pretrain/bigbr_base/model.ckpt-0'
        checkpoint_reader = tf.compat.v1.train.NewCheckpointReader(
            checkpoint_path)
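        # Each variable name ends in ':0'; strip that suffix to look up the
        # matching tensor in the checkpoint.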
        encoder.set_weights([
            checkpoint_reader.get_tensor(v.name[:-2])
            for v in encoder.trainable_weights
        ])
        encoder.trainable = trainable  # honor the caller's flag
    else:
        raise ValueError("Model {} not supported".format(model_name))

    if not args.use_pooler:
        if model_name in {
                'bert-base-uncased', 'NlpHUST/vibert4news-base-cased'
        }:
            encoder.bert.pooler.trainable = False
        elif model_name == 'bigbird':
            encoder.pooler.trainable = False

    return encoder
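
A minimal usage sketch (hypothetical: the `args` namespace only needs the
fields the function reads above):

from types import SimpleNamespace

args = SimpleNamespace(max_context_length=2048, use_pooler=False)
encoder = get_encoder('bert-base-uncased', args, trainable=True)
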
Example #2
    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator."""

        is_training = (mode == tf.estimator.ModeKeys.TRAIN)

        model = modeling.BertModel(bert_config)
        masked_lm = MaskedLMLayer(bert_config["hidden_size"],
                                  bert_config["vocab_size"],
                                  model.embeder,
                                  initializer=utils.create_initializer(
                                      bert_config["initializer_range"]),
                                  activation_fn=utils.get_activation(
                                      bert_config["hidden_act"]))
        next_sentence = NSPLayer(bert_config["hidden_size"],
                                 initializer=utils.create_initializer(
                                     bert_config["initializer_range"]))

        sequence_output, pooled_output = model(
            features["input_ids"],
            training=is_training,
            token_type_ids=features.get("segment_ids"))

        masked_lm_loss, masked_lm_log_probs = masked_lm(
            sequence_output,
            label_ids=features.get("masked_lm_ids"),
            label_weights=features.get("masked_lm_weights"),
            masked_lm_positions=features.get("masked_lm_positions"))

        next_sentence_loss, next_sentence_log_probs = next_sentence(
            pooled_output, features.get("next_sentence_labels"))

        total_loss = masked_lm_loss
        if bert_config["use_nsp"]:
            total_loss += next_sentence_loss

        tvars = tf.compat.v1.trainable_variables()
        utils.log_variables(tvars, bert_config["ckpt_var_list"])

        output_spec = None
        if mode == tf.estimator.ModeKeys.TRAIN:

            learning_rate = optimization.get_linear_warmup_linear_decay_lr(
                init_lr=bert_config["learning_rate"],
                num_train_steps=bert_config["num_train_steps"],
                num_warmup_steps=bert_config["num_warmup_steps"])

            optimizer = optimization.get_optimizer(bert_config, learning_rate)

            global_step = tf.compat.v1.train.get_global_step()

            gradients = optimizer.compute_gradients(total_loss, tvars)
            train_op = optimizer.apply_gradients(gradients,
                                                 global_step=global_step)

            output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=total_loss,
                train_op=train_op,
                host_call=utils.add_scalars_to_summary(
                    bert_config["output_dir"],
                    {"learning_rate": learning_rate}))

        elif mode == tf.estimator.ModeKeys.EVAL:

            def metric_fn(masked_lm_loss_value, masked_lm_log_probs,
                          masked_lm_ids, masked_lm_weights,
                          next_sentence_loss_value, next_sentence_log_probs,
                          next_sentence_labels):
                """Computes the loss and accuracy of the model."""
                masked_lm_predictions = tf.argmax(masked_lm_log_probs,
                                                  axis=-1,
                                                  output_type=tf.int32)
                masked_lm_accuracy = tf.compat.v1.metrics.accuracy(
                    labels=masked_lm_ids,
                    predictions=masked_lm_predictions,
                    weights=masked_lm_weights)
                masked_lm_mean_loss = tf.compat.v1.metrics.mean(
                    values=masked_lm_loss_value)

                next_sentence_predictions = tf.argmax(next_sentence_log_probs,
                                                      axis=-1,
                                                      output_type=tf.int32)
                next_sentence_accuracy = tf.compat.v1.metrics.accuracy(
                    labels=next_sentence_labels,
                    predictions=next_sentence_predictions)
                next_sentence_mean_loss = tf.compat.v1.metrics.mean(
                    values=next_sentence_loss_value)

                return {
                    "masked_lm_accuracy": masked_lm_accuracy,
                    "masked_lm_loss": masked_lm_mean_loss,
                    "next_sentence_accuracy": next_sentence_accuracy,
                    "next_sentence_loss": next_sentence_mean_loss,
                }

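            # TPUEstimatorSpec takes eval metrics as a (metric_fn, tensors)
            # pair: the tensors are shipped from the TPU to the host, where
            # metric_fn runs on CPU.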
            eval_metrics = (metric_fn, [
                masked_lm_loss, masked_lm_log_probs, features["masked_lm_ids"],
                features["masked_lm_weights"], next_sentence_loss,
                next_sentence_log_probs, features["next_sentence_labels"]
            ])
            output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
                mode=mode, loss=total_loss, eval_metrics=eval_metrics)
        else:

            output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
                mode=mode,
                predictions={
                    "log-probabilities": masked_lm_log_probs,
                    "seq-embeddings": sequence_output
                })

        return output_spec
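
A hedged sketch of how a `model_fn` like this is typically wired into
`TPUEstimator` (the `bert_config` values and `train_input_fn` here are
assumptions, not from the source):

run_config = tf.compat.v1.estimator.tpu.RunConfig(
    model_dir=bert_config["output_dir"],
    save_checkpoints_steps=1000)
estimator = tf.compat.v1.estimator.tpu.TPUEstimator(
    model_fn=model_fn,
    config=run_config,
    use_tpu=False,  # falls back to CPU/GPU when no TPU is available
    train_batch_size=32,
    eval_batch_size=32)
estimator.train(input_fn=train_input_fn,
                max_steps=bert_config["num_train_steps"])
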
Example #3
  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""

    if isinstance(features, dict):
      if not labels and "labels" in features:
        labels = features["labels"]
      features = features["input_ids"]

    is_training = (mode == tf.estimator.ModeKeys.TRAIN)

    model = modeling.BertModel(bert_config)
    headl = ClassifierLossLayer(
        bert_config["num_labels"], bert_config["hidden_dropout_prob"],
        utils.create_initializer(bert_config["initializer_range"]),
        name=bert_config["scope"]+"/classifier")

    _, pooled_output = model(features, training=is_training)
    total_loss, log_probs = headl(pooled_output, labels, is_training)

    tvars = tf.compat.v1.trainable_variables()
    utils.log_variables(tvars, bert_config["ckpt_var_list"])

    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:

      learning_rate = optimization.get_linear_warmup_linear_decay_lr(
          init_lr=bert_config["learning_rate"],
          num_train_steps=bert_config["num_train_steps"],
          num_warmup_steps=bert_config["num_warmup_steps"])

      optimizer = optimization.get_optimizer(bert_config, learning_rate)

      global_step = tf.compat.v1.train.get_or_create_global_step()

      gradients = optimizer.compute_gradients(total_loss, tvars)
      train_op = optimizer.apply_gradients(gradients, global_step=global_step)

      output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          host_call=utils.add_scalars_to_summary(
              bert_config["output_dir"], {"learning_rate": learning_rate}))

    elif mode == tf.estimator.ModeKeys.EVAL:

      def metric_fn(loss_value, label_ids, log_probs):
        loss = tf.compat.v1.metrics.mean(values=loss_value)

        predictions = tf.argmax(log_probs, axis=-1, output_type=tf.int32)
        accuracy = tf.compat.v1.metrics.accuracy(
            labels=label_ids, predictions=predictions)
        p1, p1_op = tf.compat.v1.metrics.precision_at_k(
            labels=tf.cast(label_ids, tf.int64), predictions=log_probs, k=1)
        r1, r1_op = tf.compat.v1.metrics.recall_at_k(
            labels=tf.cast(label_ids, tf.int64), predictions=log_probs, k=1)
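        # F1@1 from the streaming P@1/R@1 values; divide_no_nan guards the
        # p1 + r1 == 0 case.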
        f11 = tf.math.divide_no_nan(2*p1*r1, p1+r1)

        metric_dict = {
            "P@1": (p1, p1_op),
            "R@1": (r1, r1_op),
            "f1@1": (f11, tf.no_op()),
            "classification_accuracy": accuracy,
            "classification_loss": loss,
        }

        return metric_dict

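      # total_loss is a scalar; expand_dims gives it the batch dimension
      # TPUEstimator expects on every tensor passed to metric_fn.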
      eval_metrics = (metric_fn,
                      [tf.expand_dims(total_loss, 0), labels, log_probs])
      output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics)
    else:
      output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
          mode=mode,
          predictions={"log-probabilities": log_probs})

    return output_spec
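
As in the pretraining example, the PREDICT branch yields one dict per input
example; a hedged consumption sketch (the `estimator` and `predict_input_fn`
names here are hypothetical):

for pred in estimator.predict(input_fn=predict_input_fn):
    predicted_label = int(pred["log-probabilities"].argmax())
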
Example #4
    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator."""

        is_training = (mode == tf.estimator.ModeKeys.TRAIN)

        # Define the BigBird model
        model = modeling.BertModel(bert_config,
                                   features["input_ids"],
                                   training=is_training,
                                   token_type_ids=features.get("segment_ids"))
        # Fetch the sequence (attention) output and the pooled [CLS] feature
        sequence_output, pooled_output = model.get_output_feature()

        masked_lm = MaskedLMLayer(  # defines the masked-LM output head
            bert_config["hidden_size"],
            bert_config["vocab_size"],
            model.embeder,
            input_tensor=sequence_output,
            label_ids=features.get("masked_lm_ids"),
            label_weights=features.get("masked_lm_weights"),
            masked_lm_positions=features.get("masked_lm_positions"),
            initializer=utils.create_initializer(
                bert_config["initializer_range"]),
            activation_fn=utils.get_activation(bert_config["hidden_act"]))

        masked_lm_loss, masked_lm_log_probs = masked_lm.get_mlm_loss()

        total_loss = masked_lm_loss

        tvars = tf.compat.v1.trainable_variables()
        utils.LogVariable(tvars, bert_config["ckpt_var_list"])

        output_spec = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            # Build the learning-rate schedule.
            opt_model = optimization.LinearWarmupLinearDecay(
                init_lr=bert_config["learning_rate"],
                num_train_steps=bert_config["num_train_steps"],
                num_warmup_steps=bert_config["num_warmup_steps"])
            learning_rate = opt_model.get_learning_rate()  # fetch the learning rate

            optimizer = optimization.Optimizer(bert_config, learning_rate)
            optimizer = optimizer.get_optimizer()

            global_step = tf.compat.v1.train.get_global_step()

            gradients = optimizer.compute_gradients(total_loss, tvars)
            train_op = optimizer.apply_gradients(gradients,
                                                 global_step=global_step)
            # Log the loss, global step, and learning rate every 256 steps.
            logging_hook = [
                tf.compat.v1.train.LoggingTensorHook(
                    {
                        "loss": total_loss,
                        "global_step": global_step,
                        "learning_rate": learning_rate
                    },
                    every_n_iter=256)
            ]

            output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=total_loss,
                train_op=train_op,
                training_hooks=logging_hook,
                host_call=utils.add_scalars_to_summary(
                    bert_config["output_dir"],
                    {"learning_rate": learning_rate}))

        elif mode == tf.estimator.ModeKeys.EVAL:

            def metric_fn(masked_lm_loss_value, masked_lm_log_probs,
                          masked_lm_ids, masked_lm_weights):

                masked_lm_predictions = tf.argmax(masked_lm_log_probs,
                                                  axis=-1,
                                                  output_type=tf.int32)
                masked_lm_accuracy = tf.compat.v1.metrics.accuracy(
                    labels=masked_lm_ids,
                    predictions=masked_lm_predictions,
                    weights=masked_lm_weights)
                masked_lm_mean_loss = tf.compat.v1.metrics.mean(
                    values=masked_lm_loss_value)

                return {
                    "masked_lm_accuracy": masked_lm_accuracy,
                    "masked_lm_loss": masked_lm_mean_loss,
                }

            eval_metrics = (metric_fn, [
                masked_lm_loss, masked_lm_log_probs, features["masked_lm_ids"],
                features["masked_lm_weights"]
            ])

            output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
                mode=mode, loss=total_loss, eval_metrics=eval_metrics)
        else:
            output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
                mode=mode,
                predictions={
                    "log-probabilities": masked_lm_log_probs,
                    "seq-embeddings": sequence_output
                })

        return output_spec