            (predicted_labels, log_probs) = create_model(
                bert_config, is_training, is_predicting, input_ids, input_mask,
                segment_ids, label_ids, num_labels, use_one_hot_embeddings)

            predictions = {
                'probabilities': log_probs,
                'labels': predicted_labels
            }
            return tf.estimator.EstimatorSpec(mode, predictions=predictions)

    # Return the actual model function in the closure
    return model_fn


if __name__ == "__main__":
    label_list = [int(i) for i in hp.label_list.split(",")]
    train_features, test_features = process_data(hp)

    """ Start train """
    # Compute # train and warmup steps from batch size
    num_train_steps = int(len(train_features) / hp.BATCH_SIZE * hp.NUM_TRAIN_EPOCHS)
    num_warmup_steps = int(num_train_steps * hp.WARMUP_PROPORTION)
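    # Worked example (hypothetical numbers, not from this snippet): with
    # 9,600 train features, BATCH_SIZE=32 and NUM_TRAIN_EPOCHS=3,
    # num_train_steps = int(9600 / 32 * 3) = 900; with
    # WARMUP_PROPORTION=0.1, num_warmup_steps = int(900 * 0.1) = 90.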

    # Specify output directory and number of checkpoint steps to save
    run_config = tf.estimator.RunConfig(
        model_dir=hp.OUTPUT_DIR,
        save_summary_steps=hp.SAVE_SUMMARY_STEPS,
        save_checkpoints_steps=hp.SAVE_CHECKPOINTS_STEPS)
    bert_config = modeling.BertConfig.from_json_file(hp.BERT_CONFIG)
    model_fn = model_fn_builder(
        bert_config=bert_config,
        num_labels=len(label_list),
        learning_rate=hp.LEARNING_RATE,
        num_train_steps=num_train_steps,
        num_warmup_steps=num_warmup_steps,
        use_one_hot_embeddings=False)

    estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config, params={"batch_size": hp.BATCH_SIZE})

    # Create an input function for training. drop_remainder = True for using TPUs.
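    # Sketch of the training call this comment introduces (assumption: the
    # example is truncated here). The input_fn_builder call mirrors the one
    # in Example #2 below, and estimator.train is the standard
    # tf.estimator API.
    train_input_fn = run_classifier.input_fn_builder(
        features=train_features,
        seq_length=hp.MAX_SEQ_LENGTH,
        is_training=True,
        drop_remainder=False)
    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)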

Example #2

    # loss and train_op are defined earlier in this model_fn (not shown).
    global_step = tf.train.get_or_create_global_step()
    tf.summary.scalar("loss", loss)
    tf.summary.scalar("global_step", global_step)

    # tf.metrics.accuracy returns an (accuracy, update_op) pair; the summary
    # below records the updated accuracy value.
    accuracy = tf.metrics.accuracy(label_ids, predicted_labels)
    eval_metrics = {"eval_accuracy": accuracy}

    tf.summary.scalar("eval_accuracy", accuracy[1])

    summaries = tf.summary.merge_all()
    return loss, train_op, global_step, summaries, eval_metrics, label_ids, predicted_labels
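    # Sketch (assumption, not part of this example): inside a model_fn, the
    # tuple returned above would typically be wrapped into an EstimatorSpec
    # via the standard tf.estimator API, e.g.:
    #
    #   if mode == tf.estimator.ModeKeys.TRAIN:
    #       return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
    #   return tf.estimator.EstimatorSpec(
    #       mode, loss=loss, eval_metric_ops=eval_metrics)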


if __name__ == "__main__":
    label_list = [int(i) for i in hp.label_list.split(",")]
    train_features, eval_features = process_data(hp)

    # Compute the number of train and warmup steps from the batch size.
    num_train_steps = int(
        len(train_features) / hp.BATCH_SIZE * hp.NUM_TRAIN_EPOCHS)
    num_warmup_steps = int(num_train_steps * hp.WARMUP_PROPORTION)
    # Number of eval batches, rounded up to include a final partial batch.
    num_eval_batches = len(eval_features) // hp.BATCH_SIZE + int(
        len(eval_features) % hp.BATCH_SIZE != 0)

    # Create an input function for training. drop_remainder = True for using TPUs.
    train_input_fn = run_classifier.input_fn_builder(
        features=train_features,
        seq_length=hp.MAX_SEQ_LENGTH,
        is_training=True,
        drop_remainder=False)
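
    # Sketch of how this example would typically continue (assumption: the
    # snippet is truncated here). A matching eval input function and the
    # train/evaluate calls would follow, using an `estimator` built as in
    # the first example above; num_eval_batches feeds the steps argument
    # of evaluate().
    eval_input_fn = run_classifier.input_fn_builder(
        features=eval_features,
        seq_length=hp.MAX_SEQ_LENGTH,
        is_training=False,
        drop_remainder=False)

    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
    estimator.evaluate(input_fn=eval_input_fn, steps=num_eval_batches)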