Example #1
    def __init__(self):

        self.bert_config = run_classifier.modeling.BertConfig.from_json_file(
            bert_config_file)
        self.tokenizer = tokenization.FullTokenizer(
            vocab_file=vocab_file, do_lower_case=is_lower_case)
        self.is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2

        self.run_config = tf.contrib.tpu.RunConfig(
            cluster=tpu_cluster_resolver,
            model_dir=OUTPUT_DIR,
            save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS,
            tpu_config=tf.contrib.tpu.TPUConfig(
                iterations_per_loop=ITERATIONS_PER_LOOP,
                num_shards=NUM_TPU_CORES,
                per_host_input_for_training=self.is_per_host))

        self.model_fn = run_classifier.model_fn_builder(
            bert_config=self.bert_config,
            num_labels=len(label_list),
            init_checkpoint=init_checkpoint,
            learning_rate=5e-5,
            num_train_steps=None,
            num_warmup_steps=None,
            use_tpu=False,
            use_one_hot_embeddings=False)

        self.estimator = tf.contrib.tpu.TPUEstimator(
            use_tpu=False,
            model_fn=self.model_fn,
            config=self.run_config,
            train_batch_size=BATCH_SIZE,
            eval_batch_size=BATCH_SIZE,
            predict_batch_size=BATCH_SIZE)
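
A minimal usage sketch for the snippet above, assuming the `__init__` belongs to a wrapper class named `BertClassifier` (hypothetical name) and that `label_list` and the original `run_classifier` module are in scope; the input sentence, the 128-token limit, and the dummy label are illustrative only:

# Hypothetical prediction call using the TPUEstimator built in __init__ above.
max_seq_length = 128  # assumed to match the sequence length used at fine-tuning time
classifier = BertClassifier()  # hypothetical class wrapping the __init__ shown above
examples = [
    run_classifier.InputExample(guid="pred-0",
                                text_a="Great service and friendly staff.",
                                text_b=None,
                                label=label_list[0])  # dummy label; ignored at predict time
]
features = run_classifier.convert_examples_to_features(
    examples, label_list, max_seq_length, classifier.tokenizer)
predict_input_fn = run_classifier.input_fn_builder(
    features=features,
    seq_length=max_seq_length,
    is_training=False,
    drop_remainder=False)
for prediction in classifier.estimator.predict(input_fn=predict_input_fn):
    print(prediction["probabilities"])
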
Example #2
def get_tpu_estimator(model_dir, train_epochs, size_train_set, batch_size,
                      num_labels, classifier_model_fn):

    bert_pretrained_dir = f'{model_dir}/uncased_L-12_H-768_A-12'
    config_file = os.path.join(bert_pretrained_dir, 'bert_config.json')
    init_checkpoint = os.path.join(bert_pretrained_dir, 'bert_model.ckpt')

    tpu_cluster_resolver = None

    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    run_config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        model_dir=model_dir,
        save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS,
        tpu_config=tf.contrib.tpu.TPUConfig(
            iterations_per_loop=ITERATIONS_PER_LOOP,
            num_shards=NUM_TPU_CORES,
            per_host_input_for_training=is_per_host))

    num_train_steps = int(size_train_set / batch_size * train_epochs)

    num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)

    model_fn = run_classifier.model_fn_builder(
        bert_config=modeling.BertConfig.from_json_file(config_file),
        num_labels=num_labels,
        init_checkpoint=init_checkpoint,
        learning_rate=LEARNING_RATE,
        num_train_steps=num_train_steps,
        num_warmup_steps=num_warmup_steps,
        use_tpu=False,  # if False, training falls back to CPU or GPU, depending on what is available
        use_one_hot_embeddings=True,
        classifier_model_fn=classifier_model_fn)

    estimator = tf.contrib.tpu.TPUEstimator(
        use_tpu=False,  # if False, training falls back to CPU or GPU, depending on what is available
        model_fn=model_fn,
        config=run_config,
        train_batch_size=batch_size,
        eval_batch_size=batch_size)

    return estimator
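
A hedged call sketch for `get_tpu_estimator`; the constants it references (`SAVE_CHECKPOINTS_STEPS`, `ITERATIONS_PER_LOOP`, `NUM_TPU_CORES`, `WARMUP_PROPORTION`, `LEARNING_RATE`) are assumed to be defined at module level, and every concrete value below is a placeholder:

# Illustrative invocation; my_classifier_model_fn stands in for whatever custom
# head this fork's run_classifier.model_fn_builder accepts.
estimator = get_tpu_estimator(
    model_dir='gs://my-bucket/bert',  # hypothetical path containing uncased_L-12_H-768_A-12
    train_epochs=3,
    size_train_set=25000,
    batch_size=32,
    num_labels=2,
    classifier_model_fn=my_classifier_model_fn)
# Training would then use an input_fn built elsewhere, e.g. with
# run_classifier.file_based_input_fn_builder:
# estimator.train(input_fn=train_input_fn, max_steps=int(25000 / 32 * 3))
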
Example #3
def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)

    processors = {
        "ylilauta": YlilautaProcessor,
        "yle": YleProcessor,
    }

    tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                  FLAGS.init_checkpoint)

    if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
        raise ValueError(
            "At least one of `do_train`, `do_eval` or `do_predict' must be True."
        )

    bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

    if FLAGS.max_seq_length > bert_config.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence length %d because the BERT model "
            "was only trained up to sequence length %d" %
            (FLAGS.max_seq_length, bert_config.max_position_embeddings))

    tf.gfile.MakeDirs(FLAGS.output_dir)

    task_name = FLAGS.task_name.lower()

    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))

    processor = processors[task_name]()

    label_list = processor.get_labels()

    tokenizer = tokenization.FullTokenizer(vocab_file=FLAGS.vocab_file,
                                           do_lower_case=FLAGS.do_lower_case)

    tpu_cluster_resolver = None
    if FLAGS.use_tpu and FLAGS.tpu_name:
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    run_config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        master=FLAGS.master,
        model_dir=FLAGS.output_dir,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps,
        tpu_config=tf.contrib.tpu.TPUConfig(
            iterations_per_loop=FLAGS.iterations_per_loop,
            num_shards=FLAGS.num_tpu_cores,
            per_host_input_for_training=is_per_host))

    train_examples = None
    num_train_steps = None
    num_warmup_steps = None
    if FLAGS.do_train:
        train_examples = processor.get_train_examples(FLAGS.data_dir)
        num_train_steps = int(
            len(train_examples) / FLAGS.train_batch_size *
            FLAGS.num_train_epochs)
        num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
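        # Illustration with hypothetical numbers: 10,000 training examples,
        # train_batch_size=32 and num_train_epochs=3 give
        # num_train_steps = int(10000 / 32 * 3) = 937; with
        # warmup_proportion=0.1, num_warmup_steps = int(937 * 0.1) = 93.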

    model_fn = model_fn_builder(bert_config=bert_config,
                                num_labels=len(label_list),
                                init_checkpoint=FLAGS.init_checkpoint,
                                learning_rate=FLAGS.learning_rate,
                                num_train_steps=num_train_steps,
                                num_warmup_steps=num_warmup_steps,
                                use_tpu=FLAGS.use_tpu,
                                use_one_hot_embeddings=FLAGS.use_tpu)

    # If TPU is not available, this will fall back to normal Estimator on CPU
    # or GPU.
    estimator = tf.contrib.tpu.TPUEstimator(
        use_tpu=FLAGS.use_tpu,
        model_fn=model_fn,
        config=run_config,
        train_batch_size=FLAGS.train_batch_size,
        eval_batch_size=FLAGS.eval_batch_size,
        predict_batch_size=FLAGS.predict_batch_size)

    if FLAGS.do_train:
        train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
        file_based_convert_examples_to_features(train_examples, label_list,
                                                FLAGS.max_seq_length,
                                                tokenizer, train_file)
        tf.logging.info("***** Running training *****")
        tf.logging.info("  Num examples = %d", len(train_examples))
        tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
        tf.logging.info("  Num steps = %d", num_train_steps)
        train_input_fn = file_based_input_fn_builder(
            input_file=train_file,
            seq_length=FLAGS.max_seq_length,
            is_training=True,
            drop_remainder=True)
        estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)

    if FLAGS.do_eval:
        eval_examples = processor.get_dev_examples(FLAGS.data_dir)
        num_actual_eval_examples = len(eval_examples)
        if FLAGS.use_tpu:
            # TPU requires a fixed batch size for all batches, therefore the number
            # of examples must be a multiple of the batch size, or else examples
            # will get dropped. So we pad with fake examples which are ignored
            # later on. These do NOT count towards the metric (all tf.metrics
            # support a per-instance weight, and these get a weight of 0.0).
            while len(eval_examples) % FLAGS.eval_batch_size != 0:
                eval_examples.append(PaddingInputExample())

        eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
        file_based_convert_examples_to_features(eval_examples, label_list,
                                                FLAGS.max_seq_length,
                                                tokenizer, eval_file)

        tf.logging.info("***** Running evaluation *****")
        tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                        len(eval_examples), num_actual_eval_examples,
                        len(eval_examples) - num_actual_eval_examples)
        tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)

        # This tells the estimator to run through the entire set.
        eval_steps = None
        # However, if running eval on the TPU, you will need to specify the
        # number of steps.
        if FLAGS.use_tpu:
            assert len(eval_examples) % FLAGS.eval_batch_size == 0
            eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)

        eval_drop_remainder = True if FLAGS.use_tpu else False
        eval_input_fn = file_based_input_fn_builder(
            input_file=eval_file,
            seq_length=FLAGS.max_seq_length,
            is_training=False,
            drop_remainder=eval_drop_remainder)

        result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)

        output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
        with tf.gfile.GFile(output_eval_file, "w") as writer:
            tf.logging.info("***** Eval results *****")
            for key in sorted(result.keys()):
                tf.logging.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
                # output hyperparameter values and results
                print('\t'.join([
                    str(i) for i in [
                        'DEV-RESULT', 'init_checkpoint', FLAGS.init_checkpoint,
                        'data_dir', FLAGS.data_dir, 'max_seq_length',
                        FLAGS.max_seq_length, 'batch_size',
                        FLAGS.train_batch_size, 'learning_rate',
                        FLAGS.learning_rate, 'num_train_epochs',
                        FLAGS.num_train_epochs, key, result[key]
                    ]
                ]))

    if FLAGS.do_predict:
        predict_examples = processor.get_test_examples(FLAGS.data_dir)
        num_actual_predict_examples = len(predict_examples)
        if FLAGS.use_tpu:
            # TPU requires a fixed batch size for all batches, therefore the number
            # of examples must be a multiple of the batch size, or else examples
            # will get dropped. So we pad with fake examples which are ignored
            # later on.
            while len(predict_examples) % FLAGS.predict_batch_size != 0:
                predict_examples.append(PaddingInputExample())

        predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
        file_based_convert_examples_to_features(predict_examples, label_list,
                                                FLAGS.max_seq_length,
                                                tokenizer, predict_file)

        tf.logging.info("***** Running prediction*****")
        tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                        len(predict_examples), num_actual_predict_examples,
                        len(predict_examples) - num_actual_predict_examples)
        tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)

        predict_drop_remainder = True if FLAGS.use_tpu else False
        predict_input_fn = file_based_input_fn_builder(
            input_file=predict_file,
            seq_length=FLAGS.max_seq_length,
            is_training=False,
            drop_remainder=predict_drop_remainder)

        result = estimator.predict(input_fn=predict_input_fn)

        output_predict_file = os.path.join(FLAGS.output_dir,
                                           "test_results.tsv")
        with tf.gfile.GFile(output_predict_file, "w") as writer:
            num_written_lines = 0
            tf.logging.info("***** Predict results *****")
            for (i, prediction) in enumerate(result):
                probabilities = prediction["probabilities"]
                if i >= num_actual_predict_examples:
                    break
                output_line = "\t".join(
                    str(class_probability)
                    for class_probability in probabilities) + "\n"
                writer.write(output_line)
                num_written_lines += 1
        assert num_written_lines == num_actual_predict_examples
Example #4
def main(_):  # NOQA
    parsed_args, hparams = parse_args_and_hparams()
    tf.logging.set_verbosity(tf.logging.INFO)

    bert_config = modeling.BertConfig.from_json_file(hparams.bert_config_file)

    tf.gfile.MakeDirs(parsed_args.output_dir)

    if hparams.data_regime == "full":
        train_file = "train"
    elif hparams.data_regime == "10":
        train_file = "train_10"
    elif hparams.data_regime == "30":
        train_file = "train_30"
    else:
        raise ValueError(f"Invalid data regime: {hparams.data_regime}")

    processor = _IntentProcessor(parsed_args.data_dir, hparams.task,
                                 train_file)

    label_list = processor.get_labels()

    tokenizer = tokenization.FullTokenizer(vocab_file=hparams.vocab_file,
                                           do_lower_case=True)

    tpu_cluster_resolver = None
    if hparams.use_tpu and hparams.tpu_name:
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
            hparams.tpu_name,
            zone=hparams.tpu_zone,
            project=hparams.gcp_project)

    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2

    run_config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        master=None,
        model_dir=parsed_args.output_dir,
        save_checkpoints_steps=hparams.save_checkpoint_steps,
        tpu_config=tf.contrib.tpu.TPUConfig(
            iterations_per_loop=hparams.iterations_per_loop,
            num_shards=hparams.num_tpu_cores,
            per_host_input_for_training=is_per_host))

    train_examples = None
    num_train_steps = None
    num_warmup_steps = None
    if hparams.do_train:
        train_examples = processor.get_train_examples(parsed_args.data_dir)
        num_train_steps = int(
            len(train_examples) / hparams.batch_size * hparams.epochs)
        num_warmup_steps = int(num_train_steps * hparams.warmup_proportion)

    model_fn = model_fn_builder(bert_config=bert_config,
                                num_labels=len(label_list),
                                init_checkpoint=hparams.init_checkpoint,
                                learning_rate=hparams.learning_rate,
                                num_train_steps=num_train_steps,
                                num_warmup_steps=num_warmup_steps,
                                use_tpu=hparams.use_tpu,
                                use_one_hot_embeddings=hparams.use_tpu)

    # If TPU is not available, this will fall back to normal Estimator on CPU
    # or GPU.
    estimator = tf.contrib.tpu.TPUEstimator(
        use_tpu=hparams.use_tpu,
        model_fn=model_fn,
        config=run_config,
        train_batch_size=hparams.batch_size,
        eval_batch_size=_EVAL_BATCH_SIZE,
        predict_batch_size=_EVAL_BATCH_SIZE)

    if hparams.do_train:
        train_file = os.path.join(parsed_args.output_dir, "train.tf_record")
        file_based_convert_examples_to_features(train_examples, label_list,
                                                hparams.max_seq_length,
                                                tokenizer, train_file)
        tf.logging.info("***** Running training *****")
        tf.logging.info("  Num examples = %d", len(train_examples))
        tf.logging.info("  Batch size = %d", hparams.batch_size)
        tf.logging.info("  Num steps = %d", num_train_steps)
        train_input_fn = _file_based_input_fn_builder(
            input_file=train_file,
            seq_length=hparams.max_seq_length,
            is_training=True,
            drop_remainder=True)
        estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)

    if hparams.do_eval:
        eval_examples = processor.get_dev_examples(parsed_args.data_dir)
        num_actual_eval_examples = len(eval_examples)
        if hparams.use_tpu:
            # TPU requires a fixed batch size for all batches, therefore the
            # number of examples must be a multiple of the batch size, or else
            # examples will get dropped. So we pad with fake examples which are
            # ignored later on. These do NOT count towards the metric (all
            # tf.metrics support a per-instance weight, and these get a weight
            # of 0.0).
            while len(eval_examples) % _EVAL_BATCH_SIZE != 0:
                eval_examples.append(PaddingInputExample())

        eval_file = os.path.join(parsed_args.output_dir, "eval.tf_record")
        file_based_convert_examples_to_features(eval_examples, label_list,
                                                hparams.max_seq_length,
                                                tokenizer, eval_file)

        tf.logging.info("***** Running evaluation *****")
        tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                        len(eval_examples), num_actual_eval_examples,
                        len(eval_examples) - num_actual_eval_examples)
        tf.logging.info("  Batch size = %d", _EVAL_BATCH_SIZE)

        # This tells the estimator to run through the entire set.
        eval_steps = None
        # However, if running eval on the TPU, you will need to specify the
        # number of steps.
        if hparams.use_tpu:
            assert len(eval_examples) % _EVAL_BATCH_SIZE == 0
            eval_steps = int(len(eval_examples) // _EVAL_BATCH_SIZE)

        eval_drop_remainder = True if hparams.use_tpu else False
        eval_input_fn = _file_based_input_fn_builder(
            input_file=eval_file,
            seq_length=hparams.max_seq_length,
            is_training=False,
            drop_remainder=eval_drop_remainder)

        result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)

        output_eval_file = os.path.join(parsed_args.output_dir,
                                        "eval_results.txt")
        with tf.gfile.GFile(output_eval_file, "w") as writer:
            tf.logging.info("***** Eval results *****")
            for key in sorted(result.keys()):
                tf.logging.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
Example #5
def finetune(config, bucket_path, input_dir, model_dir, output_dir, vocab_file,
             tpu_name):
    processor = run_classifier.MrpcProcessor()
    label_list = processor.get_labels()

    BERT_GCS_DIR = "{}/{}".format(bucket_path, model_dir)
    BERT_DATA_DIR = "{}/{}".format(bucket_path, input_dir)
    BERT_OUT_DIR = "{}/{}".format(bucket_path, output_dir)
    INIT_CHECKPOINT = tf.train.latest_checkpoint(BERT_GCS_DIR)
    CONFIG_FILE = os.path.join(BERT_GCS_DIR, "bert_config.json")
    VOCAB_FILE = os.path.join(BERT_GCS_DIR, vocab_file)

    # Compute number of train and warmup steps from batch size
    train_examples = processor.get_train_examples(BERT_DATA_DIR)
    num_train_steps = int(
        len(train_examples) / int(config["train_batch_size"]) *
        float(config["num_train_epochs"]))
    num_warmup_steps = int(num_train_steps *
                           float(config["warmup_proportion"]))
    train_batch_size = int(config["train_batch_size"])
    eval_batch_size = int(config["eval_batch_size"])
    predict_batch_size = int(config["predict_batch_size"])
    do_lowercase = bool(config["do_lowercase"])
    max_sequence_length = int(config["max_seq_length"])

    bert_config = modeling.BertConfig.from_json_file(CONFIG_FILE)

    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        tpu_name)

    model_fn = run_classifier.model_fn_builder(
        bert_config=bert_config,
        num_labels=len(label_list),
        init_checkpoint=INIT_CHECKPOINT,
        learning_rate=float(config["learning_rate"]),
        num_train_steps=num_train_steps,
        num_warmup_steps=num_warmup_steps,
        use_tpu=True,
        use_one_hot_embeddings=True)

    estimator = tf.contrib.tpu.TPUEstimator(
        use_tpu=True,
        model_fn=model_fn,
        config=get_run_config(BERT_OUT_DIR, tpu_cluster_resolver, config),
        train_batch_size=train_batch_size,
        eval_batch_size=eval_batch_size,
        predict_batch_size=predict_batch_size)

    tokenizer = tokenization.FullTokenizer(vocab_file=VOCAB_FILE,
                                           do_lower_case=do_lowercase)
    train_features = run_classifier.convert_examples_to_features(
        train_examples, label_list, max_sequence_length, tokenizer)

    print('***** Started training at {} *****'.format(datetime.datetime.now()))
    print('  Num examples = {}'.format(len(train_examples)))
    print('  Batch size = {}'.format(train_batch_size))
    tf.logging.info("  Num steps = %d", num_train_steps)
    train_input_fn = run_classifier.input_fn_builder(
        features=train_features,
        seq_length=max_sequence_length,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
    print('***** Finished training at {} *****'.format(
        datetime.datetime.now()))

    model_eval(estimator, processor, BERT_DATA_DIR, label_list,
               max_sequence_length, tokenizer, eval_batch_size, BERT_OUT_DIR)
    model_predict(estimator, processor, BERT_DATA_DIR, predict_batch_size,
                  label_list, max_sequence_length, tokenizer)
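
A sketch of the `config` mapping `finetune` expects, reconstructed from the keys it reads above; all values and paths are placeholders, and `get_run_config`, `model_eval`, and `model_predict` are assumed to live elsewhere in the same module:

# Placeholder configuration illustrating the keys finetune() consumes.
config = {
    "train_batch_size": "32",
    "eval_batch_size": "8",
    "predict_batch_size": "8",
    "num_train_epochs": "3.0",
    "warmup_proportion": "0.1",
    "learning_rate": "2e-5",
    "do_lowercase": True,  # note: bool() of any non-empty string is True, so pass a real bool
    "max_seq_length": "128",
}
finetune(config,
         bucket_path="gs://my-bucket",        # hypothetical bucket
         input_dir="glue/MRPC",               # MrpcProcessor expects MRPC-style .tsv files
         model_dir="uncased_L-12_H-768_A-12",
         output_dir="bert_output",
         vocab_file="vocab.txt",
         tpu_name="my-tpu")                   # hypothetical Cloud TPU name
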
Example #6
def classify(
    data_dir: str,  # The input data dir. Should contain the .tsv files (or other data files)
    bert_config_file: str,  # The config json file corresponding to the pre-trained BERT model.
    vocab_file: str,  # The vocabulary file that the BERT model was trained on.
    output_dir: str,  # The output directory where the model checkpoints will be written.

    # optional parameters
    task_name: str = 'customer_task',
    labels: list = None,  # a list of all labels
    init_checkpoint: str = None,  # Initial checkpoint (usually from a pre-trained BERT model).
    do_lower_case: bool = True,  # Whether to lower case the input text.
    # Should be True for uncased models and False for cased models.

    max_seq_length: int = 128,  # The maximum total input sequence length after WordPiece tokenization.
    # Sequences longer than this will be truncated, and sequences shorter than this will be padded.

    do_train: bool = False,  # Whether to run training.
    do_eval: bool = False,  # Whether to run eval on the dev set.
    do_predict: bool = False,  # Whether to run the model in inference mode on the test set.
    train_batch_size: int = 32,  # Total batch size for training.
    eval_batch_size: int = 8,  # Total batch size for eval.
    predict_batch_size: int = 5,  # Total batch size for predict.
    learning_rate: float = 5e-5,  # The initial learning rate for Adam.
    num_train_epochs: float = 3.0,  # Total number of training epochs to perform.
    warmup_proportion: float = 0.1,  # Proportion of training to perform linear learning rate warmup for.
    # E.g., 0.1 = 10% of training.

    save_checkpoints_steps: int = 1000,  # How often to save the model checkpoint.
    iterations_per_loop: int = 1000,  # How many steps to make in each estimator call.
    use_tpu: bool = False,  # Whether to use TPU or GPU/CPU.
    tpu_name: str = None,  # The Cloud TPU to use for training. This should be either the name
    # used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.

    tpu_zone: str = None,  # [Optional] GCE zone where the Cloud TPU is located. If not
    # specified, we will attempt to automatically detect it from metadata.

    gcp_project: str = None,  # [Optional] Project name for the Cloud TPU-enabled project. If not
    # specified, we will attempt to automatically detect the GCE project from metadata.

    master: str = None,  # [Optional] TensorFlow master URL.
    num_tpu_cores: int = 8  # Only used if `use_tpu` is True. Total number of TPU cores to use.
    ):
    tf.logging.set_verbosity(tf.logging.INFO)
    processors={
        "cola": brc.ColaProcessor,
        "mnli": brc.MnliProcessor,
        "mrpc": brc.MrpcProcessor,
        "xnli": brc.XnliProcessor,
    }
    # check that do_lower_case is consistent with init_checkpoint
    bert.tokenization.validate_case_matches_checkpoint(do_lower_case, init_checkpoint)
    # at least one of do_train, do_eval and do_predict must be True
    if not do_train and not do_eval and not do_predict:
        raise ValueError("At least one of 'do_train', 'do_eval' or 'do_predict' must be True.")
    # load the BERT model configuration file
    bert_config = bert.modeling.BertConfig.from_json_file(bert_config_file)
    # maximum sequence length must not exceed what the model was trained with
    if max_seq_length > bert_config.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence length %d because the BERT model was only trained up to sequence length %d" %
            (max_seq_length, bert_config.max_position_embeddings))
    # make sure the output directory exists
    tf.gfile.MakeDirs(output_dir)
    # choose the processor class based on the task name
    taskName = task_name.lower()
    if taskName == 'customer_task':
        processor = CUserLabelTaskProcessor(labels)
    else:
        if taskName not in processors:
            raise ValueError('Task not found: %s' % taskName)
        processor = processors[taskName]()
    label_list = processor.get_labels()
    # tokenizer setup
    tokenizer = bert.tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)
    # device (TPU) configuration
    tpu_cluster_resolver = None
    if use_tpu and tpu_name:
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
            tpu_name, zone=tpu_zone, project=gcp_project)
    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    run_config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        master=master,
        model_dir=output_dir,
        save_checkpoints_steps=save_checkpoints_steps,
        tpu_config=tf.contrib.tpu.TPUConfig(
            iterations_per_loop=iterations_per_loop,
            num_shards=num_tpu_cores,
            per_host_input_for_training=is_per_host))
    # training parameters
    train_examples = None
    num_train_steps = None
    num_warmup_steps = None
    if do_train:
        train_examples = processor.get_train_examples(data_dir)
        num_train_steps = int(len(train_examples) / train_batch_size * num_train_epochs)
        num_warmup_steps = int(num_train_steps * warmup_proportion)
    model_fn = brc.model_fn_builder(
        bert_config=bert_config,
        num_labels=len(label_list),
        init_checkpoint=init_checkpoint,
        learning_rate=learning_rate,
        num_train_steps=num_train_steps,
        num_warmup_steps=num_warmup_steps,
        use_tpu=use_tpu,
        use_one_hot_embeddings=use_tpu)
    # If TPU is not available, this will fall back to a normal Estimator on CPU or GPU.
    estimator = tf.contrib.tpu.TPUEstimator(
        use_tpu=use_tpu,
        model_fn=model_fn,
        config=run_config,
        train_batch_size=train_batch_size,
        eval_batch_size=eval_batch_size,
        predict_batch_size=predict_batch_size)
    if do_train:
        train_file = os.path.join(output_dir, "train.tf_record")
        brc.file_based_convert_examples_to_features(train_examples, label_list, max_seq_length, tokenizer, train_file)
        tf.logging.info("***** Running training *****")
        tf.logging.info("  Num examples = %d", len(train_examples))
        tf.logging.info("  Batch size = %d", train_batch_size)
        tf.logging.info("  Num steps = %d", num_train_steps)
        train_input_fn = brc.file_based_input_fn_builder(
            input_file=train_file, seq_length=max_seq_length, is_training=True, drop_remainder=True)
        estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
    if do_eval:
        eval_examples = processor.get_dev_examples(data_dir)
        num_actual_eval_examples = len(eval_examples)
        if use_tpu:
            # TPU requires a fixed batch size for all batches, therefore the number
            # of examples must be a multiple of the batch size, or else examples
            # will get dropped. So we pad with fake examples which are ignored
            # later on. These do NOT count towards the metric (all tf.metrics
            # support a per-instance weight, and these get a weight of 0.0).
            while len(eval_examples) % eval_batch_size != 0:
                eval_examples.append(brc.PaddingInputExample())
        eval_file = os.path.join(output_dir, "eval.tf_record")
        brc.file_based_convert_examples_to_features(eval_examples, label_list, max_seq_length, tokenizer, eval_file)
        tf.logging.info("***** Running evaluation *****")
        tf.logging.info("  Num examples = %d (%d actual, %d padding)",len(eval_examples), num_actual_eval_examples,len(eval_examples) - num_actual_eval_examples)
        tf.logging.info("  Batch size = %d", eval_batch_size)
        # This tells the estimator to run through the entire set.
        eval_steps = None
        # However, if running eval on the TPU, you will need to specify the number of steps.
        if use_tpu:
            assert len(eval_examples) % eval_batch_size == 0
            eval_steps = int(len(eval_examples) // eval_batch_size)
        eval_drop_remainder = True if use_tpu else False
        eval_input_fn = brc.file_based_input_fn_builder(
            input_file=eval_file, seq_length=max_seq_length, is_training=False,
            drop_remainder=eval_drop_remainder)
        result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
        output_eval_file = os.path.join(output_dir, "eval_results.txt")
        with tf.gfile.GFile(output_eval_file, "w") as writer:
            tf.logging.info("***** Eval results *****")
            for key in sorted(result.keys()):
                tf.logging.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
    if do_predict:
        predict_examples = processor.get_test_examples(data_dir)
        num_actual_predict_examples = len(predict_examples)
        if use_tpu:
            # TPU requires a fixed batch size for all batches, therefore the number
            # of examples must be a multiple of the batch size, or else examples
            # will get dropped. So we pad with fake examples which are ignored
            # later on.
            while len(predict_examples) % predict_batch_size != 0:
                predict_examples.append(brc.PaddingInputExample())
        predict_file = os.path.join(output_dir, "predict.tf_record")
        brc.file_based_convert_examples_to_features(predict_examples, label_list, max_seq_length, tokenizer, predict_file)
        tf.logging.info("***** Running prediction*****")
        tf.logging.info("  Num examples = %d (%d actual, %d padding)",len(predict_examples),
                       num_actual_predict_examples,len(predict_examples) - num_actual_predict_examples)
        tf.logging.info("  Batch size = %d", predict_batch_size)
        predict_drop_remainder = True if use_tpu else False
        predict_input_fn = brc.file_based_input_fn_builder(
            input_file=predict_file, seq_length=max_seq_length, is_training=False,
            drop_remainder=predict_drop_remainder)
        result = estimator.predict(input_fn=predict_input_fn)
        output_predict_file = os.path.join(output_dir, "test_results.tsv")
        with tf.gfile.GFile(output_predict_file, "w") as writer:
            num_written_lines = 0
            tf.logging.info("***** Predict results *****")
            for (i, prediction) in enumerate(result):
                probabilities = prediction["probabilities"]
                if i >= num_actual_predict_examples:
                    break
                output_line = "\t".join(str(class_probability)for class_probability in probabilities) + "\n"
                writer.write(output_line)
                num_written_lines += 1
        assert num_written_lines == num_actual_predict_examples
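
A minimal call sketch for `classify`, assuming `brc` is an alias for BERT's `run_classifier` module as used above; all paths and labels are placeholders:

# Hypothetical invocation: fine-tune and evaluate on a two-label custom task.
classify(
    data_dir="./data",                          # .tsv files for the custom task
    bert_config_file="./bert/bert_config.json",
    vocab_file="./bert/vocab.txt",
    output_dir="./output",
    task_name="customer_task",
    labels=["negative", "positive"],            # consumed by CUserLabelTaskProcessor
    init_checkpoint="./bert/bert_model.ckpt",
    do_train=True,
    do_eval=True,
    num_train_epochs=3.0)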