def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    # pylint: disable=no-member
    """The `model_fn` for Estimator.

    Builds a BERT classifier (hard + soft labels) over the feature batch,
    optionally warm-starts from `init_checkpoint`, and returns the
    `tf.estimator.EstimatorSpec` matching the requested mode.
    Free variables (`bert_config`, `init_checkpoint`, `learning_rate`,
    `num_train_steps`, `num_warmup_steps`, `use_one_hot_embeddings`,
    `FLAGS`) are captured from the enclosing builder.
    """
    tf.logging.info("*** Features ***")
    for feature_name in sorted(features):
        tf.logging.info("  name = %s, shape = %s",
                        feature_name, features[feature_name].shape)

    is_training = mode == tf.estimator.ModeKeys.TRAIN

    total_loss, probabilities = create_model(
        bert_config,
        is_training,
        features["input_ids"],
        features["input_mask"],
        features["segment_ids"],
        features["hard_label_ids"],
        features["soft_label_ids"],
        use_one_hot_embeddings)

    tvars = tf.trainable_variables()
    initialized_variable_names = {}
    if init_checkpoint:
        assignment_map, initialized_variable_names = (
            modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint))
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
        # Tag the variables that were restored from the checkpoint.
        suffix = ", *INIT_FROM_CKPT*" if var.name in initialized_variable_names else ""
        tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape, suffix)

    if mode == tf.estimator.ModeKeys.TRAIN:
        train_op = custom_optimization.create_optimizer(
            total_loss, learning_rate, num_train_steps, num_warmup_steps,
            False, None, FLAGS.use_fp16)
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=total_loss,
                                          train_op=train_op)
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode=mode, loss=total_loss)
    # PREDICT: only the class probabilities are exported.
    return tf.estimator.EstimatorSpec(
        mode=mode, predictions={"probabilities": probabilities})
Example #2
0
        def model_fn(features, labels, mode, params):
            # pylint: disable=unused-argument
            """The `model_fn` for Estimator.

            Classifies token sequences with `model.classifier_model` and
            always warm-starts from the checkpoint under data/models/.
            """
            input_ids = features['input_ids']
            label_ids = features['label_ids']

            # Dropout is active only while training; eval/predict use 0.
            # NOTE(review): this mutates the shared `self.hparams` object as a
            # side effect of graph construction before classifier_model reads
            # it — confirm model_fn is built once per mode.
            if mode == tf.estimator.ModeKeys.TRAIN:
                self.hparams.dropout_rate = FLAGS.dropout_rate
            else:
                self.hparams.dropout_rate = 0.

            total_loss, probabilities = model.classifier_model(
                hparams=self.hparams,
                input_tokens=input_ids,
                labels=label_ids,
                past=None,
                reuse=tf.AUTO_REUSE)

            # Warm-start every trainable variable that matches the
            # pretrained checkpoint (unconditional: no `if init_checkpoint`).
            tvars = tf.trainable_variables()
            init_checkpoint = os.path.join('data', 'models', FLAGS.model_name)
            (assignment_map, initialized_variable_names
             ) = model.get_assignment_map_from_checkpoint(
                 tvars, init_checkpoint)
            tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

            tf.logging.info('**** Trainable Variables ****')
            for var in tvars:
                init_string = ''
                if var.name in initialized_variable_names:
                    init_string = ', *INIT_FROM_CKPT*'
                tf.logging.info('  name = %s, shape = %s%s', var.name,
                                var.shape, init_string)

            if mode == tf.estimator.ModeKeys.TRAIN:
                train_op = custom_optimization.create_optimizer(
                    total_loss, learning_rate, num_train_steps,
                    num_warmup_steps, False, None, FLAGS.use_fp16)

                output_spec = tf.estimator.EstimatorSpec(mode=mode,
                                                         loss=total_loss,
                                                         train_op=train_op)
            elif mode == tf.estimator.ModeKeys.EVAL:
                output_spec = tf.estimator.EstimatorSpec(mode=mode,
                                                         loss=total_loss)
            else:
                # PREDICT: export only the class probabilities.
                output_spec = tf.estimator.EstimatorSpec(
                    mode=mode, predictions={"probabilities": probabilities})
            return output_spec
    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator.

        Builds a BERT classifier and returns either a `TPUEstimatorSpec`
        (TPU / single-GPU path) or a plain `EstimatorSpec` when running
        multi-GPU (`use_gpu` and `num_gpu_cores >= 2`). Free variables
        (`bert_config`, `init_checkpoint`, `num_labels`, `fp16`,
        `weight_list`, ...) are captured from the enclosing builder.
        """

        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))

        input_ids = features["input_ids"]
        input_mask = features["input_mask"]
        segment_ids = features["segment_ids"]
        label_ids = features["label_ids"]
        # Padding examples (added to fill a fixed TPU batch) get weight 0
        # so they do not contribute to the eval metrics.
        is_real_example = None
        if "is_real_example" in features:
            is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
        else:
            is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)

        is_training = (mode == tf.estimator.ModeKeys.TRAIN)

        (total_loss, per_example_loss, logits, probabilities) = create_model(
            bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
            num_labels, use_one_hot_embeddings, fp16, weight_list)

        tvars = tf.trainable_variables()
        initialized_variable_names = {}
        scaffold_fn = None
        if init_checkpoint:
            (assignment_map, initialized_variable_names
             ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
            if use_tpu:
                # On TPU the checkpoint restore must happen inside the
                # Scaffold so it runs on the worker, not the coordinator.
                def tpu_scaffold():
                    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
                    return tf.train.Scaffold()

                scaffold_fn = tpu_scaffold
            else:
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

        tf.logging.info("**** Trainable Variables ****")
        for var in tvars:
            init_string = ""
            if var.name in initialized_variable_names:
                init_string = ", *INIT_FROM_CKPT*"
            tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                            init_string)

        is_multi_gpu = use_gpu and int(num_gpu_cores) >= 2
        if mode == tf.estimator.ModeKeys.TRAIN:
            if is_multi_gpu:
                train_op = custom_optimization.create_optimizer(
                    total_loss, learning_rate, num_train_steps, num_warmup_steps, fp16=fp16)
                # scaffold_fn is None on the GPU path (only set under
                # use_tpu), so passing it as `scaffold` is harmless.
                output_spec = tf.estimator.EstimatorSpec(
                    mode=mode,
                    loss=total_loss,
                    train_op=train_op,
                    scaffold=scaffold_fn)
            else:
                train_op = optimization.create_optimizer(
                    total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, fp16=fp16)
                output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                    mode=mode,
                    loss=total_loss,
                    train_op=train_op,
                    scaffold_fn=scaffold_fn)

        elif mode == tf.estimator.ModeKeys.EVAL:

            def metric_fn(per_example_loss, label_ids, logits, is_real_example):
                """Eval metrics; weighted so padding examples are ignored."""
                predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
                accuracy = tf.metrics.accuracy(
                    labels=label_ids, predictions=predictions, weights=is_real_example)
                # add more metrics
                pr, pr_op = tf.metrics.precision(
                    labels=label_ids, predictions=predictions, weights=is_real_example)
                re, re_op = tf.metrics.recall(
                    labels=label_ids, predictions=predictions, weights=is_real_example)
                # f1-score for binary classification. BUG FIX: the epsilon
                # keeps the division from producing NaN when pr + re == 0
                # (e.g. before the first update or with no positives).
                f1 = (2 * pr * re) / (pr + re + 1e-12)
                loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
                return {
                    "eval_accuracy": accuracy,
                    "eval_precision": (pr, pr_op),
                    "eval_recall": (re, re_op),
                    # f1 is derived from the precision/recall variables above;
                    # tf.identity(f1) is a no-op update so the tuple matches
                    # the (value, update_op) metric contract.
                    "eval_f1score": (f1, tf.identity(f1)),
                    "eval_loss": loss
                }

            eval_metrics = (metric_fn,
                            [per_example_loss, label_ids, logits, is_real_example])
            # eval on single-gpu only
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=total_loss,
                eval_metrics=eval_metrics,
                scaffold_fn=scaffold_fn)
        else:
            # PREDICT: export only the class probabilities.
            if is_multi_gpu:
                output_spec = tf.estimator.EstimatorSpec(
                    mode=mode,
                    predictions={"probabilities": probabilities})
            else:
                output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                    mode=mode,
                    predictions={"probabilities": probabilities},
                    scaffold_fn=scaffold_fn)

        return output_spec
Example #4
0
    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator.

        BERT classifier with per-step loss logging hooks. Returns a
        `TPUEstimatorSpec` on the TPU/single-GPU path and a plain
        `EstimatorSpec` when running multi-GPU (`use_gpu` and
        `num_gpu_cores >= 2`). Free variables (`bert_config`,
        `init_checkpoint`, `num_labels`, `weight_list`, ...) are captured
        from the enclosing builder.
        """

        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info("  name = %s, shape = %s" %
                            (name, features[name].shape))

        input_ids = features["input_ids"]
        input_mask = features["input_mask"]
        segment_ids = features["segment_ids"]
        label_ids = features["label_ids"]
        # Weight 0 for TPU padding examples so they are excluded from
        # eval metrics; weight 1 for all examples otherwise.
        is_real_example = None
        if "is_real_example" in features:
            is_real_example = tf.cast(features["is_real_example"],
                                      dtype=tf.float32)
        else:
            is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)

        is_training = (mode == tf.estimator.ModeKeys.TRAIN)

        (total_loss, per_example_loss, logits, probabilities) = create_model(
            bert_config, is_training, input_ids, input_mask, segment_ids,
            label_ids, num_labels, use_one_hot_embeddings, weight_list)

        tvars = tf.trainable_variables()
        initialized_variable_names = {}
        scaffold_fn = None
        if init_checkpoint:
            (assignment_map, initialized_variable_names
             ) = modeling.get_assignment_map_from_checkpoint(
                 tvars, init_checkpoint)
            if use_tpu:
                # On TPU the checkpoint restore must run inside the Scaffold
                # so it executes on the worker rather than the coordinator.
                def tpu_scaffold():
                    tf.train.init_from_checkpoint(init_checkpoint,
                                                  assignment_map)
                    return tf.train.Scaffold()

                scaffold_fn = tpu_scaffold
            else:
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

        tf.logging.info("**** Trainable Variables ****")
        for var in tvars:
            init_string = ""
            if var.name in initialized_variable_names:
                init_string = ", *INIT_FROM_CKPT*"
            tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                            init_string)

        output_spec = None
        is_multi_gpu = use_gpu and int(num_gpu_cores) >= 2
        if mode == tf.estimator.ModeKeys.TRAIN:
            if is_multi_gpu:
                train_op = custom_optimization.create_optimizer(
                    total_loss, learning_rate, num_train_steps,
                    num_warmup_steps)
                tensors_to_log = {'train loss': total_loss}
                logging_hook = tf.train.LoggingTensorHook(
                    tensors=tensors_to_log, every_n_iter=1)
                output_spec = tf.estimator.EstimatorSpec(
                    mode=mode,
                    loss=total_loss,
                    train_op=train_op,
                    training_hooks=[logging_hook],
                    scaffold=scaffold_fn)
            else:
                # output loss during training https://github.com/google-research/bert/issues/70
                # NOTE(review): relies on a locally modified
                # optimization.create_optimizer that also returns the new
                # global step — confirm against that module's signature.
                train_op, new_global_step = optimization.create_optimizer(
                    total_loss, learning_rate, num_train_steps,
                    num_warmup_steps, use_tpu)
                tensors_to_log = {
                    'train loss': total_loss,
                    'global step': new_global_step
                }
                logging_hook = tf.train.LoggingTensorHook(
                    tensors=tensors_to_log, every_n_iter=1)
                output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                    mode=mode,
                    loss=total_loss,
                    train_op=train_op,
                    training_hooks=[logging_hook],
                    scaffold_fn=scaffold_fn)
        elif mode == tf.estimator.ModeKeys.EVAL:

            def metric_fn(per_example_loss, label_ids, logits,
                          is_real_example):
                """Eval metrics weighted so padding rows are ignored."""
                predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
                accuracy = tf.metrics.accuracy(labels=label_ids,
                                               predictions=predictions,
                                               weights=is_real_example)
                loss = tf.metrics.mean(values=per_example_loss,
                                       weights=is_real_example)
                # BUG FIX: tf.metrics.mean_per_class_accuracy requires the
                # `num_classes` argument; without it graph construction fails
                # with a TypeError. `num_labels` comes from the closure.
                mean_per_class_accuracy = tf.metrics.mean_per_class_accuracy(
                    labels=label_ids,
                    predictions=predictions,
                    num_classes=num_labels,
                    weights=is_real_example)
                return {
                    "eval_accuracy": accuracy,
                    "eval_loss": loss,
                    "mean_per_class_accuracy": mean_per_class_accuracy
                }

            eval_metrics = (metric_fn, [
                per_example_loss, label_ids, logits, is_real_example
            ])
            # eval on single-gpu only
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=total_loss,
                eval_metrics=eval_metrics,
                scaffold_fn=scaffold_fn)
        else:
            # PREDICT: export only the class probabilities.
            if is_multi_gpu:
                output_spec = tf.estimator.EstimatorSpec(
                    mode=mode, predictions={"probabilities": probabilities})
            else:
                output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                    mode=mode,
                    predictions={"probabilities": probabilities},
                    scaffold_fn=scaffold_fn)
        return output_spec
Example #5
0
    def model_fn(
        features, labels, mode, params
    ):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator.

        Sequence-prediction head on top of a multilingual BERT encoder,
        with a tensor2tensor-style masked loss. Supports TRAIN and EVAL
        only; any other mode raises ValueError. Free variables
        (`init_checkpoint`, `learning_rate`, `FLAGS`, ...) come from the
        enclosing builder.
        """

        tf.logging.info('*** Features ***')
        for name in sorted(features.keys()):
            tf.logging.info(
                '  name = %s, shape = %s' % (name, features[name].shape)
            )

        input_ids = features['input_ids']
        input_mask = features['input_mask']
        segment_ids = features['segment_ids']
        y = features['y']

        is_training = mode == tf.estimator.ModeKeys.TRAIN

        model = multilanguagebert_transformer.Model(
            is_training = is_training,
            input_ids = input_ids,
            input_mask = input_mask,
            token_type_ids = segment_ids,
            Y = y,
        )
        o = model.get_sequence_output()
        # Mask out padding positions of the target sequence `y`
        # (nonzero tokens count toward the per-example length).
        Y_seq_len = tf.count_nonzero(y, 1, dtype = tf.int32)
        masks = tf.sequence_mask(Y_seq_len, tf.shape(y)[1], dtype = tf.float32)
        # t2t's _loss_single expects extra singleton axes on both the
        # logits and the targets, hence the double expand_dims.
        logits = tf.expand_dims(tf.expand_dims(o, axis = 2), axis = 2)
        feature = tf.expand_dims(tf.expand_dims(y, axis = 2), axis = 2)
        loss_num, loss_denom = model_t2t._loss_single(
            logits, 'targets', feature, weights = masks
        )
        total_loss = loss_num / loss_denom

        # Token-level accuracy over the non-padding positions only.
        y_t = tf.argmax(o, axis = 2)
        y_t = tf.cast(y_t, tf.int32)
        prediction = tf.boolean_mask(y_t, masks)
        mask_label = tf.boolean_mask(y, masks)
        correct_pred = tf.equal(prediction, mask_label)
        total_accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

        tvars = tf.trainable_variables()
        initialized_variable_names = {}
        scaffold_fn = None
        if init_checkpoint:
            (
                assignment_map,
                initialized_variable_names,
            ) = get_assignment_map_from_checkpoint(tvars, init_checkpoint)
            print(initialized_variable_names)
            if use_tpu:
                # On TPU the restore has to run inside the Scaffold so it
                # executes on the worker rather than the coordinator.
                def tpu_scaffold():
                    tf.train.init_from_checkpoint(
                        init_checkpoint, assignment_map
                    )
                    return tf.train.Scaffold()

                scaffold_fn = tpu_scaffold
            else:
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

        tf.logging.info('**** Trainable Variables ****')
        for var in tvars:
            init_string = ''
            if var.name in initialized_variable_names:
                init_string = ', *INIT_FROM_CKPT*'
            tf.logging.info(
                '  name = %s, shape = %s%s', var.name, var.shape, init_string
            )

        output_spec = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            if FLAGS.use_gpu and int(FLAGS.num_gpu_cores) >= 2:
                train_op = custom_optimization.create_optimizer(
                    total_loss, learning_rate, num_train_steps, num_warmup_steps
                )
            else:
                train_op = optimization.create_optimizer(
                    total_loss,
                    learning_rate,
                    num_train_steps,
                    num_warmup_steps,
                    use_tpu,
                )
            if FLAGS.use_gpu and int(FLAGS.num_gpu_cores) >= 2:
                # scaffold_fn is None on the GPU path (only assigned under
                # use_tpu), so passing it as `scaffold` is harmless here.
                output_spec = tf.estimator.EstimatorSpec(
                    mode = mode,
                    loss = total_loss,
                    train_op = train_op,
                    scaffold = scaffold_fn,
                )
            else:
                output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                    mode = mode,
                    loss = total_loss,
                    train_op = train_op,
                    scaffold_fn = scaffold_fn,
                )
        elif mode == tf.estimator.ModeKeys.EVAL:

            def metric_fn(loss, accuracy):
                return {'total_loss': loss, 'total_accuracy': accuracy}

            eval_metrics = (metric_fn, [total_loss, total_accuracy])
            if FLAGS.use_gpu and int(FLAGS.num_gpu_cores) >= 2:
                # BUG FIX: tf.estimator.EstimatorSpec has no `eval_metrics`
                # argument (that is TPUEstimatorSpec-only); passing it raised
                # a TypeError. EstimatorSpec takes `eval_metric_ops`, a dict
                # of (value, update_op) metric tuples, so wrap the scalar
                # tensors with tf.metrics.mean to average over eval batches.
                output_spec = tf.estimator.EstimatorSpec(
                    mode = mode,
                    loss = total_loss,
                    eval_metric_ops = {
                        'total_loss': tf.metrics.mean(total_loss),
                        'total_accuracy': tf.metrics.mean(total_accuracy),
                    },
                    scaffold = scaffold_fn,
                )
            else:
                output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                    mode = mode,
                    loss = total_loss,
                    eval_metrics = eval_metrics,
                    scaffold_fn = scaffold_fn,
                )
        else:
            raise ValueError(
                'Only TRAIN and EVAL modes are supported: %s' % (mode)
            )

        return output_spec
Example #6
0
    def model_fn(
        features, labels, mode, params
    ):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator.

        ALBERT pretraining: total loss = masked-LM loss + sentence-order
        -prediction loss. Supports TRAIN and EVAL only; any other mode
        raises ValueError. Free variables (`albert_config`,
        `init_checkpoint`, `use_tpu`, `learning_rate`, `FLAGS`, ...) are
        captured from the enclosing builder.
        """

        tf.logging.info('*** Features ***')
        for name in sorted(features.keys()):
            tf.logging.info(
                '  name = %s, shape = %s' % (name, features[name].shape)
            )

        input_ids = features['input_ids']
        input_mask = features['input_mask']
        segment_ids = features['segment_ids']
        masked_lm_positions = features['masked_lm_positions']
        masked_lm_ids = features['masked_lm_ids']
        masked_lm_weights = features['masked_lm_weights']
        # Note: We keep this feature name `next_sentence_labels` to be compatible
        # with the original data created by lanzhzh@. However, in the ALBERT case
        # it does represent sentence_order_labels.
        sentence_order_labels = features['next_sentence_labels']

        is_training = mode == tf.estimator.ModeKeys.TRAIN

        model = modeling.AlbertModel(
            config = albert_config,
            is_training = is_training,
            input_ids = input_ids,
            input_mask = input_mask,
            token_type_ids = segment_ids,
            use_one_hot_embeddings = use_one_hot_embeddings,
        )

        # Masked-LM head: predicts token ids at the masked positions,
        # reusing the input embedding table as the output projection.
        (
            masked_lm_loss,
            masked_lm_example_loss,
            masked_lm_log_probs,
        ) = get_masked_lm_output(
            albert_config,
            model.get_sequence_output(),
            model.get_embedding_table(),
            masked_lm_positions,
            masked_lm_ids,
            masked_lm_weights,
        )

        # Sentence-order head: classification on the pooled [CLS] output.
        (
            sentence_order_loss,
            sentence_order_example_loss,
            sentence_order_log_probs,
        ) = get_sentence_order_output(
            albert_config, model.get_pooled_output(), sentence_order_labels
        )

        total_loss = masked_lm_loss + sentence_order_loss

        tvars = tf.trainable_variables()

        initialized_variable_names = {}
        scaffold_fn = None
        if init_checkpoint:
            tf.logging.info(
                'number of hidden group %d to initialize',
                albert_config.num_hidden_groups,
            )
            # With init_from_group0, one assignment map per hidden group
            # (or per layer, for net_structure_type > 0) is built so that
            # group-0 checkpoint weights seed every group.
            num_of_initialize_group = 1
            if FLAGS.init_from_group0:
                num_of_initialize_group = albert_config.num_hidden_groups
                if albert_config.net_structure_type > 0:
                    num_of_initialize_group = albert_config.num_hidden_layers
            (
                assignment_map,
                initialized_variable_names,
            ) = modeling.get_assignment_map_from_checkpoint(
                tvars, init_checkpoint, num_of_initialize_group
            )
            if use_tpu:
                # On TPU the restore must run inside the Scaffold so it
                # executes on the worker rather than the coordinator.
                def tpu_scaffold():
                    for gid in range(num_of_initialize_group):
                        tf.logging.info('initialize the %dth layer', gid)
                        tf.logging.info(assignment_map[gid])
                        tf.train.init_from_checkpoint(
                            init_checkpoint, assignment_map[gid]
                        )
                    return tf.train.Scaffold()

                scaffold_fn = tpu_scaffold
            else:
                for gid in range(num_of_initialize_group):
                    tf.logging.info('initialize the %dth layer', gid)
                    tf.logging.info(assignment_map[gid])
                    tf.train.init_from_checkpoint(
                        init_checkpoint, assignment_map[gid]
                    )

        tf.logging.info('**** Trainable Variables ****')
        for var in tvars:
            init_string = ''
            if var.name in initialized_variable_names:
                init_string = ', *INIT_FROM_CKPT*'
            tf.logging.info(
                '  name = %s, shape = %s%s', var.name, var.shape, init_string
            )

        output_spec = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            train_op = optimization.create_optimizer(
                total_loss, learning_rate, num_train_steps, num_warmup_steps
            )
            # NOTE(review): `scaffold` expects a tf.train.Scaffold instance,
            # but scaffold_fn here is either None or a callable. On the
            # non-TPU path it is always None (harmless); confirm the TPU
            # path never reaches this branch with a callable.
            output_spec = tf.estimator.EstimatorSpec(
                mode = mode,
                loss = total_loss,
                train_op = train_op,
                scaffold = scaffold_fn,
            )

        elif mode == tf.estimator.ModeKeys.EVAL:

            def metric_fn(*args):
                """Computes the loss and accuracy of the model."""
                (
                    masked_lm_example_loss,
                    masked_lm_log_probs,
                    masked_lm_ids,
                    masked_lm_weights,
                    sentence_order_example_loss,
                    sentence_order_log_probs,
                    sentence_order_labels,
                ) = args[:7]

                # Flatten to [num_masked_positions, vocab] so accuracy is
                # computed per masked position, weighted by the LM weights.
                masked_lm_log_probs = tf.reshape(
                    masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]]
                )
                masked_lm_predictions = tf.argmax(
                    masked_lm_log_probs, axis = -1, output_type = tf.int32
                )
                masked_lm_example_loss = tf.reshape(
                    masked_lm_example_loss, [-1]
                )
                masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
                masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
                masked_lm_accuracy = tf.metrics.accuracy(
                    labels = masked_lm_ids,
                    predictions = masked_lm_predictions,
                    weights = masked_lm_weights,
                )
                masked_lm_mean_loss = tf.metrics.mean(
                    values = masked_lm_example_loss, weights = masked_lm_weights
                )

                metrics = {
                    'masked_lm_accuracy': masked_lm_accuracy,
                    'masked_lm_loss': masked_lm_mean_loss,
                }

                # Sentence-order accuracy is unweighted: every example in
                # the batch carries a sentence-order label.
                sentence_order_log_probs = tf.reshape(
                    sentence_order_log_probs,
                    [-1, sentence_order_log_probs.shape[-1]],
                )
                sentence_order_predictions = tf.argmax(
                    sentence_order_log_probs, axis = -1, output_type = tf.int32
                )
                sentence_order_labels = tf.reshape(sentence_order_labels, [-1])
                sentence_order_accuracy = tf.metrics.accuracy(
                    labels = sentence_order_labels,
                    predictions = sentence_order_predictions,
                )
                sentence_order_mean_loss = tf.metrics.mean(
                    values = sentence_order_example_loss
                )
                metrics.update(
                    {
                        'sentence_order_accuracy': sentence_order_accuracy,
                        'sentence_order_loss': sentence_order_mean_loss,
                    }
                )
                return metrics

            metric_values = [
                masked_lm_example_loss,
                masked_lm_log_probs,
                masked_lm_ids,
                masked_lm_weights,
                sentence_order_example_loss,
                sentence_order_log_probs,
                sentence_order_labels,
            ]

            eval_metrics = (metric_fn, metric_values)

            output_spec = contrib_tpu.TPUEstimatorSpec(
                mode = mode,
                loss = total_loss,
                eval_metrics = eval_metrics,
                scaffold_fn = scaffold_fn,
            )
        else:
            raise ValueError(
                'Only TRAIN and EVAL modes are supported: %s' % (mode)
            )

        return output_spec
    def model_fn(features, labels, mode, params, config=None):  # pylint: disable=unused-argument
        """
        The `model_fn` for TPUEstimator.
        模型有训练,验证和测试三种阶段,而且对于不同模式,对数据有不同的处理方式。例如在训练阶段,我们需要将数据喂给模型,
        模型基于输入数据给出预测值,然后我们在通过预测值和真实值计算出loss,最后用loss更新网络参数,
        而在评估阶段,我们则不需要反向传播更新网络参数,换句话说,mdoel_fn需要对三种模式设置三套代码。

        Args:
            features: dict of Tensor, This is batch_features from input_fn,`Tensor` or dict of `Tensor` (depends on data passed to `fit`
            labels: This is batch_labels from input_fn. features, labels是从输入函数input_fn中返回的特征和标签batch
            mode: An instance of tf.estimator.ModeKeys
            params: Additional configuration for hyper-parameters. 是一个字典,它可以传入许多参数用来构建网络或者定义训练方式等


        Return:
            tf.estimator.EstimatorSpec

        """

        print("features={}".format(features))
        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info("  name = %s, shape = %s" %
                            (name, features[name].shape))

        input_ids = features["input_ids"]
        label_ids = features["label_ids"]
        if "is_real_example" in features:
            # 类型强制转换为tf.float32
            is_real_example = tf.cast(features["is_real_example"],
                                      dtype=tf.float32)
        else:
            # 创建一个将所有元素都设置为1的张量Tensor.
            is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)

        # 根据mode判断是否为训练模式
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)

        # 基于特征数据创建模型,并计算loss等
        print("create_model:\ninput_ids={}".format(input_ids.shape))
        print("label_ids={}".format(label_ids.shape))
        (total_loss, per_example_loss, logits,
         probabilities) = create_model(bert_config, is_training, input_ids,
                                       None, None, label_ids, num_labels,
                                       use_one_hot_embeddings, None)

        print("total_loss={}".format(total_loss))
        tvars = tf.trainable_variables()
        initialized_variable_names = {}
        scaffold_fn = None
        if init_checkpoint:
            (assignment_map, initialized_variable_names
             ) = modeling.get_assignment_map_from_checkpoint(
                 tvars, init_checkpoint)
            if use_tpu:

                def tpu_scaffold():
                    tf.train.init_from_checkpoint(init_checkpoint,
                                                  assignment_map)
                    return tf.train.Scaffold()

                scaffold_fn = tpu_scaffold
            else:
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

        tf.logging.info("**** Trainable Variables ****")
        for var in tvars:
            init_string = ""
            if var.name in initialized_variable_names:
                init_string = ", *INIT_FROM_CKPT*"
            tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                            init_string)

        # 训练模式
        if mode == tf.estimator.ModeKeys.TRAIN:
            if FLAGS.num_gpu_cores > 1:
                train_op = custom_optimization.create_optimizer(
                    total_loss,
                    learning_rate,
                    num_train_steps,
                    num_warmup_steps,
                    fp16=fp16)

                output_spec = tf.estimator.EstimatorSpec(mode=mode,
                                                         loss=total_loss,
                                                         train_op=train_op,
                                                         scaffold=scaffold_fn)
            else:
                train_op = optimization.create_optimizer(
                    total_loss, learning_rate, num_train_steps,
                    num_warmup_steps, use_tpu)

                output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                    mode=mode,
                    loss=total_loss,
                    train_op=train_op,
                    scaffold_fn=scaffold_fn)
        # 评估模式
        elif mode == tf.estimator.ModeKeys.EVAL:

            def metric_fn(per_example_loss, label_ids, logits,
                          is_real_example):
                predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
                accuracy = tf.metrics.accuracy(labels=label_ids,
                                               predictions=predictions,
                                               weights=is_real_example)
                loss = tf.metrics.mean(values=per_example_loss,
                                       weights=is_real_example)
                # add more metrics
                pr, pr_op = tf.metrics.precision(labels=label_ids,
                                                 predictions=predictions,
                                                 weights=is_real_example)
                re, re_op = tf.metrics.recall(labels=label_ids,
                                              predictions=predictions,
                                              weights=is_real_example)
                # if FLAGS.classifier_mode == "multi-class":
                #     # multi-class
                #     # pr, pr_op = tf_metrics.precision(label_ids, predictions, num_labels, average="macro")
                #     # re, re_op = tf_metrics.recall(label_ids, predictions, num_labels, average="macro")
                #     f1 = tf_metrics.f1(label_ids, predictions, num_labels, average="macro")
                # else:
                #     # binary classifier
                #     f1 = tf.contrib.metrics.f1_score(label_ids, predictions)
                #     # f1, f1_op = (2 * pr * re) / (pr + re)  # f1-score for binary classification
                # 返回结果:dict: {key: value(tuple: (metric_tensor, update_op)) }
                return {
                    "eval_accuracy": accuracy,
                    "eval_loss": loss,
                    "eval_precision": (pr, pr_op),
                    "eval_recall": (re, re_op),
                    # "eval_f1": f1,
                }

            eval_metrics = (metric_fn, [
                per_example_loss, label_ids, logits, is_real_example
            ])
            if FLAGS.num_gpu_cores > 1:
                output_spec = tf.estimator.EstimatorSpec(
                    mode=mode,
                    loss=total_loss,
                    eval_metric_ops=metric_fn(per_example_loss, label_ids,
                                              logits, is_real_example),
                    scaffold=scaffold_fn,
                )
            else:
                # eval on single-gpu only
                output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                    mode=mode,
                    loss=total_loss,
                    eval_metrics=eval_metrics,
                    scaffold_fn=scaffold_fn)
        else:
            # tf.estimator.ModeKeys.PREDICT 预测模式
            # 基于logits计算最大的概率所在索引的label
            predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
            if FLAGS.num_gpu_cores > 1:
                # 多GPUs
                output_spec = tf.estimator.EstimatorSpec(mode=mode,
                                                         predictions={
                                                             "probabilities":
                                                             probabilities,
                                                             "predictions":
                                                             predictions
                                                         })
            else:
                output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                    mode=mode,
                    predictions={
                        "probabilities": probabilities,
                        "predictions": predictions
                    },
                    scaffold_fn=scaffold_fn)
        return output_spec
# ----- Example #8 -----
    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator (BERT pre-training).

        Builds the BERT graph, computes the masked-LM and next-sentence
        prediction losses, optionally restores variables from
        `init_checkpoint`, and returns a TRAIN or EVAL spec.  Any other
        mode raises ValueError.
        """

        tf.logging.info('*** Features ***')
        for name in sorted(features.keys()):
            tf.logging.info('  name = %s, shape = %s' %
                            (name, features[name].shape))

        input_ids = features['input_ids']
        input_mask = features['input_mask']
        segment_ids = features['segment_ids']
        masked_lm_positions = features['masked_lm_positions']
        masked_lm_ids = features['masked_lm_ids']
        masked_lm_weights = features['masked_lm_weights']
        next_sentence_labels = features['next_sentence_labels']

        is_training = mode == tf.estimator.ModeKeys.TRAIN

        model = modeling.BertModel(
            config=bert_config,
            is_training=is_training,
            input_ids=input_ids,
            input_mask=input_mask,
            token_type_ids=segment_ids,
            use_one_hot_embeddings=use_one_hot_embeddings,
        )

        # Masked-LM head: predicts the original token ids at the masked
        # positions.
        (
            masked_lm_loss,
            masked_lm_example_loss,
            masked_lm_log_probs,
        ) = get_masked_lm_output(
            bert_config,
            model.get_sequence_output(),
            model.get_embedding_table(),
            model.get_embedding_table_2(),
            masked_lm_positions,
            masked_lm_ids,
            masked_lm_weights,
        )

        # Next-sentence head: binary classification on the pooled output.
        (
            next_sentence_loss,
            next_sentence_example_loss,
            next_sentence_log_probs,
        ) = get_next_sentence_output(bert_config, model.get_pooled_output(),
                                     next_sentence_labels)

        total_loss = masked_lm_loss + next_sentence_loss

        tvars = tf.trainable_variables()

        initialized_variable_names = {}
        scaffold_fn = None
        if init_checkpoint:
            (
                assignment_map,
                initialized_variable_names,
            ) = modeling.get_assignment_map_from_checkpoint(
                tvars, init_checkpoint)
            if use_tpu:
                # On TPU the restore must happen inside the scaffold so it
                # runs on the worker rather than at graph-build time.
                def tpu_scaffold():
                    tf.train.init_from_checkpoint(init_checkpoint,
                                                  assignment_map)
                    return tf.train.Scaffold()

                scaffold_fn = tpu_scaffold
            else:
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

        tf.logging.info('**** Trainable Variables ****')
        for var in tvars:
            init_string = ''
            if var.name in initialized_variable_names:
                init_string = ', *INIT_FROM_CKPT*'
            tf.logging.info('  name = %s, shape = %s%s', var.name, var.shape,
                            init_string)

        # Multi-GPU runs use plain Estimator specs with the custom optimizer;
        # everything else (TPU or single device) uses TPUEstimatorSpec.
        multi_gpu = FLAGS.use_gpu and int(FLAGS.num_gpu_cores) >= 2

        output_spec = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            if multi_gpu:
                train_op = custom_optimization.create_optimizer(
                    total_loss, learning_rate, num_train_steps,
                    num_warmup_steps)
                # NOTE(review): scaffold_fn is None on this non-TPU path, so
                # passing it as `scaffold` is a no-op; if use_tpu were ever
                # combined with multi_gpu this would pass a callable where a
                # tf.train.Scaffold is expected.
                output_spec = tf.estimator.EstimatorSpec(
                    mode=mode,
                    loss=total_loss,
                    train_op=train_op,
                    scaffold=scaffold_fn,
                )
            else:
                train_op = optimization.create_optimizer(
                    total_loss,
                    learning_rate,
                    num_train_steps,
                    num_warmup_steps,
                    use_tpu,
                )
                output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                    mode=mode,
                    loss=total_loss,
                    train_op=train_op,
                    scaffold_fn=scaffold_fn,
                )
        elif mode == tf.estimator.ModeKeys.EVAL:

            def metric_fn(
                masked_lm_example_loss,
                masked_lm_log_probs,
                masked_lm_ids,
                masked_lm_weights,
                next_sentence_example_loss,
                next_sentence_log_probs,
                next_sentence_labels,
            ):
                """Computes the loss and accuracy of the model."""
                # Flatten the per-position MLM outputs so accuracy is
                # computed over all masked positions, weighted by
                # masked_lm_weights (0 for padding positions).
                masked_lm_log_probs = tf.reshape(
                    masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]])
                masked_lm_predictions = tf.argmax(masked_lm_log_probs,
                                                  axis=-1,
                                                  output_type=tf.int32)
                masked_lm_example_loss = tf.reshape(masked_lm_example_loss,
                                                    [-1])
                masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
                masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
                masked_lm_accuracy = tf.metrics.accuracy(
                    labels=masked_lm_ids,
                    predictions=masked_lm_predictions,
                    weights=masked_lm_weights,
                )
                masked_lm_mean_loss = tf.metrics.mean(
                    values=masked_lm_example_loss, weights=masked_lm_weights)

                next_sentence_log_probs = tf.reshape(
                    next_sentence_log_probs,
                    [-1, next_sentence_log_probs.shape[-1]],
                )
                next_sentence_predictions = tf.argmax(next_sentence_log_probs,
                                                      axis=-1,
                                                      output_type=tf.int32)
                next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
                next_sentence_accuracy = tf.metrics.accuracy(
                    labels=next_sentence_labels,
                    predictions=next_sentence_predictions,
                )
                next_sentence_mean_loss = tf.metrics.mean(
                    values=next_sentence_example_loss)

                return {
                    'masked_lm_accuracy': masked_lm_accuracy,
                    'masked_lm_loss': masked_lm_mean_loss,
                    'next_sentence_accuracy': next_sentence_accuracy,
                    'next_sentence_loss': next_sentence_mean_loss,
                }

            eval_metrics = (
                metric_fn,
                [
                    masked_lm_example_loss,
                    masked_lm_log_probs,
                    masked_lm_ids,
                    masked_lm_weights,
                    next_sentence_example_loss,
                    next_sentence_log_probs,
                    next_sentence_labels,
                ],
            )
            if multi_gpu:
                # BUG FIX: tf.estimator.EstimatorSpec has no `eval_metrics`
                # argument (that is TPUEstimatorSpec's API); it takes
                # `eval_metric_ops`, a dict of (metric_tensor, update_op)
                # pairs, so invoke metric_fn on the args here.
                output_spec = tf.estimator.EstimatorSpec(
                    mode=mode,
                    loss=total_loss,
                    eval_metric_ops=metric_fn(*eval_metrics[1]),
                    scaffold=scaffold_fn,
                )
            else:
                output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                    mode=mode,
                    loss=total_loss,
                    eval_metrics=eval_metrics,
                    scaffold_fn=scaffold_fn,
                )
        else:
            raise ValueError('Only TRAIN and EVAL modes are supported: %s' %
                             (mode))

        return output_spec
# ----- Example #9 -----
    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator (grammatical error correction).

        Builds the GEC model over an input sequence and its edit-sequence
        targets, then returns a TRAIN, EVAL, or PREDICT spec.

        Args (supplied by the Estimator framework):
            features: batch of feature tensors from `input_fn`.
            labels: unused; the edit targets arrive via `features`.
            mode: an instance of `tf.estimator.ModeKeys`.
            params: additional configuration (unused here).
        """

        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info("  name = %s, shape = %s" %
                            (name, features[name].shape))

        input_sequence = features["input_sequence"]
        input_mask = features["input_mask"]
        segment_ids = features["segment_ids"]
        edit_sequence = features["edit_sequence"]

        is_training = (mode == tf.estimator.ModeKeys.TRAIN)

        (total_loss, per_example_loss,
         logits, probabilities) = gec_create_model(
             bert_config, is_training, input_sequence, input_mask, segment_ids,
             edit_sequence, use_one_hot_embeddings, mode, copy_weight,
             use_bert_more, insert_ids, multitoken_insert_ids,
             subtract_replaced_from_replacement)

        tvars = tf.trainable_variables()
        initialized_variable_names = {}
        scaffold_fn = None
        if init_checkpoint:
            # Restore pre-trained weights from the checkpoint file.
            (assignment_map, initialized_variable_names
             ) = modeling.get_assignment_map_from_checkpoint(
                 tvars, init_checkpoint)
            if use_tpu:
                # On TPU the restore must happen inside the scaffold so it
                # runs on the worker rather than at graph-build time.
                def tpu_scaffold():
                    tf.train.init_from_checkpoint(init_checkpoint,
                                                  assignment_map)
                    return tf.train.Scaffold()

                scaffold_fn = tpu_scaffold
            else:
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

        tf.logging.info("**** Trainable Variables ****")
        for var in tvars:
            init_string = ""
            if var.name in initialized_variable_names:
                init_string = ", *INIT_FROM_CKPT*"
            tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                            init_string)

        output_spec = None
        if mode == tf.estimator.ModeKeys.TRAIN:

            if FLAGS.use_tpu and FLAGS.tpu_name:
                # TPU training path.
                train_op = optimization.create_optimizer(
                    total_loss, learning_rate, num_train_steps,
                    num_warmup_steps, use_tpu)
                output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                    mode=mode,
                    loss=total_loss,
                    train_op=train_op,
                    scaffold_fn=scaffold_fn)
            else:
                # GPU or CPU training path.
                train_op = custom_optimization.create_optimizer(
                    total_loss, learning_rate, num_train_steps,
                    num_warmup_steps)
                output_spec = tf.estimator.EstimatorSpec(mode=mode,
                                                         loss=total_loss,
                                                         train_op=train_op)

        elif mode == tf.estimator.ModeKeys.EVAL:

            def metric_fn(per_example_loss, edit_sequence, logits):
                """Accuracy/loss over non-dummy edit positions."""
                # Edit ids 0..3 are dummy labels (pad/unk/sos/eos); restrict
                # argmax to the real edit vocabulary and shift back by 3.
                predictions = tf.argmax(
                    logits[:, :, 3:], axis=-1, output_type=tf.int32) + 3
                # Mask out positions whose target is one of the dummy ids so
                # they do not contribute to accuracy.
                mask = tf.equal(edit_sequence, 0)
                mask = tf.logical_or(mask, tf.equal(edit_sequence, 1))
                mask = tf.logical_or(mask, tf.equal(edit_sequence, 2))
                mask = tf.logical_or(mask, tf.equal(edit_sequence, 3))
                mask = tf.to_float(tf.logical_not(mask))
                accuracy = tf.metrics.accuracy(edit_sequence, predictions,
                                               mask)
                loss = tf.metrics.mean(per_example_loss)
                # (Removed a dead `result_dict` local that was built but
                # never used.)
                return {
                    "eval_accuracy": accuracy,
                    "eval_loss": loss,
                }

            eval_metrics = (metric_fn,
                            [per_example_loss, edit_sequence, logits])
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=total_loss,
                eval_metrics=eval_metrics,
                scaffold_fn=scaffold_fn)
        else:
            # PREDICT mode.
            # first three edit ids unk, sos, eos are dummy. We do not consider them in predictions
            predictions = tf.argmax(
                logits[:, :, 3:], axis=-1, output_type=tf.int32) + 3
            if FLAGS.use_tpu and FLAGS.tpu_name:
                output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                    mode=mode,
                    predictions={
                        "predictions": predictions,
                        "logits": logits
                    },
                    scaffold_fn=scaffold_fn)
            else:
                # Non-TPU (GPU/CPU) prediction path.
                output_spec = tf.estimator.EstimatorSpec(mode=mode,
                                                         predictions={
                                                             "predictions":
                                                             predictions,
                                                             "logits": logits
                                                         })
        return output_spec
    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator (sequence labeling).

        Builds the tagging model over token inputs with an output mask,
        optionally restores weights from `init_checkpoint`, and returns a
        TRAIN, EVAL, or PREDICT spec.
        """

        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info("  name = %s, shape = %s" %
                            (name, features[name].shape))

        input_ids = features["input_ids"]
        input_mask = features["input_mask"]
        segment_ids = features["segment_ids"]
        label_ids = features["label_ids"]
        seq_len = features["seq_len"]
        output_mask = features["output_mask"]
        # Metrics/losses below expect float weights.
        output_mask_float = tf.to_float(output_mask)

        is_training = (mode == tf.estimator.ModeKeys.TRAIN)

        (total_loss, per_example_loss,
         predictions) = create_model(bert_config, is_training, input_ids,
                                     input_mask, segment_ids, label_ids,
                                     output_mask_float, num_labels,
                                     use_one_hot_embeddings, fp16)

        tvars = tf.trainable_variables()

        # BUG FIX: initialize before the `if` — the logging loop below reads
        # `initialized_variable_names` unconditionally, which previously
        # raised NameError whenever `init_checkpoint` was not provided.
        initialized_variable_names = {}
        scaffold_fn = None
        if init_checkpoint:
            (assignment_map, initialized_variable_names
             ) = modeling.get_assignment_map_from_checkpoint(
                 tvars, init_checkpoint)
            if use_tpu:
                # On TPU the restore must happen inside the scaffold so it
                # runs on the worker rather than at graph-build time.
                def tpu_scaffold():
                    tf.train.init_from_checkpoint(init_checkpoint,
                                                  assignment_map)
                    return tf.train.Scaffold()

                scaffold_fn = tpu_scaffold
            else:
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

        tf.logging.info("**** Trainable Variables ****")
        for var in tvars:
            init_string = ""
            if var.name in initialized_variable_names:
                init_string = ", *INIT_FROM_CKPT*"
            tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                            init_string)

        if mode == tf.estimator.ModeKeys.TRAIN:
            if use_gpu and int(num_gpu_cores) >= 2:
                # Multi-GPU: plain Estimator with the custom optimizer.
                train_op = custom_optimization.create_optimizer(
                    total_loss,
                    learning_rate,
                    num_train_steps,
                    num_warmup_steps,
                    fp16=fp16)
                output_spec = tf.estimator.EstimatorSpec(mode=mode,
                                                         loss=total_loss,
                                                         train_op=train_op,
                                                         scaffold=scaffold_fn)
            else:
                train_op = optimization.create_optimizer(total_loss,
                                                         learning_rate,
                                                         num_train_steps,
                                                         num_warmup_steps,
                                                         use_tpu,
                                                         fp16=fp16)
                output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                    mode=mode,
                    loss=total_loss,
                    train_op=train_op,
                    scaffold_fn=scaffold_fn)
        elif mode == tf.estimator.ModeKeys.EVAL:

            def metric_fn(per_example_loss, label_ids, predictions,
                          output_mask):
                """Token-level accuracy (masked) and mean loss."""
                accuracy = tf.metrics.accuracy(label_ids, predictions,
                                               output_mask)
                loss = tf.metrics.mean(per_example_loss)
                return {
                    "eval_accuracy": accuracy,
                    "eval_loss": loss,
                }

            eval_metrics = (metric_fn, [
                per_example_loss, label_ids, predictions, output_mask
            ])
            if use_gpu and int(num_gpu_cores) >= 2:
                # EstimatorSpec needs the metric dict itself, so invoke
                # metric_fn on the args (TPUEstimatorSpec takes the tuple).
                output_spec = tf.estimator.EstimatorSpec(
                    mode=mode,
                    loss=total_loss,
                    eval_metric_ops=eval_metrics[0](*eval_metrics[1]))
            else:
                output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                    mode=mode,
                    loss=total_loss,
                    eval_metrics=eval_metrics,
                    scaffold_fn=scaffold_fn)
        else:
            # PREDICT mode: emit predictions plus the true sequence lengths
            # so callers can strip padding.
            predictions = {"predictions": predictions, "seq_len": seq_len}
            if use_gpu and int(num_gpu_cores) >= 2:
                output_spec = tf.estimator.EstimatorSpec(
                    mode=mode, predictions=predictions)
            else:
                output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                    mode=mode,
                    predictions=predictions,
                    scaffold_fn=scaffold_fn)

        return output_spec
    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator (sequence classification).

        Builds the classifier, attaches an accuracy summary and a serving
        `export_outputs` signature, and returns a TRAIN, EVAL, or PREDICT
        spec.  Hyperparameters (learning rate, step counts) come from
        `params`.
        """

        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info("  name = %s, shape = %s" %
                            (name, features[name].shape))

        input_ids = features["input_ids"]
        input_mask = features["input_mask"]
        segment_ids = features["segment_ids"]
        label_ids = features["label_ids"]
        # `is_real_example` weights out padding examples added to fill the
        # final batch; default to all-ones when the feature is absent.
        is_real_example = None
        if "is_real_example" in features:
            is_real_example = tf.cast(features["is_real_example"],
                                      dtype=tf.float32)
        else:
            is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)

        is_training = (mode == tf.estimator.ModeKeys.TRAIN)

        (total_loss, per_example_loss, logits, probabilities,
         predict) = create_model(bert_config, is_training, input_ids,
                                 input_mask, segment_ids, label_ids,
                                 num_labels, use_one_hot_embeddings, fp16)

        tvars = tf.trainable_variables()
        initialized_variable_names = {}
        scaffold_fn = None
        if init_checkpoint:
            (assignment_map, initialized_variable_names
             ) = modeling.get_assignment_map_from_checkpoint(
                 tvars, init_checkpoint)
            if use_tpu:
                # On TPU the restore must happen inside the scaffold so it
                # runs on the worker rather than at graph-build time.
                def tpu_scaffold():
                    tf.train.init_from_checkpoint(init_checkpoint,
                                                  assignment_map)
                    return tf.train.Scaffold()

                scaffold_fn = tpu_scaffold
            else:
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

        tf.logging.info("**** Trainable Variables ****")
        for var in tvars:
            init_string = ""
            if var.name in initialized_variable_names:
                init_string = ", *INIT_FROM_CKPT*"
            tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                            init_string)

        # TensorBoard summary: running accuracy (update op at index 1).
        predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
        accuracy = tf.metrics.accuracy(labels=label_ids,
                                       predictions=predictions)
        tf.summary.scalar('accuracy', accuracy[1])
        # Serving signature used when exporting a SavedModel.
        export_outputs = {
            'predict_output':
            tf.estimator.export.PredictOutput({
                'predict': predict,
                'probabilities': probabilities
            })
        }
        # NOTE(review): when use_tpu is True, scaffold_fn is a callable but
        # EstimatorSpec's `scaffold` expects a tf.train.Scaffold instance;
        # this path appears GPU/CPU-only (scaffold_fn stays None) — confirm.
        if mode == tf.estimator.ModeKeys.TRAIN:
            # Single machine, multiple GPUs.
            if use_gpu and int(num_gpu_cores) >= 2:
                train_op = custom_optimization.create_optimizer(
                    total_loss,
                    params['learning_rate'],
                    params['num_train_steps'],
                    params['num_warmup_steps'],
                    fp16=fp16)
            else:
                train_op = optimization.create_optimizer(
                    total_loss, params['learning_rate'],
                    params['num_train_steps'], params['num_warmup_steps'],
                    False)
            output_spec = tf.estimator.EstimatorSpec(
                mode=mode,
                loss=total_loss,
                train_op=train_op,
                export_outputs=export_outputs,
                scaffold=scaffold_fn)
        elif mode == tf.estimator.ModeKeys.EVAL:
            eval_loss = tf.metrics.mean(values=per_example_loss)
            eval_metrics = {"eval_accuracy": accuracy, "eval_loss": eval_loss}
            output_spec = tf.estimator.EstimatorSpec(
                mode=mode,
                loss=total_loss,
                eval_metric_ops=eval_metrics,
                export_outputs=export_outputs,
                scaffold=scaffold_fn)
        elif mode == tf.estimator.ModeKeys.PREDICT:
            predicted_classes = tf.argmax(logits, 1)
            predictions = {
                'class_ids': predicted_classes[:, tf.newaxis],
                'probabilities': probabilities,
                'logits': logits
            }
            output_spec = tf.estimator.EstimatorSpec(
                mode=mode,
                predictions=predictions,
                export_outputs=export_outputs,
                scaffold=scaffold_fn)
        else:
            # BUG FIX: the original used three independent `if` statements
            # with no fallback, so an unknown mode reached the return with
            # `output_spec` undefined (NameError). Fail explicitly instead.
            raise ValueError("Unsupported mode: %s" % (mode,))
        return output_spec
        """