            def metric_fn(
                    per_example_loss,
                    label_ids,
                    logits,
                    is_real_example,
                    probabilities):

                if FLAGS.data_repeat:
                    # Each example appears FLAGS.data_repeat times in the
                    # batch; average the predicted distributions over the
                    # repeated copies before taking the argmax.
                    raw_shape = [-1, FLAGS.data_repeat, probabilities.shape[-1].value]
                    probabilities = tf.reduce_mean(
                        tf.reshape(probabilities, shape=raw_shape), -2)
                    predictions = tf.argmax(probabilities, axis=-1, output_type=tf.int32)

                    # Collapse the repeated labels and example mask the same way.
                    raw_shape = [-1, FLAGS.data_repeat]
                    label_ids = tf.reduce_mean(tf.reshape(label_ids, shape=raw_shape), -1)
                    label_ids = tf.cast(label_ids, dtype=tf.int32)

                    is_real_example_a = tf.reduce_mean(
                        tf.reshape(is_real_example, shape=raw_shape), -1)
                    is_real_example_a = tf.cast(is_real_example_a, dtype=tf.int32)

                    conf_mat = get_metrics_ops(label_ids, predictions, 3, is_real_example_a)

                    accuracy = tf.metrics.accuracy(
                        labels=label_ids,
                        predictions=predictions,
                        weights=is_real_example_a)
                else:
                    predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
                    conf_mat = get_metrics_ops(label_ids, predictions, 3, is_real_example)

                    accuracy = tf.metrics.accuracy(
                        labels=label_ids,
                        predictions=predictions,
                        weights=is_real_example)

                loss = tf.metrics.mean(
                    values=per_example_loss,
                    weights=is_real_example)

                return {
                    'eval_accuracy': accuracy,
                    'eval_loss': loss,
                    'conf_mat': conf_mat}
            def metric_fn(
                    per_example_loss,
                    label_ids,
                    logits,
                    is_real_example):
                predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
                eval_input_dict = {
                    'labels': label_ids,
                    'predictions': predictions,
                    'weights': is_real_example
                }
                accuracy = tf.metrics.accuracy(**eval_input_dict)

                """
                change
                """
                conf_mat = get_metrics_ops(label_ids, predictions, 3, is_real_example)

                loss = tf.metrics.mean(
                    values=per_example_loss,
                    weights=is_real_example)
                return {
                    'eval_accuracy': accuracy,
                    'eval_loss': loss,
                    'conf_mat': conf_mat}
            def metric_fn(input_label_ids, input_label_mask, logits,
                          num_labels, trans_params, sequence_lengths):

                predictions, _ = tf.contrib.crf.crf_decode(
                    logits, trans_params, sequence_lengths)

                conf_mat = metrics.get_metrics_ops(input_label_ids,
                                                   predictions, num_labels,
                                                   input_label_mask)

                accuracy = tf.metrics.accuracy(labels=input_label_ids,
                                               predictions=predictions,
                                               weights=input_label_mask)
                return {
                    "eval_accuracy": accuracy,
                    "eval_cm": conf_mat,
                }
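            # Hedged sketch of where the CRF variant's trans_params and
            # sequence_lengths would come from: the standard tf.contrib.crf
            # pattern pairs crf_decode (used above) with crf_log_likelihood
            # in the training graph. None of this appears in the source; it
            # is assumed context only.
            log_likelihood, trans_params = tf.contrib.crf.crf_log_likelihood(
                logits, input_label_ids, sequence_lengths)
            crf_loss = tf.reduce_mean(-log_likelihood)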
            def metric_fn(per_example_loss, label_ids, logits,
                          is_real_example):

                predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)

                conf_mat = get_metrics_ops(label_ids, predictions, 3,
                                           is_real_example)

                accuracy = tf.metrics.accuracy(labels=label_ids,
                                               predictions=predictions,
                                               weights=is_real_example)

                loss = tf.metrics.mean(values=per_example_loss,
                                       weights=is_real_example)

                return {
                    "eval_accuracy": accuracy,
                    "eval_loss": loss,
                    "cf": conf_mat,
                }
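            # Hedged sketch of how such a metric_fn is typically consumed.
            # In the TF 1.x TPUEstimator pattern these snippets follow,
            # metric_fn is passed together with its argument list; mode,
            # total_loss, and scaffold_fn are assumed to exist in the
            # enclosing model_fn and are not shown in the source.
            eval_metrics = (metric_fn,
                            [per_example_loss, label_ids, logits,
                             is_real_example])
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=total_loss,
                eval_metrics=eval_metrics,
                scaffold_fn=scaffold_fn)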
            def metric_fn(per_example_loss, label_ids, logits, num_labels,
                          is_real_example):
                # Labels arrive one-hot encoded here; reduce both labels and
                # logits to class ids before computing metrics.
                label_ids = tf.argmax(label_ids, axis=-1, output_type=tf.int32)
                predicted_labels = tf.argmax(logits,
                                             axis=-1,
                                             output_type=tf.int32)
                accuracy = tf.metrics.accuracy(label_ids,
                                               predicted_labels,
                                               weights=is_real_example)

                loss = tf.metrics.mean(values=per_example_loss,
                                       weights=is_real_example)
                """ 13: 调用metrics模型的ops 函数,
                得到包含F1,precision和recall的confusion_matrix。"""
                confusion_matrix = get_metrics_ops(label_ids, predicted_labels,
                                                   num_labels)
                return {
                    "eval_accuracy": accuracy,
                    "eval_loss": loss,
                    "eval_matrix": confusion_matrix
                }