Code example #1
 def write_weights(self, mode: str, models: Iterable[Model], step: int,
                   visualize: bool) -> None:
     # Similar to TF implementation, but multiple models
     with self.tf_summary_writers[mode].as_default(
     ), summary_ops_v2.always_record_summaries():
         for model in models:
             for layer in model.layers:
                 for weight in layer.weights:
                     weight_name = weight.name.replace(':', '_')
                     weight_name = "{}_{}".format(model.model_name,
                                                  weight_name)
                     with tfops.init_scope():
                         weight = backend.get_value(weight)
                     summary_ops_v2.histogram(weight_name,
                                              weight,
                                              step=step)
                     if visualize:
                         weight = self._weight_to_image(
                             weight=weight, kernel_channels_last=True)
                         if weight is not None:
                             summary_ops_v2.image(
                                 weight_name,
                                 weight,
                                 step=step,
                                 max_images=weight.shape[0])
Code example #2
File: tensorboard.py  Project: AriChow/fastestimator
 def _log_weight_as_image(weight, weight_name, epoch):
     """ Logs a weight as a TensorBoard image.
         Implementation from tensorflow codebase, would have invoked theirs directly but they didn't make it a static
         method
     """
     w_img = array_ops.squeeze(weight)
     shape = backend.int_shape(w_img)
     if len(shape) == 1:  # Bias case
         w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
     elif len(shape) == 2:  # Dense layer kernel case
         if shape[0] > shape[1]:
             w_img = array_ops.transpose(w_img)
             shape = backend.int_shape(w_img)
         w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
     elif len(shape) == 3:  # ConvNet case
         if backend.image_data_format() == 'channels_last':
             # Switch to channels_first to display every kernel as a separate
             # image.
             w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
             shape = backend.int_shape(w_img)
         w_img = array_ops.reshape(w_img, [shape[0], shape[1], shape[2], 1])
     shape = backend.int_shape(w_img)
     # Not possible to handle 3D convnets etc.
     if len(shape) == 4 and shape[-1] in [1, 3, 4]:
         summary_ops_v2.image(weight_name, w_img, step=epoch)
Code example #3
def _train_step_ae(model, linear_model, features, optimizer_ae, 
    global_step, config, training, epoch):    
    linear_predictions = get_linear_predictions(linear_model, features)
    anno = get_annotations(features)
    with tf.GradientTape() as tape_ae:
        losses = _loss_fn_ae(model, linear_predictions, features["inputs"], 
            anno, config, training)
        (reconstruction_loss, mean_kld_z, reconstruction, penalty) = losses

        # mean reconstruction loss ae
        mean_reconstruction_loss = tf.reduce_mean(reconstruction_loss)
        
        # summaries
        scalar("reconstruction_loss", mean_reconstruction_loss, 
            step=global_step)
        scalar("kl_loss", mean_kld_z, step=global_step)
        scalar("penalty", penalty, step=global_step)
        image("images", tf.concat([features["inputs"], reconstruction], axis=2), 
            step=global_step)

        # L2 regularizers
        l2_regularizer_ae = tf.add_n([tf.nn.l2_loss(v) for v in 
            model.trainable_variables if 'bias' not in v.name])
        
        # total loss AE
        ae_loss = (mean_reconstruction_loss + mean_kld_z + 
            config.penalty_weight*penalty + 
            config.ae_l2_penalty_weight*l2_regularizer_ae)
        
    grads = tape_ae.gradient(ae_loss, model.trainable_variables)
    optimizer_ae.apply_gradients(zip(grads, model.trainable_variables))

    global_step.assign_add(1)
Code example #4
File: summaries.py  Project: vwegmayr/tractography
 def _scatter(self, epoch, logs={}):
     x = self.scatter_x_model.predict_generator(self.eval_seq)
     y = self.scatter_y_model.predict_generator(self.eval_seq)
     # =====================================================================
     writer = self._get_writer(self._train_run_name)
     with context.eager_mode(), writer.as_default(), \
         summary_ops_v2.always_record_summaries():
         # ------------------------------------------------------------------
         for i, name in self.scatter:
             fig, ax = plt.subplots()
             ax.hist2d(x[i],
                       y[i],
                       bins=50,
                       density=True,
                       norm=colors.SymLogNorm(linthresh=0.01,
                                              linscale=2,
                                              vmin=-1.0,
                                              vmax=2.0))
             ax.set_xlabel(name[0])
             ax.set_ylabel(name[1])
             fig.canvas.draw()
             # ------------------------------------------------------------------
             plot = np.array(fig.canvas.renderer.buffer_rgba())
             plot = np.expand_dims(plot, 0)
             summary_ops_v2.image("2DHistogram", plot, step=step)
             # ------------------------------------------------------------------
             plt.close()
             writer.flush()
Code example #5
File: callbacks.py  Project: MaximLippeveld/DEC-DA
 def make_summary(self, step, tensor, type):
     with context.eager_mode(), self.writer.as_default(
     ), summary_ops_v2.always_record_summaries():
         for i in range(self.n_channels):
             summary_ops_v2.image("%s image dim %d" % (type, i),
                                  tensor[:, :, :, i, tf.newaxis],
                                  max_images=3,
                                  step=step)
Code example #6
File: callbacks.py  Project: TUMFTM/RadarSeg
    def _log_confusion_matrix(self, logs, prefix, step):
        """
        Logs the confusion matrix as image.

        Arguments:
            logs: Training log dictionary with metric names as keys, <dict>.
            prefix: The prefix to apply to the summary names, <str>.
            step: The global step to use for TensorBoard, <int>.
        """
        if logs is None:
            logs = {}

        # Group metrics by the name of their associated file writer. Values
        # are lists of metrics, as (name, scalar_value) pairs.
        logs_by_writer = {
            self._train_run_name: [],
            self._validation_run_name: [],
        }

        # Get confusion matrix values
        for (name, value) in logs.items():
            if name.endswith('confusion_matrix'):
                # Assign writer
                if name.startswith(self._validation_prefix):
                    name = name[len(self._validation_prefix):]
                    writer_name = self._validation_run_name
                else:
                    writer_name = self._train_run_name

                # Add prefix
                name = prefix + name

                # Plot confusion matrix and decode figure
                value = tf.identity(value)
                value = plot_confusion_matrix(value,
                                              class_names=self.class_names,
                                              norm=True)
                value = decode_figure(value)

                # Add to writer list
                logs_by_writer[writer_name].append((name, value))

        # Iterate over writers (train, val)
        with context.eager_mode():
            with summary_ops_v2.always_record_summaries():
                for writer_name in logs_by_writer:
                    these_logs = logs_by_writer[writer_name]
                    if not these_logs:
                        # Skip if empty (no validation metric)
                        continue

                    # Write logs
                    writer = self._get_writer(writer_name)
                    with writer.as_default():
                        for (name, value) in these_logs:
                            summary_ops_v2.image(name, value, step=step)
Code example #7
 def testEagerMemory(self):
   training_util.get_or_create_global_step()
   logdir = self.get_temp_dir()
   with summary_ops.create_file_writer(
       logdir, max_queue=0,
       name='t0').as_default(), summary_ops.always_record_summaries():
     summary_ops.generic('tensor', 1, '')
     summary_ops.scalar('scalar', 2.0)
     summary_ops.histogram('histogram', [1.0])
     summary_ops.image('image', [[[[1.0]]]])
     summary_ops.audio('audio', [[1.0]], 1.0, 1)
Code example #8
 def testSummaryOps(self):
   training_util.get_or_create_global_step()
   logdir = tempfile.mkdtemp()
   with summary_ops.create_file_writer(
       logdir, max_queue=0,
       name='t0').as_default(), summary_ops.always_record_summaries():
     summary_ops.generic('tensor', 1, '')
     summary_ops.scalar('scalar', 2.0)
     summary_ops.histogram('histogram', [1.0])
     summary_ops.image('image', [[[[1.0]]]])
     summary_ops.audio('audio', [[1.0]], 1.0, 1)
     # The working condition of the ops is tested in the C++ test so we just
     # test here that we're calling them correctly.
     self.assertTrue(gfile.Exists(logdir))
Code example #9
 def testSummaryOps(self):
   logdir = self.get_temp_dir()
   writer = summary_ops.create_file_writer(logdir, max_queue=0)
   with writer.as_default(), summary_ops.always_record_summaries():
     summary_ops.generic('tensor', 1, step=1)
     summary_ops.scalar('scalar', 2.0, step=1)
     summary_ops.histogram('histogram', [1.0], step=1)
     summary_ops.image('image', [[[[1.0]]]], step=1)
     summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1)
   with self.cached_session() as sess:
     sess.run(summary_ops.summary_writer_initializer_op())
     sess.run(summary_ops.all_summary_ops())
   # The working condition of the ops is tested in the C++ test so we just
   # test here that we're calling them correctly.
   self.assertTrue(gfile.Exists(logdir))
Code example #10
 def testSummaryOps(self):
     logdir = self.get_temp_dir()
     writer = summary_ops.create_file_writer(logdir, max_queue=0)
     with writer.as_default(), summary_ops.always_record_summaries():
         summary_ops.generic('tensor', 1, step=1)
         summary_ops.scalar('scalar', 2.0, step=1)
         summary_ops.histogram('histogram', [1.0], step=1)
         summary_ops.image('image', [[[[1.0]]]], step=1)
         summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1)
     with self.cached_session() as sess:
         sess.run(summary_ops.summary_writer_initializer_op())
         sess.run(summary_ops.all_summary_ops())
     # The working condition of the ops is tested in the C++ test so we just
     # test here that we're calling them correctly.
     self.assertTrue(gfile.Exists(logdir))
Code example #11
    def construct(self, args):
        with self.session.graph.as_default():
            # Inputs
            self.images = tf.placeholder(tf.float32,
                                         [None, MNIST.H, MNIST.W, MNIST.C],
                                         name="images")
            self.labels = tf.placeholder(tf.int64, [None], name="labels")

            # Computation
            hidden = tf.keras.layers.Flatten()(self.images)
            # TODO: Add `args.layers` number of hidden layers with size `args.hidden_layer`,
            # using activation from `args.activation`, allowing "none", "relu", "tanh", "sigmoid".
            # Store the results back to `hidden` variable.
            output_layer = tf.keras.layers.Dense(MNIST.LABELS)(hidden)
            self.predictions = tf.argmax(output_layer, axis=1)

            # Training
            loss = tf.keras.losses.sparse_categorical_crossentropy(
                self.labels, output_layer, from_logits=True)
            global_step = tf.train.create_global_step()
            self.training = tf.train.AdamOptimizer().minimize(
                loss, global_step=global_step, name="training")

            # Summaries
            accuracy = tf.math.reduce_mean(
                tf.cast(tf.equal(self.labels, self.predictions), tf.float32))
            confusion_matrix = tf.reshape(
                tf.confusion_matrix(self.labels,
                                    self.predictions,
                                    weights=tf.not_equal(
                                        self.labels, self.predictions),
                                    dtype=tf.float32),
                [1, MNIST.LABELS, MNIST.LABELS, 1])

            summary_writer = tf_summary.create_file_writer(args.logdir,
                                                           flush_millis=10 *
                                                           1000)
            self.summaries = {}
            with summary_writer.as_default(
            ), tf_summary.record_summaries_every_n_global_steps(100):
                self.summaries["train"] = [
                    tf_summary.scalar("train/loss", loss),
                    tf_summary.scalar("train/accuracy", accuracy)
                ]
            with summary_writer.as_default(
            ), tf_summary.always_record_summaries():
                for dataset in ["dev", "test"]:
                    self.summaries[dataset] = [
                        tf_summary.scalar(dataset + "/accuracy", accuracy),
                        tf_summary.image(dataset + "/confusion_matrix",
                                         confusion_matrix)
                    ]
                    with tf.control_dependencies(self.summaries[dataset]):
                        self.summaries[dataset].append(summary_writer.flush())

            # Initialize variables
            self.session.run(tf.global_variables_initializer())
            with summary_writer.as_default():
                tf_summary.initialize(session=self.session,
                                      graph=self.session.graph)
Code example #12
 def define_ops():
   result = []
   # TF 2.0 summary ops
   result.append(summary_ops.write('write', 1, step=0))
   result.append(summary_ops.write_raw_pb(b'', step=0, name='raw_pb'))
   # TF 1.x tf.contrib.summary ops
   result.append(summary_ops.generic('tensor', 1, step=1))
   result.append(summary_ops.scalar('scalar', 2.0, step=1))
   result.append(summary_ops.histogram('histogram', [1.0], step=1))
   result.append(summary_ops.image('image', [[[[1.0]]]], step=1))
   result.append(summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1))
   return result
Code example #13
    def write_ll_hist(self, epoch, probs_res):
        plt.subplot(2, 1, 1)
        for dataset in self.datasets:
            sns.distplot(probs_res['orig_probs'][dataset], label=dataset)
        plt.title('Log Likelihood')
        plt.legend()
        plt.subplot(2, 1, 2)
        for dataset in self.datasets:
            sns.distplot(probs_res['corr_probs'][dataset], label=dataset)
        plt.title('Corrected Log Likelihood')
        plt.legend()

        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        buf.seek(0)
        hist_img = tf.image.decode_png(buf.getvalue(), channels=4)
        hist_img = tf.expand_dims(hist_img, 0)

        with summary_ops_v2.always_record_summaries():
            with self._val_writer.as_default():
                summary_ops_v2.image('ll_hist', hist_img, step=epoch)
Code example #14
    def _log_weight_as_image(self, weight, weight_name, epoch):
        """Logs a weight as a TensorBoard image."""
        w_img = array_ops.squeeze(weight)
        shape = K.int_shape(w_img)
        if len(shape) == 1:  # Bias case
            w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
        elif len(shape) == 2:  # Dense layer kernel case
            if shape[0] > shape[1]:
                w_img = array_ops.transpose(w_img)
                shape = K.int_shape(w_img)
            w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
        elif len(shape) == 3:  # ConvNet case
            if K.image_data_format() == 'channels_last':
                # Switch to channels_first to display every kernel as a separate
                # image.
                w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
                shape = K.int_shape(w_img)
            w_img = array_ops.reshape(w_img, [shape[0], shape[1], shape[2], 1])

        shape = K.int_shape(w_img)
        # Not possible to handle 3D convnets etc.
        if len(shape) == 4 and shape[-1] in [1, 3, 4]:
            summary_ops_v2.image(weight_name, w_img, step=epoch)
Code example #15
File: summaries.py  Project: vwegmayr/tractography
    def _scatter(self, step, logs):
        kappa_pred = self.scatter_x_model.predict_generator(self.eval_seq)
        mu_pred = self.scatter_y_model.predict_generator(self.eval_seq)

        mu_true = self.eval_seq.outgoing

        agreement = np.sum(mu_true * mu_pred, axis=1)
        kappa_mean = kappa_pred.mean() + 10**-9
        kappa_pred /= kappa_mean

        # =====================================================================
        writer = self._get_writer(self._train_run_name)
        with context.eager_mode(), writer.as_default(), \
            summary_ops_v2.always_record_summaries():
            # ------------------------------------------------------------------
            fig, ax = plt.subplots()
            ax.hist2d(kappa_pred,
                      agreement,
                      bins=50,
                      density=True,
                      norm=colors.SymLogNorm(linthresh=0.01,
                                             linscale=2,
                                             vmin=-1.0,
                                             vmax=2.0))
            ax.plot([0, 1 / (logs["T"] + 10**-9) / kappa_mean], [0, 1],
                    color="red")
            ax.set_xlabel("Certainty k")
            ax.set_ylabel("Agreement <m,v>")
            fig.canvas.draw()
            # ------------------------------------------------------------------
            plot = np.array(fig.canvas.renderer.buffer_rgba())
            plot = np.expand_dims(plot, 0)
            summary_ops_v2.image("2DHistogram", plot, step=step)
            # ------------------------------------------------------------------
            plt.close()
            writer.flush()
Code example #16
def eval_one_epoch(model, linear_model, test_features, summary_directory, 
    global_step, config, epoch, training):
    metr_reconstruction_loss = tf.metrics.Mean("reconstruction_loss")
    metr_auto_reconstruction_loss = tf.metrics.Mean("auto_reconstruction_loss")
    metr_exp_reconstruction_loss = tf.metrics.Mean("exp_reconstruction_loss")
    metr_kl_loss = tf.metrics.Mean("kl_loss")
    metr_linear_loss = tf.metrics.Mean("linear_loss")
    metr_pen = tf.metrics.Mean("penalty")
    reconstruction_losses = []
    linear_losses = []
    images = []
    images2 = []

    
    for _features in test_features:
        # get predictions and latents
        linear_predictions = get_linear_predictions(linear_model, _features)
        anno = get_annotations(_features)
        latents = get_latents(config, model, _features, training)
        (reconstruction_loss, mean_kld_z, reconstruction, 
            penalty) = _loss_fn_ae(model, linear_predictions, 
                _features["inputs"], anno, config, training)

        # mean encoding 
        out_det = model.autoencode(_features["inputs"], training=training)["output"]
        autoencode_rec_loss = tf.reduce_sum(tf.square(
            out_det - _features["inputs"]), axis=[1, 2, 3])

        squared_error = _loss_fn_lm(latents, linear_model, anno)       

        reconstruction_losses.append(reconstruction_loss.numpy())
        linear_losses.append(squared_error.numpy())
        
        # update mean-metric
        metr_auto_reconstruction_loss(autoencode_rec_loss)
        metr_reconstruction_loss(reconstruction_loss)
        metr_linear_loss(squared_error)
        metr_kl_loss(mean_kld_z)
        metr_pen(penalty)

        se_x_xhatexp = get_sq_er_x_xhatexp(config, model, linear_model, 
            _features, training) 
        exp_reconstruction = get_xhatexp(config, model, linear_model, 
            _features, training) 
        metr_exp_reconstruction_loss(se_x_xhatexp)

        # append input images, and only keep 4 - otherwise we'd get OOM
        images.append(tf.concat([_features["inputs"][0:1, :, :, :], 
            reconstruction[0:1, :, :, :],
            exp_reconstruction[0:1, :, :, :]], axis=2))

        # append input images, and only keep 4 - otherwise we'd get OOM
        images2.append(tf.concat([_features["inputs"][0:1, :, :, :], 
            reconstruction[0:1, :, :, :]], axis=2))
        
        shuffle(images)
        images = images[-4:]

        shuffle(images2)
        images2 = images2[-4:]

    writer = tf.summary.create_file_writer(summary_directory)
    with writer.as_default(), tf.summary.record_if(True):

        scalar("reconstruction_loss", metr_reconstruction_loss.result(), 
            step=global_step)
        scalar("exp_reconstruction_loss", metr_exp_reconstruction_loss.result(), 
            step=global_step)
        scalar("auto_reconstruction_loss", metr_auto_reconstruction_loss.result(), 
            step=global_step)
        scalar("linear_loss", metr_linear_loss.result(), step=global_step)
        scalar("kl_loss", metr_kl_loss.result(), step=global_step)
        scalar("penalty", metr_pen.result(), step=global_step)
        histogram("distribution_reconstruction_loss", 
            np.concatenate(reconstruction_losses, axis=0).flatten(), 
            step=global_step)
        histogram("distribution_linear_loss", 
            np.concatenate(linear_losses, axis=0).flatten(), step=global_step)
        image("images", tf.concat(images, axis=0), step=global_step)
        image("images2", tf.concat(images2, axis=0), step=global_step)


    out_dict = {"reconstruction_loss": metr_reconstruction_loss.result(),
        "exp_reconstruction_loss": metr_exp_reconstruction_loss.result(),
        "auto_reconstruction_loss": metr_auto_reconstruction_loss.result(),
        "kl_loss": metr_kl_loss.result(),
        "linear_loss": metr_linear_loss.result(),
        "penalty": metr_pen.result()
    }

    return out_dict
Code example #17
 def call(self, x):
     summary_ops_v2.image('custom_image_summary', x)
     return x
Code example #18
File: summaries.py  Project: vwegmayr/tractography
 def _log_activations(self, step):
     writer = self._get_writer(self._train_run_name)
     with context.eager_mode(), writer.as_default(), \
         summary_ops_v2.always_record_summaries():
         # ==================================================================
         activation_values = self.activation_model.predict_generator(
             self.eval_seq)
         terminal = (self.eval_seq.isterminal == 1)
         midway = np.logical_not(terminal)
         # ==================================================================
         kappa = activation_values[0]
         summary_ops_v2.histogram("kappa_midway", kappa[midway], step=step)
         summary_ops_v2.scalar("kappa_midway_mean",
                               np.mean(kappa[midway]),
                               step=step)
         # ------------------------------------------------------------------
         summary_ops_v2.histogram("kappa_terminal",
                                  kappa[terminal],
                                  step=step)
         summary_ops_v2.scalar("kappa_terminal_mean",
                               np.mean(kappa[terminal]),
                               step=step)
         # ==================================================================
         mu = activation_values[1]
         neg_dot_prod_midway = -np.sum(
             mu[midway] * self.eval_seq.outgoing[midway], axis=1)
         summary_ops_v2.histogram("neg_dot_prod_midway",
                                  neg_dot_prod_midway,
                                  step=step)
         summary_ops_v2.scalar("neg_dot_prod_midway_mean",
                               np.mean(neg_dot_prod_midway),
                               step=step)
         # ------------------------------------------------------------------
         neg_dot_prod_terminal = -np.sum(
             mu[terminal] * self.eval_seq.outgoing[terminal], axis=1)
         summary_ops_v2.histogram("neg_dot_prod_terminal",
                                  neg_dot_prod_terminal,
                                  step=step)
         summary_ops_v2.scalar("neg_dot_prod_terminal_mean",
                               np.mean(neg_dot_prod_terminal),
                               step=step)
         # ==================================================================
         isterminal = activation_values[2]
         ave_prec = average_precision_score(self.eval_seq.isterminal,
                                            isterminal)
         summary_ops_v2.scalar("average_precision", ave_prec, step=step)
         # ------------------------------------------------------------------
         precision, recall, thresh = precision_recall_curve(
             y_true=self.eval_seq.isterminal,
             probas_pred=np.round(isterminal / 0.05) * 0.05)
         # ------------------------------------------------------------------
         fig, ax = plt.subplots()
         ax.plot(recall, precision, "-o")
         frac = np.mean(self.eval_seq.isterminal)
         ax.plot([0, 1], [frac, frac])
         ax.set_xlabel("Recall")
         ax.set_ylabel("Precision")
         fig.canvas.draw()
         # ------------------------------------------------------------------
         plot = np.array(fig.canvas.renderer.buffer_rgba())
         plot = np.expand_dims(plot, 0)
         summary_ops_v2.image("Precision-Recall", plot, step=step)
         # ==================================================================
         plt.close()
         writer.flush()
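
For reference, the following is a minimal, self-contained sketch (not taken from any of the projects above) that writes an image summary with the public tf.summary API of TensorFlow 2.x; tf.summary.image is the public counterpart of the internal summary_ops_v2.image calls shown in the examples. The log directory and dummy data are assumptions for illustration only.

import numpy as np
import tensorflow as tf

# Hypothetical log directory; point this wherever TensorBoard should read from.
logdir = "logs/image_demo"
writer = tf.summary.create_file_writer(logdir)

# Dummy batch of 4 grayscale 28x28 images, shape [batch, height, width, channels].
images = np.random.rand(4, 28, 28, 1).astype("float32")

with writer.as_default():
    # Writes up to max_outputs images under the tag "random_images" for step 0.
    tf.summary.image("random_images", images, step=0, max_outputs=4)
writer.flush()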