Example #1
from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.summary import summary


def import_to_tensorboard(model_dir, log_dir):
    """View an imported protobuf model (`.pb` file) as a graph in Tensorboard.

  Args:
    model_dir: The location of the protobuf (`pb`) model to visualize
    log_dir: The location for the Tensorboard log to begin visualization from.

  Usage:
    Call this function with your model location and desired log directory.
    Launch Tensorboard by pointing it to the log directory.
    View your imported `.pb` model as a graph.
  """
    with session.Session(graph=ops.Graph()) as sess:
        with gfile.FastGFile(model_dir, "rb") as f:
            graph_def = graph_pb2.GraphDef()
            graph_def.ParseFromString(f.read())
            importer.import_graph_def(graph_def)

        pb_visual_writer = summary.FileWriter(log_dir)
        pb_visual_writer.add_graph(sess.graph)
        print("Model Imported. Visualize by running: "
              "tensorboard --logdir={}".format(log_dir))
Example #2
from tensorflow.python.client import session
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.summary import summary
from tensorflow.python.tools import saved_model_utils


def import_to_tensorboard(model_dir, log_dir, tag_set):
    """View an imported protobuf model (`.pb` file) as a graph in Tensorboard.
  Args:
    model_dir: The location of the protobuf (`pb`) model to visualize
    log_dir: The location for the Tensorboard log to begin visualization from.
    tag_set: Group of tag(s) of the MetaGraphDef to load, as a comma-separated
        string. If the tag-set contains multiple tags, all of them must be
        passed.
  Usage:
    Call this function with your model location and desired log directory.
    Launch Tensorboard by pointing it to the log directory.
    View your imported `.pb` model as a graph.
  """
    with session.Session(graph=ops.Graph()) as sess:
        input_graph_def = saved_model_utils.get_meta_graph_def(
            model_dir, tag_set).graph_def
        importer.import_graph_def(input_graph_def)

        pb_visual_writer = summary.FileWriter(log_dir)
        pb_visual_writer.add_graph(sess.graph)
        print("Model Imported. Visualize by running: "
              "tensorboard --logdir={}".format(log_dir))
Example #3
import sys

from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.python.summary import summary


def import_to_tensorboard(saved_model, output_dir):
    """View an imported saved_model.pb as a graph in Tensorboard.

  Args:
    saved_model: The location of the saved_model.pb to visualize.
    output_dir: The location for the Tensorboard log to begin visualization from.

  Usage:
    Call this function with your model location and desired log directory.
    Launch Tensorboard by pointing it to the log directory.
    View your imported `.pb` model as a graph.
  """
    with open(saved_model, "rb") as f:
        sm = saved_model_pb2.SavedModel()
        sm.ParseFromString(f.read())
        if len(sm.meta_graphs) != 1:
            print('Expected exactly one MetaGraph, found %d. Not sure which '
                  'to write.' % len(sm.meta_graphs))
            sys.exit(1)
        graph_def = sm.meta_graphs[0].graph_def

        pb_visual_writer = summary.FileWriter(output_dir)
        pb_visual_writer.add_graph(None, graph_def=graph_def)
        print("Model Imported. Visualize by running: "
              "tensorboard --logdir={}".format(output_dir))
Example #4
    def __init__(self, test_set, log_dir='./logs', embeddings_freq=10):
        """
        Args:
            test_set: The test data
            log_dir: Path to directory used for logging
            embeddings_freq: Defines how often embedding variables will be saved to
                the log directory: if set to 1, this is done every epoch; if
                set to 10, every 10th epoch; and so forth.
        """
        super().__init__()

        self.sess = K.get_session()

        self.log_dir = log_dir
        self.embeddings_freq = embeddings_freq

        self.writer = tf_summary.FileWriter(self.log_dir)
        self.saver: tf.train.Saver  # type annotation only; assigned elsewhere

        self.embeddings_ckpt_path = os.path.join(self.log_dir, 'keras_embedding.ckpt')

        self.test_set = test_set

        # Save metadata.
        np.savetxt(f'{log_dir}/metadata.tsv', self.test_set.labels, fmt='%i')
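Assuming this constructor belongs to a Keras callback subclass (the class name
`EmbeddingLogger` and the model/data below are hypothetical), it would be
attached to training like this:

callback = EmbeddingLogger(test_set, log_dir='./logs', embeddings_freq=10)
model.fit(x_train, y_train, epochs=20, callbacks=[callback])  # hypothetical model/data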
Example #5
  def generate_run(self, run_name):
    if run_name == self._RUN_WITH_HISTOGRAM:
      (use_histogram, use_scalars) = (True, False)
    elif run_name == self._RUN_WITH_SCALARS:
      (use_histogram, use_scalars) = (False, True)
    else:
      assert False, 'Invalid run name: %r' % run_name
    ops.reset_default_graph()
    sess = session.Session()
    placeholder = array_ops.placeholder(dtypes.float32, shape=[3])
    if use_histogram:
      summary.histogram(self._HISTOGRAM_TAG, placeholder)
    if use_scalars:
      summary.scalar(self._SCALAR_TAG, math_ops.reduce_mean(placeholder))
    summ = summary.merge_all()

    subdir = os.path.join(self.logdir, run_name)
    writer = summary.FileWriter(subdir)
    writer.add_graph(sess.graph)
    for step in xrange(self._STEPS):
      feed_dict = {placeholder: [1 + step, 2 + step, 3 + step]}
      s = sess.run(summ, feed_dict=feed_dict)
      writer.add_summary(s, global_step=step)
    writer.close()
Example #6
    def testTFSummaryImage(self):
        """Verify processing of summary_lib.image."""
        event_sink = _EventGenerator(self, zero_out_timestamps=True)
        writer = summary_lib.FileWriter(self.get_temp_dir())
        writer.event_writer = event_sink
        with self.test_session() as sess:
            ipt = array_ops.ones([10, 4, 4, 3], dtypes.uint8)
            # This is an interesting example, because the old tf.image_summary
            # op would have raised an error here due to tag reuse. Keying off
            # the TF node name instead allows the same arguments to be reused
            # across image summaries.
            with ops.name_scope('1'):
                summary_lib.image('images', ipt, max_outputs=1)
            with ops.name_scope('2'):
                summary_lib.image('images', ipt, max_outputs=2)
            with ops.name_scope('3'):
                summary_lib.image('images', ipt, max_outputs=3)
            merged = summary_lib.merge_all()
            writer.add_graph(sess.graph)
            for i in xrange(10):
                summ = sess.run(merged)
                writer.add_summary(summ, global_step=i)

        accumulator = ea.EventAccumulator(event_sink)
        accumulator.Reload()

        tags = [
            u'1/images/image', u'2/images/image/0', u'2/images/image/1',
            u'3/images/image/0', u'3/images/image/1', u'3/images/image/2'
        ]

        self.assertTagsEqual(accumulator.Tags(), {
            ea.IMAGES: tags,
            ea.GRAPH: True,
            ea.META_GRAPH: False,
        })
Example #7
  def __init__(self,
               graph=None,
               ready_op=USE_DEFAULT,
               ready_for_local_init_op=USE_DEFAULT,
               is_chief=True,
               init_op=USE_DEFAULT,
               init_feed_dict=None,
               local_init_op=USE_DEFAULT,
               logdir=None,
               summary_op=USE_DEFAULT,
               saver=USE_DEFAULT,
               global_step=USE_DEFAULT,
               save_summaries_secs=120,
               save_model_secs=600,
               recovery_wait_secs=30,
               stop_grace_secs=120,
               checkpoint_basename="model.ckpt",
               session_manager=None,
               summary_writer=USE_DEFAULT,
               init_fn=None):
    """Create a `Supervisor`.

    Args:
      graph: A `Graph`.  The graph that the model will use.  Defaults to the
        default `Graph`.  The supervisor may add operations to the graph before
        creating a session, but the graph should not be modified by the caller
        after passing it to the supervisor.
      ready_op: 1-D string `Tensor`.  This tensor is evaluated by supervisors in
        `prepare_or_wait_for_session()` to check if the model is ready to use.
        The model is considered ready if it returns an empty array.  Defaults to
        the tensor returned from `tf.report_uninitialized_variables()`.  If
        `None`, the model is not checked for readiness.
      ready_for_local_init_op: 1-D string `Tensor`.  This tensor is evaluated by
        supervisors in `prepare_or_wait_for_session()` to check if the model is
        ready to run the local_init_op.
        The model is considered ready if it returns an empty array.  Defaults to
        the tensor returned from
        `tf.report_uninitialized_variables(tf.global_variables())`. If `None`,
        the model is not checked for readiness before running local_init_op.
      is_chief: If True, create a chief supervisor in charge of initializing
        and restoring the model.  If False, create a supervisor that relies
        on a chief supervisor for inits and restore.
      init_op: `Operation`.  Used by chief supervisors to initialize the model
        when it can not be recovered.  Defaults to an `Operation` that
        initializes all variables.  If `None`, no initialization is done
        automatically unless you pass a value for `init_fn`, see below.
      init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
        This feed dictionary will be used when `init_op` is evaluated.
      local_init_op: `Operation`. Used by all supervisors to run initializations
        that should run for every new supervisor instance. By default these
        are table initializers and initializers for local variables.
        If `None`, no further per supervisor-instance initialization is
        done automatically.
      logdir: A string.  Optional path to a directory where to checkpoint the
        model and log events for the visualizer.  Used by chief supervisors.
        The directory will be created if it does not exist.
      summary_op: An `Operation` that returns a Summary for the event logs.
        Used by chief supervisors if a `logdir` was specified.  Defaults to the
        operation returned from summary.merge_all().  If `None`, summaries are
        not computed automatically.
      saver: A `Saver` object.  Used by chief supervisors if a `logdir` was
        specified.  Defaults to the `Saver` returned by `Saver()`.
        If `None`, the model is not saved automatically.
      global_step: An integer Tensor of size 1 that counts steps.  The value
        from 'global_step' is used in summaries and checkpoint filenames.
        Defaults to the op named 'global_step' in the graph if it exists, is of
        rank 1, size 1, and of type tf.int32 or tf.int64.  If `None` the global
        step is not recorded in summaries and checkpoint files.  Used by chief
        supervisors if a `logdir` was specified.
      save_summaries_secs: Number of seconds between the computation of
        summaries for the event log.  Defaults to 120 seconds.  Pass 0 to
        disable summaries.
      save_model_secs: Number of seconds between the creation of model
        checkpoints.  Defaults to 600 seconds.  Pass 0 to disable checkpoints.
      recovery_wait_secs: Number of seconds between checks that the model
        is ready.  Used by supervisors when waiting for a chief supervisor
        to initialize or restore the model.  Defaults to 30 seconds.
      stop_grace_secs: Grace period, in seconds, given to running threads to
        stop when `stop()` is called.  Defaults to 120 seconds.
      checkpoint_basename: The basename for checkpoint saving.
      session_manager: `SessionManager`, which manages Session creation and
        recovery. If it is `None`, a default `SessionManager` will be created
        with the set of arguments passed in for backwards compatibility.
      summary_writer: `SummaryWriter` to use or `USE_DEFAULT`.  Can be `None`
        to indicate that no summaries should be written.
      init_fn: Optional callable used to initialize the model. Called
        after the optional `init_op` is called.  The callable must accept one
        argument, the session being initialized.

    Returns:
      A `Supervisor`.
    """
    # Set default values of arguments.
    if graph is None:
      graph = ops.get_default_graph()
    with graph.as_default():
      self._init_ready_op(
          ready_op=ready_op, ready_for_local_init_op=ready_for_local_init_op)
      self._init_init_op(init_op=init_op, init_feed_dict=init_feed_dict)
      self._init_local_init_op(local_init_op=local_init_op)
      self._init_saver(saver=saver)
      self._init_summary_op(summary_op=summary_op)
      self._init_global_step(global_step=global_step)
    self._graph = graph
    self._meta_graph_def = meta_graph.create_meta_graph_def(
        graph_def=graph.as_graph_def(add_shapes=True),
        saver_def=self._saver.saver_def if self._saver else None)
    self._is_chief = is_chief
    self._coord = coordinator.Coordinator()
    self._recovery_wait_secs = recovery_wait_secs
    self._stop_grace_secs = stop_grace_secs
    self._init_fn = init_fn

    # Set all attributes related to checkpointing and writing events to None.
    # Afterwards, set them appropriately for chief supervisors, as these are
    # the only supervisors that can write checkpoints and events.
    self._logdir = None
    self._save_summaries_secs = None
    self._save_model_secs = None
    self._save_path = None
    self._summary_writer = None

    if self._is_chief:
      self._logdir = logdir
      self._save_summaries_secs = save_summaries_secs
      self._save_model_secs = save_model_secs
      if self._logdir:
        self._save_path = os.path.join(self._logdir, checkpoint_basename)
      if summary_writer is Supervisor.USE_DEFAULT:
        if self._logdir:
          self._summary_writer = _summary.FileWriter(self._logdir)
      else:
        self._summary_writer = summary_writer
      self._graph_added_to_summary = False

    self._init_session_manager(session_manager=session_manager)
    self._verify_setup()
    # The graph is not allowed to change anymore.
    graph.finalize()
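For context, the canonical TensorFlow 1.x usage of a `Supervisor` (a sketch;
`train_op` and the logdir are assumed):

sv = Supervisor(logdir="/tmp/mydir")  # chief: writes checkpoints and summaries
with sv.managed_session() as sess:
    while not sv.should_stop():
        sess.run(train_op)  # assumed training op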
Example #8
    def set_model(self, model):
        self.model = model
        self.sess = K.get_session()
        if self.histogram_freq and self.merged is None:
            for layer in self.model.layers:
                for weight in layer.weights:
                    mapped_weight_name = weight.name.replace(':', '_')
                    tf_summary.histogram(mapped_weight_name, weight)
                    if self.write_images:
                        w_img = array_ops.squeeze(weight)
                        shape = K.int_shape(w_img)
                        if len(shape) == 2:  # dense layer kernel case
                            if shape[0] > shape[1]:
                                w_img = array_ops.transpose(w_img)
                                shape = K.int_shape(w_img)
                            w_img = array_ops.reshape(
                                w_img, [1, shape[0], shape[1], 1])
                        elif len(shape) == 3:  # convnet case
                            if K.image_data_format() == 'channels_last':
                                # switch to channels_first to display
                                # every kernel as a separate image
                                w_img = array_ops.transpose(w_img,
                                                            perm=[2, 0, 1])
                                shape = K.int_shape(w_img)
                            w_img = array_ops.reshape(
                                w_img, [shape[0], shape[1], shape[2], 1])
                        elif len(shape) == 1:  # bias case
                            w_img = array_ops.reshape(w_img,
                                                      [1, shape[0], 1, 1])
                        else:
                            # not possible to handle 3D convnets etc.
                            continue

                        shape = K.int_shape(w_img)
                        assert len(shape) == 4 and shape[-1] in [1, 3, 4]
                        tf_summary.image(mapped_weight_name, w_img)

                if self.write_grads:
                    for weight in layer.trainable_weights:
                        mapped_weight_name = weight.name.replace(':', '_')
                        grads = model.optimizer.get_gradients(
                            model.total_loss, weight)

                        def is_indexed_slices(grad):
                            return type(grad).__name__ == 'IndexedSlices'

                        grads = [
                            grad.values if is_indexed_slices(grad) else grad
                            for grad in grads
                        ]
                        tf_summary.histogram(
                            '{}_grad'.format(mapped_weight_name), grads)

                if hasattr(layer, 'output'):
                    tf_summary.histogram('{}_out'.format(layer.name),
                                         layer.output)
        self.merged = tf_summary.merge_all()

        if self.write_graph:
            self.writer = tf_summary.FileWriter(self.log_dir, self.sess.graph)
        else:
            self.writer = tf_summary.FileWriter(self.log_dir)
Example #9
  def set_estimator(self, estimator):
    super(SummarySaver, self).set_estimator(estimator)
    # TODO(mdan): This line looks redundant.
    if self._summary_writer is None:
      self._summary_writer = core_summary.FileWriter(estimator.model_dir)
Example #10
def show_pb_in_tensorboard(graph_def, port=6006):
    """Show a GraphDef in TensorBoard."""
    importer.import_graph_def(graph_def, name="")
    graph = ops.get_default_graph()  # populated by the import above
    summary_writer = summary.FileWriter("./logdir/", graph)
    summary_writer.flush()  # ensure the graph event is on disk before launching
    os.system('tensorboard --logdir ./logdir/ --port {}'.format(port))
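A sketch of feeding this function a frozen graph (the file path is
hypothetical):

from tensorflow.core.framework import graph_pb2

with open("/tmp/frozen_graph.pb", "rb") as f:  # hypothetical path
    gd = graph_pb2.GraphDef()
    gd.ParseFromString(f.read())
show_pb_in_tensorboard(gd, port=6006)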
Example #11
def graph_visualize(sess, log_dir='/tmp/'):
    pb_visual_writer = summary.FileWriter(log_dir)
    pb_visual_writer.add_graph(sess.graph)
    print("Model Imported. Visualize by running the bash command: "
          "tensorboard --logdir={}".format(log_dir))
Example #12
import tensorflow as tf

from tensorflow.python.summary import summary

with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING],
                               './')

    pb_visual_writer = summary.FileWriter('/tmp/tensorflow_logdir')
    pb_visual_writer.add_graph(sess.graph)
    print("Model Imported. Visualize by running: "
          "tensorboard --logdir={}".format('/tmp/tensorflow_logdir'))
Example #13
# Requires
# Python, Tensorflow (2.0+)
#
# Can be installed with:
# pip install tensorflow
#
# Description:
# Reads a protobuf file and exports the graph to `output_directory` so it can
# be visualized in tensorboard.

import tensorflow as tf
from tensorflow.core.framework import graph_pb2
from tensorflow.python.summary import summary

output_directory = ''  # destination for the TensorBoard event file
graph_def_pb_file = "<file>.pb"  # path to the frozen GraphDef to visualize

with tf.io.gfile.GFile(graph_def_pb_file, "rb") as f:
    graph_def = graph_pb2.GraphDef()
    graph_def.ParseFromString(f.read())


with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name="")

    pb_visual_writer = summary.FileWriter(output_directory)
    pb_visual_writer.add_graph(graph)
Example #14
import os
import sys

from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.summary import summary

ROOT_DIR = os.path.abspath('../')
sys.path.append(ROOT_DIR)
MODEL_DIR = os.path.join(ROOT_DIR, 'models')

# Gather all model names in models/ (its immediate subdirectories).
models = next(os.walk(MODEL_DIR))[1]

# Create Tensorboard readable tfevent files in models/{}/log
for model in models:
    print("> creating tfevent of model: {}".format(model))
    MODEL_NAME = model
    MODEL_PATH = ROOT_DIR + '/models/{}/frozen_inference_graph.pb'.format(
        MODEL_NAME)
    LOG_DIR = ROOT_DIR + '/models/{}/log/'.format(MODEL_NAME)

    with session.Session(graph=ops.Graph()) as sess:
        with gfile.FastGFile(MODEL_PATH, "rb") as f:
            graph_def = graph_pb2.GraphDef()
            graph_def.ParseFromString(f.read())
            importer.import_graph_def(graph_def)
        pb_visual_writer = summary.FileWriter(LOG_DIR)
        pb_visual_writer.add_graph(sess.graph)
    print("> Model Imported. Visualize by running: "
          "tensorboard --logdir={}".format(LOG_DIR))
Example #15
    def set_model(self, model):
        self.model = model
        self.sess = K.get_session()
        if self.histogram_freq and self.merged is None:
            for layer in self.model.layers:

                for weight in layer.weights:
                    tf_summary.histogram(weight.name, weight)
                    if self.write_images:
                        w_img = array_ops.squeeze(weight)
                        shape = w_img.get_shape()
                        if len(shape) > 1 and shape[0] > shape[1]:
                            w_img = array_ops.transpose(w_img)
                        if len(shape) == 1:
                            w_img = array_ops.expand_dims(w_img, 0)
                        w_img = array_ops.expand_dims(
                            array_ops.expand_dims(w_img, 0), -1)
                        tf_summary.image(weight.name, w_img)

                if hasattr(layer, 'output'):
                    tf_summary.histogram('{}_out'.format(layer.name),
                                         layer.output)
        self.merged = tf_summary.merge_all()

        if self.write_graph:
            self.writer = tf_summary.FileWriter(self.log_dir, self.sess.graph)
        else:
            self.writer = tf_summary.FileWriter(self.log_dir)

        if self.embeddings_freq:
            self.saver = saver_lib.Saver()

            embeddings_layer_names = self.embeddings_layer_names

            if not embeddings_layer_names:
                embeddings_layer_names = [
                    layer.name for layer in self.model.layers
                    if type(layer).__name__ == 'Embedding'
                ]

            embeddings = {
                layer.name: layer.weights[0]
                for layer in self.model.layers
                if layer.name in embeddings_layer_names
            }

            embeddings_metadata = {}

            if not isinstance(self.embeddings_metadata, str):
                embeddings_metadata = self.embeddings_metadata
            else:
                embeddings_metadata = {
                    layer_name: self.embeddings_metadata
                    for layer_name in embeddings.keys()
                }

            config = projector.ProjectorConfig()
            self.embeddings_logs = []

            for layer_name, tensor in embeddings.items():
                embedding = config.embeddings.add()
                embedding.tensor_name = tensor.name

                self.embeddings_logs.append(
                    os.path.join(self.log_dir, layer_name + '.ckpt'))

                if layer_name in embeddings_metadata:
                    embedding.metadata_path = embeddings_metadata[layer_name]

            projector.visualize_embeddings(self.writer, config)
Example #16
    def testScalarsRealistically(self):
        """Test accumulator by writing values and then reading them."""
        def FakeScalarSummary(tag, value):
            value = summary_pb2.Summary.Value(tag=tag, simple_value=value)
            summary = summary_pb2.Summary(value=[value])
            return summary

        directory = os.path.join(self.get_temp_dir(), 'values_dir')
        if gfile.IsDirectory(directory):
            gfile.DeleteRecursively(directory)
        gfile.MkDir(directory)

        writer = summary_lib.FileWriter(directory, max_queue=100)

        with ops.Graph().as_default() as graph:
            _ = constant_op.constant([2.0, 1.0])
        # Add a graph to the summary writer.
        writer.add_graph(graph)
        meta_graph_def = saver.export_meta_graph(graph_def=graph.as_graph_def(
            add_shapes=True))
        writer.add_meta_graph(meta_graph_def)

        run_metadata = config_pb2.RunMetadata()
        device_stats = run_metadata.step_stats.dev_stats.add()
        device_stats.device = 'test device'
        writer.add_run_metadata(run_metadata, 'test run')

        # Write a bunch of events using the writer.
        for i in xrange(30):
            summ_id = FakeScalarSummary('id', i)
            summ_sq = FakeScalarSummary('sq', i * i)
            writer.add_summary(summ_id, i * 5)
            writer.add_summary(summ_sq, i * 5)
        writer.flush()

        # Verify that we can load those events properly
        acc = ea.EventAccumulator(directory)
        acc.Reload()
        self.assertTagsEqual(
            acc.Tags(), {
                ea.SCALARS: ['id', 'sq'],
                ea.GRAPH: True,
                ea.META_GRAPH: True,
                ea.RUN_METADATA: ['test run'],
            })
        id_events = acc.Scalars('id')
        sq_events = acc.Scalars('sq')
        self.assertEqual(30, len(id_events))
        self.assertEqual(30, len(sq_events))
        for i in xrange(30):
            self.assertEqual(i * 5, id_events[i].step)
            self.assertEqual(i * 5, sq_events[i].step)
            self.assertEqual(i, id_events[i].value)
            self.assertEqual(i * i, sq_events[i].value)

        # Write a few more events to test incremental reloading
        for i in xrange(30, 40):
            summ_id = FakeScalarSummary('id', i)
            summ_sq = FakeScalarSummary('sq', i * i)
            writer.add_summary(summ_id, i * 5)
            writer.add_summary(summ_sq, i * 5)
        writer.flush()

        # Verify we can now see all of the data
        acc.Reload()
        id_events = acc.Scalars('id')
        sq_events = acc.Scalars('sq')
        self.assertEqual(40, len(id_events))
        self.assertEqual(40, len(sq_events))
        for i in xrange(40):
            self.assertEqual(i * 5, id_events[i].step)
            self.assertEqual(i * 5, sq_events[i].step)
            self.assertEqual(i, id_events[i].value)
            self.assertEqual(i * i, sq_events[i].value)
        self.assertProtoEquals(graph.as_graph_def(add_shapes=True),
                               acc.Graph())
        self.assertProtoEquals(meta_graph_def, acc.MetaGraph())
Example #17
import tensorflow as tf
from tensorflow.python.summary import summary

with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    new_saver = tf.train.import_meta_graph(
        '/home/jan/Downloads/squeezeDet/model.ckpt-87000.meta')
    new_saver.restore(sess, '/home/jan/Downloads/squeezeDet/model.ckpt-87000')

    log_dir = '/home/jan/Downloads/squeezeDet/tensorboard'

    pb_visual_writer = summary.FileWriter(log_dir)
    pb_visual_writer.add_graph(sess.graph)
    print("Model Imported. Visualize by running: "
          "tensorboard --logdir={}".format(log_dir))

    print("done")
Example #18
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.summary import summary

model_dir = 'D:/data/glomeruli/20180202_glomeruli_detection_noquant.pb'
log_dir = 'd:/temp/tf'

with session.Session(graph=ops.Graph()) as sess:
    with gfile.FastGFile(model_dir, "rb") as f:
        graph_def = graph_pb2.GraphDef()
        graph_def.ParseFromString(f.read())
        importer.import_graph_def(graph_def)

        # Passing the graph to the FileWriter constructor writes it immediately.
        file_writer = summary.FileWriter(log_dir, sess.graph)
        print("Model Imported. Visualize by running: tensorboard --logdir={}".
              format(log_dir))
Example #19
def tensorboard(file, logdir="./log"):
    """Afterwards, run 'tensorboard --logdir log' in the shell."""
    g = load(file)  # `load` is assumed to deserialize the file into a tf.Graph
    with session.Session(graph=g) as sess:
        w = summary.FileWriter(logdir)
        w.add_graph(sess.graph)
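Usage sketch, assuming `load` deserializes a `.pb` file into a `tf.Graph`
(path hypothetical):

tensorboard("/tmp/model.pb")
# then, in the shell:  tensorboard --logdir ./log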
Example #20
  def set_model(self, model):
    self.model = model
    self.sess = K.get_session()
    if self.histogram_freq and self.merged is None:
      for layer in self.model.layers:
        for weight in layer.weights:
          mapped_weight_name = weight.name.replace(':', '_')
          tf_summary.histogram(mapped_weight_name, weight)
          if self.write_grads:
            grads = model.optimizer.get_gradients(model.total_loss, weight)
            tf_summary.histogram('{}_grad'.format(mapped_weight_name), grads)
          if self.write_images:
            w_img = array_ops.squeeze(weight)
            shape = K.int_shape(w_img)
            if len(shape) == 2:  # dense layer kernel case
              if shape[0] > shape[1]:
                w_img = array_ops.transpose(w_img)
                shape = K.int_shape(w_img)
              w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
            elif len(shape) == 3:  # convnet case
              if K.image_data_format() == 'channels_last':
                # switch to channels_first to display
                # every kernel as a separate image
                w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
                shape = K.int_shape(w_img)
              w_img = array_ops.reshape(w_img,
                                        [shape[0], shape[1], shape[2], 1])
            elif len(shape) == 1:  # bias case
              w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
            else:
              # not possible to handle 3D convnets etc.
              continue

            shape = K.int_shape(w_img)
            assert len(shape) == 4 and shape[-1] in [1, 3, 4]
            tf_summary.image(mapped_weight_name, w_img)

        if hasattr(layer, 'output'):
          tf_summary.histogram('{}_out'.format(layer.name), layer.output)
    self.merged = tf_summary.merge_all()

    if self.write_graph:
      self.writer = tf_summary.FileWriter(self.log_dir, self.sess.graph)
    else:
      self.writer = tf_summary.FileWriter(self.log_dir)

    if self.embeddings_freq:
      embeddings_layer_names = self.embeddings_layer_names

      if not embeddings_layer_names:
        embeddings_layer_names = [
            layer.name for layer in self.model.layers
            if type(layer).__name__ == 'Embedding'
        ]

      embeddings = {
          layer.name: layer.weights[0]
          for layer in self.model.layers if layer.name in embeddings_layer_names
      }

      self.saver = saver_lib.Saver(list(embeddings.values()))

      embeddings_metadata = {}

      if not isinstance(self.embeddings_metadata, str):
        embeddings_metadata = self.embeddings_metadata
      else:
        embeddings_metadata = {
            layer_name: self.embeddings_metadata
            for layer_name in embeddings.keys()
        }

      config = projector.ProjectorConfig()
      self.embeddings_ckpt_path = os.path.join(self.log_dir,
                                               'keras_embedding.ckpt')

      for layer_name, tensor in embeddings.items():
        embedding = config.embeddings.add()
        embedding.tensor_name = tensor.name

        if layer_name in embeddings_metadata:
          embedding.metadata_path = embeddings_metadata[layer_name]

      projector.visualize_embeddings(self.writer, config)
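This `set_model` comes from the Keras `TensorBoard` callback; in user code the
callback is simply passed to `fit` (a sketch with a hypothetical model and data):

tb = TensorBoard(log_dir='./logs', histogram_freq=1,
                 write_graph=True, write_images=True)
model.fit(x_train, y_train, epochs=10, callbacks=[tb])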
Example #21
import argparse
import os

import tensorflow as tf
from tensorflow.python.summary import summary

parser = argparse.ArgumentParser()
# The head of this snippet was truncated: '--weightspath' is inferred from the
# `args.weightspath` references below, and its default value is a placeholder.
parser.add_argument('--weightspath',
                    default='output',
                    type=str,
                    help='Path to output folder')
parser.add_argument('--metaname',
                    default='model.meta',
                    type=str,
                    help='Name of ckpt meta file')
parser.add_argument('--ckptname',
                    default='model-1545',
                    type=str,
                    help='Name of model ckpts')
parser.add_argument('--log_dir',
                    default='output/tensorboard_log/',
                    type=str,
                    help='Name of log directory')

args = parser.parse_args()

mapping = {'normal': 0, 'pneumonia': 1, 'COVID-19': 2}
inv_mapping = {0: 'normal', 1: 'pneumonia', 2: 'COVID-19'}

sess = tf.Session()
saver = tf.train.import_meta_graph(
    os.path.join(args.weightspath, args.metaname))
saver.restore(sess, os.path.join(args.weightspath, args.ckptname))

writer = summary.FileWriter(args.log_dir)
writer.add_graph(sess.graph)
print("done exporting model for tensorboard")