Example #1
    def _GenerateProjectorTestData(self):
        config_path = os.path.join(self.log_dir, 'projector_config.pbtxt')
        config = projector_config_pb2.ProjectorConfig()
        embedding = config.embeddings.add()
        # Add an embedding by its canonical tensor name.
        embedding.tensor_name = 'var1:0'

        with tf.gfile.GFile(os.path.join(self.log_dir, 'bookmarks.json'),
                            'w') as f:
            f.write('{"a": "b"}')
        embedding.bookmarks_path = 'bookmarks.json'

        config_pbtxt = text_format.MessageToString(config)
        with tf.gfile.GFile(config_path, 'w') as f:
            f.write(config_pbtxt)

        # Write a checkpoint with some dummy variables.
        with tf.Graph().as_default():
            sess = tf.Session()
            checkpoint_path = os.path.join(self.log_dir, 'model')
            tf.get_variable('var1', [1, 2],
                            initializer=tf.constant_initializer(6.0))
            tf.get_variable('var2', [10, 10])
            tf.get_variable('var3', [100, 100])
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver(write_version=tf.train.SaverDef.V1)
            saver.save(sess, checkpoint_path)
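
To sanity-check these fixtures, the config can be parsed back and the V1 checkpoint inspected. A minimal sketch, assuming the config_path and checkpoint_path from the method above are in scope (TF 1.x):

# Sketch (not part of the original test): read the fixtures back.
with tf.gfile.GFile(config_path) as f:
    parsed = text_format.Parse(f.read(),
                               projector_config_pb2.ProjectorConfig())
print(parsed.embeddings[0].tensor_name)    # 'var1:0'

reader = tf.train.NewCheckpointReader(checkpoint_path)
print(reader.get_variable_to_shape_map())  # e.g. {'var1': [1, 2], ...}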
Example #2
    def testGraphFromMetaGraphBecomesAvailable(self):
        """Test accumulator by writing values and then reading them."""

        directory = os.path.join(self.get_temp_dir(),
                                 'metagraph_test_values_dir')
        if tf.gfile.IsDirectory(directory):
            tf.gfile.DeleteRecursively(directory)
        tf.gfile.MkDir(directory)

        writer = tf.summary.FileWriter(directory, max_queue=100)

        with tf.Graph().as_default() as graph:
            _ = tf.constant([2.0, 1.0])
        # Add a graph to the summary writer.
        meta_graph_def = tf.train.export_meta_graph(
            graph_def=graph.as_graph_def(add_shapes=True))
        writer.add_meta_graph(meta_graph_def)

        writer.flush()

        # Verify that we can load those events properly
        acc = ea.EventAccumulator(directory)
        acc.Reload()
        self.assertTagsEqual(acc.Tags(), {
            ea.GRAPH: True,
            ea.META_GRAPH: True,
        })
        self.assertProtoEquals(graph.as_graph_def(add_shapes=True),
                               acc.Graph())
        self.assertProtoEquals(meta_graph_def, acc.MetaGraph())
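
Note that the test expects ea.GRAPH to be True even though only add_meta_graph() was called: a MetaGraphDef embeds the GraphDef, and the accumulator extracts it. A standalone illustration (TF 1.x):

import tensorflow as tf

with tf.Graph().as_default() as g:
    tf.constant([2.0, 1.0])

# The GraphDef travels inside the MetaGraphDef's graph_def field.
meta = tf.train.export_meta_graph(graph_def=g.as_graph_def(add_shapes=True))
print(meta.graph_def == g.as_graph_def(add_shapes=True))  # True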
Example #3
    def testEndpointsNoAssets(self):
        g = tf.Graph()

        fw = tf.summary.FileWriter(self.log_dir, graph=g)
        fw.close()

        self._SetupWSGIApp()
        run_json = self._GetJson('/data/plugin/projector/runs')
        self.assertEqual(run_json, [])
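
_SetupWSGIApp and _GetJson are helpers defined elsewhere in the test class. A hedged sketch of what a _GetJson-style helper typically looks like, using werkzeug's test client (the helper name and wiring here are assumptions, not the project's actual code; werkzeug < 2.1):

import json
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse

def get_json(wsgi_app, path):
    """GET a path on a WSGI app and decode the JSON body."""
    response = Client(wsgi_app, BaseResponse).get(path)
    return json.loads(response.get_data().decode('utf-8'))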
Example #4
File: util.py Project: ysm1121/tensorboard
    def _lazily_initialize(self):
        """Initialize the graph and session, if this has not yet been done."""
        with self._initialization_lock:
            if self._session:
                return
            graph = tf.Graph()
            with graph.as_default():
                self.initialize_graph()
            # Don't reserve GPU because libpng can't run on GPU.
            config = tf.ConfigProto(device_count={'GPU': 0})
            self._session = tf.Session(graph=graph, config=config)
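
This is the double-checked lazy initialization used by TensorBoard's PersistentOpEvaluator: initialize_graph() is a hook that subclasses override, and the base class's __call__ is expected to dispatch to run() after _lazily_initialize(). A hypothetical subclass might look like the sketch below (class and attribute names are illustrative, not the project's code):

import tensorflow as tf
from tensorboard import util  # assumes this fork's util.py is importable

class PngEncoder(util.PersistentOpEvaluator):
    """Hypothetical evaluator: encodes a uint8 image to PNG,
    reusing one graph and session across calls."""

    def initialize_graph(self):
        # Runs once, inside the fresh graph created by _lazily_initialize().
        self._image = tf.placeholder(dtype=tf.uint8)
        self._png_op = tf.image.encode_png(self._image)

    def run(self, image):
        # self._session was created by _lazily_initialize() above.
        return self._session.run(self._png_op, feed_dict={self._image: image})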
Example #5
    def _get_writer_fn(self, event_batch):
        key = (event_batch.experiment_name, event_batch.run_name)
        if key in self._writer_fn_cache:
            return self._writer_fn_cache[key]
        with tf.Graph().as_default():
            placeholder = tf.placeholder(shape=[], dtype=tf.string)
            writer = tf.contrib.summary.create_db_writer(
                self._db_path,
                experiment_name=event_batch.experiment_name,
                run_name=event_batch.run_name)
            with writer.as_default():
                # TODO(nickfelt): running import_event() one record at a time is very
                #   slow; we should add an op that accepts a vector of records.
                import_op = tf.contrib.summary.import_event(placeholder)
            session = tf.Session()
            session.run(writer.init())

            def writer_fn(event_proto):
                session.run(import_op, feed_dict={placeholder: event_proto})

        self._writer_fn_cache[key] = writer_fn
        return writer_fn
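
Because the placeholder is a scalar string, the returned writer_fn expects a serialized tf.Event rather than a proto object. A usage sketch (assumes TF 1.x with tf.contrib available):

event = tf.Event(wall_time=0.0, file_version='brain.Event:2')
writer_fn(event.SerializeToString())  # feed raw serialized bytes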
Example #6
def _make_sprite_image(thumbnails, thumbnail_dim):
    """Constructs a sprite image from thumbnails and returns the png bytes."""
    if len(thumbnails) < 1:
        raise ValueError('The length of "thumbnails" must be >= 1')

    if isinstance(thumbnails, np.ndarray) and thumbnails.ndim != 4:
        raise ValueError('"thumbnails" should be of rank 4, '
                         'but is of rank %d' % thumbnails.ndim)
    if isinstance(thumbnails, list):
        if not isinstance(thumbnails[0],
                          np.ndarray) or thumbnails[0].ndim != 3:
            raise ValueError(
                'Each element of "thumbnails" must be a 3D `ndarray`')
        thumbnails = np.array(thumbnails)

    with tf.Graph().as_default():
        s = tf.Session()
        resized_images = tf.image.resize_images(thumbnails,
                                                thumbnail_dim).eval(session=s)
        images_per_row = int(math.ceil(math.sqrt(len(thumbnails))))
        thumb_height = thumbnail_dim[0]
        thumb_width = thumbnail_dim[1]
        master_height = images_per_row * thumb_height
        master_width = images_per_row * thumb_width
        num_channels = thumbnails.shape[3]
        master = np.zeros([master_height, master_width, num_channels])
        for idx, image in enumerate(resized_images):
            left_idx = idx % images_per_row
            top_idx = int(math.floor(idx / images_per_row))
            left_start = left_idx * thumb_width
            left_end = left_start + thumb_width
            top_start = top_idx * thumb_height
            top_end = top_start + thumb_height
            master[top_start:top_end, left_start:left_end, :] = image

        if USING_TF:
            # encode_png expects uint8 (or uint16) pixels, so cast the
            # float64 canvas before encoding.
            return tf.image.encode_png(master.astype(np.uint8)).eval(session=s)
        else:
            return master.tobytes()
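
A quick usage sketch (illustrative only; assumes USING_TF is true and numpy is imported as np):

# 16 random 32x32 RGB thumbnails, tiled into a 4x4 sprite of 8x8 cells.
thumbs = np.random.randint(0, 256, size=(16, 32, 32, 3)).astype(np.uint8)
png_bytes = _make_sprite_image(thumbs, thumbnail_dim=(8, 8))
with open('sprite.png', 'wb') as f:
    f.write(png_bytes)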
Example #7
    def testScalarsRealistically(self):
        """Test accumulator by writing values and then reading them."""
        def FakeScalarSummary(tag, value):
            value = tf.Summary.Value(tag=tag, simple_value=value)
            summary = tf.Summary(value=[value])
            return summary

        directory = os.path.join(self.get_temp_dir(), 'values_dir')
        if tf.gfile.IsDirectory(directory):
            tf.gfile.DeleteRecursively(directory)
        tf.gfile.MkDir(directory)

        writer = tf.summary.FileWriter(directory, max_queue=100)

        with tf.Graph().as_default() as graph:
            _ = tf.constant([2.0, 1.0])
        # Add a graph to the summary writer.
        writer.add_graph(graph)
        meta_graph_def = tf.train.export_meta_graph(
            graph_def=graph.as_graph_def(add_shapes=True))
        writer.add_meta_graph(meta_graph_def)

        run_metadata = tf.RunMetadata()
        device_stats = run_metadata.step_stats.dev_stats.add()
        device_stats.device = 'test device'
        writer.add_run_metadata(run_metadata, 'test run')

        # Write a bunch of events using the writer.
        for i in xrange(30):  # xrange: assumes `from six.moves import xrange`
            summ_id = FakeScalarSummary('id', i)
            summ_sq = FakeScalarSummary('sq', i * i)
            writer.add_summary(summ_id, i * 5)
            writer.add_summary(summ_sq, i * 5)
        writer.flush()

        # Verify that we can load those events properly
        acc = ea.EventAccumulator(directory)
        acc.Reload()
        self.assertTagsEqual(
            acc.Tags(), {
                ea.SCALARS: ['id', 'sq'],
                ea.GRAPH: True,
                ea.META_GRAPH: True,
                ea.RUN_METADATA: ['test run'],
            })
        id_events = acc.Scalars('id')
        sq_events = acc.Scalars('sq')
        self.assertEqual(30, len(id_events))
        self.assertEqual(30, len(sq_events))
        for i in xrange(30):
            self.assertEqual(i * 5, id_events[i].step)
            self.assertEqual(i * 5, sq_events[i].step)
            self.assertEqual(i, id_events[i].value)
            self.assertEqual(i * i, sq_events[i].value)

        # Write a few more events to test incremental reloading
        for i in xrange(30, 40):
            summ_id = FakeScalarSummary('id', i)
            summ_sq = FakeScalarSummary('sq', i * i)
            writer.add_summary(summ_id, i * 5)
            writer.add_summary(summ_sq, i * 5)
        writer.flush()

        # Verify we can now see all of the data
        acc.Reload()
        id_events = acc.Scalars('id')
        sq_events = acc.Scalars('sq')
        self.assertEqual(40, len(id_events))
        self.assertEqual(40, len(sq_events))
        for i in xrange(40):
            self.assertEqual(i * 5, id_events[i].step)
            self.assertEqual(i * 5, sq_events[i].step)
            self.assertEqual(i, id_events[i].value)
            self.assertEqual(i * i, sq_events[i].value)
        self.assertProtoEquals(graph.as_graph_def(add_shapes=True),
                               acc.Graph())
        self.assertProtoEquals(meta_graph_def, acc.MetaGraph())
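
For reference, the read path exercised here also works standalone against any event-file directory; a minimal sketch (module path as in TensorBoard 1.x; it may differ in newer releases):

from tensorboard.backend.event_processing import event_accumulator as ea

acc = ea.EventAccumulator('/tmp/values_dir')
acc.Reload()
for event in acc.Scalars('id'):
    print(event.step, event.wall_time, event.value)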