Code Example #1
File: summaries.py  Project: huangpu1/3dgan
def summarize_weights_biases():
    with tf.variable_scope('weights'):
        for l in tf.get_collection('weights'):
            tf.summary.histogram(hem.tensor_name(l), l)
            tf.summary.scalar(hem.tensor_name(l) + '/sparsity', tf.nn.zero_fraction(l))
            # montage_summary(l, name=tensor_name(l) + '/montage')
    with tf.variable_scope('biases'):
        for l in tf.get_collection('biases'):
            tf.summary.histogram(hem.tensor_name(l), l)
            tf.summary.scalar(hem.tensor_name(l) + '/sparsity', tf.nn.zero_fraction(l))
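These helpers all rely on hem.tensor_name to turn a variable or tensor into a clean TensorBoard tag. Its implementation is not shown on this page; a minimal sketch, assuming it only strips the trailing output index (e.g. ':0') from the tensor's name, might look like this (the real hem.tensor_name may differ):

def tensor_name(x):
    # Hypothetical sketch: drop the ':0' output index so the name
    # is a valid, readable summary tag.
    return x.name.split(':')[0]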
Code Example #2
File: summaries.py  Project: huangpu1/3dgan
def summarize_activations(scope=None, summarize_montages=True):
    with tf.variable_scope('activations'):
        for l in tf.get_collection('conv_layers', scope=scope):
            layer_name = hem.tensor_name(l)
            l = tf.transpose(l, [0, 2, 3, 1])  # NCHW -> NHWC for summary ops
            tf.summary.histogram(layer_name, l)
            tf.summary.scalar(layer_name + '/sparsity', tf.nn.zero_fraction(l))
            tf.summary.scalar(layer_name + '/mean', tf.reduce_mean(l))
            if summarize_montages:
                hem.montage(tf.transpose(l[0], [2, 0, 1]), name=layer_name + '/montage')
        for l in tf.get_collection('dense_layers', scope=scope):
            # l = tf.transpose(l, [0, 2, 3, 1])
            layer_name = hem.tensor_name(l)
            tf.summary.histogram(layer_name, l)
            tf.summary.scalar(layer_name + '/sparsity', tf.nn.zero_fraction(l))
            tf.summary.scalar(layer_name + '/mean', tf.reduce_mean(l))
Code Example #3
File: pix2pix.py  Project: huangpu1/3dgan
def activation_summaries():
    layers = tf.get_collection('conv_layers')
    d_layers = [l for l in layers if 'discriminator' in l.name]
    g_layers = [l for l in layers if 'generator' in l.name]
    with tf.variable_scope('discriminator_activations'):
        for l in d_layers:
            layer_name = hem.tensor_name(l)
            # keep only the first two scope components for a shorter tag
            layer_name = '/'.join(layer_name.split('/')[0:2])
            l = tf.transpose(l, [0, 2, 3, 1])  # NCHW -> NHWC
            tf.summary.histogram(layer_name, l)
            tf.summary.scalar(layer_name + '/sparsity', tf.nn.zero_fraction(l))
            tf.summary.scalar(layer_name + '/mean', tf.reduce_mean(l))
    with tf.variable_scope('generator_activations'):
        for l in g_layers:
            layer_name = hem.tensor_name(l)
            layer_name = '/'.join(layer_name.split('/')[0:2])
            l = tf.transpose(l, [0, 2, 3, 1])  # NCHW -> NHWC
            tf.summary.histogram(layer_name, l)
            tf.summary.scalar(layer_name + '/sparsity', tf.nn.zero_fraction(l))
            tf.summary.scalar(layer_name + '/mean', tf.reduce_mean(l))
Code Example #4
File: summaries.py  Project: huangpu1/3dgan
def summarize_gradients(grads_and_vars, name=None):
    """Adds histogram summaries for input list.

    Args:
      grads_and_vars: List, a list of tuples of the form (grad, var).
      name: String, name to use for var scope.
    """
    with tf.name_scope(name, 'gradients', grads_and_vars):
        for g, v in grads_and_vars:
            n = hem.tensor_name(v) + '/gradient'
            tf.summary.histogram(n, g)
            # use a distinct tag so the scalar doesn't collide with the histogram above
            tf.summary.scalar(n + '/mean', tf.reduce_mean(g))
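A typical call site pairs this with an optimizer's compute_gradients. A minimal sketch (the optimizer choice and the loss tensor are illustrative, not from the project):

opt = tf.train.AdamOptimizer(1e-4)
grads_and_vars = opt.compute_gradients(loss)  # 'loss' assumed defined elsewhere
# drop variables with no gradient; summarize_gradients would fail on None
grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None]
summarize_gradients(grads_and_vars)
train_op = opt.apply_gradients(grads_and_vars)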
Code Example #5
File: summaries.py  Project: huangpu1/3dgan
def summarize_layers(scope, layers, montage=False):
    with tf.variable_scope(scope):
        # activations
        for l in layers:
            layer_name = hem.tensor_name(l)
            layer_name = '/'.join(layer_name.split('/')[0:2])
            l = tf.transpose(l, [0, 2, 3, 1])  # NCHW -> NHWC
            tf.summary.histogram(layer_name, l)
            tf.summary.scalar(layer_name + '/sparsity', tf.nn.zero_fraction(l))
            tf.summary.scalar(layer_name + '/mean', tf.reduce_mean(l))
            if montage:
                hem.montage(tf.transpose(l[0], [2, 0, 1]), name=layer_name + '/montage')
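A hedged usage sketch, assuming conv activations were registered in a 'conv_layers' collection during graph construction (as in the other examples on this page):

layers = tf.get_collection('conv_layers')
summarize_layers('activations', layers, montage=False)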
Code Example #6
File: cgan.py  Project: huangpu1/3dgan
def _summaries(g, x, args):
    """Add specialized summaries to the graph.

    This adds summaries that:
        - Track variability in generator samples given random noise vectors.
        - Track how much of the noise vector is used.
        - Generate examples from the real and learned distributions.

    Args:
        g: Tensor, the generator's output. i.e., the fake images.
        x: Tensor, the real (input) images.
        args: Argparse structure.

    Returns:
        None
    """
    # 1. generate multiple samples using a single image
    with tf.variable_scope(tf.get_variable_scope()):
        gpu_id = 0
        with tf.device(hem.variables_on_cpu(gpu_id)):
            with tf.name_scope('tower_{}'.format(gpu_id)) as scope:
                # print('input shape', x.shape)
                with tf.variable_scope('generator'):
                    # using first image in batch, form new one with just this image
                    x_repeated = tf.stack([x[0]] * args.batch_size)
                    x_rgb, x_depth = _split(x_repeated)
                    # then create a new path for the generator using just this dataset
                    import copy
                    args_copy = copy.copy(args)
                    args_copy.batch_norm = False
                    d = generator(x_rgb, args_copy, reuse=True)
                    # scale to [0, 1]
                    sampler_rgb, sampler_depth = _split(d)
                    sampler_rgb = hem.rescale(sampler_rgb, (-1, 1), (0, 1))
                    sampler_depth = hem.rescale(sampler_depth, (-1, 1), (0, 1))

    with tf.variable_scope('sampler'):
        hem.montage(sampler_rgb[0:args.examples], 8, 8, name='images')
        hem.montage(sampler_depth[0:args.examples], 8, 8, name='depths')

        # and track the variance of the depth predictions
        mean, var = tf.nn.moments(sampler_depth, axes=[0])
        hem.scalars(('predicted_depth_mean', tf.reduce_mean(mean)),
                    ('predicted_depth_var', tf.reduce_mean(var)))

        # and the images (temporary, for calibration/sanity check)
        x_rgb, x_depth = _split(x)
        mean, var = tf.nn.moments(x_depth, axes=[0])
        hem.scalars(('real_depth_mean', tf.reduce_mean(mean)),
                    ('real_depth_var', tf.reduce_mean(var)))

        sampler_rgb = tf.transpose(sampler_rgb, [0, 2, 3, 1])
        sampler_depth = tf.transpose(sampler_depth, [0, 2, 3, 1])

        mean, var = tf.nn.moments(tf.image.rgb_to_grayscale(sampler_rgb),
                                  axes=[0])
        hem.scalars(('image_mean', tf.reduce_mean(mean)),
                    ('image_var', tf.reduce_mean(var)))

        mean, var = tf.nn.moments(sampler_depth, axes=[0])
        hem.scalars(('depth_mean', tf.reduce_mean(mean)),
                    ('depth_var', tf.reduce_mean(var)))

    # 2. generate summaries for real and fake images
    with tf.variable_scope('examples'):
        hem.histograms(('fake', g), ('real', x))
        # rescale, split, and colorize
        x = hem.rescale(x[0:args.examples], (-1, 1), (0, 1))
        g = hem.rescale(g[0:args.examples], (-1, 1), (0, 1))
        hem.histograms(('fake_rescaled', g), ('real_rescaled', x))

        x_rgb, x_depth = _split(x)
        g_rgb, g_depth = _split(g)
        # note: these are rescaled to (0, 1)
        hem.histograms(('real_depth', x_depth), ('fake_depth', g_depth),
                       ('real_rgb', x_rgb), ('fake_rgb', g_rgb))
        # add montages
        # TODO shouldn't be fixed to 8, but to ceil(sqrt(args.examples))
        hem.montage(x_rgb, 8, 8, name='real/images')
        hem.montage(x_depth, 8, 8, name='real/depths')
        hem.montage(g_rgb, 8, 8, name='fake/images')
        hem.montage(g_depth, 8, 8, name='fake/depths')

    # 3. add additional summaries for weights and biases in e_c1 (the initial noise layer)
    # TODO don't iterate through list, but grab directly by full name
    with tf.variable_scope('noise'):
        for l in tf.get_collection('weights'):
            if 'e_c1' in hem.tensor_name(l):
                print(l, hem.tensor_name(l))
                x_rgb, x_noise = _split(l)
                hem.histograms(('rgb/weights', x_rgb),
                               ('noise/weights', x_noise))
                hem.scalars(
                    ('rgb/weights/sparsity', tf.nn.zero_fraction(x_rgb)),
                    ('noise/weights/sparsity', tf.nn.zero_fraction(x_noise)),
                    ('noise/weights/mean', tf.reduce_mean(x_noise)),
                    ('rgb/weights/mean', tf.reduce_mean(x_rgb)))
                break
Code Example #7
File: artist.py  Project: huangpu1/3dgan
def gradient_summaries(grads, name='gradients'):
    with tf.variable_scope(name):
        for g, v in grads:
            n = hem.tensor_name(v)
            tf.summary.histogram(n, g)
            # distinct tag avoids colliding with the histogram above
            tf.summary.scalar(n + '/mean', tf.reduce_mean(g))
Code Example #8
File: pix2pix.py  Project: huangpu1/3dgan
def loss_summaries():
    for l in tf.get_collection('losses'):
        tf.summary.scalar(hem.tensor_name(l), l)
        tf.summary.histogram(hem.tensor_name(l), l)
Code Example #9
File: summaries.py  Project: huangpu1/3dgan
def summarize_collection(name, scope):
    """Add a scalar summary for every tensor in a collection."""
    collection = tf.get_collection(name, scope)
    for x in collection:
        tf.summary.scalar(hem.tensor_name(x), x)
    return collection
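Note that tf.summary.scalar requires each tensor in the collection to be a scalar. A minimal usage sketch (the 'metrics' collection name and 'activations' tensor are hypothetical):

tf.add_to_collection('metrics', tf.reduce_mean(activations))  # 'activations' assumed defined
metrics = summarize_collection('metrics', scope=None)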
Code Example #10
File: summaries.py  Project: huangpu1/3dgan
def summarize_losses():
    with tf.variable_scope('loss'):
        for l in tf.get_collection('losses'):
            tf.summary.scalar(hem.tensor_name(l), l)
            tf.summary.histogram(hem.tensor_name(l), l)
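All of these helpers only create summary ops; a training script still has to merge and write them. A minimal consumption sketch using the standard TF1 summary API (the log directory and train_op are illustrative):

merged = tf.summary.merge_all()
writer = tf.summary.FileWriter('/tmp/logdir', tf.get_default_graph())
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # run one training step and fetch the merged summaries
    summary, _ = sess.run([merged, train_op])  # 'train_op' assumed defined elsewhere
    writer.add_summary(summary, global_step=0)
writer.close()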