Example #1
File: summaries.py  Project: huangpu1/3dgan
def summarize_activations():
    with tf.variable_scope('activations'):
        for l in tf.get_collection('conv_layers'):
            tf.summary.histogram(tensor_name(l), l)
            tf.summary.scalar(tensor_name(l) + '/sparsity', tf.nn.zero_fraction(l))
            montage_summary(tf.transpose(l[0], [2, 0, 1]), name=tensor_name(l) + '/montage')
        for l in tf.get_collection('dense_layers'):
            tf.summary.histogram(tensor_name(l), l)
            tf.summary.scalar(tensor_name(l) + '/sparsity', tf.nn.zero_fraction(l))
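tensor_name and montage_summary are helper functions defined elsewhere in this project. A minimal usage sketch (an assumption, not code from the repository): layers must first be registered in the 'conv_layers' or 'dense_layers' collections for the loops above to find them.

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 28, 28, 1])
conv = tf.layers.conv2d(x, filters=8, kernel_size=3, activation=tf.nn.relu)
tf.add_to_collection('conv_layers', conv)  # expose the activation to summarize_activations()

summarize_activations()
merged = tf.summary.merge_all()  # evaluate this tensor in a session to get the summaries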
Example #2
File: summaries.py  Project: huangpu1/3dgan
def summarize_weights_biases():
    with tf.variable_scope('weights'):
        for l in tf.get_collection('weights'):
            tf.summary.histogram(tensor_name(l), l)
            tf.summary.scalar(tensor_name(l) + '/sparsity', tf.nn.zero_fraction(l))
            # montage_summary(l, name=tensor_name(l) + '/montage')
    with tf.variable_scope('biases'):
        for l in tf.get_collection('biases'):
            tf.summary.histogram(tensor_name(l), l)
            tf.summary.scalar(tensor_name(l) + '/sparsity', tf.nn.zero_fraction(l))    
    def __init__(self, session, config, model, variational, data):
        cfg = config
        self.config = config
        self.session = session
        self.data = data
        self.variational = variational
        self.model = model
        self.build_elbo(n_samples=cfg['q/n_samples_stats'])
        self.q_params = fw.get_variables('variational')
        self.global_step = fw.get_or_create_global_step()
        self.build_elbo(n_samples=cfg['q/n_samples'], training=True)
        self.build_elbo_loss()
        with tf.name_scope('q_neg_elbo_grad'):
            self.q_neg_elbo_grad = tf.gradients(self.differentiable_elbo_loss,
                                                self.q_params)
        self.q_neg_elbo_grad_norm = [
            util.norm(g) for g in self.q_neg_elbo_grad
        ]
        for param, norm in zip(self.q_params, self.q_neg_elbo_grad_norm):
            tf.summary.scalar(
                'o/neg_elbo_grad_norm_' + util.tensor_name(param), norm)
        self.p_params = fw.get_variables('model')
        self.build_optimizer()
        self.t0 = time.time()
        self.t = np.inf
        self.build_proximity_statistic_summaries()
        for param in self.q_params + self.p_params:
            self.build_summaries(param)
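fw, util, and the build_* methods come from the surrounding project and are not shown in this excerpt. A plausible sketch of the two util helpers used above, inferred purely from how they are called (an assumption, not the project's actual code):

import tensorflow as tf

def tensor_name(t):
    # e.g. 'variational/layer_0/weights:0' -> 'variational/layer_0/weights'
    return t.name.split(':')[0]

def norm(t):
    # Euclidean norm of a (gradient) tensor
    return tf.sqrt(tf.reduce_sum(tf.square(t)))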
    def build_q_gradients(self):
        cfg = self.config
        q_gradients = self.q_neg_elbo_grad
        constraint_grad = [0.] * len(self.q_params)
        magnitude = self.magnitude
        if cfg['c/decay'] == 'linear':
            magnitude = tf.maximum(magnitude, 0.)
        for name, distance in self.distance.items():
            distance_grad = tf.gradients(distance, self.q_params)
            for i in range(len(self.q_params)):
                if distance_grad[i] is not None:
                    param_name = util.tensor_name(self.q_params[i])
                    update = magnitude * distance_grad[i]
                    constraint_grad[i] += update
                    q_gradients[i] += update
                    update_norm = util.norm(update)
                    fraction = tf.reduce_mean(
                        update_norm /
                        util.norm(self.q_neg_elbo_grad[i] + update))

                    fraction = tf.Print(fraction, [fraction], 'fraction: ')
                    tf.summary.scalar(
                        '_'.join(['c/fraction_grad_d', name, param_name]),
                        fraction)
                    tf.summary.scalar(
                        '_'.join(['c/norm_grad_constraint', name, param_name]),
                        update_norm)
                    tf.summary.scalar(
                        '_'.join([
                            'c/ratio_grad_constraint_grad_neg_elbo', name,
                            param_name
                        ]), update_norm / self.q_neg_elbo_grad_norm[i])
        self.q_gradients = q_gradients
        self.q_constraint_grad = constraint_grad
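build_optimizer() is not part of this excerpt; one plausible way it could apply the combined ELBO-plus-constraint gradients stored in self.q_gradients (a sketch under that assumption, with a hypothetical learning-rate value):

    def build_optimizer(self):
        # assumed sketch: apply self.q_gradients to the variational parameters
        optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)  # hypothetical setting
        self.train_op = optimizer.apply_gradients(
            zip(self.q_gradients, self.q_params), global_step=self.global_step)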
    def build_statistic(self):
        statistic = {}
        q_variables = fw.get_variables('variational')
        q_weights = [var for var in q_variables if 'weights' in var.name]
        for weight_matrix in q_weights:
            identity = tf.matmul(weight_matrix,
                                 weight_matrix,
                                 transpose_b=True)
            statistic[util.tensor_name(identity) + '_wwT'] = identity
        self._statistic = statistic
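Each entry of the statistic dict is W·Wᵀ for a variational weight matrix W, which equals the identity matrix exactly when the rows of W are orthonormal. An illustrative follow-up (an assumption, not project code) that summarizes the deviation from the identity for each entry of the dict built above:

for name, wwT in statistic.items():
    n = tf.shape(wwT)[0]
    deviation = tf.reduce_mean(tf.abs(wwT - tf.eye(n)))
    tf.summary.scalar(name + '/identity_deviation', deviation)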
Example #6
File: summaries.py  Project: huangpu1/3dgan
def summarize_gradients(grads_and_vars, name=None):
    """Adds histogram summaries for input list.

    Args:
      grads_and_vars: List, a list of tuples in form (grad, var).e
      name: String, name to use for var scope.
    """
    with tf.name_scope(name, 'gradients', grads_and_vars):
        for g, v in grads_and_vars:
            tf.summary.histogram(tensor_name(v) + '/gradient', g)
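A typical usage sketch (an assumption, not code from the repository): pass the optimizer's (gradient, variable) pairs through summarize_gradients before applying them.

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 10])
y = tf.placeholder(tf.float32, [None, 1])
pred = tf.layers.dense(x, 1)
loss = tf.reduce_mean(tf.square(pred - y))

optimizer = tf.train.AdamOptimizer(1e-4)
grads_and_vars = optimizer.compute_gradients(loss)
summarize_gradients(grads_and_vars)  # one histogram per variable's gradient
train_op = optimizer.apply_gradients(grads_and_vars)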
Example #7
File: summaries.py  Project: huangpu1/3dgan
def summarize_losses():
    with tf.variable_scope('loss'):
        for l in tf.get_collection('losses'):
            tf.summary.scalar(tensor_name(l), l)
            tf.summary.histogram(tensor_name(l), l)
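Losses created through tf.losses.* register themselves in the 'losses' collection (tf.GraphKeys.LOSSES), which is what the loop above iterates over. A short usage sketch under that assumption:

import tensorflow as tf

labels = tf.placeholder(tf.float32, [None, 1])
inputs = tf.placeholder(tf.float32, [None, 4])
predictions = tf.layers.dense(inputs, 1)
tf.losses.mean_squared_error(labels, predictions)  # lands in the 'losses' collection

summarize_losses()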
    def build_summaries(self, param):
        base_name = util.tensor_name(param)
        tf.summary.scalar(base_name + '/mean', tf.reduce_mean(param))
        tf.summary.scalar(base_name + '/max', tf.reduce_max(param))
        tf.summary.scalar(base_name + '/min', tf.reduce_min(param))
        tf.summary.histogram(base_name, param)
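All of the functions above only create summary ops; a minimal sketch (with a hypothetical log directory) of evaluating and writing them:

merged = tf.summary.merge_all()
writer = tf.summary.FileWriter('/tmp/logs', graph=tf.get_default_graph())  # hypothetical path
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    summary_str = sess.run(merged)  # add feed_dict=... if the graph has placeholders
    writer.add_summary(summary_str, global_step=0)
writer.close()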