Example #1
 def _SummarizeTensor(self, t_name):
     # Emit scalar summaries for the tracked quantization range of tensor
     # `t_name`, using each state variable's own graph name as the tag.
     min_var = self._GetQStateVar(t_name, 'min')
     max_var = self._GetQStateVar(t_name, 'max')
     # foo/q/somet_min:0 -> foo/q/somet_min
     summary_name_min = min_var.name.split(':')[0]
     summary_name_max = max_var.name.split(':')[0]
     summary_utils.scalar(summary_name_min, min_var)
     summary_utils.scalar(summary_name_max, max_var)
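The `split(':')[0]` idiom strips the output-index suffix that TensorFlow appends to variable names, so the summary tag matches the variable's graph name. A minimal standalone sketch of the same idiom, using plain `tf.summary` in place of lingvo's `summary_utils` (the variable name here is hypothetical):

 import tensorflow as tf

 # Hypothetical stand-in for a quantization state variable.
 min_var = tf.Variable(0.0, name='foo/q/somet_min', trainable=False)

 # Variable names carry an output-index suffix: 'foo/q/somet_min:0'.
 summary_name = min_var.name.split(':')[0]  # -> 'foo/q/somet_min'
 tf.summary.scalar(summary_name, min_var, step=0)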
Example #2
 def AddSummary(self, lr, optimizer, var_grad):
     p = self.params
     summary_utils.scalar('adagrad_lr', lr)
     # Adagrad keeps a per-variable 'accumulator' slot (the running sum of
     # squared gradients); log the mean of each one.
     for v, _ in var_grad.Flatten():
         slot = optimizer.get_slot(v, 'accumulator')
         assert slot is not None
         summary_utils.scalar('optimizer/adagrad_accum_%s' % v.name,
                              tf.reduce_mean(slot))
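The same pattern can be reproduced outside lingvo: the TF1-style Adagrad optimizer exposes its per-variable `accumulator` slot through `optimizer.get_slot`, and summarizing the slot's mean tracks how the squared-gradient sum grows over training. A minimal graph-mode sketch (the model and names are hypothetical):

 import tensorflow as tf

 tf.compat.v1.disable_eager_execution()

 # Hypothetical one-variable model.
 v = tf.compat.v1.get_variable('w', shape=[3],
                               initializer=tf.compat.v1.ones_initializer())
 loss = tf.reduce_sum(v * v)
 opt = tf.compat.v1.train.AdagradOptimizer(learning_rate=0.1)
 train_op = opt.minimize(loss)

 # minimize() created the 'accumulator' slot for each trainable variable;
 # log the mean of the slot, as in the example above.
 slot = opt.get_slot(v, 'accumulator')
 assert slot is not None
 tf.compat.v1.summary.scalar('optimizer/adagrad_accum_' + v.name.split(':')[0],
                             tf.reduce_mean(slot))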
Example #3
 def PostTrainingStepUpdate(self, global_step):
     # Lingvo invokes this hook after each training step; this layer only
     # logs its current value and has no state to update, hence the no_op.
     summary_utils.scalar('cap', self._Value(global_step))
     return tf.no_op()
Example #4
 def AddSummary(self, lr, optimizer, var_grad):
     # For Adam, only the learning rate is logged; no slots are summarized.
     summary_utils.scalar('adam_lr', lr)
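All four examples funnel through the same helper. In spirit, `summary_utils.scalar` is a thin, gated wrapper around TensorFlow's scalar summary op; a simplified stand-in (not the library's actual implementation, which also handles contexts such as while loops) might look like:

 import tensorflow as tf

 def scalar(name, value, step=0, enabled=True):
     # Hypothetical simplified stand-in for lingvo's summary_utils.scalar:
     # emit a scalar summary only when summaries are enabled.
     if enabled:
         tf.summary.scalar(name, value, step=step)

 # Usage, mirroring Example #4:
 writer = tf.summary.create_file_writer('/tmp/summaries')
 with writer.as_default():
     scalar('adam_lr', 0.001)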