def write_weights(self, mode: str, models: Iterable[Model], step: int, visualize: bool) -> None:
    # Similar to TF implementation, but multiple models
    with self.tf_summary_writers[mode].as_default(), summary_ops_v2.always_record_summaries():
        for model in models:
            for layer in model.layers:
                for weight in layer.weights:
                    weight_name = weight.name.replace(':', '_')
                    weight_name = "{}_{}".format(model.model_name, weight_name)
                    with tfops.init_scope():
                        weight = backend.get_value(weight)
                    summary_ops_v2.histogram(weight_name, weight, step=step)
                    if visualize:
                        weight = self._weight_to_image(weight=weight, kernel_channels_last=True)
                        if weight is not None:
                            summary_ops_v2.image(weight_name, weight, step=step,
                                                 max_images=weight.shape[0])
def testEagerMemory(self):
    training_util.get_or_create_global_step()
    logdir = self.get_temp_dir()
    with summary_ops.create_file_writer(
            logdir, max_queue=0, name='t0').as_default(), summary_ops.always_record_summaries():
        summary_ops.generic('tensor', 1, '')
        summary_ops.scalar('scalar', 2.0)
        summary_ops.histogram('histogram', [1.0])
        summary_ops.image('image', [[[[1.0]]]])
        summary_ops.audio('audio', [[1.0]], 1.0, 1)
def _log_weights(self, epoch): """Logs the weights of the Model to TensorBoard.""" with context.eager_mode(), \ self.writer.as_default(), \ summary_ops_v2.always_record_summaries(): for layer in self.model.model.layers: for weight in layer.weights: weight_name = weight.name.replace(':', '_') with ops.init_scope(): weight = K.get_value(weight) summary_ops_v2.histogram(weight_name, weight, step=epoch) if self.write_images: self._log_weight_as_image(weight, weight_name, epoch) self.writer.flush()
def _log_weights(self, epoch):
    with self._train_writer.as_default():
        with summary_ops_v2.always_record_summaries():
            for layer in self.model.layers:
                for weight in layer.weights:
                    if hasattr(weight, "name"):
                        weight_name = weight.name.replace(":", "_")
                        summary_ops_v2.histogram(weight_name, weight, step=epoch)
                        if self.write_images:
                            self._log_weight_as_image(weight, weight_name, epoch)
            self._train_writer.flush()
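# A minimal sketch (not from the snippets above) of the same weight-histogram
# logging through the public TF 2.x `tf.summary` API; `model`, `log_dir`, and
# `epoch` are placeholder names.
import tensorflow as tf

def log_weight_histograms(model, log_dir, epoch):
    writer = tf.summary.create_file_writer(log_dir)
    with writer.as_default():
        for layer in model.layers:
            for weight in layer.weights:
                # TensorBoard tags may not contain ':' (e.g. 'dense/kernel:0').
                tf.summary.histogram(weight.name.replace(":", "_"), weight, step=epoch)
    writer.flush()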
def testSummaryOps(self):
    training_util.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    with summary_ops.create_file_writer(
            logdir, max_queue=0, name='t0').as_default(), summary_ops.always_record_summaries():
        summary_ops.generic('tensor', 1, '')
        summary_ops.scalar('scalar', 2.0)
        summary_ops.histogram('histogram', [1.0])
        summary_ops.image('image', [[[[1.0]]]])
        summary_ops.audio('audio', [[1.0]], 1.0, 1)
    # The working condition of the ops is tested in the C++ test so we just
    # test here that we're calling them correctly.
    self.assertTrue(gfile.Exists(logdir))
def testSummaryOps(self):
    logdir = self.get_temp_dir()
    writer = summary_ops.create_file_writer(logdir, max_queue=0)
    with writer.as_default(), summary_ops.always_record_summaries():
        summary_ops.generic('tensor', 1, step=1)
        summary_ops.scalar('scalar', 2.0, step=1)
        summary_ops.histogram('histogram', [1.0], step=1)
        summary_ops.image('image', [[[[1.0]]]], step=1)
        summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1)
    with self.cached_session() as sess:
        sess.run(summary_ops.summary_writer_initializer_op())
        sess.run(summary_ops.all_summary_ops())
    # The working condition of the ops is tested in the C++ test so we just
    # test here that we're calling them correctly.
    self.assertTrue(gfile.Exists(logdir))
def _log_activations(self, epoch):
    """Logs the outputs of the Model to TensorBoard."""
    writer = self._get_writer(self._train_run_name)
    with context.eager_mode(), writer.as_default(), \
            summary_ops_v2.always_record_summaries():
        activations = self.activation_model.predict_generator(self.eval_seq)
        if not isinstance(activations, list):
            activations = [activations]
        for i, values in enumerate(activations):
            name = self.activations[i]
            summary_ops_v2.histogram(name, values, step=epoch)
            summary_ops_v2.scalar(name + "_mean", np.mean(values), step=epoch)
        writer.flush()
def _log_weights(self, epoch):
    # Similar to TF implementation, but multiple models
    writer = self.summary_writers['train']
    with writer.as_default(), summary_ops_v2.always_record_summaries():
        for model_name, model in self.network.model.items():
            for layer in model.layers:
                for weight in layer.weights:
                    weight_name = weight.name.replace(':', '_')
                    weight_name = "{}_{}".format(model_name, weight_name)
                    with tfops.init_scope():
                        weight = backend.get_value(weight)
                    summary_ops_v2.histogram(weight_name, weight, step=epoch)
                    if True in self.write_images:
                        self._log_weight_as_image(weight, weight_name, epoch)
        writer.flush()
def define_ops():
    result = []
    # TF 2.0 summary ops
    result.append(summary_ops.write('write', 1, step=0))
    result.append(summary_ops.write_raw_pb(b'', step=0, name='raw_pb'))
    # TF 1.x tf.contrib.summary ops
    result.append(summary_ops.generic('tensor', 1, step=1))
    result.append(summary_ops.scalar('scalar', 2.0, step=1))
    result.append(summary_ops.histogram('histogram', [1.0], step=1))
    result.append(summary_ops.image('image', [[[[1.0]]]], step=1))
    result.append(summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1))
    return result
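# A minimal sketch, assuming the public TF 2.x `tf.summary` API, of writing the
# equivalent summaries under an explicit writer and step; the log directory is
# a placeholder.
import tensorflow as tf

writer = tf.summary.create_file_writer("/tmp/summaries")
with writer.as_default(), tf.summary.record_if(True):
    tf.summary.write("write", 1, step=0)
    tf.summary.scalar("scalar", 2.0, step=1)
    tf.summary.histogram("histogram", [1.0], step=1)
    tf.summary.image("image", [[[[1.0]]]], step=1)                 # shape [k, h, w, c]
    tf.summary.audio("audio", [[[1.0]]], sample_rate=1.0, step=1)  # shape [k, t, c]
writer.flush()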
def train_step(self, positions, pi, z, learning_rate=0):
    # Record the operations used to compute the loss, so that the gradient
    # of the loss with respect to the variables can be computed.
    # metrics = 0
    with tf.GradientTape() as tape:
        policy_head, value_head = self.model(positions, training=True)
        loss = self.compute_loss(pi, z, policy_head, value_head)
        # self.ComputeMetrics(y, logits)
        metrics = self.compute_metrics(pi, policy_head)
    grads = tape.gradient(loss, self.model.trainable_variables)

    # grads = self.average_gradients(tower_grads)
    # grads = self.optimizer.compute_gradients(self.loss)

    # defensive step 2 to clip norm
    # grads0_lst = tf.map_fn(lambda x: x[0], grads)  # [g for g, _ in grads]
    clipped_grads, self.norm = tf.clip_by_global_norm(grads, self.global_norm)

    # defensive step 3 check NaN
    # See: https://stackoverflow.com/questions/40701712/how-to-check-nan-in-gradients-in-tensorflow-when-updating
    grad_check = [
        tf.debugging.check_numerics(g, message='NaN Found!')
        for g in clipped_grads
    ]
    with tf.control_dependencies(grad_check):
        self.optimizer.apply_gradients(
            zip(clipped_grads, self.model.trainable_variables),  # [v for _, v in grads]
            global_step=self.global_step,
            name='train_step')

    if self.is_logging:
        for grad, var in zip(grads, self.model.trainable_variables):
            if grad is not None:
                summary_ops_v2.histogram(var.name + '/gradients', grad)
        for var in self.model.trainable_variables:
            summary_ops_v2.histogram(var.name, var)

    return metrics, loss, self.global_step
def call(self, x):
    summary_ops_v2.histogram('custom_histogram_summary', x)
    return x
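# A minimal sketch, assuming TF 2.x, of the same pass-through-layer idea with
# the public API. The layer name is illustrative; it relies on a default writer
# and step being set (e.g. by the TensorBoard callback during `fit`).
import tensorflow as tf

class HistogramLayer(tf.keras.layers.Layer):
    def call(self, x):
        tf.summary.histogram("custom_histogram_summary", x)
        return x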
def _log_activations(self, step):
    writer = self._get_writer(self._train_run_name)
    with context.eager_mode(), writer.as_default(), \
            summary_ops_v2.always_record_summaries():
        # ==================================================================
        activation_values = self.activation_model.predict_generator(self.eval_seq)
        terminal = (self.eval_seq.isterminal == 1)
        midway = np.logical_not(terminal)
        # ==================================================================
        kappa = activation_values[0]
        summary_ops_v2.histogram("kappa_midway", kappa[midway], step=step)
        summary_ops_v2.scalar("kappa_midway_mean", np.mean(kappa[midway]), step=step)
        # ------------------------------------------------------------------
        summary_ops_v2.histogram("kappa_terminal", kappa[terminal], step=step)
        summary_ops_v2.scalar("kappa_terminal_mean", np.mean(kappa[terminal]), step=step)
        # ==================================================================
        mu = activation_values[1]
        neg_dot_prod_midway = -np.sum(mu[midway] * self.eval_seq.outgoing[midway], axis=1)
        summary_ops_v2.histogram("neg_dot_prod_midway", neg_dot_prod_midway, step=step)
        summary_ops_v2.scalar("neg_dot_prod_midway_mean", np.mean(neg_dot_prod_midway), step=step)
        # ------------------------------------------------------------------
        neg_dot_prod_terminal = -np.sum(mu[terminal] * self.eval_seq.outgoing[terminal], axis=1)
        summary_ops_v2.histogram("neg_dot_prod_terminal", neg_dot_prod_terminal, step=step)
        summary_ops_v2.scalar("neg_dot_prod_terminal_mean", np.mean(neg_dot_prod_terminal), step=step)
        # ==================================================================
        isterminal = activation_values[2]
        ave_prec = average_precision_score(self.eval_seq.isterminal, isterminal)
        summary_ops_v2.scalar("average_precision", ave_prec, step=step)
        # ------------------------------------------------------------------
        precision, recall, thresh = precision_recall_curve(
            y_true=self.eval_seq.isterminal,
            probas_pred=np.round(isterminal / 0.05) * 0.05)
        # ------------------------------------------------------------------
        fig, ax = plt.subplots()
        ax.plot(recall, precision, "-o")
        frac = np.mean(self.eval_seq.isterminal)
        ax.plot([0, 1], [frac, frac])
        ax.set_xlabel("Recall")
        ax.set_ylabel("Precision")
        fig.canvas.draw()
        # ------------------------------------------------------------------
        plot = np.array(fig.canvas.renderer.buffer_rgba())
        plot = np.expand_dims(plot, 0)
        summary_ops_v2.image("Precision-Recall", plot, step=step)
        # ==================================================================
        plt.close()
        writer.flush()
def eval_one_epoch(model, linear_model, test_features, summary_directory,
                   global_step, config, epoch, training):
    metr_reconstruction_loss = tf.metrics.Mean("reconstruction_loss")
    metr_auto_reconstruction_loss = tf.metrics.Mean("auto_reconstruction_loss")
    metr_exp_reconstruction_loss = tf.metrics.Mean("exp_reconstruction_loss")
    metr_kl_loss = tf.metrics.Mean("kl_loss")
    metr_linear_loss = tf.metrics.Mean("linear_loss")
    metr_pen = tf.metrics.Mean("penalty")
    reconstruction_losses = []
    linear_losses = []
    images = []
    images2 = []
    for _features in test_features:
        # get predictions and latents
        linear_predictions = get_linear_predictions(linear_model, _features)
        anno = get_annotations(_features)
        latents = get_latents(config, model, _features, training)
        (reconstruction_loss, mean_kld_z, reconstruction,
         penalty) = _loss_fn_ae(model, linear_predictions, _features["inputs"],
                                anno, config, training)
        # mean encoding
        out_det = model.autoencode(_features["inputs"], training=training)["output"]
        autoencode_rec_loss = tf.reduce_sum(
            tf.square(out_det - _features["inputs"]), axis=[1, 2, 3])
        squared_error = _loss_fn_lm(latents, linear_model, anno)
        reconstruction_losses.append(reconstruction_loss.numpy())
        linear_losses.append(squared_error.numpy())
        # update mean-metric
        metr_auto_reconstruction_loss(autoencode_rec_loss)
        metr_reconstruction_loss(reconstruction_loss)
        metr_linear_loss(squared_error)
        metr_kl_loss(mean_kld_z)
        metr_pen(penalty)
        se_x_xhatexp = get_sq_er_x_xhatexp(config, model, linear_model, _features, training)
        exp_reconstruction = get_xhatexp(config, model, linear_model, _features, training)
        metr_exp_reconstruction_loss(se_x_xhatexp)
        # append input images, and only keep 4 - otherwise we'd get OOM
        images.append(tf.concat([_features["inputs"][0:1, :, :, :],
                                 reconstruction[0:1, :, :, :],
                                 exp_reconstruction[0:1, :, :, :]], axis=2))
        # append input images, and only keep 4 - otherwise we'd get OOM
        images2.append(tf.concat([_features["inputs"][0:1, :, :, :],
                                  reconstruction[0:1, :, :, :]], axis=2))
    shuffle(images)
    images = images[-4:]
    shuffle(images2)
    images2 = images2[-4:]
    writer = tf.summary.create_file_writer(summary_directory)
    with writer.as_default(), tf.summary.record_if(True):
        scalar("reconstruction_loss", metr_reconstruction_loss.result(), step=global_step)
        scalar("exp_reconstruction_loss", metr_exp_reconstruction_loss.result(), step=global_step)
        scalar("auto_reconstruction_loss", metr_auto_reconstruction_loss.result(), step=global_step)
        scalar("linear_loss", metr_linear_loss.result(), step=global_step)
        scalar("kl_loss", metr_kl_loss.result(), step=global_step)
        scalar("penalty", metr_pen.result(), step=global_step)
        histogram("distribution_reconstruction_loss",
                  np.concatenate(reconstruction_losses, axis=0).flatten(),
                  step=global_step)
        histogram("distribution_linear_loss",
                  np.concatenate(linear_losses, axis=0).flatten(),
                  step=global_step)
        image("images", tf.concat(images, axis=0), step=global_step)
        image("images2", tf.concat(images2, axis=0), step=global_step)
    out_dict = {"reconstruction_loss": metr_reconstruction_loss.result(),
                "exp_reconstruction_loss": metr_exp_reconstruction_loss.result(),
                "auto_reconstruction_loss": metr_auto_reconstruction_loss.result(),
                "kl_loss": metr_kl_loss.result(),
                "linear_loss": metr_linear_loss.result(),
                "penalty": metr_pen.result()}
    return out_dict