def on_epoch_end(self, epoch, logs=None):
  logs = logs or {}

  if self.validation_data and self.histogram_freq:
    if epoch % self.histogram_freq == 0:
      # TODO(fchollet): implement batched calls to sess.run
      # (current call will likely go OOM on GPU)
      if self.model.uses_learning_phase:
        cut_v_data = len(self.model.inputs)
        val_data = self.validation_data[:cut_v_data] + [0]
        tensors = self.model.inputs + [K.learning_phase()]
      else:
        val_data = self.validation_data
        tensors = self.model.inputs
      feed_dict = dict(zip(tensors, val_data))
      result = self.sess.run([self.merged], feed_dict=feed_dict)
      summary_str = result[0]
      self.writer.add_summary(summary_str, epoch)

  if self.embeddings_freq and self.embeddings_logs:
    if epoch % self.embeddings_freq == 0:
      for log in self.embeddings_logs:
        self.saver.save(self.sess, log, epoch)

  for name, value in logs.items():
    if name in ['batch', 'size']:
      continue
    summary = tf_summary.Summary()
    summary_value = summary.value.add()
    summary_value.simple_value = value.item()
    summary_value.tag = name
    self.writer.add_summary(summary, epoch)
  self.writer.flush()
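# A minimal standalone sketch (not part of the callback above) of the same
# scalar-summary pattern used in the final logs loop, assuming TF 1.x-style
# protobuf summaries via tf.compat.v1. The log directory and metric values are
# illustrative assumptions, not taken from the original code.
import tensorflow as tf


def write_scalar_summaries_sketch():
  writer = tf.compat.v1.summary.FileWriter('/tmp/example_scalar_logs')
  logs = {'loss': 0.25, 'val_loss': 0.31}  # hypothetical metric values
  for epoch in range(3):
    for name, value in logs.items():
      summary = tf.compat.v1.Summary()
      summary_value = summary.value.add()
      summary_value.simple_value = value
      summary_value.tag = name
      writer.add_summary(summary, epoch)
    writer.flush()
  writer.close()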
def _write_custom_summaries(self, step, logs=None):
  """Writes metrics out as custom scalar summaries.

  Arguments:
    step: the global step to use for TensorBoard.
    logs: dict. Keys are scalar summary names, values are NumPy scalars.
  """
  logs = logs or {}
  if context.executing_eagerly():
    # use v2 summary ops
    with self.writer.as_default(), summary_ops_v2.always_record_summaries():
      for name, value in logs.items():
        if isinstance(value, np.ndarray):
          value = value.item()
        summary_ops_v2.scalar(name, value, step=step)
  else:
    # use FileWriter from v1 summary
    for name, value in logs.items():
      if isinstance(value, np.ndarray):
        value = value.item()
      summary = tf_summary.Summary()
      summary_value = summary.value.add()
      summary_value.simple_value = value
      summary_value.tag = name
      self.writer.add_summary(summary, step)
  self.writer.flush()
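# A minimal sketch of the eager branch above using the public TF 2.x summary
# API instead of the internal summary_ops_v2 module. The log directory, metric
# names, and values are assumptions for illustration only.
import numpy as np
import tensorflow as tf


def write_scalar_summaries_v2_sketch():
  writer = tf.summary.create_file_writer('/tmp/example_scalar_logs_v2')
  logs = {'loss': np.array(0.25), 'acc': np.array(0.9)}  # hypothetical values
  with writer.as_default():
    for step in range(3):
      for name, value in logs.items():
        if isinstance(value, np.ndarray):
          value = value.item()
        tf.summary.scalar(name, value, step=step)
    writer.flush()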
def _save_custom_img(self, name, value):
  summary = tf_summary.Summary()
  image = tf_summary.Summary.Image()
  image.height = value['height']
  image.width = value['width']
  image.colorspace = 3  # code for 'RGB'
  image.encoded_image_string = value['enc_string']
  summary.value.add(tag=name, image=image)
  self.writer.add_summary(summary, self._current_step())
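# _save_custom_img above expects value['enc_string'] to already hold encoded
# image bytes. A minimal sketch, assuming PNG encoding via Pillow, of how such
# a payload could be assembled from an RGB NumPy array; the helper name and the
# dict keys mirror the usage above but are otherwise hypothetical.
import io

import numpy as np
from PIL import Image


def encode_image_value(rgb_array):
  """Packs an HxWx3 uint8 array into the dict layout consumed above."""
  buf = io.BytesIO()
  Image.fromarray(rgb_array).save(buf, format='PNG')
  return {
      'height': rgb_array.shape[0],
      'width': rgb_array.shape[1],
      'enc_string': buf.getvalue(),
  }


# Example: value = encode_image_value(np.zeros((32, 32, 3), dtype=np.uint8))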
def on_epoch_end(self, epoch, logs=None):
  logs = logs or {}

  if not self.validation_data and self.histogram_freq:
    raise ValueError('If printing histograms, validation_data must be '
                     'provided, and cannot be a generator.')
  if self.validation_data and self.histogram_freq:
    if epoch % self.histogram_freq == 0:

      val_data = self.validation_data
      tensors = (
          self.model.inputs + self.model.targets + self.model.sample_weights)

      if self.model.uses_learning_phase:
        tensors += [K.learning_phase()]

      assert len(val_data) == len(tensors)
      val_size = val_data[0].shape[0]
      i = 0
      while i < val_size:
        step = min(self.batch_size, val_size - i)
        batch_val = []
        batch_val.append(val_data[0][i:i + step]
                         if val_data[0] is not None else None)
        batch_val.append(val_data[1][i:i + step]
                         if val_data[1] is not None else None)
        batch_val.append(val_data[2][i:i + step]
                         if val_data[2] is not None else None)
        if self.model.uses_learning_phase:
          # do not slice the learning phase
          batch_val = [x[i:i + step] if x is not None else None
                       for x in val_data[:-1]]
          batch_val.append(val_data[-1])
        else:
          batch_val = [x[i:i + step] if x is not None else None
                       for x in val_data]
        feed_dict = {}
        for key, val in zip(tensors, batch_val):
          if val is not None:
            feed_dict[key] = val
        result = self.sess.run([self.merged], feed_dict=feed_dict)
        summary_str = result[0]
        self.writer.add_summary(summary_str, epoch)
        i += self.batch_size

  for name, value in logs.items():
    if name in ['batch', 'size']:
      continue
    summary = tf_summary.Summary()
    summary_value = summary.value.add()
    summary_value.simple_value = value.item()
    summary_value.tag = name
    self.writer.add_summary(summary, epoch)
  self.writer.flush()
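# A hedged usage sketch for the batched histogram pass above: it only runs when
# validation_data is a plain array tuple (not a generator). The model shapes,
# the log directory, and the assumption that this tf.keras version's
# TensorBoard callback accepts histogram_freq are illustrative.
import numpy as np
import tensorflow as tf


def fit_with_histograms_sketch():
  model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(8,))])
  model.compile(optimizer='sgd', loss='mse')
  x, y = np.random.rand(64, 8), np.random.rand(64, 1)
  x_val, y_val = np.random.rand(16, 8), np.random.rand(16, 1)
  tb = tf.keras.callbacks.TensorBoard(log_dir='/tmp/example_tb_logs',
                                      histogram_freq=1)
  model.fit(x, y, validation_data=(x_val, y_val), epochs=2, callbacks=[tb])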
def _write_custom_summaries(self, step, logs=None):
  """Writes metrics out as custom scalar summaries.

  Arguments:
    step: the global step to use for TensorBoard.
    logs: dict. Keys are scalar summary names, values are NumPy scalars.
  """
  logs = logs or {}
  for name, value in logs.items():
    summary = tf_summary.Summary()
    summary_value = summary.value.add()
    summary_value.simple_value = value.item()
    summary_value.tag = name
    self.writer.add_summary(summary, step)
  self.writer.flush()
def on_epoch_end(self, epoch, logs=None):
  logs = logs or {}

  if self.validation_data and self.histogram_freq:
    if epoch % self.histogram_freq == 0:

      val_data = self.validation_data
      tensors = (
          self.model.inputs + self.model.targets + self.model.sample_weights)

      if self.model.uses_learning_phase:
        tensors += [K.learning_phase()]

      assert len(val_data) == len(tensors)
      val_size = val_data[0].shape[0]
      i = 0
      while i < val_size:
        step = min(self.batch_size, val_size - i)
        batch_val = []
        batch_val.append(val_data[0][i:i + step])
        batch_val.append(val_data[1][i:i + step])
        batch_val.append(val_data[2][i:i + step])
        if self.model.uses_learning_phase:
          batch_val.append(val_data[3])
        feed_dict = dict(zip(tensors, batch_val))
        result = self.sess.run([self.merged], feed_dict=feed_dict)
        summary_str = result[0]
        self.writer.add_summary(summary_str, epoch)
        i += self.batch_size

  if self.embeddings_freq and self.embeddings_ckpt_path:
    if epoch % self.embeddings_freq == 0:
      self.saver.save(self.sess, self.embeddings_ckpt_path, epoch)

  for name, value in logs.items():
    if name in ['batch', 'size']:
      continue
    summary = tf_summary.Summary()
    summary_value = summary.value.add()
    summary_value.simple_value = value.item()
    summary_value.tag = name
    self.writer.add_summary(summary, epoch)
  self.writer.flush()
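# A hedged wiring sketch for the embeddings branch above, assuming an older
# Keras/tf.keras release whose TensorBoard callback still accepts
# embeddings_freq and embeddings_layer_names (these arguments were removed in
# later versions). The layer name and log directory are hypothetical.
import tensorflow as tf

embeddings_tb = tf.keras.callbacks.TensorBoard(
    log_dir='/tmp/example_tb_logs',
    embeddings_freq=1,                     # save an embeddings checkpoint every epoch
    embeddings_layer_names=['embedding'],  # hypothetical Embedding layer name
)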
def on_epoch_end(self, epoch, logs=None):
  """Checks if summary ops should run next epoch, logs scalar summaries."""
  logs = logs or {}

  if self.histogram_freq and self.histogram_freq > 1:
    if self.merged in self.model.test_function.fetches:
      self.model.test_function.fetches.remove(self.merged)
    if self.merged in self.model.test_function.fetch_callbacks:
      self.model.test_function.fetch_callbacks.pop(self.merged)

  for name, value in logs.items():
    if name in ['batch', 'size']:
      continue
    summary = tf_summary.Summary()
    summary_value = summary.value.add()
    summary_value.simple_value = value.item()
    summary_value.tag = name
    self.writer.add_summary(summary, epoch)
  self.writer.flush()
def _save_scalar(self, name, value):
  summary = tf_summary.Summary()
  summary_value = summary.value.add()
  summary_value.simple_value = (
      value if isinstance(value, float) else value.item())
  summary_value.tag = name
  self.writer.add_summary(summary, self._current_step())
def on_epoch_end(self, epoch, logs=None):
  """Checks if summary ops should run next epoch, logs scalar summaries."""
  logs = logs or {}

  # pop the histogram summary op after each epoch
  if self.histogram_freq:
    if self.merged in self.model.test_function.fetches:
      self.model.test_function.fetches.remove(self.merged)
    if self.merged in self.model.test_function.fetch_callbacks:
      self.model.test_function.fetch_callbacks.pop(self.merged)

  if self.embeddings_data is None and self.embeddings_freq:
    raise ValueError('To visualize embeddings, embeddings_data must '
                     'be provided.')

  if self.embeddings_freq and self.embeddings_data is not None:
    if epoch % self.embeddings_freq == 0:
      # We need a second forward-pass here because we're passing
      # the `embeddings_data` explicitly. This design allows passing
      # arbitrary data as `embeddings_data` and results from the fact
      # that we need to know the size of the `tf.Variable`s which
      # hold the embeddings in `set_model`. At this point, however,
      # the `validation_data` is not yet set.

      embeddings_data = self.embeddings_data
      n_samples = embeddings_data[0].shape[0]

      i = 0
      while i < n_samples:
        step = min(self.batch_size, n_samples - i)
        batch = slice(i, i + step)

        if isinstance(self.model.input, list):
          feed_dict = {
              model_input: embeddings_data[idx][batch]
              for idx, model_input in enumerate(self.model.input)
          }
        else:
          feed_dict = {self.model.input: embeddings_data[0][batch]}

        feed_dict.update({self.batch_id: i, self.step: step})

        if self.model.uses_learning_phase:
          feed_dict[K.learning_phase()] = False

        self.sess.run(self.assign_embeddings, feed_dict=feed_dict)
        self.saver.save(self.sess,
                        os.path.join(self.log_dir, 'keras_embedding.ckpt'),
                        epoch)

        i += self.batch_size

  for name, value in logs.items():
    if name in ['batch', 'size']:
      continue
    summary = tf_summary.Summary()
    summary_value = summary.value.add()
    summary_value.simple_value = value.item()
    summary_value.tag = name
    self.writer.add_summary(summary, epoch)
  self.writer.flush()
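# A hedged sketch of the core mechanism behind self.assign_embeddings above:
# under the assumption that set_model builds ops that write each batch of layer
# outputs into a slice of a large tf.Variable, the pattern looks roughly like
# this (TF 1.x graph mode; batch_id, step, and embedding_var stand in for the
# real attributes and are hypothetical names).
import tensorflow.compat.v1 as tf1


def build_slice_assign_sketch(n_samples, embedding_size):
  tf1.disable_eager_execution()
  batch_id = tf1.placeholder(tf1.int32, name='batch_id')
  step = tf1.placeholder(tf1.int32, name='step')
  batch_output = tf1.placeholder(tf1.float32, shape=(None, embedding_size))
  embedding_var = tf1.Variable(
      tf1.zeros((n_samples, embedding_size)), name='layer_embedding')
  # Write the current batch into rows [batch_id, batch_id + step) of the variable.
  assign_op = tf1.assign(embedding_var[batch_id:batch_id + step], batch_output)
  return assign_op, (batch_id, step, batch_output), embedding_var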