def _build_session_groups(self):
    """Returns a list of SessionGroup protobuffers from the summary data.

    Returns:
      A list of api_pb2.SessionGroup protobuffers, one per distinct group
      name encountered, each with its sessions sorted by name and its
      aggregated metrics computed.
    """
    # Algorithm: We keep a dict 'groups_by_name' mapping a SessionGroup name
    # (str) to a SessionGroup protobuffer. We traverse the runs associated
    # with the plugin--each representing a single session. We form a Session
    # protobuffer from each run and add it to the relevant SessionGroup
    # object in the 'groups_by_name' dict. We create the SessionGroup
    # object, if this is the first session of that group we encounter.
    groups_by_name = {}
    run_to_tag_to_content = self._context.hparams_metadata(
        self._experiment_id,
        run_tag_filter=provider.RunTagFilter(
            tags=[
                metadata.SESSION_START_INFO_TAG,
                metadata.SESSION_END_INFO_TAG,
            ]
        ),
    )
    # The TensorBoard runs with session start info are the "sessions",
    # which are not necessarily the runs that actually contain metrics
    # (may be in subdirectories).
    session_names = [
        run
        for (run, tags) in run_to_tag_to_content.items()
        if metadata.SESSION_START_INFO_TAG in tags
    ]
    # Collect every (run, tag) pair that could hold a metric for any
    # session, so all metric values can be fetched in one provider call.
    metric_runs = set()
    metric_tags = set()
    for session_name in session_names:
        for metric in self._experiment.metric_infos:
            (run, tag) = metrics.run_tag_from_session_and_metric(
                session_name, metric.name
            )
            metric_runs.add(run)
            metric_tags.add(tag)
    all_metric_evals = self._context.read_last_scalars(
        self._experiment_id,
        run_tag_filter=provider.RunTagFilter(
            runs=metric_runs, tags=metric_tags
        ),
    )
    for (session_name, tag_to_content) in run_to_tag_to_content.items():
        # Only runs with start info are sessions; skip end-info-only runs.
        if metadata.SESSION_START_INFO_TAG not in tag_to_content:
            continue
        start_info = metadata.parse_session_start_info_plugin_data(
            tag_to_content[metadata.SESSION_START_INFO_TAG]
        )
        end_info = None
        if metadata.SESSION_END_INFO_TAG in tag_to_content:
            end_info = metadata.parse_session_end_info_plugin_data(
                tag_to_content[metadata.SESSION_END_INFO_TAG]
            )
        session = self._build_session(
            session_name, start_info, end_info, all_metric_evals
        )
        if session.status in self._request.allowed_statuses:
            self._add_session(session, start_info, groups_by_name)
    # Materialize a real list: the docstring promises a list, and a
    # dict-values view would not support indexing by callers.
    groups = list(groups_by_name.values())
    # Compute the session group's aggregated metrics for each group.
    for group in groups:
        # We sort the sessions in a group so that the order is
        # deterministic.
        group.sessions.sort(key=operator.attrgetter("name"))
        self._aggregate_metrics(group)
    return groups
def run(self):
    """Executes the request.

    Returns:
      An array of tuples representing the metric evaluations--each of the
      form (<wall time in secs>, <training step>, <metric value>).
    """
    # Translate the (session, metric) pair into the concrete run/tag the
    # scalars plugin knows about, then delegate to it for the data.
    run_name, tag_name = metrics.run_tag_from_session_and_metric(
        self._request.session_name, self._request.metric_name
    )
    body, _ = self._scalars_plugin_instance.scalars_impl(
        tag_name,
        run_name,
        self._experiment,
        scalars_plugin.OutputFormat.JSON,
    )
    return body
def _build_session_metric_values(self, session_name, all_metric_evals):
    """Builds the session metric values.

    Args:
      session_name: str. Name of the session whose metrics to collect.
      all_metric_evals: Nested mapping of run -> tag -> last scalar datum.

    Returns:
      A list of api_pb2.MetricValue instances, one per metric found for
      the session.
    """
    values = []
    for info in self._experiment.metric_infos:
        run, tag = metrics.run_tag_from_session_and_metric(
            session_name, info.name
        )
        datum = all_metric_evals.get(run, {}).get(tag)
        if not datum:
            # It's ok if we don't find the metric in the session.
            # We skip it here. For filtering and sorting purposes its
            # value is None.
            continue
        values.append(
            api_pb2.MetricValue(
                name=info.name,
                wall_time_secs=datum.wall_time,
                training_step=datum.step,
                value=datum.value,
            )
        )
    return values