  def test_replace_invalid_values(self, row, expected_row):
    clean_row = util.replace_invalid_values(row)
    self.assertSequenceEqual(clean_row, expected_row)
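For context, here is a minimal sketch of how this parameterized test might be wired up, assuming absl's parameterized test runner and that `util.replace_invalid_values` maps values BigQuery cannot store (e.g. NaN, +/-inf) to None; the example inputs are illustrative, not from the source:

from absl.testing import parameterized

import util  # The module under test, as referenced above.


class UtilTest(parameterized.TestCase):

  # Hypothetical cases: NaN/inf are assumed to be replaced with None.
  @parameterized.parameters(
      (['ok', 1.0], ['ok', 1.0]),
      ([float('nan'), 2.0], [None, 2.0]),
      ([float('inf'), float('-inf')], [None, None]),
  )
  def test_replace_invalid_values(self, row, expected_row):
    clean_row = util.replace_invalid_values(row)
    self.assertSequenceEqual(clean_row, expected_row)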
Example #2
  def add_status_and_metrics_to_bigquery(
      self, job_status, aggregated_metrics_dict,
      metric_name_to_visual_bounds=None):
    """Adds job_status and metrics to their respective BigQuery tables.

    Args:
      job_status (dict): Contains information about the Kubernetes Job.
      aggregated_metrics_dict (dict): Key is metric name and value is a
        MetricPoint containing the aggregated value for that metric.
      metric_name_to_visual_bounds (dict, optional): Key is metric name and
        value is a tuple consisting of (lower_bound, upper_bound) for that
        metric. These bounds are useful for showing helpful lines on charts
        of metrics history. If not provided, the rows saved to BigQuery will
        have `null` for upper_bound and lower_bound.
    """
    if not self.metric_collection_config.get('write_to_bigquery', True):
      self.logger.info('Skipping writing metrics and job_status to BigQuery.')
      return

    if not metric_name_to_visual_bounds:
      metric_name_to_visual_bounds = {}

    # Compute the join key to link together job_history and metric_history.
    unique_key = str(uuid.uuid4())

    # Every job should have 1 job status row and it should exist even if
    # no other metrics exist.
    job_history_row = [
        unique_key,
        self.test_name,
        self.test_type,
        self.accelerator,
        self.framework_version,
        job_status['final_status'],
        job_status['num_failures'],
        int(job_status['stop_time'] - job_status['start_time']),
        self._wall_time_to_sql_timestamp(job_status['stop_time']),
        self.debug_info.stackdriver_logs_link,
        job_status['publish_time'],
        self.debug_info.download_command,
        self.debug_info.workload_link,
    ]

    # Create rows to represent the computed metrics for this job.
    metric_history_rows = []
    for metric_name, metric_point in aggregated_metrics_dict.items():
      lower_bound, upper_bound = metric_name_to_visual_bounds.get(
          metric_name, (None, None))
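      # Metrics without configured bounds get (None, None), which are
      # stored as NULL in the lower_bound/upper_bound columns.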
      metric_history_rows.append([
          unique_key,
          self.test_name,
          self._wall_time_to_sql_timestamp(metric_point.wall_time),
          metric_name,
          float(metric_point.metric_value),
          lower_bound,
          upper_bound,
      ])

    # Insert the rows into BigQuery.
    for table_id, rows in [
        (self.job_history_table_id, [job_history_row]),
        (self.metric_history_table_id, metric_history_rows),
    ]:
      if not rows:
        continue
      self.logger.info(
          'Inserting {} rows into BigQuery table `{}`'.format(
              len(rows), table_id))
      table = self.bigquery_client.get_table(table_id)
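      # Replace values BigQuery rejects (e.g. NaN/inf) before inserting.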
      clean_rows = [util.replace_invalid_values(row) for row in rows]
      errors = self.bigquery_client.insert_rows(table, clean_rows)
      if not errors:
        self.logger.info('Successfully added rows to BigQuery.')
      else:
        # TODO: Maybe add retry logic. insert_rows seems to be atomic for all
        #       elements in the list, so it should be safe to retry.
        self.logger.error(
            'Failed to add rows to BigQuery. Errors: {}'.format(errors),
            debug_info=self.debug_info)
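
A hypothetical call, assuming `handler` is an instance of the class above; the job_status keys are the ones the method actually reads, while the literal values and the MetricPoint constructor shown are illustrative:

handler.add_status_and_metrics_to_bigquery(
    job_status={
        'final_status': 'success',
        'num_failures': 0,
        'start_time': 1590000000.0,
        'stop_time': 1590000123.0,
        'publish_time': 1590000124.0,
    },
    aggregated_metrics_dict={
        'accuracy': MetricPoint(metric_value=0.93, wall_time=1590000120.0),
    },
    metric_name_to_visual_bounds={'accuracy': (0.90, 1.0)},
)

The TODO in the error branch suggests retrying; a minimal sketch of that idea, under the stated assumption that insert_rows is all-or-nothing so re-submitting the same rows after a failure does not duplicate data (the helper name and max_attempts default are illustrative):

import time

def insert_rows_with_retry(bigquery_client, table, rows, max_attempts=3):
  """Retries the streaming insert with simple exponential backoff."""
  errors = []
  for attempt in range(max_attempts):
    errors = bigquery_client.insert_rows(table, rows)
    if not errors:
      break  # All rows were accepted.
    time.sleep(2 ** attempt)
  return errors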