def generate_values_aggregated(self):
    """Generator function, consume query_results and produce values."""
    first_exc = None
    same_stage = self.stage == self.source_stage
    aggregated_stage = self.source_stage.aggregated()

    for successful, rows_or_exception in self.query_results:
        if not successful:
            first_exc = rows_or_exception
        if first_exc:
            # A query failed, we still consume the results
            continue
        for row in rows_or_exception:
            if aggregated_stage:
                (time_start_ms, offset, shard, value, count) = row
            else:
                (time_start_ms, offset, value) = row
                shard = 0
                count = 1

            # Find the replica id from the shard id.
            replica = (shard & SHARD_REPLICA_MASK) >> SHARD_REPLICA_SHIFT

            timestamp_ms = time_start_ms + offset * self.source_stage.precision_ms

            assert timestamp_ms >= self.time_start_ms
            assert timestamp_ms < self.time_end_ms

            if not same_stage:
                timestamp_ms = bg_utils.round_down(
                    timestamp_ms, self.stage.precision_ms
                )

            if self.current_timestamp_ms != timestamp_ms:
                # This needs to be optimized because in the common case
                # there is absolutely nothing to aggregate.
                ts, _value, _count = self.run_aggregator()
                if ts is not None:
                    yield (ts, _value) if self.aggregated else (ts, _value, _count)

                self.current_timestamp_ms = timestamp_ms

            self.current_values[replica].append(value)
            self.current_counts[replica].append(count)

    if not same_stage or aggregated_stage:
        ts, _value, _count = self.run_aggregator()
        if ts is not None:
            yield (ts, _value) if self.aggregated else (ts, _value, _count)

    if first_exc:
        raise RetryableError(first_exc)
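# Hedged sketch of the shard -> replica extraction used above. The real values
# of SHARD_REPLICA_MASK and SHARD_REPLICA_SHIFT are defined elsewhere in the
# driver; the constants below are placeholders chosen only to illustrate that
# the replica id is a masked bit field packed inside the shard integer.
_EXAMPLE_SHARD_REPLICA_SHIFT = 12  # assumed bit offset of the replica id
_EXAMPLE_SHARD_REPLICA_MASK = 0xF << _EXAMPLE_SHARD_REPLICA_SHIFT


def _replica_from_shard_sketch(shard):
    """Illustrative only: pull the replica id out of a packed shard id."""
    return (shard & _EXAMPLE_SHARD_REPLICA_MASK) >> _EXAMPLE_SHARD_REPLICA_SHIFT


# A shard packing replica 3 in the masked field decodes back to 3, regardless
# of what the low-order writer bits contain.
assert _replica_from_shard_sketch((3 << _EXAMPLE_SHARD_REPLICA_SHIFT) | 0x2A) == 3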
def round_down(self, timestamp):
    """Round down a timestamp to a multiple of the precision."""
    return bg_utils.round_down(timestamp, self.precision)
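# Minimal sketch of what bg_utils.round_down is assumed to do, based on how it
# is called above: truncate a timestamp to the largest multiple of the
# precision that does not exceed it. The actual implementation in bg_utils may
# differ.
def _round_down_sketch(timestamp, precision):
    """Illustrative only: round timestamp down to a multiple of precision."""
    return timestamp - (timestamp % precision)


# Example: with a 60 s precision, 12:34:56 (45296 s after midnight) rounds
# down to 12:34:00 (45240 s).
assert _round_down_sketch(45296, 60) == 45240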