Example #1
    def _ooze_task(self, task: CeleryTask, job_scope: JobScope,
                   job_context: JobContext, score: int):
        """Non-blocking task oozing function."""
        # Fire-and-forget: hand the job off to Celery without awaiting a result.
        task.delay(job_scope, job_context)
        self.oozed_count += 1
        # Advance the visible counter only in OOZING_COUNTER_STEP increments.
        if self.oozed_count % OOZING_COUNTER_STEP == 0:
            self.counter += OOZING_COUNTER_STEP

        Measure.histogram(
            f'{__name__}.job_scores',
            tags={
                'sweep_id': self.sweep_id,
                'ad_account_id': job_scope.ad_account_id,
                'report_type': job_scope.report_type,
                'report_variant': job_scope.report_variant,
                'job_type': job_scope.job_type,
            },
        )(score)
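
All of the examples here share the same curried metrics API: Measure.histogram(name, tags=...) returns a callable, and calling it records one sample. The real Measure class (a Datadog-backed wrapper) is not part of these listings, so here is a minimal sketch of the assumed interface, for orientation only:

from typing import Callable, Dict, Optional


class Measure:
    """Minimal sketch of the assumed metrics facade, not the real class."""

    @staticmethod
    def histogram(name: str,
                  tags: Optional[Dict[str, str]] = None,
                  sample_rate: float = 1.0) -> Callable[[float], None]:
        # The returned recorder submits one sample per call, e.g.
        # Measure.histogram('x.job_scores', tags={...})(score)
        def record(value: float) -> None:
            print(f'histogram {name}={value} tags={tags}')  # stand-in sink

        return record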
Example #2
def _send_measurement_task_runtime(job_scope: JobScope, bucket: int):
    _measurement_base_name = f'{__name__}.report_tasks_outcome'
    _measurement_tags = {
        'ad_account_id': job_scope.ad_account_id,
        'sweep_id': job_scope.sweep_id,
        'report_type': job_scope.report_type,
        'report_variant': job_scope.report_variant,
        'bucket': bucket,
        'job_type': job_scope.job_type,
    }
    # datapoint_count may be None; report data-point metrics only when the
    # task actually counted something.
    if job_scope.datapoint_count and job_scope.datapoint_count > 0:
        Measure.counter(f'{_measurement_base_name}.data_points',
                        tags=_measurement_tags).increment(
                            job_scope.datapoint_count)
        Measure.histogram(f'{_measurement_base_name}.data_points',
                          tags=_measurement_tags)(job_scope.datapoint_count)

    Measure.gauge(f'{_measurement_base_name}.running_time',
                  tags=_measurement_tags)(job_scope.running_time)
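
A hedged usage sketch for the function above: assuming a JobScope-like object carrying just the attributes read there (the stub below is not from the original source), the reporter can be exercised like this:

from types import SimpleNamespace

# Hypothetical stand-in exposing only the attributes the function reads;
# the real JobScope class is much richer.
fake_scope = SimpleNamespace(
    ad_account_id='act_123',
    sweep_id='sweep_001',
    report_type='lifetime',
    report_variant='ad',
    job_type='paid-data',
    datapoint_count=42,  # truthy and > 0, so the data_points metrics fire
    running_time=3.5,
)

# The bucket value is illustrative; real FailureBucket constants are not
# shown in this example.
_send_measurement_task_runtime(fake_scope, bucket=0)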
Example #3
def iter_expectations(reality_claims_iter: Iterable[RealityClaim]) -> Generator[ExpectationClaim, None, None]:
    """
    Converts an instance of RealityClaim object (claiming that certain
    entities exist and providing some metadata about their existence)
    into one or more ExpectationClaim objects that express our expectations
    about what report types (for what dates) we expect to see.
    """
    histogram_counter = defaultdict(int)
    for claim in reality_claims_iter:
        jobs_generators = entity_expectation_generator_map.get(claim.entity_type, [])
        for jobs_generator in jobs_generators:
            for expectation_claim in jobs_generator(claim):
                yield expectation_claim
                job_type = detect_job_type(expectation_claim.report_type, expectation_claim.entity_type)
                histogram_counter[(claim.ad_account_id, claim.entity_type, job_type)] += 1

    for ((ad_account_id, entity_type, job_type), count) in histogram_counter.items():
        Measure.histogram(
            f'{__name__}.{iter_expectations.__name__}.expectations_per_reality_claim',
            tags={'ad_account_id': ad_account_id, 'entity_type': entity_type, 'job_type': job_type},
        )(count)
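
Note the tallying pattern above: counts accumulate in a local defaultdict while the generator is consumed, and the histograms are submitted only after the input is exhausted, i.e. past the final yield. A self-contained toy version of the same pattern, with domain classes replaced by tuples and print standing in for Measure:

from collections import defaultdict
from typing import Generator, Iterable, Tuple


def iter_with_tally(claims: Iterable[Tuple[str, str]]) -> Generator[str, None, None]:
    tally = defaultdict(int)
    for account_id, entity_type in claims:
        yield f'{account_id}:{entity_type}'
        tally[(account_id, entity_type)] += 1
    # Reached only once the caller has drained the generator; a consumer
    # that stops early never reports these counts.
    for (account_id, entity_type), count in tally.items():
        print(f'expectations_per_reality_claim {account_id} {entity_type} {count}')


for _ in iter_with_tally([('a1', 'Ad'), ('a1', 'Ad'), ('a2', 'Campaign')]):
    pass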
Example #4
def iter_scorable(
    claims: Iterable[ExpectationClaim]
) -> Generator[ScorableClaim, None, None]:
    """Select signature for each expectation claim based on job history."""
    histogram_counter = defaultdict(int)
    for claim in claims:
        for scorable_claim in generate_scorable(claim):
            job_type = detect_job_type(claim.report_type, claim.entity_type)
            histogram_counter[(claim.ad_account_id, claim.entity_type,
                               job_type)] += 1
            yield scorable_claim

    for ((ad_account_id, entity_type, job_type),
         count) in histogram_counter.items():
        Measure.histogram(
            f'{__name__}.{iter_scorable.__name__}.scorable_claims_per_expectation_claim',
            tags={
                'ad_account_id': ad_account_id,
                'entity_type': entity_type,
                'job_type': job_type
            },
        )(count)
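
Since iter_expectations and iter_scorable are both generator-to-generator transforms, they compose into a lazy pipeline. A hedged wiring sketch follows; the surrounding sweep-builder orchestration is not shown in these examples:

# Stand-in source; the real upstream stage yields RealityClaim objects.
reality_claims_iter = iter(())

# Claims stream through lazily; no stage materializes an intermediate list.
for scorable_claim in iter_scorable(iter_expectations(reality_claims_iter)):
    pass  # scoring and persistence happen downstream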
Example #5
    def _report_metrics(self, interval: int):
        """Regularly report pulse metrics for previous minute to Datadog."""
        name_map = {
            FailureBucket.Success: 'success',
            FailureBucket.Other: 'other',
            FailureBucket.Throttling: 'throttling',
            FailureBucket.UserThrottling: 'user_throttling',
            FailureBucket.ApplicationThrottling: 'application_throttling',
            FailureBucket.AdAccountThrottling: 'adaccount_throttling',
            FailureBucket.TooLarge: 'too_large',
            FailureBucket.WorkingOnIt: 'working_on_it',
            FailureBucket.InaccessibleObject: 'inaccessible_object',
        }

        while True:
            gevent.sleep(interval)
            prev_minute = self.now_in_minutes() - 1
            pulse_values = {
                int(k): int(v)
                for k, v in self.redis.hgetall(self._gen_key(
                    prev_minute)).items()
            }

            total = 0
            for bucket, name in name_map.items():
                value = pulse_values.get(bucket, 0)
                # WorkingOnIt is not a terminal outcome, so exclude it from the total
                if bucket != FailureBucket.WorkingOnIt:
                    total += value
                Measure.histogram(f'{__name__}.pulse_stats',
                                  tags={
                                      'sweep_id': self.sweep_id,
                                      'bucket': name
                                  })(value)

            if total:
                Measure.histogram(f'{__name__}.pulse_stats',
                                  tags={
                                      'sweep_id': self.sweep_id,
                                      'bucket': 'total'
                                  })(total)

            in_progress = self._get_in_progress_count()
            Measure.histogram(f'{__name__}.pulse_stats',
                              tags={
                                  'sweep_id': self.sweep_id,
                                  'bucket': 'in_progress'
                              })(in_progress)
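
For the reader loop above to find anything, a writer must populate a per-minute Redis hash whose fields are integer failure-bucket values. That writer is not shown in these examples; a plausible counterpart, assuming the same _gen_key scheme, is a single HINCRBY per task outcome:

    def _record_pulse(self, minute: int, bucket: int):
        """Hypothetical writer side of the pulse hash (not from the source)."""
        key = self._gen_key(minute)
        # Bump this failure bucket's count for the given minute; hgetall()
        # in _report_metrics decodes these fields back to ints.
        self.redis.hincrby(key, bucket, 1)
        # Assumed housekeeping: let stale per-minute hashes expire on their own.
        self.redis.expire(key, 15 * 60)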
Example #6
def iter_persist_prioritized(
    sweep_id: str, prioritized_iter: Iterable[PrioritizationClaim]
) -> Generator[PrioritizationClaim, None, None]:
    """Persist prioritized jobs and pass-through context objects for inspection."""

    AccountCache.reset()

    with SortedJobsQueue(sweep_id).JobsWriter() as add_to_queue:

        _measurement_name_base = f'{__name__}.{iter_persist_prioritized.__name__}'

        _before_next_prioritized = time.time()
        for prioritization_claim in prioritized_iter:
            job_type = detect_job_type(prioritization_claim.report_type,
                                       prioritization_claim.entity_type)
            _measurement_tags = {
                'entity_type': prioritization_claim.entity_type,
                'report_type': prioritization_claim.report_type,
                'ad_account_id': prioritization_claim.ad_account_id,
                'job_type': job_type,
                'sweep_id': sweep_id,
            }

            Measure.timing(f'{_measurement_name_base}.next_prioritized',
                           tags=_measurement_tags,
                           sample_rate=0.01)(
                               (time.time() - _before_next_prioritized) * 1000)

            score = prioritization_claim.score
            if not should_persist(score):
                logger.debug(
                    f'Not persisting job {prioritization_claim.job_id} due to low score: {score}'
                )
                continue

            # The following are JobScope attributes that we don't store on
            # the JobID, so we need to store them separately.
            # See the JobScope object for exact attr names.
            # At this point the persister forms the auxiliary data blob
            # for saving on Data Flower. We don't have to do that here:
            # it can be pre-computed and placed on the JobSignature.
            # TODO: contemplate moving auxiliary data formation to the
            #       place where JobSignatures are generated, and use that
            #       data for Data Flower (as originally intended but not
            #       implemented, because saving each job's data
            #       individually to Data Flower was too slow)
            # So, here you would unpack
            # **job_kwargs
            # that you get from prioritization_claim.score_job_pairs
            # ... Until then:
            extra_data = {}
            if prioritization_claim.timezone:
                extra_data[
                    'ad_account_timezone_name'] = prioritization_claim.timezone

            with Measure.timer(f'{_measurement_name_base}.add_to_queue',
                               tags=_measurement_tags):
                if prioritization_claim.report_age_in_days is not None:
                    Measure.histogram(
                        f'{_measurement_name_base}.report_age',
                        tags=_measurement_tags)(
                            prioritization_claim.report_age_in_days)
                add_to_queue(prioritization_claim.job_id, score, **extra_data)

            # This time includes the time consumer of this generator wastes
            # between reads from us. Good way to measure how quickly we are
            # consumed (what pauses we have between each consumption)
            with Measure.timer(f'{_measurement_name_base}.yield_result',
                               tags=_measurement_tags):
                yield prioritization_claim

            _before_next_prioritized = time.time()
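
One subtlety worth underlining in the example above: because the yield_result timer wraps the yield itself, whatever the consumer does between reads is charged to that metric, exactly as the inline comment says. A self-contained toy showing the mechanics, with plain time standing in for Measure.timer:

import time
from typing import Generator


def producer() -> Generator[int, None, None]:
    for i in range(3):
        start = time.time()
        yield i  # suspended here while the consumer works
        # Elapsed time includes everything the consumer did before asking
        # for the next item -- what the yield_result timer captures above.
        print(f'yield_result: {(time.time() - start) * 1000:.0f} ms')


for item in producer():
    time.sleep(0.05)  # consumer "work" lands in the producer's timing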