Пример #1
0
 def __remember_job_properties(self, job: Job):
     """Copy selected properties of *job* onto this record and persist.

     Each field is filled in only if it is currently ``None`` (first
     write wins); values that are already set are never overwritten.
     """
     lazy_sources = (
         ("job_type", lambda: job.job_type),
         ("job_group", lambda: job.job_group),
         ("job_tier", lambda: job.tier),
         # Bound method, not called yet: get_platform_option() is only
         # invoked when the field is actually unset.
         ("job_platform_option", job.get_platform_option),
     )
     for field_name, fetch in lazy_sources:
         if getattr(self, field_name) is None:
             setattr(self, field_name, fetch())
     self.save()
Пример #2
0
def test_has_job(find_it):
    """has_job() must report whether the job's id occurs in the job list."""
    target = Job(id=123, repository=Repository(), guid='12345')
    candidates = [
        {'id': 111},
        {'id': 222},
    ]

    if not find_it:
        assert not has_job(target, candidates)
        return

    candidates.append({'id': 123})
    assert has_job(target, candidates)
Пример #3
0
    def normalize(cls, jobs):
        """Convert *jobs* into a list of plain serializable dicts.

        When a task was re-run, only the data from the run with the
        highest ``retry_id`` seen so far is kept (earlier runs are
        skipped).  Raw result values are translated to a normalized
        vocabulary (e.g. "success" -> "passed").

        Returns a list of dicts with keys: id, label, result, state,
        classification, classification_note, duration.
        """
        # Loop-invariant translation table — hoisted out of the per-job
        # loop (the original rebuilt this dict on every iteration).
        result_map = {
            "success": "passed",
            "testfailed": "failed",
            "busted": "failed",
            "usercancel": "canceled",
            "retry": "exception",
        }

        retries = defaultdict(int)
        items = []

        for job in jobs:
            task_id = job.taskcluster_metadata.task_id
            retry_id = job.taskcluster_metadata.retry_id

            # If a task is re-run, use the data from the last run.
            if retry_id < retries[task_id]:
                logger.trace(
                    f"Skipping {job} because there is a newer run of it.")
                continue

            retries[task_id] = retry_id

            # The first attached note (if any) becomes the
            # classification note.
            note = ""
            notes = job.jobnote_set.all()
            if notes:
                note = str(notes[0].text)

            raw_result = job.result
            items.append({
                "id": str(task_id),
                "label": job.job_type.name,
                # Unmapped results pass through unchanged.
                "result": result_map.get(raw_result, raw_result),
                "state": job.state,
                "classification": job.failure_classification.name,
                "classification_note": note,
                "duration": Job.get_duration(
                    job.submit_time, job.start_time, job.end_time),
            })
        return items
Пример #4
0
    def normalize(cls, jobs):
        """Convert *jobs* into a list of plain serializable dicts.

        When a task was re-run, only the data from the run with the
        highest ``retry_id`` seen so far is kept (earlier runs are
        skipped).

        Returns a list of dicts with keys: id, label, result,
        classification, classification_note, duration.
        """
        retries = defaultdict(int)
        items = []

        for job in jobs:
            task_id = job.taskcluster_metadata.task_id
            retry_id = job.taskcluster_metadata.retry_id

            # If a task is re-run, use the data from the last run.
            if retry_id < retries[task_id]:
                logger.trace(
                    f"Skipping {job} because there is a newer run of it.")
                continue

            retries[task_id] = retry_id

            # The first attached note (if any) becomes the
            # classification note.
            note = ""
            notes = job.jobnote_set.all()
            if notes:
                note = str(notes[0].text)

            items.append({
                "id": str(task_id),
                "label": job.job_type.name,
                "result": job.result,
                "classification": job.failure_classification.name,
                "classification_note": note,
                "duration": Job.get_duration(
                    job.submit_time, job.start_time, job.end_time),
            })
        return items