Example 1
    def test_correct_format(self):
        project = self.create_project()
        build = self.create_build(project)
        job = self.create_job(build)
        jobphase = self.create_jobphase(job)
        jobstep = self.create_jobstep(jobphase)
        handler = ManifestJsonHandler(jobstep)

        fp = StringIO(self.json_file_format % jobstep.id.hex)
        handler.process(fp)
        assert not FailureReason.query.filter(FailureReason.step_id == jobstep.id).first()
Example 2
    def test_correct_format(self):
        project = self.create_project()
        build = self.create_build(project)
        job = self.create_job(build)
        jobphase = self.create_jobphase(job)
        jobstep = self.create_jobstep(jobphase)
        artifact = self.create_artifact(jobstep, 'manifest.json')
        handler = ManifestJsonHandler(jobstep)

        fp = StringIO(self.json_file_format % jobstep.id.hex)
        handler.process(fp, artifact)
        assert not FailureReason.query.filter(
            FailureReason.step_id == jobstep.id).first()
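Both versions of the test rely on a json_file_format attribute defined elsewhere on the test class, which is not shown in these examples. A plausible definition, assuming the handler validates a job_step_id field against the step's hex id (as the malformed tests suggest), would be:

    # Hypothetical fixture, not from the source: a manifest body whose
    # job_step_id placeholder is filled in with the JobStep's hex id.
    json_file_format = '{"job_step_id": "%s"}'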
Example 3
    def test_malformed(self):
        project = self.create_project()
        build = self.create_build(project)
        job = self.create_job(build)
        jobphase = self.create_jobphase(job)
        jobstep = self.create_jobstep(jobphase)
        artifact = self.create_artifact(jobstep, 'manifest.json')
        handler = ManifestJsonHandler(jobstep)

        fp = StringIO('invalid_file')
        handler.process(fp, artifact)
        assert jobstep.result == Result.infra_failed
        assert FailureReason.query.filter(
            FailureReason.step_id == jobstep.id,
            FailureReason.reason == 'malformed_manifest_json',
        ).first()
Example 4
    def test_malformed(self):
        project = self.create_project()
        build = self.create_build(project)
        job = self.create_job(build)
        jobphase = self.create_jobphase(job)
        jobstep = self.create_jobstep(jobphase)
        artifact = self.create_artifact(jobstep, 'manifest.json')
        handler = ManifestJsonHandler(jobstep)

        fp = StringIO('invalid_file')
        handler.process(fp, artifact)
        assert jobstep.result == Result.infra_failed
        assert FailureReason.query.filter(
            FailureReason.step_id == jobstep.id,
            FailureReason.reason == 'malformed_manifest_json',
        ).first()
Example 5
    def test_malformed(self):
        project = self.create_project()
        build = self.create_build(project)
        job = self.create_job(build)
        jobphase = self.create_jobphase(job)
        jobstep = self.create_jobstep(jobphase)
        handler = ManifestJsonHandler(jobstep)

        # TODO(nate): temporarily disabled
        if False:
            fp = StringIO('invalid_file')
            handler.process(fp)
            assert jobstep.result == Result.infra_failed
            assert FailureReason.query.filter(
                FailureReason.step_id == jobstep.id,
                FailureReason.reason == 'malformed_manifest_json',
            ).first()
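The class under test is not shown in these examples. The sketch below is inferred from what the tests assert rather than taken from the source: process() parses the artifact as JSON and, on a parse error or a job_step_id mismatch, records a malformed_manifest_json FailureReason and marks the step infra_failed. The module paths, the FILENAMES constant, and the _mark_malformed helper are all assumptions.

    import json

    from changes.config import db
    from changes.constants import Result
    from changes.models import FailureReason


    class ManifestJsonHandler(object):
        """Sketch only; the real implementation may differ."""
        FILENAMES = ('manifest.json',)

        def __init__(self, step):
            self.step = step

        @classmethod
        def can_process(cls, filename):
            # Assumption: artifacts are matched by file name.
            return filename in cls.FILENAMES

        def process(self, fp, artifact=None):
            # artifact is optional to cover both calling conventions
            # seen in the tests above.
            try:
                data = json.load(fp)
                if data['job_step_id'] != self.step.id.hex:
                    self._mark_malformed()
            except Exception:
                self._mark_malformed()

        def _mark_malformed(self):
            db.session.add(FailureReason(
                step_id=self.step.id,
                job_id=self.step.job.id,
                build_id=self.step.job.build_id,
                project_id=self.step.job.project_id,
                reason='malformed_manifest_json',
            ))
            self.step.result = Result.infra_failed
            db.session.add(self.step)
            db.session.commit()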
Example 6
    def verify_final_artifacts(self, step, artifacts):
        # If the Jenkins run was aborted, we don't expect a manifest file.
        if (step.result != Result.aborted and
                not any(ManifestJsonHandler.can_process(a.name) for a in artifacts)):
            db.session.add(FailureReason(
                step_id=step.id,
                job_id=step.job.id,
                build_id=step.job.build_id,
                project_id=step.job.project_id,
                reason='missing_manifest_json',
            ))
            step.result = Result.infra_failed
            db.session.add(step)
            db.session.commit()
Example 7
    def verify_final_artifacts(self, step, artifacts):
        # If the Jenkins run was aborted or timed out, we don't expect a manifest file.
        if (step.result != Result.aborted and
                not step.data.get('timed_out', False) and
                not any(ManifestJsonHandler.can_process(a.name) for a in artifacts)):
            db.session.add(FailureReason(
                step_id=step.id,
                job_id=step.job.id,
                build_id=step.job.build_id,
                project_id=step.job.project_id,
                reason='missing_manifest_json',
            ))
            step.result = Result.infra_failed
            db.session.add(step)
            db.session.commit()
Example 8
    def _sync_results(self, step, item):
        job_name = step.data['job_name']
        build_no = step.data['build_no']

        artifacts = item.get('artifacts', ())

        # If the Jenkins run was aborted, we don't expect a manifest file.
        if (step.result != Result.aborted and
                not any(ManifestJsonHandler.can_process(os.path.basename(a['fileName'])) for a in artifacts)):
            db.session.add(FailureReason(
                step_id=step.id,
                job_id=step.job.id,
                build_id=step.job.build_id,
                project_id=step.job.project_id,
                reason='missing_manifest_json',
            ))
            step.result = Result.infra_failed
            db.session.add(step)
            db.session.commit()

        # Detect and warn if there are duplicate artifact file names as we were relying on
        # uniqueness before.
        artifact_filenames = set()
        for artifact in artifacts:
            if artifact['fileName'] in artifact_filenames:
                self.logger.warning('Duplicate artifact filename found: %s', artifact['fileName'])
            artifact_filenames.add(artifact['fileName'])

        self._sync_generic_results(step, artifacts)

        # sync console log
        self.logger.info('Syncing console log for %s', step.id)
        try:
            result = True
            while result:
                result = self._sync_log(
                    jobstep=step,
                    name=step.label,
                    job_name=job_name,
                    build_no=build_no,
                )

        except Exception:
            db.session.rollback()
            current_app.logger.exception(
                'Unable to sync console log for job step %r',
                step.id.hex)
Example 9
    def _sync_results(self, step, item):
        job_name = step.data["job_name"]
        build_no = step.data["build_no"]

        artifacts = item.get("artifacts", ())
        if self.sync_phase_artifacts:
            # if we are allowing phase artifacts and we find *any* artifacts
            # that resemble a phase, we need to change the behavior of the
            # remainder of the tasks
            phased_results = any(a["fileName"].endswith("phase.json") for a in artifacts)
        else:
            phased_results = False

        # If the Jenkins run was aborted, we don't expect a manifest file.
        if step.result != Result.aborted:
            if not any(ManifestJsonHandler.can_process(os.path.basename(a["fileName"])) for a in artifacts):
                db.session.add(
                    FailureReason(
                        step_id=step.id,
                        job_id=step.job.id,
                        build_id=step.job.build_id,
                        project_id=step.job.project_id,
                        reason="missing_manifest_json",
                    )
                )
                step.result = Result.infra_failed
                db.session.add(step)
                db.session.commit()

        # artifacts sync differently depending on the style of job results
        if phased_results:
            self._sync_phased_results(step, artifacts)
        else:
            self._sync_generic_results(step, artifacts)

        # sync console log
        self.logger.info("Syncing console log for %s", step.id)
        try:
            result = True
            while result:
                result = self._sync_log(jobstep=step, name=step.label, job_name=job_name, build_no=build_no)

        except Exception:
            db.session.rollback()
            current_app.logger.exception("Unable to sync console log for job step %r", step.id.hex)
Example 10
    def _sync_phased_results(self, step, artifacts):
        # Due to the limitations of Jenkins and our requirement to have more
        # insight into the actual steps a build process takes, the best way
        # to do this is to rewrite history within Changes.
        job = step.job
        is_diff = not job.source.is_commit()
        project = step.project

        artifacts_by_name = dict((a["fileName"], a) for a in artifacts)
        pending_artifacts = set(artifacts_by_name.keys())

        phase_steps = set()
        phase_step_data = {
            "job_name": step.data["job_name"],
            "build_no": step.data["build_no"],
            "generated": True,
            # TODO: _pick_master here seems very suspicious.
            "master": self._pick_master(step.data["job_name"], is_diff),
        }

        phases = set()

        # fetch each phase and create it immediately (as opposed to async)
        for artifact_data in artifacts:
            artifact_filename = artifact_data["fileName"]

            if not artifact_filename.endswith("phase.json"):
                continue

            pending_artifacts.remove(artifact_filename)

            resp = self.fetch_artifact(step, artifact_data)
            phase_data = resp.json()

            if phase_data["retcode"]:
                result = Result.failed
            else:
                result = Result.passed

            date_started = datetime.utcfromtimestamp(phase_data["startTime"])
            date_finished = datetime.utcfromtimestamp(phase_data["endTime"])

            jobphase, created = get_or_create(
                JobPhase,
                where={"job": job, "label": phase_data["name"]},
                defaults={
                    "project": project,
                    "result": result,
                    "status": Status.finished,
                    "date_started": date_started,
                    "date_finished": date_finished,
                },
            )
            phases.add(jobphase)

            jobstep, created = get_or_create(
                JobStep,
                where={"phase": jobphase, "label": step.label},
                defaults={
                    "job": job,
                    "node": step.node,
                    "project": project,
                    "result": result,
                    "status": Status.finished,
                    "date_started": date_started,
                    "date_finished": date_finished,
                    "data": phase_step_data,
                },
            )
            sync_job_step.delay_if_needed(task_id=jobstep.id.hex, parent_task_id=job.id.hex, step_id=jobstep.id.hex)
            phase_steps.add(jobstep)

            # capture the log if available
            try:
                log_artifact = artifacts_by_name[phase_data["log"]]
            except KeyError:
                self.logger.warning("Unable to find logfile for phase: %s", phase_data)
            else:
                pending_artifacts.remove(log_artifact["fileName"])

                self._handle_generic_artifact(jobstep=jobstep, artifact=log_artifact, skip_checks=True)

        # ideally we don't mark the base step as a failure if any of the phases
        # report more correct results
        if phases and step.result == Result.failed and any(p.result == Result.failed for p in phases):
            step.result = Result.passed
            db.session.add(step)

        if not pending_artifacts:
            return

        # Alias to clarify that this is the JobStep that actually ran on a slave.
        original_step = step
        # all remaining artifacts get bound to the final phase
        final_step = sorted(phase_steps, key=lambda x: x.date_finished, reverse=True)[0]
        for artifact_name in pending_artifacts:
            # Manifest files are associated with the original step so we can validate that the ID is correct.
            responsible_step = original_step if ManifestJsonHandler.can_process(artifact_name) else final_step
            self._handle_generic_artifact(jobstep=responsible_step, artifact=artifacts_by_name[artifact_name])
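For reference, the phase.json payload this method consumes is not documented in these examples. The field names below come directly from the keys the code reads; the values are invented for illustration:

    # Hypothetical phase.json contents; keys match what the code above reads.
    phase_data = {
        "name": "test",            # becomes the JobPhase label
        "retcode": 1,              # non-zero maps to Result.failed
        "startTime": 1400000000,   # unix timestamp -> date_started
        "endTime": 1400000042,     # unix timestamp -> date_finished
        "log": "test.log",         # fileName of the artifact with this phase's log
    }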