Example #1
    def save(self, repository):
        author = self._get_author(self.author)
        if self.author == self.committer:
            committer = author
        else:
            committer = self._get_author(self.committer)

        revision, created = create_or_update(Revision, where={
            'repository': repository,
            'sha': self.id,
        }, values={
            'author': author,
            'committer': committer,
            'message': self.message,
            'parents': self.parents,
            'branches': self.branches,
            'date_created': self.author_date,
            'date_committed': self.committer_date,
        })

        # we also want to create a source for this item as it's the canonical
        # representation in the UI
        try_create(Source, {
            'revision_sha': self.id,
            'repository': repository,
        })

        return (revision, created)
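Every snippet on this page funnels through try_create. The contract these call sites rely on is "insert the row only if it does not already exist, and report whether anything was inserted": several examples test the return value (if not event: return, or if new_fr: db.session.commit()). The sketch below is a minimal illustration of that contract, assuming Flask-SQLAlchemy's db.session and a unique constraint covering the where columns; it is not the project's actual helper and may differ from it in detail.

from sqlalchemy.exc import IntegrityError


def try_create(model, where, defaults=None):
    # Build the candidate row from the lookup keys plus any defaults.
    instance = model()
    for key, value in dict(defaults or {}, **where).items():
        setattr(instance, key, value)
    try:
        # The SAVEPOINT keeps a failed INSERT from invalidating whatever
        # else is already pending on the session.
        with db.session.begin_nested():
            db.session.add(instance)
    except IntegrityError:
        # A matching row already exists; callers treat None as
        # "someone else got here first, nothing new to commit".
        return None
    return instance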
Example #2
    def verify_final_artifacts(self, step, artifacts):
        super(JenkinsTestCollectorBuilder, self).verify_final_artifacts(step, artifacts)

        # We annotate the "expanded" jobs with this tag, so the individual
        # shards will no longer require the critical artifact
        if step.data.get('expanded'):
            return

        expected_image = self.get_expected_image(step.job_id)

        # if this is a snapshot build then we don't have to worry about
        # sanity checking the normal artifacts
        if expected_image:
            return

        required_handler = self.get_required_handler()

        if not any(required_handler.can_process(a.name) for a in artifacts):
            step.result = Result.failed
            db.session.add(step)

            job = step.job
            try_create(
                FailureReason, {
                    'step_id': step.id,
                    'job_id': job.id,
                    'build_id': job.build_id,
                    'project_id': job.project_id,
                    'reason': 'missing_artifact'
                })
            db.session.commit()
Example #3
def sync_job_step(step_id):
    step = JobStep.query.get(step_id)
    if not step:
        return

    implementation = get_build_step(step.job_id)
    implementation.update_step(step=step)

    if step.status != Status.finished:
        is_finished = False
    else:
        is_finished = sync_job_step.verify_all_children() == Status.finished

    if not is_finished:
        raise sync_job_step.NotFinished

    missing_tests = is_missing_tests(step)

    try_create(ItemStat,
               where={
                   'item_id': step.id,
                   'name': 'tests_missing',
               },
               defaults={'value': int(missing_tests)})

    if step.result == Result.passed and missing_tests:
        step.result = Result.failed
        db.session.add(step)
        db.session.commit()
Example #4
    def _sync_results(self, step, item):
        """
        At this point, we have already collected all of the artifacts, so if
        this is the initial collection phase and we did not collect a
        critical artifact then we error.
        """
        super(JenkinsCollectorBuilder, self)._sync_results(step, item)

        # We annotate the "expanded" jobs with this tag, so the individual
        # shards will no longer require the critical artifact
        if step.data.get('expanded'):
            return

        expected_image = self.get_expected_image(step.job_id)

        # if this is a snapshot build then we don't have to worry about
        # sanity checking the normal artifacts
        if expected_image:
            return

        artifacts = item.get('artifacts', ())
        required_artifact = self.get_required_artifact()

        if not any(os.path.basename(a['fileName']) == required_artifact for a in artifacts):
            step.result = Result.failed
            db.session.add(step)

            job = step.job
            try_create(FailureReason, {
                'step_id': step.id,
                'job_id': job.id,
                'build_id': job.build_id,
                'project_id': job.project_id,
                'reason': 'missing_artifact'
            })
Example #5
def record_coverage_stats(step):
    coverage_stats = db.session.query(
        func.sum(FileCoverage.lines_covered).label('lines_covered'),
        func.sum(FileCoverage.lines_uncovered).label('lines_uncovered'),
        func.sum(FileCoverage.diff_lines_covered).label('diff_lines_covered'),
        func.sum(
            FileCoverage.diff_lines_uncovered).label('diff_lines_uncovered'),
    ).filter(FileCoverage.step_id == step.id, ).group_by(
        FileCoverage.step_id, ).first()

    stat_list = (
        'lines_covered',
        'lines_uncovered',
        'diff_lines_covered',
        'diff_lines_uncovered',
    )
    for stat_name in stat_list:
        try_create(ItemStat,
                   where={
                       'item_id': step.id,
                       'name': stat_name,
                   },
                   defaults={
                       'value': getattr(coverage_stats, stat_name, 0) or 0,
                   })
Example #6
    def get_tests(self, fp):
        try:
            # libxml has a limit on the size of a text field by default, but we encode stdout/stderr.
            #
            # It's not good to have such huge text fields in the first place, but we still want to
            # avoid hard failing here if we do.
            parser = etree.XMLParser(huge_tree=True)
            root = etree.fromstring(fp.read(), parser=parser)
        except Exception:
            # Record the JobStep ID so we have any hope of tracking these down.
            self.logger.exception('Failed to parse XML; (step={})'.format(
                self.step.id.hex))
            try_create(
                FailureReason, {
                    'step_id': self.step.id,
                    'job_id': self.step.job_id,
                    'build_id': self.step.job.build_id,
                    'project_id': self.step.project_id,
                    'reason': 'malformed_artifact'
                })
            db.session.commit()
            return []

        if root.tag == 'unittest-results':
            return self.get_bitten_tests(root)
        return self.get_xunit_tests(root)
Example #7
    def delay(self, **kwargs):
        """
        Enqueue this task.

        >>> task.delay(
        >>>     task_id='33846695b2774b29a71795a009e8168a',
        >>>     parent_task_id='659974858dcf4aa08e73a940e1066328',
        >>> )
        """
        assert kwargs.get('task_id')

        fn_kwargs = dict((k, v) for k, v in kwargs.iteritems()
                         if k not in ('task_id', 'parent_task_id'))

        try_create(Task,
                   where={
                       'task_name': self.task_name,
                       'parent_id': kwargs.get('parent_task_id'),
                       'task_id': kwargs['task_id'],
                       'status': Status.queued,
                       'data': {
                           'kwargs': fn_kwargs,
                       },
                   })

        db.session.commit()

        queue.delay(
            self.task_name,
            kwargs=kwargs,
            countdown=CONTINUE_COUNTDOWN,
        )
Example #8
    def get_tests(self, fp):
        try:
            # libxml has a limit on the size of a text field by default, but we encode stdout/stderr.
            #
            # It's not good to have such huge text fields in the first place, but we still want to
            # avoid hard failing here if we do.
            parser = etree.XMLParser(huge_tree=True)
            root = etree.fromstring(fp.read(), parser=parser)
        except Exception:
            uri = build_uri('/find_build/{0}/'.format(
                self.step.job.build_id.hex))
            self.logger.warning('Failed to parse XML; (step=%s, build=%s)',
                                self.step.id.hex,
                                uri,
                                exc_info=True)
            try_create(
                FailureReason, {
                    'step_id': self.step.id,
                    'job_id': self.step.job_id,
                    'build_id': self.step.job.build_id,
                    'project_id': self.step.project_id,
                    'reason': 'malformed_artifact'
                })
            db.session.commit()
            return []

        if root.tag == 'unittest-results':
            return self.get_bitten_tests(root)
        return self.get_xunit_tests(root)
Example #9
    def verify_final_artifacts(self, step, artifacts):
        super(JenkinsTestCollectorBuilder, self).verify_final_artifacts(step, artifacts)

        # We annotate the "expanded" jobs with this tag, so the individual
        # shards will no longer require the critical artifact
        if step.data.get('expanded'):
            return

        expected_image = self.get_expected_image(step.job_id)

        # if this is a snapshot build then we don't have to worry about
        # sanity checking the normal artifacts
        if expected_image:
            return

        required_handler = self.get_required_handler()

        if not any(required_handler.can_process(a.name) for a in artifacts):
            step.result = Result.failed
            db.session.add(step)

            job = step.job
            try_create(FailureReason, {
                'step_id': step.id,
                'job_id': job.id,
                'build_id': job.build_id,
                'project_id': job.project_id,
                'reason': 'missing_artifact'
            })
            db.session.commit()
Example #10
def aggregate_flaky_tests(day=None, max_flaky_tests=200):
    if day is None:
        day = datetime.utcnow().date() - timedelta(days=1)

    try:
        projects = Project.query.all()

        for project in projects:
            tests = get_flaky_tests(day, day + timedelta(days=1), [project], max_flaky_tests)

            for test in tests:
                _log_metrics(
                    "flaky_test_reruns",
                    flaky_test_reruns_name=test['name'],
                    flaky_test_reruns_project_id=test['project_id'],
                    flaky_test_reruns_flaky_runs=test['flaky_runs'],
                    flaky_test_reruns_passing_runs=test['passing_runs'],
                )
                try_create(FlakyTestStat, {
                    'name': test['name'],
                    'project_id': test['project_id'],
                    'date': day,
                    'last_flaky_run_id': test['id'],
                    'flaky_runs': test['flaky_runs'],
                    'double_reruns': test['double_reruns'],
                    'passing_runs': test['passing_runs'],
                })
                # Potentially hundreds of commits per project may be a bit excessive,
                # but the metric posting can potentially take seconds, meaning this could be
                # a very long-running transaction otherwise.
                db.session.commit()

        db.session.commit()
    except Exception as err:
        logging.exception(unicode(err))
Example #11
    def delay(self, **kwargs):
        """
        Enqueue this task.

        >>> task.delay(
        >>>     task_id='33846695b2774b29a71795a009e8168a',
        >>>     parent_task_id='659974858dcf4aa08e73a940e1066328',
        >>> )
        """
        assert kwargs.get('task_id')

        fn_kwargs = dict(
            (k, v) for k, v in kwargs.iteritems()
            if k not in ('task_id', 'parent_task_id')
        )

        try_create(Task, where={
            'task_name': self.task_name,
            'parent_id': kwargs.get('parent_task_id'),
            'task_id': kwargs['task_id'],
            'status': Status.queued,
            'data': {
                'kwargs': fn_kwargs,
            },
        })

        db.session.commit()

        queue.delay(
            self.task_name,
            kwargs=kwargs,
            countdown=CONTINUE_COUNTDOWN,
        )
Example #12
    def get_tests(self, fp):
        try:
            # libxml has a limit on the size of a text field by default, but we encode stdout/stderr.
            #
            # It's not good to have such huge text fields in the first place, but we still want to
            # avoid hard failing here if we do.
            parser = etree.XMLParser(huge_tree=True)
            root = etree.fromstring(fp.read(), parser=parser)
        except Exception:
            uri = build_uri("/find_build/{0}/".format(self.step.job.build_id.hex))
            self.logger.warning("Failed to parse XML; (step=%s, build=%s)", self.step.id.hex, uri, exc_info=True)
            try_create(
                FailureReason,
                {
                    "step_id": self.step.id,
                    "job_id": self.step.job_id,
                    "build_id": self.step.job.build_id,
                    "project_id": self.step.project_id,
                    "reason": "malformed_artifact",
                },
            )
            db.session.commit()
            return []

        if root.tag == "unittest-results":
            return self.get_bitten_tests(root)
        return self.get_xunit_tests(root)
Example #13
def sync_job_step(step_id):
    step = JobStep.query.get(step_id)
    if not step:
        return

    implementation = get_build_step(step.job_id)
    implementation.update_step(step=step)

    if step.status != Status.finished:
        is_finished = False
    else:
        is_finished = sync_job_step.verify_all_children() == Status.finished

    if not is_finished:
        raise sync_job_step.NotFinished

    missing_tests = is_missing_tests(step)

    try_create(ItemStat, where={
        'item_id': step.id,
        'name': 'tests_missing',
    }, defaults={
        'value': int(missing_tests)
    })

    if step.result == Result.passed and missing_tests:
        step.result = Result.failed
        db.session.add(step)
        db.session.commit()
Example #14
    def _add_failure_reason(self):
        try_create(FailureReason, {
            'step_id': self.step.id,
            'job_id': self.step.job_id,
            'build_id': self.step.job.build_id,
            'project_id': self.step.project_id,
            'reason': 'malformed_artifact'
        })
        db.session.commit()
Example #15
def build_finished_handler(build, **kwargs):
    if build.result != Result.passed:
        return

    url = current_app.config.get('GREEN_BUILD_URL')
    if not url:
        logger.info('GREEN_BUILD_URL not set')
        return

    auth = current_app.config['GREEN_BUILD_AUTH']
    if not auth:
        logger.info('GREEN_BUILD_AUTH not set')
        return

    # we only want to identify stable revisions
    if build.patch_id or not build.revision_sha:
        logger.debug('Ignoring build due to non-commit: %s', build.id)
        return

    options = get_options(build.project_id)

    if options.get('green-build.notify', '1') != '1':
        logger.info('green-build.notify disabled for project: %s', build.project_id)
        return

    if build.repository.backend != RepositoryBackend.hg:
        logger.info('Repository backend is not supported: %s', build.repository.id)
        return

    vcs = build.repository.get_vcs()
    if vcs is None:
        logger.info('Repository has no VCS set: %s', build.repository.id)
        return

    # ensure we have the latest changes
    vcs.update()

    release_id = vcs.run(['log', '-r %s' % (build.revision_sha,), '--limit=1', '--template={rev}:{node|short}'])

    project = options.get('green-build.project') or build.project.slug

    requests.post(url, auth=auth, data={
        'project': project,
        'id': release_id,
        'build_url': build_uri('/projects/{0}/builds/{1}/'.format(
            build.project.slug, build.id.hex)),
        'build_server': 'changes',
    })

    try_create(Event, where={
        'type': EventType.green_build,
        'item_id': build.source_id,
        'data': {
            'build': build.id.hex,
        }
    })
Example #16
    def _add_failure_reason(self):
        try_create(
            FailureReason, {
                'step_id': self.step.id,
                'job_id': self.step.job_id,
                'build_id': self.step.job.build_id,
                'project_id': self.step.project_id,
                'reason': 'malformed_artifact'
            })
        db.session.commit()
Example #17
    def save(self, test_list):
        if not test_list:
            return

        step = self.step
        job = step.job
        project = job.project
        # agg_groups_by_id = {}

        # create all test cases
        for test in test_list:
            testcase = TestCase(
                job=job,
                step=step,
                name_sha=test.name_sha,
                project=project,
                name=test.name,
                duration=test.duration,
                message=test.message,
                result=test.result,
                date_created=test.date_created,
                reruns=test.reruns
            )
            db.session.add(testcase)

            if test.artifacts:
                for ta in test.artifacts:
                    testartifact = TestArtifact(
                        name=ta['name'],
                        type=ta['type'],
                        test=testcase,)
                    testartifact.save_base64_content(ta['base64'])
                    db.session.add(testartifact)

        try:
            db.session.commit()
        except IntegrityError:
            db.session.rollback()
            logger.exception('Duplicate test name; (step={})'.format(step.id.hex))
            try_create(FailureReason, {
                'step_id': step.id,
                'job_id': step.job_id,
                'build_id': step.job.build_id,
                'project_id': step.project_id,
                'reason': 'duplicate_test_name'
            })
            db.session.commit()

        try:
            self._record_test_counts(test_list)
            self._record_test_failures(test_list)
            self._record_test_duration(test_list)
            self._record_test_rerun_counts(test_list)
        except Exception:
            logger.exception('Failed to record aggregate test statistics')
Example #18
    def post(self, build_id):
        build = Build.query.get(build_id)
        if build is None:
            return "", 404

        if not session.get("uid"):
            # don't do anything if they aren't logged in
            return "", 200

        try_create(BuildSeen, where={"build_id": build.id, "user_id": session["uid"]})

        return "", 200
Example #19
def _record_tests_missing(job):
    tests_missing_count = db.session.query(func.sum(ItemStat.value), ).filter(
        ItemStat.item_id.in_(
            db.session.query(JobStep.id).filter(JobStep.job_id == job.id, )),
        ItemStat.name == 'tests_missing',
    ).as_scalar()

    try_create(ItemStat,
               where={
                   'item_id': job.id,
                   'name': 'tests_missing',
               },
               defaults={'value': tests_missing_count})
Example #20
def aggregate_job_stat(job, name, func_=func.sum):
    value = db.session.query(func.coalesce(func_(ItemStat.value), 0), ).filter(
        ItemStat.item_id.in_(
            db.session.query(JobStep.id).filter(JobStep.job_id == job.id, )),
        ItemStat.name == name,
    ).as_scalar()

    try_create(ItemStat,
               where={
                   'item_id': job.id,
                   'name': name,
               },
               defaults={'value': value})
Example #21
def aggregate_build_stat(build, name, func_=func.sum):
    value = db.session.query(func.coalesce(func_(ItemStat.value), 0), ).filter(
        ItemStat.item_id.in_(
            db.session.query(Job.id).filter(Job.build_id == build.id, )),
        ItemStat.name == name,
    ).as_scalar()

    try_create(ItemStat,
               where={
                   'item_id': build.id,
                   'name': name,
                   'value': value,
               })
Example #22
    def post(self, build_id):
        build = Build.query.get(build_id)
        if build is None:
            return '', 404

        if not session.get('uid'):
            return '', 401

        try_create(BuildSeen, where={
            'build_id': build.id,
            'user_id': session['uid'],
        })

        return '', 200
Example #23
    def post(self, build_id):
        build = Build.query.get(build_id)
        if build is None:
            return '', 404

        if not session.get('uid'):
            # don't do anything if they aren't logged in
            return self.respond({})

        try_create(BuildSeen, where={
            'build_id': build.id,
            'user_id': session['uid'],
        })

        return self.respond({})
Example #24
    def process(self, fp):
        try:
            phase_config = json.load(fp)
            _, implementation = JobPlan.get_build_step_for_job(job_id=self.step.job_id)
            implementation.expand_jobs(self.step, phase_config)
        except Exception:
            uri = build_uri('/find_build/{0}/'.format(self.step.job.build_id.hex))
            self.logger.warning('Failed to parse json; (step=%s, build=%s)', self.step.id.hex, uri, exc_info=True)
            try_create(FailureReason, {
                'step_id': self.step.id,
                'job_id': self.step.job_id,
                'build_id': self.step.job.build_id,
                'project_id': self.step.project_id,
                'reason': 'malformed_artifact'
            })
            db.session.commit()
Example #25
    def save(self, repository):
        author = self._get_author(self.author)
        if self.author == self.committer:
            committer = author
        else:
            committer = self._get_author(self.committer)

        revision, created = create_or_update(Revision, where={
            'repository': repository,
            'sha': self.id,
        }, values={
            'author': author,
            'committer': committer,
            'message': self.message,
            'parents': self.parents,
            'branches': self.branches,
            'date_created': self.author_date,
            'date_committed': self.committer_date,
        })

        # This call is relatively expensive - only do if necessary.
        if created:
            vcs = repository.get_vcs()
            if vcs:
                revision.patch_hash = vcs.get_patch_hash(self.id)

        # we also want to create a source for this item as it's the canonical
        # representation in the UI
        source = try_create(Source, {
            'revision_sha': self.id,
            'repository': repository,
        })

        return (revision, created, source)
Example #26
    def send(self, job, parent=None):
        # TODO(dcramer): we should send a clipping of a relevant job log
        recipients = self.get_recipients(job)
        if not recipients:
            return

        event = try_create(Event,
                           where={
                               'type': EventType.email,
                               'item_id': job.build_id,
                               'data': {
                                   'recipients': recipients,
                               }
                           })
        if not event:
            # We've already sent out notifications for this build
            return

        context = self.get_context(job, parent)

        msg = Message(context['title'],
                      recipients=recipients,
                      extra_headers={
                          'Reply-To':
                          ', '.join(sanitize_address(r) for r in recipients),
                      })
        msg.body = render_template('listeners/mail/notification.txt',
                                   **context)
        msg.html = Markup(
            toronado.from_string(
                render_template('listeners/mail/notification.html',
                                **context)))

        mail.send(msg)
Example #27
    def send(self, msg, build):
        msg.recipients = filter_recipients(msg.recipients)
        if not msg.recipients:
            self.logger.info(
                'Exiting for collection_id={} because its message has no '
                'recipients.'.format(build.collection_id))
            return

        event = try_create(Event,
                           where={
                               'type': EventType.email,
                               'item_id': build.collection_id,
                               'data': {
                                   'triggering_build_id': build.id.hex,
                                   'recipients': msg.recipients,
                               }
                           })
        # If we were unable to create the Event, we must've done so (and thus sent the mail) already.
        if not event:
            self.logger.warning(
                'An email has already been sent for collection_id=%s, (build_id=%s).',
                build.collection_id, build.id.hex)
            return

        mail.send(msg)
Example #28
    def send(self, job, parent=None):
        # TODO(dcramer): we should send a clipping of a relevant job log
        recipients = self.get_recipients(job)
        if not recipients:
            return

        event = try_create(Event, where={
            'type': EventType.email,
            'item_id': job.build_id,
            'data': {
                'recipients': recipients,
            }
        })
        if not event:
            # We've already sent out notifications for this build
            return

        context = self.get_context(job, parent)

        msg = Message(context['title'], recipients=recipients, extra_headers={
            'Reply-To': ', '.join(sanitize_address(r) for r in recipients),
        })
        msg.body = render_template('listeners/mail/notification.txt', **context)
        msg.html = Markup(toronado.from_string(
            render_template('listeners/mail/notification.html', **context)
        ))

        mail.send(msg)
Example #29
    def post(self, build_id):
        build = Build.query.get(build_id)
        if build is None:
            return '', 404

        user = get_current_user()
        if user is None:
            # don't do anything if they aren't logged in
            return self.respond({})

        try_create(BuildSeen, where={
            'build_id': build.id,
            'user_id': user.id,
        })

        return self.respond({})
Example #30
    def post(self, build_id):
        build = Build.query.get(build_id)
        if build is None:
            return '', 404

        user = get_current_user()
        if user is None:
            # don't do anything if they aren't logged in
            return self.respond({})

        try_create(BuildSeen,
                   where={
                       'build_id': build.id,
                       'user_id': user.id,
                   })

        return self.respond({})
Example #31
def aggregate_build_stat(build, name, func_=func.sum):
    value = db.session.query(
        func.coalesce(func_(ItemStat.value), 0),
    ).filter(
        ItemStat.item_id.in_(
            db.session.query(Job.id).filter(
                Job.build_id == build.id,
            )
        ),
        ItemStat.name == name,
    ).as_scalar()

    try_create(ItemStat, where={
        'item_id': build.id,
        'name': name,
        'value': value,
    })
Example #32
def _record_tests_missing(job):
    tests_missing_count = db.session.query(
        func.sum(ItemStat.value),
    ).filter(
        ItemStat.item_id.in_(
            db.session.query(JobStep.id).filter(
                JobStep.job_id == job.id,
            )
        ),
        ItemStat.name == 'tests_missing',
    ).as_scalar()

    try_create(ItemStat, where={
        'item_id': job.id,
        'name': 'tests_missing',
    }, defaults={
        'value': tests_missing_count
    })
Example #33
def _comment_posted_for_collection_of_build(build):
    event = try_create(Event, where={
        'type': EventType.phabricator_comment,
        'item_id': build.collection_id,
        'data': {
            'triggering_build_id': build.id.hex,
        }
    })
    return not event
Example #34
def aggregate_job_stat(job, name, func_=func.sum):
    value = db.session.query(
        func.coalesce(func_(ItemStat.value), 0),
    ).filter(
        ItemStat.item_id.in_(
            db.session.query(JobStep.id).filter(
                JobStep.job_id == job.id,
                JobStep.replacement_id.is_(None),
            )
        ),
        ItemStat.name == name,
    ).as_scalar()

    try_create(ItemStat, where={
        'item_id': job.id,
        'name': name,
        'value': value,
    })
Example #35
    def _sync_results(self, step, item):
        super(JenkinsCollectorBuilder, self)._sync_results(step, item)

        if step.data.get('expanded'):
            return

        artifacts = item.get('artifacts', ())
        if not any(a['fileName'].endswith('jobs.json') for a in artifacts):
            step.result = Result.failed
            db.session.add(step)

            job = step.job
            try_create(FailureReason, {
                'step_id': step.id,
                'job_id': job.id,
                'build_id': job.build_id,
                'project_id': job.project_id,
                'reason': 'missing_artifact'
            })
Example #36
def _comment_posted_for_collection_of_build(build):
    event = try_create(Event,
                       where={
                           'type': EventType.phabricator_comment,
                           'item_id': build.collection_id,
                           'data': {
                               'triggering_build_id': build.id.hex,
                           }
                       })
    return not event
Example #37
def aggregate_flaky_tests(day=None, max_flaky_tests=200):
    if day is None:
        day = datetime.utcnow().date() - timedelta(days=1)

    try:
        projects = Project.query.all()

        for project in projects:
            tests = get_flaky_tests(day, day + timedelta(days=1), [project], max_flaky_tests)

            for test in tests:
                first_run = (
                    db.session.query(TestCase.date_created)
                    .filter(TestCase.project_id == test["project_id"], TestCase.name_sha == test["hash"])
                    .order_by(TestCase.date_created)
                    .limit(1)
                    .scalar()
                )

                log_metrics(
                    "flaky_test_reruns",
                    flaky_test_reruns_name=test["name"],
                    flaky_test_reruns_project_id=test["project_id"],
                    flaky_test_reruns_flaky_runs=test["flaky_runs"],
                    flaky_test_reruns_passing_runs=test["passing_runs"],
                )
                try_create(
                    FlakyTestStat,
                    {
                        "name": test["name"],
                        "project_id": test["project_id"],
                        "date": day,
                        "last_flaky_run_id": test["id"],
                        "flaky_runs": test["flaky_runs"],
                        "double_reruns": test["double_reruns"],
                        "passing_runs": test["passing_runs"],
                        "first_run": first_run,
                    },
                )

        db.session.commit()
    except Exception as err:
        logging.exception(unicode(err))
Example #38
    def get_tests(self, fp):
        try:
            root = etree.fromstring(fp.read())
        except Exception:
            # Record the JobStep ID so we have any hope of tracking these down.
            self.logger.exception('Failed to parse XML; (step={})'.format(self.step.id.hex))
            try_create(FailureReason, {
                'step_id': self.step.id,
                'job_id': self.step.job_id,
                'build_id': self.step.job.build_id,
                'project_id': self.step.project_id,
                'reason': 'malformed_artifact'
            })
            db.session.commit()
            return []

        if root.tag == 'unittest-results':
            return self.get_bitten_tests(root)
        return self.get_xunit_tests(root)
Example #39
    def report_malformed(self):
        new_fr = try_create(FailureReason, {
            'step_id': self.step.id,
            'job_id': self.step.job_id,
            'build_id': self.step.job.build_id,
            'project_id': self.step.project_id,
            'reason': 'malformed_artifact'
        })
        if new_fr:
            db.session.commit()
Example #40
    def report_malformed(self):
        new_fr = try_create(
            FailureReason, {
                'step_id': self.step.id,
                'job_id': self.step.job_id,
                'build_id': self.step.job.build_id,
                'project_id': self.step.project_id,
                'reason': 'malformed_artifact'
            })
        if new_fr:
            db.session.commit()
Example #41
    def _sync_results(self, step, item):
        super(JenkinsCollectorBuilder, self)._sync_results(step, item)

        if step.data.get('expanded'):
            return

        artifacts = item.get('artifacts', ())
        if not any(a['fileName'].endswith('jobs.json') for a in artifacts):
            step.result = Result.failed
            db.session.add(step)

            job = step.job
            try_create(
                FailureReason, {
                    'step_id': step.id,
                    'job_id': job.id,
                    'build_id': job.build_id,
                    'project_id': job.project_id,
                    'reason': 'missing_artifact'
                })
Example #42
def aggregate_flaky_tests(day=None, max_flaky_tests=200):
    if day is None:
        day = datetime.utcnow().date() - timedelta(days=1)

    try:
        projects = Project.query.all()

        for project in projects:
            tests = get_flaky_tests(day, day + timedelta(days=1), [project], max_flaky_tests)

            for test in tests:
                first_run = db.session.query(
                    TestCase.date_created
                ).filter(
                    TestCase.project_id == test['project_id'],
                    TestCase.name_sha == test['hash']
                ).order_by(
                    TestCase.date_created
                ).limit(1).scalar()

                log_metrics(
                    "flaky_test_reruns",
                    flaky_test_reruns_name=test['name'],
                    flaky_test_reruns_project_id=test['project_id'],
                    flaky_test_reruns_flaky_runs=test['flaky_runs'],
                    flaky_test_reruns_passing_runs=test['passing_runs'],
                )
                try_create(FlakyTestStat, {
                    'name': test['name'],
                    'project_id': test['project_id'],
                    'date': day,
                    'last_flaky_run_id': test['id'],
                    'flaky_runs': test['flaky_runs'],
                    'double_reruns': test['double_reruns'],
                    'passing_runs': test['passing_runs'],
                    'first_run': first_run
                })

        db.session.commit()
    except Exception as err:
        logging.exception(unicode(err))
Example #43
def record_coverage_stats(step):
    coverage_stats = db.session.query(
        func.sum(FileCoverage.lines_covered).label('lines_covered'),
        func.sum(FileCoverage.lines_uncovered).label('lines_uncovered'),
        func.sum(FileCoverage.diff_lines_covered).label('diff_lines_covered'),
        func.sum(FileCoverage.diff_lines_uncovered).label('diff_lines_uncovered'),
    ).filter(
        FileCoverage.step_id == step.id,
    ).group_by(
        FileCoverage.step_id,
    ).first()

    stat_list = (
        'lines_covered', 'lines_uncovered',
        'diff_lines_covered', 'diff_lines_uncovered',
    )
    for stat_name in stat_list:
        try_create(ItemStat, where={
            'item_id': step.id,
            'name': stat_name,
            'value': getattr(coverage_stats, stat_name, 0) or 0,
        })
Example #44
def aggregate_flaky_tests(day=None, max_flaky_tests=200):
    if day is None:
        day = datetime.utcnow().date() - timedelta(days=1)

    try:
        projects = Project.query.all()

        for project in projects:
            tests = get_flaky_tests(day, day + timedelta(days=1), [project],
                                    max_flaky_tests)

            for test in tests:
                first_run = db.session.query(TestCase.date_created).filter(
                    TestCase.project_id == test['project_id'],
                    TestCase.name_sha == test['hash']).order_by(
                        TestCase.date_created).limit(1).scalar()

                log_metrics(
                    "flaky_test_reruns",
                    flaky_test_reruns_name=test['name'],
                    flaky_test_reruns_project_id=test['project_id'],
                    flaky_test_reruns_flaky_runs=test['flaky_runs'],
                    flaky_test_reruns_passing_runs=test['passing_runs'],
                )
                try_create(
                    FlakyTestStat, {
                        'name': test['name'],
                        'project_id': test['project_id'],
                        'date': day,
                        'last_flaky_run_id': test['id'],
                        'flaky_runs': test['flaky_runs'],
                        'double_reruns': test['double_reruns'],
                        'passing_runs': test['passing_runs'],
                        'first_run': first_run
                    })

        db.session.commit()
    except Exception as err:
        logging.exception(unicode(err))
Example #45
    def _sync_results(self, step, item):
        """
        At this point, we have already collected all of the artifacts, so if
        this is the initial collection phase and we did not collect a
        critical artifact then we error.
        """
        super(JenkinsCollectorBuilder, self)._sync_results(step, item)

        # We annotate the "expanded" jobs with this tag, so the individual
        # shards will no longer require the critical artifact
        if step.data.get('expanded'):
            return

        expected_image = self.get_expected_image(step.job_id)

        # if this is a snapshot build then we don't have to worry about
        # sanity checking the normal artifacts
        if expected_image:
            return

        artifacts = item.get('artifacts', ())
        required_artifact = self.get_required_artifact()

        if not any(
                os.path.basename(a['fileName']) == required_artifact
                for a in artifacts):
            step.result = Result.failed
            db.session.add(step)

            job = step.job
            try_create(
                FailureReason, {
                    'step_id': step.id,
                    'job_id': job.id,
                    'build_id': job.build_id,
                    'project_id': job.project_id,
                    'reason': 'missing_artifact'
                })
Example #46
    def get_tests(self, fp):
        try:
            # libxml has a limit on the size of a text field by default, but we encode stdout/stderr.
            #
            # It's not good to have such huge text fields in the first place, but we still want to
            # avoid hard failing here if we do.
            parser = etree.XMLParser(huge_tree=True)
            root = etree.fromstring(fp.read(), parser=parser)
        except Exception:
            # Record the JobStep ID so we have any hope of tracking these down.
            self.logger.exception('Failed to parse XML; (step={})'.format(self.step.id.hex))
            try_create(FailureReason, {
                'step_id': self.step.id,
                'job_id': self.step.job_id,
                'build_id': self.step.job.build_id,
                'project_id': self.step.project_id,
                'reason': 'malformed_artifact'
            })
            db.session.commit()
            return []

        if root.tag == 'unittest-results':
            return self.get_bitten_tests(root)
        return self.get_xunit_tests(root)
Example #47
    def _record_test_rerun_counts(self, test_list):
        job = self.step.job

        rerun_count = db.session.query(func.count(TestCase.id)).filter(
            TestCase.job_id == job.id,
            TestCase.reruns > 0,
        ).as_scalar()

        create_or_update(ItemStat,
                         where={
                             'item_id': self.step.id,
                             'name': 'test_rerun_count',
                         },
                         values={
                             'value': sum(1 for t in test_list if t.reruns),
                         })

        create_or_update(ItemStat,
                         where={
                             'item_id': job.id,
                             'name': 'test_rerun_count',
                         },
                         values={
                             'value': rerun_count,
                         })

        instance = try_create(ItemStat,
                              where={
                                  'item_id': job.build_id,
                                  'name': 'test_rerun_count',
                              },
                              defaults={'value': rerun_count})
        if not instance:
            ItemStat.query.filter(
                ItemStat.item_id == job.build_id,
                ItemStat.name == 'test_rerun_count',
            ).update(
                {
                    'value':
                    select([func.sum(ItemStat.value)]).where(
                        and_(
                            ItemStat.name == 'test_rerun_count',
                            ItemStat.item_id.in_(
                                select([Job.id]).where(
                                    Job.build_id == job.build_id, )))),
                },
                synchronize_session=False)
Example #48
    def _record_test_duration(self, test_list):
        job = self.step.job

        test_duration = db.session.query(func.sum(TestCase.duration)).filter(
            TestCase.job_id == job.id, ).as_scalar()

        create_or_update(ItemStat,
                         where={
                             'item_id': self.step.id,
                             'name': 'test_duration',
                         },
                         values={
                             'value': sum(t.duration for t in test_list),
                         })

        create_or_update(ItemStat,
                         where={
                             'item_id': job.id,
                             'name': 'test_duration',
                         },
                         values={
                             'value': test_duration,
                         })

        instance = try_create(ItemStat,
                              where={
                                  'item_id': job.build_id,
                                  'name': 'test_duration',
                              },
                              defaults={'value': test_duration})
        if not instance:
            ItemStat.query.filter(
                ItemStat.item_id == job.build_id,
                ItemStat.name == 'test_duration',
            ).update(
                {
                    'value':
                    select([func.sum(ItemStat.value)]).where(
                        and_(
                            ItemStat.name == 'test_duration',
                            ItemStat.item_id.in_(
                                select([Job.id]).where(
                                    Job.build_id == job.build_id, )))),
                },
                synchronize_session=False)
Example #49
    def save(self, repository):
        author = self._get_author(self.author)
        if self.author == self.committer:
            committer = author
        else:
            committer = self._get_author(self.committer)

        revision, created = create_or_update(Revision,
                                             where={
                                                 'repository': repository,
                                                 'sha': self.id,
                                             },
                                             values={
                                                 'author':
                                                 author,
                                                 'committer':
                                                 committer,
                                                 'message':
                                                 self.message,
                                                 'parents':
                                                 self.parents,
                                                 'branches':
                                                 self.branches,
                                                 'date_created':
                                                 self.author_date,
                                                 'date_committed':
                                                 self.committer_date,
                                             })

        # This call is relatively expensive - only do if necessary.
        if created:
            vcs = repository.get_vcs()
            if vcs:
                revision.patch_hash = vcs.get_patch_hash(self.id)

        # we also want to create a source for this item as it's the canonical
        # representation in the UI
        source = try_create(Source, {
            'revision_sha': self.id,
            'repository': repository,
        })

        return (revision, created, source)
Example #50
    def _record_test_rerun_counts(self, test_list):
        job = self.step.job

        rerun_count = db.session.query(func.count(TestCase.id)).filter(
            TestCase.job_id == job.id,
            TestCase.reruns > 0,
        ).as_scalar()

        create_or_update(ItemStat, where={
            'item_id': self.step.id,
            'name': 'test_rerun_count',
        }, values={
            'value': sum(1 for t in test_list if t.reruns),
        })

        create_or_update(ItemStat, where={
            'item_id': job.id,
            'name': 'test_rerun_count',
        }, values={
            'value': rerun_count,
        })

        instance = try_create(ItemStat, where={
            'item_id': job.build_id,
            'name': 'test_rerun_count',
        }, defaults={
            'value': rerun_count
        })
        if not instance:
            ItemStat.query.filter(
                ItemStat.item_id == job.build_id,
                ItemStat.name == 'test_rerun_count',
            ).update({
                'value': select([func.sum(ItemStat.value)]).where(
                    and_(
                        ItemStat.name == 'test_rerun_count',
                        ItemStat.item_id.in_(select([Job.id]).where(
                            Job.build_id == job.build_id,
                        ))
                    )
                ),
            }, synchronize_session=False)
Example #51
    def _record_test_duration(self, test_list):
        job = self.step.job

        test_duration = db.session.query(func.sum(TestCase.duration)).filter(
            TestCase.job_id == job.id,
        ).as_scalar()

        create_or_update(ItemStat, where={
            'item_id': self.step.id,
            'name': 'test_duration',
        }, values={
            'value': sum(t.duration for t in test_list),
        })

        create_or_update(ItemStat, where={
            'item_id': job.id,
            'name': 'test_duration',
        }, values={
            'value': test_duration,
        })

        instance = try_create(ItemStat, where={
            'item_id': job.build_id,
            'name': 'test_duration',
        }, defaults={
            'value': test_duration
        })
        if not instance:
            ItemStat.query.filter(
                ItemStat.item_id == job.build_id,
                ItemStat.name == 'test_duration',
            ).update({
                'value': select([func.sum(ItemStat.value)]).where(
                    and_(
                        ItemStat.name == 'test_duration',
                        ItemStat.item_id.in_(select([Job.id]).where(
                            Job.build_id == job.build_id,
                        ))
                    )
                ),
            }, synchronize_session=False)
Example #52
    def post(self):
        """
        Notify Changes of a newly created diff.

        Depending on system configuration, this may create 0 or more new builds,
        and the resulting response will be a list of those build objects.
        """
        args = self.parser.parse_args()

        repository = args.repository
        if not args.repository:
            return error("Repository not found")

        projects = list(
            Project.query.options(subqueryload_all('plans'), ).filter(
                Project.status == ProjectStatus.active,
                Project.repository_id == repository.id,
            ))

        # no projects bound to repository
        if not projects:
            return self.respond([])

        options = dict(
            db.session.query(
                ProjectOption.project_id, ProjectOption.value).filter(
                    ProjectOption.project_id.in_([p.id for p in projects]),
                    ProjectOption.name.in_([
                        'phabricator.diff-trigger',
                    ])))

        projects = [p for p in projects if options.get(p.id, '1') == '1']

        if not projects:
            return self.respond([])

        statsreporter.stats().incr('diffs_posted_from_phabricator')

        label = args.label[:128]
        author = args.author
        message = args.message
        sha = args.sha
        target = 'D{}'.format(args['phabricator.revisionID'])

        try:
            identify_revision(repository, sha)
        except MissingRevision:
            # This may just be a broken request (which is why we respond with a 400) but
            # it also might indicate Phabricator and Changes being out of sync somehow,
            # so we err on the side of caution and log it as an error.
            logging.error(
                "Diff %s was posted for an unknown revision (%s, %s)", target,
                sha, repository.url)
            return error("Unable to find commit %s in %s." %
                         (sha, repository.url),
                         problems=['sha', 'repository'])

        source_data = {
            'phabricator.buildTargetPHID': args['phabricator.buildTargetPHID'],
            'phabricator.diffID': args['phabricator.diffID'],
            'phabricator.revisionID': args['phabricator.revisionID'],
            'phabricator.revisionURL': args['phabricator.revisionURL'],
        }

        patch = Patch(
            repository=repository,
            parent_revision_sha=sha,
            diff=''.join(args.patch_file),
        )
        db.session.add(patch)

        source = Source(
            patch=patch,
            repository=repository,
            revision_sha=sha,
            data=source_data,
        )
        db.session.add(source)

        phabricatordiff = try_create(
            PhabricatorDiff, {
                'diff_id': args['phabricator.diffID'],
                'revision_id': args['phabricator.revisionID'],
                'url': args['phabricator.revisionURL'],
                'source': source,
            })
        if phabricatordiff is None:
            logging.error("Diff %s, Revision %s already exists",
                          args['phabricator.diffID'],
                          args['phabricator.revisionID'])
            return error("Diff already exists within Changes")

        project_options = ProjectOptionsHelper.get_options(
            projects, ['build.file-whitelist'])
        diff_parser = DiffParser(patch.diff)
        files_changed = diff_parser.get_changed_files()

        collection_id = uuid.uuid4()
        builds = []
        for project in projects:
            plan_list = get_build_plans(project)
            if not plan_list:
                logging.warning('No plans defined for project %s',
                                project.slug)
                continue

            try:
                if not files_changed_should_trigger_project(
                        files_changed,
                        project,
                        project_options[project.id],
                        sha,
                        diff=patch.diff):
                    logging.info(
                        'No changed files matched project trigger for project %s',
                        project.slug)
                    continue
            except InvalidDiffError:
                # ok, the build will fail and the user will be notified
                pass
            except ProjectConfigError:
                logging.error(
                    'Project config for project %s is not in a valid format. Author is %s.',
                    project.slug,
                    author.name,
                    exc_info=True)

            builds.append(
                create_build(
                    project=project,
                    collection_id=collection_id,
                    sha=sha,
                    target=target,
                    label=label,
                    message=message,
                    author=author,
                    patch=patch,
                    tag="phabricator",
                ))
        # This is the counterpoint to the above 'diffs_posted_from_phabricator';
        # at this point we've successfully processed the diff, so comparing this
        # stat to the above should give us the phabricator diff failure rate.
        statsreporter.stats().incr(
            'diffs_successfully_processed_from_phabricator')

        return self.respond(builds)
Example #53
    def post_impl(self):
        """
        Notify Changes of a newly created diff.

        Depending on system configuration, this may create 0 or more new builds,
        and the resulting response will be a list of those build objects.
        """

        # we manually check for arg presence here so we can send a more specific
        # error message to the user (rather than a plain 400)
        args = self.parser.parse_args()
        if not args.repository:
            # No need to postback a comment for this
            statsreporter.stats().incr("diffs_repository_not_found")
            return error("Repository not found")

        repository = args.repository

        projects = list(
            Project.query.options(subqueryload_all('plans'), ).filter(
                Project.status == ProjectStatus.active,
                Project.repository_id == repository.id,
            ))

        # no projects bound to repository
        if not projects:
            return self.respond([])

        options = dict(
            db.session.query(
                ProjectOption.project_id, ProjectOption.value).filter(
                    ProjectOption.project_id.in_([p.id for p in projects]),
                    ProjectOption.name.in_([
                        'phabricator.diff-trigger',
                    ])))

        # Filter out projects that aren't configured to run builds off of diffs
        # - Diff trigger disabled
        # - No build plans
        projects = [
            p for p in projects
            if options.get(p.id, '1') == '1' and get_build_plans(p)
        ]

        if not projects:
            return self.respond([])

        statsreporter.stats().incr('diffs_posted_from_phabricator')

        label = args.label[:128]
        author = args.author
        message = args.message
        sha = args.sha
        target = 'D%s' % args['phabricator.revisionID']

        try:
            identify_revision(repository, sha)
        except MissingRevision:
            # This may just be a broken request (which is why we respond with a 400) but
            # it also might indicate Phabricator and Changes being out of sync somehow,
            # so we err on the side of caution and log it as an error.
            logging.error(
                "Diff %s was posted for an unknown revision (%s, %s)", target,
                sha, repository.url)
            # We should postback since this can happen if a user diffs dependent revisions
            statsreporter.stats().incr("diffs_missing_base_revision")
            return self.postback_error(
                "Unable to find base revision {revision} in {repo} on Changes. Some possible reasons:\n"
                " - You may be working on multiple stacked diffs in your local repository.\n"
                "   {revision} only exists in your local copy. Changes thus cannot apply your patch\n"
                " - If you are sure that's not the case, it's possible you applied your patch to an extremely\n"
                "   recent revision which Changes hasn't picked up yet. Retry in a minute\n"
                .format(
                    revision=sha,
                    repo=repository.url,
                ),
                target,
                problems=['sha', 'repository'])

        source_data = {
            'phabricator.buildTargetPHID': args['phabricator.buildTargetPHID'],
            'phabricator.diffID': args['phabricator.diffID'],
            'phabricator.revisionID': args['phabricator.revisionID'],
            'phabricator.revisionURL': args['phabricator.revisionURL'],
        }

        patch = Patch(
            repository=repository,
            parent_revision_sha=sha,
            diff=''.join(line.decode('utf-8') for line in args.patch_file),
        )
        db.session.add(patch)

        source = Source(
            patch=patch,
            repository=repository,
            revision_sha=sha,
            data=source_data,
        )
        db.session.add(source)

        phabricatordiff = try_create(
            PhabricatorDiff, {
                'diff_id': args['phabricator.diffID'],
                'revision_id': args['phabricator.revisionID'],
                'url': args['phabricator.revisionURL'],
                'source': source,
            })
        if phabricatordiff is None:
            logging.warning("Diff %s, Revision %s already exists",
                            args['phabricator.diffID'],
                            args['phabricator.revisionID'])
            # No need to inform user about this explicitly
            statsreporter.stats().incr("diffs_already_exists")
            return error("Diff already exists within Changes")

        project_options = ProjectOptionsHelper.get_options(
            projects, ['build.file-whitelist'])
        diff_parser = DiffParser(patch.diff)
        files_changed = diff_parser.get_changed_files()

        collection_id = uuid.uuid4()
        builds = []
        for project in projects:
            plan_list = get_build_plans(project)
            # We already filtered out empty build plans
            assert plan_list, ('No plans defined for project {}'.format(
                project.slug))

            try:
                if not files_changed_should_trigger_project(
                        files_changed,
                        project,
                        project_options[project.id],
                        sha,
                        diff=patch.diff):
                    logging.info(
                        'No changed files matched project trigger for project %s',
                        project.slug)
                    continue
            except InvalidDiffError:
                # ok, the build will fail and the user will be notified
                pass
            except ProjectConfigError:
                logging.error(
                    'Project config for project %s is not in a valid format. Author is %s.',
                    project.slug,
                    author.name,
                    exc_info=True)

            builds.append(
                create_build(
                    project=project,
                    collection_id=collection_id,
                    sha=sha,
                    target=target,
                    label=label,
                    message=message,
                    author=author,
                    patch=patch,
                    tag="phabricator",
                ))

        # This is the counterpoint to the above 'diffs_posted_from_phabricator';
        # at this point we've successfully processed the diff, so comparing this
        # stat to the above should give us the phabricator diff failure rate.
        statsreporter.stats().incr(
            'diffs_successfully_processed_from_phabricator')

        return self.respond(builds)
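Example #53 decides which projects to trigger from DiffParser(patch.diff).get_changed_files(). A rough, stdlib-only sketch of that idea, assuming the changed paths can be read off the '--- a/...' / '+++ b/...' headers of a unified diff; the real parser also has to cope with renames, binary files, and malformed input (which is where InvalidDiffError comes from):

def get_changed_files(diff_text):
    # Illustrative only: collect the paths touched by a unified diff by
    # scanning its file-header lines. Lines such as '--- /dev/null' for
    # added or deleted files never match these prefixes, so they are
    # skipped automatically.
    changed = set()
    for line in diff_text.splitlines():
        if line.startswith('--- a/') or line.startswith('+++ b/'):
            # Drop the prefix and any trailing "\t<timestamp>" metadata.
            changed.add(line[6:].split('\t')[0])
    return changed


example_diff = (
    '--- a/foo/bar.py\n'
    '+++ b/foo/bar.py\n'
    '@@ -1 +1 @@\n'
    "-print('old')\n"
    "+print('new')\n"
)
print(get_changed_files(example_diff))  # {'foo/bar.py'}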
Exemple #54
0
    def post(self):
        """
        Notify Changes of a newly created diff.

        Depending on system configuration, this may create 0 or more new builds,
        and the resulting response will be a list of those build objects.
        """
        args = self.parser.parse_args()

        if not args.repository:
            return error("Repository not found")

        repository = args.repository

        projects = list(
            Project.query.options(subqueryload_all('plans')).filter(
                Project.status == ProjectStatus.active,
                Project.repository_id == repository.id,
            ))

        # no projects bound to repository
        if not projects:
            return self.respond([])

        options = dict(
            db.session.query(
                ProjectOption.project_id, ProjectOption.value).filter(
                    ProjectOption.project_id.in_([p.id for p in projects]),
                    ProjectOption.name.in_([
                        'phabricator.diff-trigger',
                    ])))

        projects = [p for p in projects if options.get(p.id, '1') == '1']

        if not projects:
            return self.respond([])

        label = args.label[:128]
        author = args.author
        message = args.message
        sha = args.sha
        target = 'D{}'.format(args['phabricator.revisionID'])

        try:
            identify_revision(repository, sha)
        except MissingRevision:
            return error("Unable to find commit %s in %s." %
                         (sha, repository.url),
                         problems=['sha', 'repository'])

        source_data = {
            'phabricator.buildTargetPHID': args['phabricator.buildTargetPHID'],
            'phabricator.diffID': args['phabricator.diffID'],
            'phabricator.revisionID': args['phabricator.revisionID'],
            'phabricator.revisionURL': args['phabricator.revisionURL'],
        }

        patch = Patch(
            repository=repository,
            parent_revision_sha=sha,
            diff=''.join(args.patch_file),
        )
        db.session.add(patch)

        source = Source(
            patch=patch,
            repository=repository,
            revision_sha=sha,
            data=source_data,
        )
        db.session.add(source)

        phabricatordiff = try_create(
            PhabricatorDiff, {
                'diff_id': args['phabricator.diffID'],
                'revision_id': args['phabricator.revisionID'],
                'url': args['phabricator.revisionURL'],
                'source': source,
            })
        if phabricatordiff is None:
            logging.error("Diff %s, Revision %s already exists",
                          args['phabricator.diffID'],
                          args['phabricator.revisionID'])
            return error("Diff already exists within Changes")

        project_options = ProjectOptionsHelper.get_options(
            projects, ['build.file-whitelist'])
        diff_parser = DiffParser(patch.diff)
        files_changed = diff_parser.get_changed_files()

        collection_id = uuid.uuid4()
        builds = []
        for project in projects:
            plan_list = get_build_plans(project)
            if not plan_list:
                logging.warning('No plans defined for project %s',
                                project.slug)
                continue

            if not in_project_files_whitelist(project_options[project.id],
                                              files_changed):
                logging.info(
                    'No changed files matched build.file-whitelist for project %s',
                    project.slug)
                continue

            builds.append(
                create_build(
                    project=project,
                    collection_id=collection_id,
                    sha=sha,
                    target=target,
                    label=label,
                    message=message,
                    author=author,
                    patch=patch,
                ))

        return self.respond(builds)
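Example #54 gates builds with in_project_files_whitelist instead of files_changed_should_trigger_project. A hedged sketch of what such a check might do, assuming the 'build.file-whitelist' option is a newline-separated list of fnmatch-style patterns and that an empty whitelist means "always build"; that option format is an assumption for illustration, not the documented Changes behaviour:

from fnmatch import fnmatch


def in_project_files_whitelist(project_options, files_changed):
    # Illustrative only: True if any changed file matches any whitelist
    # pattern; an empty or missing whitelist builds everything.
    whitelist = project_options.get('build.file-whitelist', '').strip()
    if not whitelist:
        return True
    patterns = [p.strip() for p in whitelist.splitlines() if p.strip()]
    return any(
        fnmatch(filename, pattern)
        for filename in files_changed
        for pattern in patterns
    )


opts = {'build.file-whitelist': 'src/*.py\ndocs/*'}
print(in_project_files_whitelist(opts, {'src/app.py'}))  # True
print(in_project_files_whitelist(opts, {'README.rst'}))  # False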
Exemple #55
0
def sync_job_step(step_id):
    """
    Polls a jenkins build for updates. May have sync_artifact children.
    """
    step = JobStep.query.get(step_id)
    if not step:
        return

    jobplan, implementation = JobPlan.get_build_step_for_job(
        job_id=step.job_id)

    # only synchronize if upstream hasn't suggested we're finished
    if step.status != Status.finished:
        implementation.update_step(step=step)

    db.session.flush()

    _sync_from_artifact_store(step)

    if step.status == Status.finished:
        _sync_artifacts_for_jobstep(step)

    is_finished = (
        step.status == Status.finished and
        # make sure all child tasks (like sync_artifact) have also finished
        sync_job_step.verify_all_children() == Status.finished)

    if not is_finished:
        default_timeout = current_app.config['DEFAULT_JOB_TIMEOUT_MIN']
        if has_timed_out(step, jobplan, default_timeout=default_timeout):
            old_status = step.status
            step.data['timed_out'] = True
            implementation.cancel_step(step=step)

            # Not all implementations can actually cancel, but it's dead to us as of now
            # so we mark it as finished.
            step.status = Status.finished
            step.date_finished = datetime.utcnow()

            # Implementations default to marking canceled steps as aborted,
            # but we're not canceling on good terms (it should be done by now)
            # so we consider it a failure here.
            #
            # We check whether the step was marked as in_progress to make a best
            # guess as to whether this is an infrastructure failure, or the
            # repository under test is just taking too long. This won't be 100%
            # reliable, but is probably good enough.
            if old_status == Status.in_progress:
                step.result = Result.failed
            else:
                step.result = Result.infra_failed
            db.session.add(step)

            job = step.job
            try_create(
                FailureReason, {
                    'step_id': step.id,
                    'job_id': job.id,
                    'build_id': job.build_id,
                    'project_id': job.project_id,
                    'reason': 'timeout'
                })

            db.session.flush()
            statsreporter.stats().incr('job_step_timed_out')
            # If we timeout something that isn't in progress, that's our fault, and we should know.
            if old_status != Status.in_progress:
                current_app.logger.warning(
                    "Timed out jobstep that wasn't in progress: %s (was %s)",
                    step.id, old_status)

        raise sync_job_step.NotFinished

    # Ignore any 'failures' if the build did not finish properly.
    # NOTE(josiah): we might want to include "unknown" and "skipped" here as
    # well, or have some named condition like "not meaningful_result(step.result)".
    if step.result in (Result.aborted, Result.infra_failed):
        _report_jobstep_result(step)
        return

    # Check for FailureReason objects generated by child jobs
    failure_result = _result_from_failure_reasons(step)
    if failure_result and failure_result != step.result:
        step.result = failure_result
        db.session.add(step)
        db.session.commit()
        if failure_result == Result.infra_failed:
            _report_jobstep_result(step)
            return

    try:
        record_coverage_stats(step)
    except Exception:
        current_app.logger.exception(
            'Failed to record coverage stats for step %s', step.id)

    # We need the start time of this step's phase to determine if we're part of
    # the last phase. So, if date_started is empty, wait for sync_phase to catch
    # up and try again.
    if _expects_tests(jobplan) and not step.phase.date_started:
        current_app.logger.warning(
            "Phase[%s].date_started is missing. Retrying Step", step.phase.id)

        # Reset result to unknown to reduce window where test might be incorrectly green.
        # Set status to in_progress so that the next sync_job_step will fetch status from Jenkins again.
        step.result = Result.unknown
        step.status = Status.in_progress
        raise sync_job_step.NotFinished

    missing_tests = is_missing_tests(step, jobplan)

    try_create(ItemStat,
               where={
                   'item_id': step.id,
                   'name': 'tests_missing',
                   'value': int(missing_tests),
               })

    if missing_tests:
        if step.result != Result.failed:
            step.result = Result.failed
            db.session.add(step)

        try_create(
            FailureReason, {
                'step_id': step.id,
                'job_id': step.job_id,
                'build_id': step.job.build_id,
                'project_id': step.project_id,
                'reason': 'missing_tests'
            })
        db.session.commit()

    db.session.flush()

    if has_test_failures(step):
        if step.result != Result.failed:
            step.result = Result.failed
            db.session.add(step)

        try_create(
            FailureReason, {
                'step_id': step.id,
                'job_id': step.job_id,
                'build_id': step.job.build_id,
                'project_id': step.project_id,
                'reason': 'test_failures'
            })
        db.session.commit()
    _report_jobstep_result(step)
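Both sync_job_step variants rely on has_timed_out(step, jobplan, ...) to decide when a stuck step should be cancelled. A minimal sketch of that kind of check, assuming the timeout is expressed in minutes, can be overridden by a per-plan option, and is measured from the step's start time; the 'build.timeout' option name and the snapshot_options attribute used here are hypothetical:

from datetime import datetime, timedelta
from types import SimpleNamespace


def has_timed_out(step, jobplan, default_timeout=60):
    # Illustrative only: a step has timed out once it has been running
    # longer than its configured timeout (in minutes).
    if step.date_started is None:
        return False
    options = getattr(jobplan, 'snapshot_options', None) or {}
    timeout_min = int(options.get('build.timeout', default_timeout))
    return datetime.utcnow() - step.date_started > timedelta(minutes=timeout_min)


step = SimpleNamespace(date_started=datetime.utcnow() - timedelta(minutes=90))
jobplan = SimpleNamespace(snapshot_options={})
print(has_timed_out(step, jobplan, default_timeout=60))  # True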
Exemple #56
0
def sync_job_step(step_id):
    step = JobStep.query.get(step_id)
    if not step:
        return

    jobplan, implementation = JobPlan.get_build_step_for_job(
        job_id=step.job_id)

    # only synchronize if upstream hasn't suggested we're finished
    if step.status != Status.finished:
        implementation.update_step(step=step)

    db.session.flush()

    if step.status != Status.finished:
        is_finished = False
    else:
        is_finished = sync_job_step.verify_all_children() == Status.finished

    if not is_finished:
        if has_timed_out(step, jobplan):
            implementation.cancel_step(step=step)

            step.result = Result.failed
            db.session.add(step)

            job = step.job
            try_create(
                FailureReason, {
                    'step_id': step.id,
                    'job_id': job.id,
                    'build_id': job.build_id,
                    'project_id': job.project_id,
                    'reason': 'timeout'
                })

            db.session.flush()
        if step.status != Status.in_progress:
            retry_after = QUEUED_RETRY_DELAY
        else:
            retry_after = None
        raise sync_job_step.NotFinished(retry_after=retry_after)

    # ignore any 'failures' if the step was aborted
    if step.result == Result.aborted:
        return

    try:
        record_coverage_stats(step)
    except Exception:
        current_app.logger.exception(
            'Failed to record coverage stats for step %s', step.id)

    missing_tests = is_missing_tests(step, jobplan)

    try_create(ItemStat,
               where={
                   'item_id': step.id,
                   'name': 'tests_missing',
               },
               defaults={'value': int(missing_tests)})

    if missing_tests:
        if step.result != Result.failed:
            step.result = Result.failed
            db.session.add(step)

        try_create(
            FailureReason, {
                'step_id': step.id,
                'job_id': step.job_id,
                'build_id': step.job.build_id,
                'project_id': step.project_id,
                'reason': 'missing_tests'
            })
        db.session.commit()

    db.session.flush()

    if has_test_failures(step):
        if step.result != Result.failed:
            step.result = Result.failed
            db.session.add(step)

        try_create(
            FailureReason, {
                'step_id': step.id,
                'job_id': step.job_id,
                'build_id': step.job.build_id,
                'project_id': step.project_id,
                'reason': 'test_failures'
            })
        db.session.commit()
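Both versions signal "poll me again later" by raising sync_job_step.NotFinished, optionally with a retry_after delay as in Example #56. A hedged sketch of how a task decorator could implement that contract; the scheduling hook here is a stand-in that just prints, not the real Changes task queue:

import functools


class NotFinished(Exception):
    # Raised by a tracked task to request a retry instead of failing.
    def __init__(self, retry_after=None):
        super(NotFinished, self).__init__(retry_after)
        self.retry_after = retry_after


def tracked_task(func, default_delay=60, schedule=print):
    # Illustrative only: run the task; if it raises NotFinished, hand it
    # back to a scheduler with the requested (or default) delay.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except NotFinished as exc:
            delay = exc.retry_after if exc.retry_after is not None else default_delay
            schedule('requeue %s in %ss' % (func.__name__, delay))
    wrapper.NotFinished = NotFinished
    return wrapper


@tracked_task
def sync_job_step(step_id):
    raise sync_job_step.NotFinished(retry_after=30)


sync_job_step('step-id')  # prints: requeue sync_job_step in 30s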