Example #1
    def get(self, project_id):
        project = Project.get(project_id)
        if project is None:
            return '', 404

        args = self.get_parser.parse_args()

        queryset = Plan.query.filter(
            Plan.project_id == project.id,
        )

        if args.query:
            queryset = queryset.filter(
                func.lower(Plan.label).contains(args.query.lower()),
            )

        if args.status:
            queryset = queryset.filter(
                Plan.status == PlanStatus[args.status],
            )

        if args.sort == 'name':
            queryset = queryset.order_by(Plan.label.asc())
        elif args.sort == 'date':
            queryset = queryset.order_by(Plan.date_created.asc())

        return self.paginate(queryset)
Example #2
    def get(self, project_id):
        project = Project.get(project_id)
        if not project:
            return '', 404

        args = self.parser.parse_args()

        if args.build_id:
            build = Build.query.get(args.build_id)
            if not build:
                return error("Build not found", http_code=404)
        else:
            latest_build = Build.query.join(
                Source, Source.id == Build.source_id,
            ).filter(
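                # "== None" (not "is None") so SQLAlchemy renders an SQL
                # IS NULL comparison; the NOQA tag silences flake8's E711.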
                Source.patch_id == None,    # NOQA
                Build.project_id == project.id,
                Build.result == Result.passed,
                Build.status == Status.finished,
            ).order_by(
                Build.date_created.desc(),
            ).limit(1).first()

            if not latest_build:
                return self.respond({})
            build = latest_build

        return self.respond(_generate_testgroup_data(build, project.id, args.parent))
Example #3
    def __getitem__(self, sliced):

        project = Project.get(self.project_id)
        whitelist = ProjectOptionsHelper.get_whitelisted_paths(project)

        repo = Repository.query.get(self.repository_id)
        vcs = repo.get_vcs()

        log = vcs.log(offset=sliced.start, limit=sliced.stop - sliced.start, branch=self.branch, paths=whitelist)

        revs = [rev.id for rev in log]
        if revs == []:
            return []

        # restrict the join to the last N jobs otherwise this can get
        # significantly expensive as we have to seek quite a ways
        recent_runs = list(TestCase.query.options(
            joinedload('job.build'),
            joinedload('job.build.author'),
            joinedload('job.build.source'),
            joinedload('job.build.source.revision'),
        ).filter(
            # join filters
            TestCase.job_id == Job.id,
            Job.source_id == Source.id,
            # other filters
            Job.project_id == project.id,
            Source.patch_id == None,  # NOQA
            Source.revision_sha.in_(revs),
            TestCase.name_sha == self.test.name_sha,
        ))

        # Sort by date created; this ensures the runs that end up in
        # recent_runs_map are always the latest run for any given sha
        recent_runs.sort(key=lambda run: run.date_created)
        recent_runs_map = {
            recent_run.job.build.source.revision_sha: recent_run
            for recent_run in recent_runs
        }

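        # Re-align runs with the original revision ordering; revisions with
        # no matching run map to None so callers see an explicit gap.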
        recent_runs = map(recent_runs_map.get, revs)

        jobs = set(r.job for r in recent_runs if r)
        builds = set(j.build for j in jobs)

        serialized_jobs = dict(zip(jobs, self.serialize(jobs)))
        serialized_builds = dict(zip(builds, self.serialize(builds)))

        results = []

        for recent_run in recent_runs:
            if recent_run is not None:
                s_recent_run = self.serialize(recent_run)
                s_recent_run['job'] = serialized_jobs[recent_run.job]
                s_recent_run['job']['build'] = serialized_builds[recent_run.job.build]
                results.append(s_recent_run)
            else:
                results.append(None)

        return results
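
The sort-then-overwrite step above keeps only the latest run per revision
sha. A minimal, self-contained sketch of the same pattern (the names here
are illustrative, not from the codebase):

    from collections import namedtuple

    Run = namedtuple('Run', ['sha', 'date_created'])

    def latest_run_per_sha(runs):
        # Sorting ascending by creation date means later (newer) runs
        # overwrite earlier ones when the dict is built.
        runs = sorted(runs, key=lambda run: run.date_created)
        return {run.sha: run for run in runs}
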
Example #4
    def get(self, project_id):
        project = Project.get(project_id)
        if not project:
            return error("Project not found", http_code=404)

        args = self.parser.parse_args()
        if args.date:
            try:
                query_date = datetime.strptime(args.date, '%Y-%m-%d').date()
            except ValueError:
                return error('Can\'t parse date "%s"' % (args.date))
        else:
            # This `7` is hard-coded to match the code in config.py which kicks
            # off the cron job 7 hours past midnight GMT (which corresponds to
            # midnight PST)
            delta = timedelta(days=2 if datetime.utcnow().hour < 7 else 1)
            query_date = datetime.utcnow().date() - delta

        data = {
            'date': str(query_date),
            'chartData': self.get_chart_data(project.id, query_date),
            'flakyTests': self.get_flaky_tests(project.id, query_date)
        }

        return self.respond(data)
Example #5
    def get(self, project_id, commit_id):
        project = Project.get(project_id)
        if not project:
            return '', 404

        repo = project.repository
        try:
            revision = Revision.get_by_sha_prefix_query(
                repo.id,
                commit_id,
            ).options(
                joinedload('author')
            ).scalar()
        except MultipleResultsFound:
            return '', 404
        else:
            if not revision:
                return '', 404

            context = self.serialize(revision)

            context.update({
                'repository': repo,
            })

            return self.respond(context)
Example #6
    def get(self, project_id):
        project = Project.get(project_id)
        if not project:
            return error("Project not found", http_code=404)

        args = self.parser.parse_args()
        if args.date:
            try:
                query_date = datetime.strptime(args.date, '%Y-%m-%d').date()
            except ValueError:
                return error('Can\'t parse date "%s"' % (args.date))
        else:
            # This `7` is hard-coded to match the code in config.py which kicks
            # off the cron job 7 hours past midnight GMT (which corresponds to
            # midnight PST)
            delta = timedelta(days=2 if datetime.utcnow().hour < 7 else 1)
            query_date = datetime.utcnow().date() - delta

        data = {
            'date': str(query_date),
            'chartData': self.get_chart_data(project.id, query_date),
            'flakyTests': self.get_flaky_tests(project.id, query_date)
        }

        return self.respond(data)
Example #7
    def get(self, project_id, commit_id):
        project = Project.get(project_id)
        if not project:
            return '', 404

        repo = project.repository
        revision = Revision.query.options(
            joinedload('author'),
        ).filter(
            Revision.repository_id == repo.id,
            Revision.sha == commit_id,
        ).first()
        if not revision:
            return '', 404

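        # contains_eager('source') populates Build.source from the explicit
        # join below instead of emitting a separate query for it.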
        build_query = Build.query.options(
            joinedload('author'),
            contains_eager('source').joinedload('revision'),
        ).join(
            Source, Build.source_id == Source.id,
        ).filter(
            Build.project_id == project.id,
            Source.revision_sha == revision.sha,
            Source.patch == None,  # NOQA
        ).order_by(Build.date_created.desc())

        return self.paginate(build_query)
Example #8
    def get(self, project_id, commit_id):
        project = Project.get(project_id)
        if not project:
            return '', 404

        repo = project.repository
        revision = Revision.query.options(
            joinedload('author'),
        ).filter(
            Revision.repository_id == repo.id,
            Revision.sha == commit_id,
        ).first()
        if not revision:
            return '', 404

        build_query = Build.query.options(
            joinedload('author'),
            contains_eager('source').joinedload('revision'),
        ).join(
            Source,
            Build.source_id == Source.id,
        ).filter(
            Build.project_id == project.id,
            Source.revision_sha == revision.sha,
            Source.patch == None,  # NOQA
        ).order_by(Build.date_created.desc())

        return self.paginate(build_query)
Example #9
    def get(self, project_id):
        project = Project.get(project_id)
        if not project:
            return error('project not found', http_code=404)

        args = self.get_parser.parse_args()

        # we want to only return commits in the repo that are within the
        # project's whitelist
        paths = None
        if not args.every_commit:
            paths = ProjectOptionsHelper.get_whitelisted_paths(project)

        repo = project.repository
        offset = (args.page - 1) * args.per_page
        limit = args.per_page + 1  # +1 to tell if there are more revs to get

        vcs = repo.get_vcs()
        if vcs:
            try:
                commits = self.get_commits_from_vcs(
                    repo, vcs, offset, limit, paths, args.parent, args.branch)
            except ValueError as err:
                return error(err.message)
        else:
            if args.parent or args.branch:
                param = 'Branches' if args.branch else 'Parents'
                return error(
                    '{0} not supported for projects with no repository.'.format(param),
                    http_code=422)
            # TODO: right now this fallback returns every commit for projects
            # with whitelisted paths.  At the very least, we need to tell the
            # frontend about this (perhaps using a response header)
            commits = self.get_commits_from_db(repo, offset, limit)

        page_links = self.make_links(
            current_page=args.page,
            has_next_page=len(commits) > args.per_page,
        )
        # we fetched one extra commit so that we'd know whether to create a
        # next link. Delete it
        commits = commits[:args.per_page]

        builds_map = {}
        if commits:
            builds_map = self.get_builds_for_commits(
                commits, project, args.all_builds)

        results = []
        for result in commits:
            if args.all_builds:
                result['builds'] = builds_map.get(result['id'], [])
            else:
                result['build'] = builds_map.get(result['id'])
            results.append(result)

        return self.respond(results, serialize=False, links=page_links)
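
The handler above asks for per_page + 1 rows so it can decide whether a
next link is needed without a second count query. A minimal sketch of that
trick, with fetch standing in for a hypothetical offset/limit query callable:

    def fetch_page(fetch, page, per_page):
        # Request one extra row; its presence means another page exists.
        rows = fetch(offset=(page - 1) * per_page, limit=per_page + 1)
        has_next_page = len(rows) > per_page
        return rows[:per_page], has_next_page
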
Example #10
    def post(self, project_id):
        project = Project.get(project_id)
        if project is None:
            return '', 404

        args = self.post_parser.parse_args()

        plan = Plan(
            label=args.name,
            project_id=project.id,
        )
        db.session.add(plan)

        return self.respond(plan)
Example #11
    def get(self, project_id):
        project = Project.get(project_id)
        if not project:
            return '', 404

        args = self.parser.parse_args()

        latest_build = Build.query.join(
            Source, Source.id == Build.source_id,
        ).filter(
            Source.patch_id == None,  # NOQA
            Build.project_id == project.id,
            Build.result == Result.passed,
            Build.status == Status.finished,
        ).order_by(
            Build.date_created.desc(),
        ).limit(1).first()

        if not latest_build:
            return self.respond([])

        job_list = db.session.query(Job.id).filter(
            Job.build_id == latest_build.id,
        )

        if not job_list:
            return self.respond([])

        # use the most recent test
        test_list = TestCase.query.filter(
            TestCase.project_id == project.id,
            TestCase.job_id.in_(job_list),
        )

        if args.min_duration:
            test_list = test_list.filter(
                TestCase.duration >= args.min_duration,
            )

        if args.query:
            test_list = test_list.filter(
                TestCase.name.contains(args.query),
            )

        if args.sort == 'duration':
            sort_by = TestCase.duration.desc()
        elif args.sort == 'name':
            sort_by = TestCase.name.asc()

        test_list = test_list.order_by(sort_by)

        return self.paginate(test_list, serializers={
            TestCase: GeneralizedTestCase(),
        })
Example #12
    def get(self, project_id):
        project = Project.get(project_id)
        if not project:
            return '', 404

        queryset = Snapshot.query.options(
            joinedload('source').joinedload('revision'),
        ).filter(
            Snapshot.project_id == project.id,
        ).order_by(
            Snapshot.date_created.desc(),
        )

        return self.paginate(queryset)
Example #13
    def get(self, project_id):
        project = Project.get(project_id)
        if project is None:
            return '', 404

        last_build = Build.query.options(
            joinedload('author'),
            contains_eager('source')
        ).join(
            Source, Build.source_id == Source.id,
        ).filter(
            Build.project == project,
            Build.status == Status.finished,
            *build_type.get_any_commit_build_filters()
        ).order_by(
            Build.date_created.desc(),
        ).first()
        if not last_build or last_build.result == Result.passed:
            last_passing_build = last_build
        else:
            last_passing_build = Build.query.options(
                joinedload('author'),
                contains_eager('source')
            ).join(
                Source, Build.source_id == Source.id,
            ).filter(
                Build.project == project,
                Build.result == Result.passed,
                Build.status == Status.finished,
                *build_type.get_any_commit_build_filters()
            ).order_by(
                Build.date_created.desc(),
            ).first()

        options = dict(
            (o.name, o.value) for o in ProjectOption.query.filter(
                ProjectOption.project_id == project.id,
            )
        )
        for key, value in OPTION_DEFAULTS.iteritems():
            options.setdefault(key, value)

        data = self.serialize(project)
        data['lastBuild'] = last_build
        data['lastPassingBuild'] = last_passing_build
        data['repository'] = project.repository
        data['options'] = options
        data['stats'] = self._get_stats(project)
        data['containsActiveAutogeneratedPlan'] = project_lib.contains_active_autogenerated_plan(project)

        return self.respond(data)
Example #14
    def post(self, project_id):
        project = Project.get(project_id)
        if project is None:
            return '', 404

        args = self.post_parser.parse_args()

        plan = Plan(
            label=args.name,
            project_id=project.id,
        )
        db.session.add(plan)

        return self.respond(plan)
Example #15
    def get(self, project_id):
        project = Project.get(project_id)
        if not project:
            return '', 404

        args = self.parser.parse_args()

        latest_build = Build.query.join(
            Source,
            Source.id == Build.source_id,
        ).filter(
            Source.patch_id == None,  # NOQA
            Build.project_id == project.id,
            Build.result == Result.passed,
            Build.status == Status.finished,
        ).order_by(
            Build.date_created.desc(),
        ).limit(1).first()

        if not latest_build:
            return self.respond([])

        job_list = db.session.query(Job.id).filter(
            Job.build_id == latest_build.id,
        )

        if not job_list:
            return self.respond([])

        # use the most recent test
        test_list = TestCase.query.filter(
            TestCase.project_id == project.id,
            TestCase.job_id.in_(job_list),
        )

        if args.min_duration:
            test_list = test_list.filter(
                TestCase.duration >= args.min_duration,
            )

        if args.query:
            test_list = test_list.filter(
                TestCase.name.contains(args.query),
            )

        if args.sort == 'duration':
            sort_by = TestCase.duration.desc()
        elif args.sort == 'name':
            sort_by = TestCase.name.asc()

        test_list = test_list.order_by(sort_by)

        return self.paginate(test_list, serializers={
            TestCase: GeneralizedTestCase(),
        })
Example #16
    def get(self, project_id):
        project = Project.get(project_id)
        if not project:
            return '', 404

        queryset = Snapshot.query.options(
            joinedload('source').joinedload('revision'),
        ).filter(
            Snapshot.project_id == project.id,
        ).order_by(
            Snapshot.date_created.desc(),
        )

        return self.paginate(queryset)
Example #17
def get_project_slug_from_project_id(*args, **kwargs):
    """
    Get the project slug from the project ID. This function assumes that
    the project ID is passed as the keyword argument `project_id`.

    Returns:
        basestring - project slug
    Raises:
        ResourceNotFound - if the project is not found
    """
    project_id = kwargs['project_id']
    # use our custom .get() function instead of .query.get()
    project = Project.get(project_id)
    if project is None:
        raise ResourceNotFound(
            'Project with ID {} not found.'.format(project_id))
    return project.slug
Example #18
    def get(self, project_id):
        project = Project.get(project_id)
        if not project:
            return '', 404

        args = self.parser.parse_args()

        latest_build = Build.query.join(
            Source, Source.id == Build.source_id,
        ).filter(
            Source.patch_id == None,  # NOQA
            Build.project_id == project.id,
            Build.result == Result.passed,
            Build.status == Status.finished,
        ).order_by(
            Build.date_created.desc(),
        ).limit(1).first()

        if not latest_build:
            return self.respond([])

        # use the most recent coverage
        cover_list = FileCoverage.query.filter(
            FileCoverage.job_id.in_(
                db.session.query(Job.id).filter(Job.build_id == latest_build.id)
            )
        )

        if args.query:
            cover_list = cover_list.filter(
                FileCoverage.filename.startswith(args.query),
            )

        if args.sort == 'lines_covered':
            sort_by = FileCoverage.lines_covered.desc()
        elif args.sort == 'lines_uncovered':
            sort_by = FileCoverage.lines_uncovered.desc()
        elif args.sort == 'name':
            sort_by = FileCoverage.filename.asc()

        cover_list = cover_list.order_by(sort_by)

        return self.paginate(cover_list, serializers={
            FileCoverage: GeneralizedFileCoverage(),
        })
Example #19
    def get(self, project_id):
        project = Project.get(project_id)
        if not project:
            return '', 404

        args = self.get_parser.parse_args()

        filters = []

        if args.branch:
            filters.append(LatestGreenBuild.branch == args.branch)

        queryset = LatestGreenBuild.query.options(
            joinedload('build').joinedload('source').joinedload('revision')
        ).filter(
            LatestGreenBuild.project_id == project.id,
            *filters
        )

        return self.paginate(queryset)
Example #20
    def get(self, project_id, test_hash):
        project = Project.get(project_id)
        if not project:
            return error("Project not found", http_code=404)

        # use the most recent test run to find basic details
        test = TestCase.query.filter(
            TestCase.project_id == project.id,
            TestCase.name_sha == test_hash,
        ).order_by(TestCase.date_created.desc()).limit(1).first()
        if not test:
            return error("Test not found", http_code=404)

        context = self.serialize(test, {
            TestCase: GeneralizedTestCase(),
        })

        return self.respond(context)
Example #21
    def get(self, project_id, source_id):
        project = Project.get(project_id)
        if not project:
            return '', 404

        repo = project.repository
        source = Source.query.filter(
            Source.id == source_id,
            Source.repository_id == repo.id,
        ).first()
        if source is None:
            return '', 404

        build_query = Build.query.options(
            joinedload('author'),
        ).filter(
            Build.project_id == project.id,
            Build.source_id == source.id,
        ).order_by(Build.date_created.desc())

        return self.paginate(build_query)
Example #22
    def get(self, project_id, test_hash):
        project = Project.get(project_id)
        if not project:
            return '', 404

        # use the most recent test run to find basic details
        test = TestCase.query.filter(
            TestCase.project_id == project.id,
            TestCase.name_sha == test_hash,
        ).order_by(TestCase.date_created.desc()).limit(1).first()
        if not test:
            return '', 404

        args = self.get_parser.parse_args()

        return self.paginate(
            HistorySliceable(project_id, args.branch, test, project.repository_id, self.serialize),
            serialize=False
        )
Example #23
    def get(self, project_id, test_hash):
        project = Project.get(project_id)
        if not project:
            return '', 404

        # use the most recent test run to find basic details
        test = TestCase.query.filter(
            TestCase.project_id == project.id,
            TestCase.name_sha == test_hash,
        ).order_by(TestCase.date_created.desc()).limit(1).first()
        if not test:
            return '', 404

        args = self.get_parser.parse_args()

        return self.paginate(
            HistorySliceable(project_id, args.branch, test, project.repository_id, self.serialize),
            serialize=False
        )
Example #24
    def get(self, project_id):
        project = Project.get(project_id)
        if not project:
            return '', 404

        args = self.parser.parse_args()

        latest_build = Build.query.join(
            Source,
            Source.id == Build.source_id,
        ).filter(
            Source.patch_id == None,  # NOQA
            Build.project_id == project.id,
            Build.result == Result.passed,
            Build.status == Status.finished,
        ).order_by(
            Build.date_created.desc(),
        ).limit(1).first()

        if not latest_build:
            return self.respond([])

        # use the most recent coverage
        cover_list = FileCoverage.query.filter(
            FileCoverage.job_id.in_(
                db.session.query(Job.id).filter(Job.build_id == latest_build.id)
            )
        )

        if args.query:
            cover_list = cover_list.filter(
                FileCoverage.filename.startswith(args.query),
            )

        if args.sort == 'lines_covered':
            sort_by = FileCoverage.lines_covered.desc()
        elif args.sort == 'lines_uncovered':
            sort_by = FileCoverage.lines_uncovered.desc()
        elif args.sort == 'name':
            sort_by = FileCoverage.filename.asc()

        cover_list = cover_list.order_by(sort_by)

        return self.paginate(cover_list, serializers={
            FileCoverage: GeneralizedFileCoverage(),
        })
Example #25
    def get(self, project_id, source_id):
        project = Project.get(project_id)
        if not project:
            return error("Project not found", http_code=404)

        repo = project.repository
        source = Source.query.filter(
            Source.id == source_id,
            Source.repository_id == repo.id,
        ).first()
        if source is None:
            return error("Source not found", http_code=404)

        context = self.serialize(source)

        diff = source.generate_diff()

        args = self.get_parser.parse_args()

        if diff:
            files = self._get_files_from_raw_diff(diff)

            if args.coverage:
                coverage = merged_coverage_data(
                    c for c in get_coverage_by_source_id(source_id)
                    if c.filename in files)
                coverage_for_added_lines = self._filter_coverage_for_added_lines(
                    diff, coverage)

            tails_info = dict(source.data)
        else:
            coverage = None
            coverage_for_added_lines = None
            tails_info = None

        context['diff'] = diff
        if args.coverage:
            context['coverage'] = coverage
            context['coverageForAddedLines'] = coverage_for_added_lines
        context['tailsInfo'] = tails_info

        return self.respond(context)
Example #26
    def get(self, project_id):
        project = Project.get(project_id)
        if not project:
            return '', 404

        args = self.get_parser.parse_args()

        filters = []

        if args.branch:
            filters.append(LatestGreenBuild.branch == args.branch)

        queryset = LatestGreenBuild.query.options(
            joinedload('build').joinedload('source').joinedload('revision')
        ).filter(
            LatestGreenBuild.project_id == project.id,
            *filters
        )

        return self.paginate(queryset)
Example #27
    def get(self, project_id):
        project = Project.get(project_id)
        if project is None:
            return '', 404

        last_build = Build.query.options(
            joinedload('author'),
            contains_eager('source')
        ).join(
            Source, Build.source_id == Source.id,
        ).filter(
            Build.project == project,
            Build.status == Status.finished,
            *build_type.get_any_commit_build_filters()
        ).order_by(
            Build.date_created.desc(),
        ).first()
        if not last_build or last_build.result == Result.passed:
            last_passing_build = last_build
        else:
            last_passing_build = Build.query.options(
                joinedload('author'),
                contains_eager('source')
            ).join(
                Source, Build.source_id == Source.id,
            ).filter(
                Build.project == project,
                Build.result == Result.passed,
                Build.status == Status.finished,
                *build_type.get_any_commit_build_filters()
            ).order_by(
                Build.date_created.desc(),
            ).first()

        options = dict(
            (o.name, o.value) for o in ProjectOption.query.filter(
                ProjectOption.project_id == project.id,
            )
        )
        for key, value in OPTION_DEFAULTS.iteritems():
            options.setdefault(key, value)

        data = self.serialize(project)
        data['lastBuild'] = last_build
        data['lastPassingBuild'] = last_passing_build
        data['repository'] = project.repository
        data['options'] = options
        data['stats'] = self._get_stats(project)
        data['containsActiveAutogeneratedPlan'] = project_lib.contains_active_autogenerated_plan(project)

        return self.respond(data)
Example #28
    def get(self, project_id, source_id):
        project = Project.get(project_id)
        if not project:
            return '', 404

        repo = project.repository
        source = Source.query.filter(
            Source.id == source_id,
            Source.repository_id == repo.id,
        ).first()
        if source is None:
            return '', 404

        build_query = Build.query.options(
            joinedload('author'),
        ).filter(
            Build.project_id == project.id,
            Build.source_id == source.id,
        ).order_by(Build.date_created.desc())

        return self.paginate(build_query)
Example #29
    def get(self, project_id, source_id):
        project = Project.get(project_id)
        if not project:
            return error("Project not found", http_code=404)

        repo = project.repository
        source = Source.query.filter(
            Source.id == source_id,
            Source.repository_id == repo.id,
        ).first()
        if source is None:
            return error("Source not found", http_code=404)

        context = self.serialize(source)

        diff = source.generate_diff()

        args = self.get_parser.parse_args()

        if diff:
            files = self._get_files_from_raw_diff(diff)

            if args.coverage:
                coverage = merged_coverage_data(c for c in get_coverage_by_source_id(source_id)
                                                if c.filename in files)
                coverage_for_added_lines = self._filter_coverage_for_added_lines(diff, coverage)

            tails_info = dict(source.data)
        else:
            coverage = None
            coverage_for_added_lines = None
            tails_info = None

        context['diff'] = diff
        if args.coverage:
            context['coverage'] = coverage
            context['coverageForAddedLines'] = coverage_for_added_lines
        context['tailsInfo'] = tails_info

        return self.respond(context)
Example #30
    def post(self, project_id):
        project = Project.get(project_id)
        if project is None:
            return '', 404

        args = self.post_parser.parse_args()

        if args.name:
            project.name = args.name

        if args.slug:
            match = Project.query.filter(
                Project.slug == args.slug,
                Project.id != project.id,
            ).first()
            if match:
                return '{"error": "Project with slug %r already exists"}' % (
                    args.slug, ), 400

            project.slug = args.slug

        if args.repository:
            repository = Repository.get(args.repository)
            if repository is None:
                return '{"error": "Repository with url %r does not exist"}' % (
                    args.repository, ), 400
            project.repository = repository

        if args.status == 'inactive':
            project.status = ProjectStatus.inactive
        elif args.status == 'active':
            project.status = ProjectStatus.active

        db.session.add(project)

        data = self.serialize(project)
        data['repository'] = self.serialize(project.repository)

        return self.respond(data, serialize=False)
Example #31
    def get(self, project_id):
        project = Project.get(project_id)
        if project is None:
            return '', 404

        args = self.get_parser.parse_args()

        queryset = Plan.query.filter(
            Plan.project_id == project.id,
        )

        if args.query:
            queryset = queryset.filter(
                func.lower(Plan.label).contains(args.query.lower()),
            )

        if args.status:
            queryset = queryset.filter(
                Plan.status == PlanStatus[args.status],
            )

        if args.sort == 'name':
            queryset = queryset.order_by(Plan.label.asc())
        elif args.sort == 'date':
            queryset = queryset.order_by(Plan.date_created.asc())

        return self.paginate(queryset)
Example #32
    def post(self, project_id):
        project = Project.get(project_id)
        if project is None:
            return '', 404

        args = self.post_parser.parse_args()

        if args.name:
            project.name = args.name

        if args.slug:
            match = Project.query.filter(
                Project.slug == args.slug,
                Project.id != project.id,
            ).first()
            if match:
                return '{"error": "Project with slug %r already exists"}' % (args.slug,), 400

            project.slug = args.slug

        if args.repository:
            repository = Repository.get(args.repository)
            if repository is None:
                return '{"error": "Repository with url %r does not exist"}' % (args.repository,), 400
            project.repository = repository

        if args.status == 'inactive':
            project.status = ProjectStatus.inactive
        elif args.status == 'active':
            project.status = ProjectStatus.active

        db.session.add(project)

        data = self.serialize(project)
        data['repository'] = self.serialize(project.repository)

        return self.respond(data, serialize=False)
Example #33
    def get(self, project_id):
        project = Project.get(project_id)
        if not project:
            return '', 404

        args = self.parser.parse_args()

        latest_build = Build.query.join(
            Source,
            Source.id == Build.source_id,
        ).filter(
            Source.patch_id == None,  # NOQA
            Build.project_id == project.id,
            Build.result == Result.passed,
            Build.status == Status.finished,
        ).order_by(
            Build.date_created.desc(),
        ).limit(1).first()

        if not latest_build:
            return '{}'

        # use the most recent coverage
        cover_list = FileCoverage.query.filter(
            FileCoverage.job_id.in_(
                db.session.query(Job.id).filter(
                    Job.build_id == latest_build.id,
                )
            )
        )

        if args.parent:
            cover_list = cover_list.filter(
                FileCoverage.filename.startswith(args.parent),
            )

        cover_list = list(cover_list)

        groups = build_tree(
            [c.filename for c in cover_list],
            sep='/',
            min_children=2,
            parent=args.parent,
        )

        results = []
        for group in groups:
            num_files = 0
            total_lines_covered = 0
            total_lines_uncovered = 0
            for file_coverage in cover_list:
                filename = file_coverage.filename
                if filename == group or filename.startswith(group + '/'):
                    num_files += 1
                    total_lines_covered += file_coverage.lines_covered
                    total_lines_uncovered += file_coverage.lines_uncovered

            if args.parent:
                filename = group[len(args.parent) + len('/'):]
            else:
                filename = group

            data = {
                'filename': filename,
                'path': group,
                'totalLinesCovered': total_lines_covered,
                'totalLinesUncovered': total_lines_uncovered,
                'numFiles': num_files,
            }
            results.append(data)
        results.sort(key=lambda x: x['totalLinesUncovered'], reverse=True)

        trail = []
        context = []
        if args.parent:
            for chunk in args.parent.split('/'):
                context.append(chunk)
                trail.append({
                    'path': '/'.join(context),
                    'name': chunk,
                })

        data = {
            'groups': results,
            'trail': trail,
        }

        return self.respond(data, serialize=False)
Example #34
    def get(self, project_id):
        project = Project.get(project_id)
        if not project:
            return '', 404

        args = self.parser.parse_args()

        points = args.points or POINTS_DEFAULT[args.resolution]

        if args.from_date:
            date_end = datetime.fromtimestamp(args.from_date)
        else:
            date_end = datetime.now()

        date_end = date_end.replace(
            minute=0, second=0, microsecond=0)

        if args.resolution == '1h':
            grouper = func.date_trunc('hour', Build.date_created)
            decr_res = decr_hour
        elif args.resolution == '1d':
            grouper = func.date_trunc('day', Build.date_created)
            date_end = date_end.replace(hour=0)
            decr_res = decr_day
        elif args.resolution == '1w':
            grouper = func.date_trunc('week', Build.date_created)
            date_end = date_end.replace(hour=0)
            date_end -= timedelta(days=date_end.weekday())
            decr_res = decr_week
        elif args.resolution == '1m':
            grouper = func.date_trunc('month', Build.date_created)
            date_end = date_end.replace(hour=0, day=1)
            decr_res = decr_month

        if args.agg:
            value = getattr(func, args.agg)(ItemStat.value)
        else:
            value = func.avg(ItemStat.value)

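        # replace() with no arguments copies the datetime; walk the copy back
        # `points` resolution steps to find the start of the window.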
        date_begin = date_end.replace()
        for _ in xrange(points):
            date_begin = decr_res(date_begin)

        # TODO(dcramer): put minimum date bounds
        results = dict(db.session.query(
            grouper.label('grouper'),
            value.label('value'),
        ).filter(
            ItemStat.item_id == Build.id,
            ItemStat.name == args.stat,
            Build.project_id == project.id,
            Build.date_created >= date_begin,
            Build.date_created < date_end,
        ).group_by('grouper'))

        data = []
        cur_date = date_end.replace()
        for _ in xrange(points):
            cur_date = decr_res(cur_date)
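            # strftime('%s.%f') yields fractional epoch seconds (a platform-
            # specific directive, available with glibc); scale to milliseconds.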
            data.append({
                'time': int(float(cur_date.strftime('%s.%f')) * 1000),
                'value': int(float(results.get(cur_date, 0))),
            })
        data.reverse()

        return self.respond(data, serialize=False)
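
The decr_* helpers used above step a datetime back by one resolution unit.
They are defined elsewhere in the codebase; a plausible sketch, for
illustration only:

    from datetime import timedelta

    def decr_hour(dt):
        return dt - timedelta(hours=1)

    def decr_day(dt):
        return dt - timedelta(days=1)

    def decr_week(dt):
        return dt - timedelta(weeks=1)

    def decr_month(dt):
        # Back up to the first day of the previous month.
        return (dt.replace(day=1) - timedelta(days=1)).replace(day=1)
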
Example #35
    def get(self, project_id):
        project = Project.get(project_id)
        if not project:
            return error('project not found', http_code=404)

        args = self.get_parser.parse_args()

        # we want to only return commits in the repo that are within the
        # project's whitelist
        paths = None
        if not args.every_commit:
            paths = ProjectOptionsHelper.get_whitelisted_paths(project)

        repo = project.repository
        offset = (args.page - 1) * args.per_page
        limit = args.per_page + 1  # +1 to tell if there are more revs to get

        vcs = repo.get_vcs()
        if vcs:
            try:
                commits = self.get_commits_from_vcs(repo, vcs, offset, limit,
                                                    paths, args.parent,
                                                    args.branch)
            except ValueError as err:
                return error(err.message)
        else:
            if args.parent or args.branch:
                param = 'Branches' if args.branch else 'Parents'
                return error(
                    '{0} not supported for projects with no repository.'.format(param),
                    http_code=422)
            # TODO: right now this fallback returns every commit for projects
            # with whitelisted paths.  At the very least, we need to tell the
            # frontend about this (perhaps using a response header)
            commits = self.get_commits_from_db(repo, offset, limit)

        page_links = self.make_links(
            current_page=args.page,
            has_next_page=len(commits) > args.per_page,
        )
        # we fetched one extra commit so that we'd know whether to create a
        # next link. Delete it
        commits = commits[:args.per_page]

        builds_map = {}
        if commits:
            builds_map = self.get_builds_for_commits(commits, project,
                                                     args.all_builds)

        revision_result_map = self.get_revision_result_map(
            project, [c['id'] for c in commits])

        results = []
        for result in commits:
            if args.all_builds:
                result['builds'] = builds_map.get(result['id'], [])
            else:
                result['build'] = builds_map.get(result['id'])
            if result['id'] in revision_result_map:
                result['revisionResult'] = self.serialize(
                    revision_result_map[result['id']])
            results.append(result)

        return self.respond(results, serialize=False, links=page_links)
Example #36
    def post(self, project_id):
        """Initiates a new snapshot for this project."""
        project = Project.get(project_id)
        if not project:
            return '', 404

        args = self.post_parser.parse_args()

        repository = project.repository

        try:
            revision = identify_revision(repository, args.sha)
        except MissingRevision:
            # if the default fails, we absolutely can't continue and the
            # client should send a valid revision
            return error("Unable to find a matching revision.")

        if revision:
            sha = revision.sha
        else:
            sha = args.sha

        plan_list = get_snapshottable_plans(project)

        if not plan_list:
            return error("No snapshottable plans associated with project.")

        source, _ = get_or_create(Source,
                                  where={
                                      'repository': repository,
                                      'revision_sha': sha,
                                      'patch_id': None,
                                  })

        build = Build(
            source_id=source.id,
            source=source,
            project_id=project.id,
            project=project,
            label='Create Snapshot',
            status=Status.queued,
            cause=Cause.snapshot,
            target=sha[:12],
            tags=['snapshot'],
            # Snapshot builds are often part of the solution to queueing, so we make them
            # high priority to schedule them sooner.
            priority=BuildPriority.high,
        )
        db.session.add(build)

        # TODO(dcramer): this needs to update with the build result
        snapshot = Snapshot(
            project_id=project.id,
            source_id=source.id,
            build_id=build.id,
            status=SnapshotStatus.pending,
        )
        db.session.add(snapshot)

        jobs = []
        for plan in plan_list:
            job = Job(
                build=build,
                build_id=build.id,
                project=project,
                project_id=project.id,
                source=build.source,
                source_id=build.source_id,
                status=build.status,
                label='Create Snapshot: %s' % (plan.label, ),
            )
            db.session.add(job)

            jobplan = JobPlan.build_jobplan(plan, job)
            db.session.add(jobplan)

            image = SnapshotImage(
                job=job,
                snapshot=snapshot,
                plan=plan,
            )
            db.session.add(image)

            jobs.append(job)

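        # Commit before queueing the async tasks so the new rows are visible
        # to the workers that pick them up.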
        db.session.commit()

        for job in jobs:
            create_job.delay(
                job_id=job.id.hex,
                task_id=job.id.hex,
                parent_task_id=job.build_id.hex,
            )

        db.session.commit()

        sync_build.delay(
            build_id=build.id.hex,
            task_id=build.id.hex,
        )

        return self.respond(snapshot)
Example #37
    def post(self, project_id):
        """Initiates a new snapshot for this project."""
        project = Project.get(project_id)
        if not project:
            return '', 404

        args = self.post_parser.parse_args()

        repository = project.repository

        try:
            revision = identify_revision(repository, args.sha)
        except MissingRevision:
            # if the default fails, we absolutely can't continue and the
            # client should send a valid revision
            return error("Unable to find a matching revision.")

        if revision:
            sha = revision.sha
        else:
            sha = args.sha

        plan_list = get_snapshottable_plans(project)

        if not plan_list:
            return error("No snapshottable plans associated with project.")

        source, _ = get_or_create(Source, where={
            'repository': repository,
            'revision_sha': sha,
            'patch_id': None,
        })

        build = Build(
            source_id=source.id,
            source=source,
            project_id=project.id,
            project=project,
            label='Create Snapshot',
            status=Status.queued,
            cause=Cause.snapshot,
            target=sha[:12],
            tags=['snapshot'],
            # Snapshot builds are often part of the solution to queueing, so we make them
            # high priority to schedule them sooner.
            priority=BuildPriority.high,
        )
        db.session.add(build)

        # TODO(dcramer): this needs to update with the build result
        snapshot = Snapshot(
            project_id=project.id,
            source_id=source.id,
            build_id=build.id,
            status=SnapshotStatus.pending,
        )
        db.session.add(snapshot)

        jobs = []
        for plan in plan_list:
            job = Job(
                build=build,
                build_id=build.id,
                project=project,
                project_id=project.id,
                source=build.source,
                source_id=build.source_id,
                status=build.status,
                label='Create Snapshot: %s' % (plan.label,),
            )
            db.session.add(job)

            jobplan = JobPlan.build_jobplan(plan, job)
            db.session.add(jobplan)

            image = SnapshotImage(
                job=job,
                snapshot=snapshot,
                plan=plan,
            )
            db.session.add(image)

            jobs.append(job)

        db.session.commit()

        for job in jobs:
            create_job.delay(
                job_id=job.id.hex,
                task_id=job.id.hex,
                parent_task_id=job.build_id.hex,
            )

        db.session.commit()

        sync_build.delay(
            build_id=build.id.hex,
            task_id=build.id.hex,
        )

        return self.respond(snapshot)
Example #38
    def __getitem__(self, sliced):

        project = Project.get(self.project_id)
        whitelist = ProjectOptionsHelper.get_whitelisted_paths(project)

        repo = Repository.query.get(self.repository_id)
        vcs = repo.get_vcs()

        log = vcs.log(offset=sliced.start,
                      limit=sliced.stop - sliced.start,
                      branch=self.branch,
                      paths=whitelist)

        revs = [rev.id for rev in log]
        if revs == []:
            return []

        # restrict the join to the last N jobs otherwise this can get
        # significantly expensive as we have to seek quite a ways
        recent_runs = list(
            TestCase.query.options(
                joinedload('job.build'),
                joinedload('job.build.author'),
                joinedload('job.build.source'),
                joinedload('job.build.source.revision'),
            ).filter(
                # join filters
                TestCase.job_id == Job.id,
                Job.source_id == Source.id,
                # other filters
                Job.project_id == project.id,
                Source.patch_id == None,  # NOQA
                Source.revision_sha.in_(revs),
                TestCase.name_sha == self.test.name_sha,
            ))

        # Sort by date created; this ensures the runs that end up in
        # recent_runs_map are always the latest run for any given sha
        recent_runs.sort(key=lambda run: run.date_created)
        recent_runs_map = {
            recent_run.job.build.source.revision_sha: recent_run
            for recent_run in recent_runs
        }

        recent_runs = map(recent_runs_map.get, revs)

        jobs = set(r.job for r in recent_runs if r)
        builds = set(j.build for j in jobs)

        serialized_jobs = dict(zip(jobs, self.serialize(jobs)))
        serialized_builds = dict(zip(builds, self.serialize(builds)))

        results = []

        for recent_run in recent_runs:
            if recent_run is not None:
                s_recent_run = self.serialize(recent_run)
                s_recent_run['job'] = serialized_jobs[recent_run.job]
                s_recent_run['job']['build'] = serialized_builds[
                    recent_run.job.build]
                results.append(s_recent_run)
            else:
                results.append(None)

        return results
Example #39
    def get(self, project_id):
        project = Project.get(project_id)
        if not project:
            return '', 404

        args = self.get_parser.parse_args()

        filters = []

        if args.authors:
            filters.append(Build.author_id.in_([a.id for a in args.authors]))
        elif args.authors is not None:
            return []

        if args.source:
            filters.append(Build.target.startswith(args.source))

        # is this from the search bar
        if args.query:
            clauses = []
            # search by revision title
            clauses.append(Build.label.contains(args.query))
            # search by prefix
            clauses.append(Build.target.startswith(args.query))
            # allows users to paste a full commit hash and still
            # find the relevant build(s). Should be fine for mercurial/git,
            # and svn will never have long enough strings
            if len(args.query) > 12:
                clauses.append(Build.target.startswith(args.query[0:12]))
            # if they searched for something that looks like a phabricator
            # identifier, try to find it
            if might_be_diffusion_iden(args.query):
                possible_hash = get_hash_from_diffusion_iden(args.query)
                if possible_hash:
                    # the query should always be at least as long or longer than
                    # our commit identifiers
                    clauses.append(
                        Build.target.startswith(possible_hash[0:12]))
            filters.append(or_(*clauses))

        if args.result:
            filters.append(Build.result == Result[args.result])

        if args.cause:
            filters.append(Build.cause == Cause[args.cause])

        if args.tag:
            tags = filter(bool, args.tag)
            # Avoid empty tags, which historically are meant to mean "no tag" restriction.
            if tags:
                filters.append(or_(*[Build.tags.any(t) for t in tags]))

        if args.patches_only:
            filters.append(Source.patch_id != None)  # NOQA
        elif not args.include_patches:
            filters.append(Source.patch_id == None)  # NOQA

        queryset = Build.query.options(
            joinedload('project', innerjoin=True),
            joinedload('author'),
            contains_eager('source').joinedload('revision'),
        ).join(
            Source, Source.id == Build.source_id,
        ).filter(
            Build.project_id == project.id,
            Source.repository_id == project.repository_id,
            *filters
        ).order_by(Build.date_created.desc())

        return self.paginate(queryset)
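
might_be_diffusion_iden and get_hash_from_diffusion_iden are project helpers
not shown here. For orientation only, a hedged sketch of the shape they
plausibly have (Phabricator Diffusion commit names look like rCALLSIGN
followed by a commit hash):

    import re

    _DIFFUSION_RE = re.compile(r'^r[A-Z]+([0-9a-f]{7,40})$')

    def might_be_diffusion_iden(query):
        return _DIFFUSION_RE.match(query) is not None

    def get_hash_from_diffusion_iden(query):
        match = _DIFFUSION_RE.match(query)
        return match.group(1) if match else None
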
Example #40
    def get(self, project_id):
        project = Project.get(project_id)
        if not project:
            return '', 404

        args = self.parser.parse_args()

        latest_build = Build.query.join(
            Source, Source.id == Build.source_id,
        ).filter(
            Source.patch_id == None,  # NOQA
            Build.project_id == project.id,
            Build.result == Result.passed,
            Build.status == Status.finished,
        ).order_by(
            Build.date_created.desc(),
        ).limit(1).first()

        if not latest_build:
            return '{}'

        # use the most recent coverage
        cover_list = FileCoverage.query.filter(
            FileCoverage.job_id.in_(
                db.session.query(Job.id).filter(
                    Job.build_id == latest_build.id,
                )
            )
        )

        if args.parent:
            cover_list = cover_list.filter(
                FileCoverage.filename.startswith(args.parent),
            )

        cover_list = list(cover_list)

        groups = build_tree(
            [c.filename for c in cover_list],
            sep='/',
            min_children=2,
            parent=args.parent,
        )

        results = []
        for group in groups:
            num_files = 0
            total_lines_covered = 0
            total_lines_uncovered = 0
            for file_coverage in cover_list:
                filename = file_coverage.filename
                if filename == group or filename.startswith(group + '/'):
                    num_files += 1
                    total_lines_covered += file_coverage.lines_covered
                    total_lines_uncovered += file_coverage.lines_uncovered

            if args.parent:
                filename = group[len(args.parent) + len('/'):]
            else:
                filename = group

            data = {
                'filename': filename,
                'path': group,
                'totalLinesCovered': total_lines_covered,
                'totalLinesUncovered': total_lines_uncovered,
                'numFiles': num_files,
            }
            results.append(data)
        results.sort(key=lambda x: x['totalLinesUncovered'], reverse=True)

        trail = []
        context = []
        if args.parent:
            for chunk in args.parent.split('/'):
                context.append(chunk)
                trail.append({
                    'path': '/'.join(context),
                    'name': chunk,
                })

        data = {
            'groups': results,
            'trail': trail,
        }

        return self.respond(data, serialize=False)