Example #1
    def retrieve(self, request, project, jm, pk=None):
        """
        GET method implementation for detail view

        Return a single job with log_references and
        artifact names and links to the artifact blobs.
        """
        obj = jm.get_job(pk)
        if obj:
            job = obj[0]
            job["resource_uri"] = reverse("jobs-detail",
                                          kwargs={"project": jm.project, "pk": job["id"]})
            job["logs"] = jm.get_log_references(pk)

            # make artifact ids into uris

            with ArtifactsModel(project) as artifacts_model:
                artifact_refs = artifacts_model.get_job_artifact_references(pk)
            job["artifacts"] = []
            for art in artifact_refs:
                ref = reverse("artifact-detail",
                              kwargs={"project": jm.project, "pk": art["id"]})
                art["resource_uri"] = ref
                job["artifacts"].append(art)

            option_collections = jm.refdata_model.get_all_option_collections()
            job["platform_option"] = get_option(job, option_collections)

            return Response(job)
        else:
            return Response("No job with id: {0}".format(pk), 404)
Example #2
    def list(self, request, project, jm):
        """
        GET method implementation for list view
        Optional parameters (default):
        - offset (0)
        - count (10, capped at 2000)
        - return_type (dict)
        - exclusion_profile (default)
        - visibility (included)
        """
        filter = UrlQueryFilter(request.query_params)

        offset = int(filter.pop("offset", 0))
        count = min(int(filter.pop("count", 10)), 2000)
        return_type = filter.pop("return_type", "dict").lower()
        exclusion_profile = filter.pop("exclusion_profile", "default")
        visibility = filter.pop("visibility", "included")
        if exclusion_profile in ('false', 'null'):
            exclusion_profile = None
        results = jm.get_job_list(offset, count, conditions=filter.conditions,
                                  exclusion_profile=exclusion_profile,
                                  visibility=visibility)

        if results:
            option_collections = jm.refdata_model.get_all_option_collections()
            for job in results:
                job["platform_option"] = get_option(job, option_collections)

        response_body = dict(meta={"repository": project}, results=[])

        if results and return_type == "list":
            response_body["job_property_names"] = results[0].keys()
            results = [job.values() for job in results]
        response_body["results"] = results
        response_body["meta"].update(offset=offset, count=count)

        return Response(response_body)
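When return_type=list is requested, the payload carries the column names once in job_property_names and each job as a bare list of values. A hedged sketch of how a consumer might rebuild per-job dicts from that shape (the helper name is illustrative, not part of the API above):

    def rebuild_jobs(response_body):
        """Zip the shared property names back onto each row of values."""
        names = response_body.get("job_property_names")
        rows = response_body["results"]
        if names is None:
            # return_type was "dict": rows are already dicts
            return rows
        return [dict(zip(names, row)) for row in rows]

    # e.g. rebuild_jobs({"meta": {"repository": "try", "offset": 0, "count": 10},
    #                    "job_property_names": ["id", "state"],
    #                    "results": [[1, "completed"], [2, "running"]]})
    # -> [{"id": 1, "state": "completed"}, {"id": 2, "state": "running"}]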
Example #3
    def list(self, request, project, jm):
        """
        GET method implementation for list view

        """
        filter = UrlQueryFilter(request.QUERY_PARAMS)

        offset = filter.pop("offset", 0)
        count = min(int(filter.pop("count", 10)), 1000)

        full = filter.pop('full', 'true').lower() == 'true'
        objs = jm.get_job_list(offset, count, full, filter.conditions)

        if objs:
            option_collections = jm.refdata_model.get_all_option_collections()
            for job in objs:
                job["platform_option"] = get_option(job, option_collections)

        return Response(objs)
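This earlier revision reads query parameters from request.QUERY_PARAMS (the pre-DRF-3.0 spelling) and folds the string-valued full flag into a bool. A small standalone sketch of that parsing pattern, using a plain dict rather than UrlQueryFilter (whose API beyond pop() and conditions is not shown above):

    def parse_list_params(query_params):
        """Mimic the offset/count/full handling above on a plain dict copy."""
        params = dict(query_params)
        offset = int(params.pop("offset", 0))
        count = min(int(params.pop("count", 10)), 1000)
        full = str(params.pop("full", "true")).lower() == "true"
        return offset, count, full, params  # leftovers act as filter conditions

    # e.g. parse_list_params({"count": "50", "full": "false", "result": "testfailed"})
    # -> (0, 50, False, {"result": "testfailed"})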
Example #4
    def get_resultsets_with_jobs(jm,
                                 rs_list,
                                 full,
                                 filter_kwargs,
                                 debug,
                                 exclusion_state='included',
                                 sort_key='push_timestamp'):
        """Convert db result of resultsets in a list to JSON"""

        rs_map = {}
        for rs in rs_list:
            rs_map[rs["id"]] = rs
            # all rs should have the revisions_uri, so add it here
            rs["revisions_uri"] = reverse("resultset-revisions",
                                          kwargs={
                                              "project": jm.project,
                                              "pk": rs["id"]
                                          })

        job_list = jm.get_result_set_job_list(rs_map.keys(), full,
                                              exclusion_state, **filter_kwargs)

        jobs_ungrouped = job_list.get('job_list', [])
        reference_signature_names = job_list.get('reference_signature_names',
                                                 {})

        option_collections = jm.refdata_model.get_all_option_collections()

        rs_grouper = lambda rsg: rsg["result_set_id"]

        # the main grouper for a result set is the combination of
        # platform and options
        platform_grouper = lambda pg: (
            PLATFORM_ORDER.get(pg["platform"], 100),
            OPT_ORDER.get(get_option(pg, option_collections), 100))

        def get_sortable_job_symbol(symbol):
            """
            Sort jobs by symbol.

            Symbol could be something like 1, 2 or 3.  Or A, B, C or R1, R2, R10.
            So this will pad the numeric portion with 0s like R001, R010, etc.
            """
            newsymbol = symbol
            if symbol.isdigit():
                newsymbol = symbol.zfill(3)
            else:
                x = re.split(r'(\d+)', symbol)
                newsymbol = x[0]
                if len(x) > 1:
                    newsymbol += x[1].zfill(3)

            return newsymbol

        job_group_grouper = lambda jgg: jgg["job_group_symbol"]
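        # e.g. get_sortable_job_symbol("R2") -> "R002" and "R10" -> "R010",
        # so numeric suffixes sort numerically rather than lexically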
        job_type_grouper = lambda jtg: get_sortable_job_symbol(
            jtg['job_type_symbol'])

        rs_sorted = sorted(jobs_ungrouped, key=rs_grouper)
        resultsets = []
        for rs_id, resultset_group in itertools.groupby(rs_sorted,
                                                        key=rs_grouper):

            resultset = rs_map[rs_id]
            resultsets.append(resultset)

            # we found jobs for this resultset, so remove it from the map
            # now that it's in the ``resultsets`` list.
            # after we are done with all these jobs, whatever is in the map are
            # resultsets with no jobs yet, which we add back in to the list
            # of resultsets to be returned.
            del rs_map[rs_id]

            job_counts = dict.fromkeys(
                jm.RESULTS + jm.INCOMPLETE_STATES + ["total"], 0)

            # itertools needs the elements to be sorted by the grouper
            by_platform = sorted(list(resultset_group), key=platform_grouper)
            platforms = []
            for platform_group_name, platform_group in itertools.groupby(
                    by_platform, key=platform_grouper):

                by_job_group = sorted(list(platform_group),
                                      key=job_group_grouper)

                platform_name = by_job_group[0]["platform"]
                platform_option = option_collections[
                    by_job_group[0]["option_collection_hash"]]['opt']

                groups = []
                for jg_symbol, jg_group in itertools.groupby(
                        by_job_group, job_group_grouper):

                    by_job_type = sorted(list(jg_group), key=job_type_grouper)

                    job_list = []
                    groups.append({
                        "symbol": jg_symbol,
                        "name": by_job_type[0]["job_group_name"],
                        "jobs": job_list
                    })

                    # build the uri ref for each job
                    for job in by_job_type:

                        job_list.append(
                            get_job_value_list(job, reference_signature_names,
                                               platform_option, jm.project,
                                               debug))

                        if job["state"] == "completed":
                            job_counts[job["result"]] += 1
                        else:
                            job_counts[job["state"]] += 1
                        job_counts["total"] += 1

                platforms.append({
                    "name": platform_name,
                    "option": platform_option,
                    "groups": groups,
                })

            # the unique set of results that are contained in this resultset
            # can be used to determine the resultset's severity
            resultset.update({
                "platforms": platforms,
                "job_counts": job_counts,
            })

        # the resultsets left in the map have no jobs, so fill in the fields
        # with blanks that WOULD otherwise have been filled.
        for rs in rs_map.values():
            rs.update({
                "platforms": [],
                "job_counts":
                dict.fromkeys(jm.RESULTS + jm.INCOMPLETE_STATES + ["total"],
                              0),
            })
            resultsets.append(rs)

        return sorted(resultsets, key=lambda x: x[sort_key], reverse=True)
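For orientation, a sketch of the nested structure each resultset carries after the grouping above. Every value is illustrative: the real job entries are whatever get_job_value_list returns, and the job_counts keys come from jm.RESULTS and jm.INCOMPLETE_STATES, which are not shown here.

    # Illustrative shape only; field names inside "jobs" and "job_counts" are made up.
    example_resultset = {
        "id": 42,
        "revisions_uri": "/api/project/someproject/resultset/42/revisions/",
        "platforms": [
            {
                "name": "linux64",
                "option": "opt",
                "groups": [
                    {"symbol": "M", "name": "Mochitest",
                     "jobs": ["<job value list>", "<job value list>"]},
                ],
            },
        ],
        "job_counts": {"success": 5, "testfailed": 1, "running": 2, "total": 8},
    }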
Example #5
    def get_resultsets_with_jobs(jm, rs_list, full, filter_kwargs):
        """Convert db result of resultsets in a list to JSON"""

        # Fetch the job results all at once, then parse them out in memory.
        # organize the resultsets into an obj by key for lookups
        rs_map = {}
        for rs in rs_list:
            rs_map[rs["id"]] = rs
            # all rs should have the revisions_uri, so add it here
            rs["revisions_uri"] = reverse("resultset-revisions",
                kwargs={"project": jm.project, "pk": rs["id"]})

        jobs_ungrouped = jm.get_result_set_job_list(
            rs_map.keys(),
            full,
            **filter_kwargs
        )

        option_collections = jm.refdata_model.get_all_option_collections()

        rs_grouper = lambda rsg: rsg["result_set_id"]
        # the main grouper for a result set is the combination of
        # platform and options
        platform_grouper = lambda pg: "{0} {1}".format(
            pg["platform"],
            get_option(pg, option_collections)
        )
        job_group_grouper = lambda jgg: jgg["job_group_symbol"]
        job_type_grouper = lambda jtg: jtg['job_type_symbol']

        rs_sorted = sorted(jobs_ungrouped, key=rs_grouper)
        resultsets = []
        for rs_id, resultset_group in itertools.groupby(rs_sorted, key=rs_grouper):

            resultset = rs_map[rs_id]
            resultsets.append(resultset)

            # we found jobs for this resultset, so remove it from the map
            # now that it's in the ``resultsets`` list.
            # after we are done with all these jobs, whatever is in the map are
            # resultsets with no jobs yet, which we add back in to the list
            # of resultsets to be returned.
            del(rs_map[rs_id])

            job_counts = dict.fromkeys(
                jm.RESULTS + jm.INCOMPLETE_STATES + ["total"], 0)

            # itertools needs the elements to be sorted by the grouper
            by_platform = sorted(list(resultset_group), key=platform_grouper)
            platforms = []
            for platform_group_name, platform_group in itertools.groupby(
                    by_platform,
                    key=platform_grouper):

                by_job_group = sorted(list(platform_group), key=job_group_grouper)

                platform_name = by_job_group[0]["platform"]
                platform_option = option_collections[
                    by_job_group[0]["option_collection_hash"]]['opt']

                groups = []
                for jg_symbol, jg_group in itertools.groupby(
                        by_job_group,
                        job_group_grouper):

                    by_job_type = sorted(list(jg_group), key=job_type_grouper)

                    groups.append({
                        "symbol": jg_symbol,
                        "name": by_job_type[0]["job_group_name"],
                        "jobs": by_job_type
                    })

                    # build the uri ref for each job
                    for job in by_job_type:
                        job["id"] = job["job_id"]
                        del(job["job_id"])
                        del(job["option_collection_hash"])

                        job["platform_option"] = platform_option
                        job["resource_uri"] = reverse("jobs-detail",
                            kwargs={"project": jm.project, "pk": job["id"]})

                        if job["state"] == "completed":
                            job_counts[job["result"]] += 1
                        else:
                            job_counts[job["state"]] += 1
                        job_counts["total"] += 1

                platforms.append({
                    "name": platform_name,
                    "option": platform_option,
                    "groups": groups,
                })

            # the unique set of results that are contained in this resultset
            # can be used to determine the resultset's severity
            resultset.update({
                "platforms": platforms,
                "job_counts": job_counts,
            })

        # the resultsets left in the map have no jobs, so fill in the fields
        # with blanks that WOULD otherwise have been filled.
        for rs in rs_map.values():
            rs.update({
                "platforms": [],
                "job_counts": dict.fromkeys(
                    jm.RESULTS + jm.INCOMPLETE_STATES + ["total"], 0),
            })
            resultsets.append(rs)
        return sorted(
            resultsets,
            key=lambda x: x["push_timestamp"],
            reverse=True)

    def get_resultsets_with_jobs(jm, rs_list, full, filter_kwargs, debug,
                                 sort_key='push_timestamp'):
        """Convert db result of resultsets in a list to JSON"""

        if 'result_set_ids' in filter_kwargs:
            del filter_kwargs['result_set_ids']

        rs_map = {}
        for rs in rs_list:
            rs_map[rs["id"]] = rs
            # all rs should have the revisions_uri, so add it here
            rs["revisions_uri"] = reverse("resultset-revisions",
                kwargs={"project": jm.project, "pk": rs["id"]})

        job_list = jm.get_result_set_job_list(
            rs_map.keys(),
            full,
            **filter_kwargs
        )

        jobs_ungrouped = job_list.get('job_list', [])
        reference_signature_names = job_list.get('reference_signature_names', {})

        option_collections = jm.refdata_model.get_all_option_collections()

        rs_grouper = lambda rsg: rsg["result_set_id"]

        # the main grouper for a result set is the combination of
        # platform and options
        platform_grouper = lambda pg: (
            PLATFORM_ORDER.get(pg["platform"], 100),
            OPT_ORDER.get(get_option(pg, option_collections), 100)
        )

        def get_sortable_job_symbol(symbol):
            """
            Sort jobs by symbol.

            Symbol could be something like 1, 2 or 3.  Or A, B, C or R1, R2, R10.
            So this will pad the numeric portion with 0s like R001, R010, etc.
            """
            newsymbol = symbol
            if symbol.isdigit():
                newsymbol = symbol.zfill(3)
            else:
                x = re.split(r'(\d+)', symbol)
                newsymbol = x[0]
                if len(x) > 1:
                    newsymbol += x[1].zfill(3)

            return newsymbol

        job_group_grouper = lambda jgg: jgg["job_group_symbol"]
        job_type_grouper = lambda jtg: (get_sortable_job_symbol(jtg['job_type_symbol']))

        rs_sorted = sorted(jobs_ungrouped, key=rs_grouper)
        resultsets = []
        for rs_id, resultset_group in itertools.groupby(rs_sorted, key=rs_grouper):

            resultset = rs_map[rs_id]
            resultsets.append(resultset)

            # we found jobs for this resultset, so remove it from the map
            # now that it's in the ``resultsets`` list.
            # after we are done with all these jobs, whatever is in the map are
            # resultsets with no jobs yet, which we add back in to the list
            # of resultsets to be returned.
            del(rs_map[rs_id])

            job_counts = dict.fromkeys(
                jm.RESULTS + jm.INCOMPLETE_STATES + ["total"], 0)

            # itertools needs the elements to be sorted by the grouper
            by_platform = sorted(list(resultset_group), key=platform_grouper)
            platforms = []
            for platform_group_name, platform_group in itertools.groupby(
                    by_platform,
                    key=platform_grouper):

                by_job_group = sorted(list(platform_group), key=job_group_grouper)

                platform_name = by_job_group[0]["platform"]
                platform_option = option_collections[
                    by_job_group[0]["option_collection_hash"]]['opt']

                groups = []
                for jg_symbol, jg_group in itertools.groupby(
                        by_job_group,
                        job_group_grouper):

                    by_job_type = sorted(list(jg_group), key=job_type_grouper)

                    job_list = []
                    groups.append({
                        "symbol": jg_symbol,
                        "name": by_job_type[0]["job_group_name"],
                        "jobs": job_list
                    })

                    # build the uri ref for each job
                    for job in by_job_type:

                        job_list.append(
                            get_job_value_list(
                                job, reference_signature_names,
                                platform_option, jm.project, debug
                            )
                        )

                        if job["state"] == "completed":
                            job_counts[job["result"]] += 1
                        else:
                            job_counts[job["state"]] += 1
                        job_counts["total"] += 1

                platforms.append({
                    "name": platform_name,
                    "option": platform_option,
                    "groups": groups,
                })

            # the unique set of results that are contained in this resultset
            # can be used to determine the resultset's severity
            resultset.update({
                "platforms": platforms,
                "job_counts": job_counts,
            })

        # the resultsets left in the map have no jobs, so fill in the fields
        # with blanks that WOULD otherwise have been filled.
        for rs in rs_map.values():
            rs.update({
                "platforms": [],
                "job_counts": dict.fromkeys(
                    jm.RESULTS + jm.INCOMPLETE_STATES + ["total"], 0),
            })
            resultsets.append(rs)

        return sorted(
            resultsets,
            key=lambda x: x[sort_key],
            reverse=True)
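Both versions sort each batch before handing it to itertools.groupby, because groupby only merges adjacent items with equal keys. A tiny standalone illustration of that requirement:

    import itertools

    jobs = [{"result_set_id": 2}, {"result_set_id": 1}, {"result_set_id": 2}]
    by_rs = lambda j: j["result_set_id"]

    # Unsorted input: result_set_id 2 shows up as two separate groups.
    print([k for k, _ in itertools.groupby(jobs, key=by_rs)])
    # -> [2, 1, 2]

    # Sorted input: one group per id, which is what the code above relies on.
    print([k for k, _ in itertools.groupby(sorted(jobs, key=by_rs), key=by_rs)])
    # -> [1, 2]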