Example #1
    def process_list(self, api_version, data):  # pylint: disable=unused-argument
        """
        Returns package details.

        :param data: json request parsed into data structure

        :returns: json response with package details
        """
        validate(data, JSON_SCHEMA)

        packages = data.get('package_list', None)
        packagelist = {}
        if not packages:
            return packagelist

        for pkg in packages:
            packagedata = packagelist.setdefault(pkg, {})
            name, epoch, ver, rel, arch = utils.split_packagename(pkg)
            if name in self.cache.packagename2id \
               and (epoch, ver, rel) in self.cache.evr2id \
               and arch in self.cache.arch2id:
                name_id = self.cache.packagename2id[name]
                evr_id = self.cache.evr2id[(epoch, ver, rel)]
                arch_id = self.cache.arch2id[arch]
                if (name_id, evr_id, arch_id) in self.cache.nevra2pkgid:
                    pkg_id = self.cache.nevra2pkgid[(name_id, evr_id, arch_id)]
                    pkg_detail = self.cache.package_details[pkg_id]
                    packagedata['summary'] = self.cache.strings.get(
                        pkg_detail[PKG_SUMMARY_ID], None)
                    packagedata['description'] = self.cache.strings.get(
                        pkg_detail[PKG_DESC_ID], None)
                    packagedata['source_package'] = self._get_source_package(
                        pkg_detail)
                    packagedata['repositories'] = []
                    packagedata['package_list'] = self._get_built_binary_packages(
                        pkg_id)
                    if pkg_id in self.cache.pkgid2repoids:
                        for repo_id in self.cache.pkgid2repoids[pkg_id]:
                            repodetail = self.cache.repo_detail[repo_id]
                            repodata = {
                                'label': repodetail[REPO_LABEL],
                                'name': repodetail[REPO_NAME],
                                'basearch': utils.none2empty(repodetail[REPO_BASEARCH]),
                                'releasever': utils.none2empty(repodetail[REPO_RELEASEVER])
                            }
                            packagedata['repositories'].append(repodata)
        response = {'package_list': packagelist}

        return response
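For reference, a minimal sketch of the request and response shapes implied by this handler, following the keys assembled above; the NEVRA value is a hypothetical placeholder, not taken from the source:

request = {"package_list": ["bash-0:4.4.19-14.el8.x86_64"]}  # hypothetical NEVRA

# Shape of the response built by process_list for each NEVRA found in the cache:
# {
#     "package_list": {
#         "bash-0:4.4.19-14.el8.x86_64": {
#             "summary": "...",
#             "description": "...",
#             "source_package": "...",
#             "package_list": [...],  # binary packages built from the same source
#             "repositories": [{"label": "...", "name": "...", "basearch": "...", "releasever": "..."}]
#         }
#     }
# }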
Example #2
 def _update_errata(self, api_version: int, pkg_id: int, opts: dict, third_party: bool) -> tuple:
     """Add errata-related data, skip based on modified_since if needed"""
     data = dict()
     if opts["return_errata"] or opts["modified_since"] is not None:
         errata, modified = self._get_erratas(api_version, pkg_id, opts["modified_since"], third_party)
         if self._exclude_not_modified(modified, opts["modified_since"], len(errata)):
             return None, True
         if opts["return_errata"]:
             data["errata"] = none2empty(errata)
             first_published = self._get_first_published_from_erratas(errata)
             data["first_published"] = none2empty(first_published)
     return data, False
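A hedged sketch of how a caller might consume the (data, skipped) pair returned above; the surrounding handler and the pkg_item, opts variables are assumptions, not taken from the source:

# Hypothetical caller: drop packages unchanged since opts["modified_since"],
# otherwise merge the errata fields into the package item being built.
errata_data, skipped = self._update_errata(api_version, pkg_id, opts, third_party)
if skipped:
    return None  # nothing to report for this package
pkg_item.update(errata_data)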
Example #3
 def _get_pkg_item(self, pkg_id: int) -> dict:
     pkg_nevra = self._build_nevra(pkg_id)
     errata = self._get_erratas(pkg_id)
     repositories = self._get_repositories(pkg_id)
     # Skip content with no repos and no erratas (Should skip third party content)
     first_published = self._get_first_published_from_erratas(errata)
     pkg_item = {
         "nevra": pkg_nevra,
         "first_published": none2empty(first_published),
         "repositories": none2empty(repositories),
         "errata": none2empty(errata),
     }
     return pkg_item
Example #4
 def _get_repositories(self, pkg_id):
     # FIXME Add support for modules and streams.
     repos = []
     if pkg_id in self.cache.pkgid2repoids:
         for repo_id in self.cache.pkgid2repoids[pkg_id]:
             detail = self.cache.repo_detail[repo_id]
             repos.append({
                 'label': detail[REPO_LABEL],
                 'name': detail[REPO_NAME],
                 'basearch': none2empty(detail[REPO_BASEARCH]),
                 'releasever': none2empty(detail[REPO_RELEASEVER]),
                 'revision': format_datetime(detail[REPO_REVISION])
             })
     return natsorted(repos, key=lambda repo_dict: repo_dict['label'])
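The natsorted call above orders repository labels naturally rather than lexically, so embedded numbers compare as integers. A small illustration with hypothetical labels:

from natsort import natsorted

labels = ["rhel-10-server-rpms", "rhel-7-server-rpms", "rhel-8-server-rpms"]
# A plain lexical sort would place "rhel-10..." before "rhel-7..."; natural sort does not.
assert natsorted(labels) == ["rhel-7-server-rpms", "rhel-8-server-rpms", "rhel-10-server-rpms"]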
Example #5
    def process_list(self, api_version, data):  # pylint: disable=unused-argument
        """
        Returns a list of NEVRAs for a given package name.

        :param data: json request parsed into data structure

        :returns: json response with list of NEVRAs
        """
        # Date and time of last data change in the VMaaS DB
        last_change = format_datetime(self.cache.dbchange['last_change'])

        names = data.get('package_name_list', None)
        pkgnamelist = {}
        if not names:
            return pkgnamelist

        for name in names:
            pkgtree_list = pkgnamelist.setdefault(name, [])
            if name in self.cache.packagename2id:
                name_id = self.cache.packagename2id[name]
                pkg_ids = self._get_packages(name_id)
                for pkg_id in pkg_ids:
                    pkg_nevra = self._build_nevra(pkg_id)
                    errata = self._get_erratas(pkg_id)
                    repositories = self._get_repositories(pkg_id)
                    first_published = self._get_first_published_from_erratas(
                        errata)
                    pkgtree_list.append({
                        "nevra": pkg_nevra,
                        "first_published": none2empty(first_published),
                        "repositories": none2empty(repositories),
                        "errata": none2empty(errata),
                    })
            pkgnamelist[name] = natsorted(
                pkgtree_list, key=lambda nevra_list: nevra_list['nevra'])

        response = {
            'package_name_list': pkgnamelist,
            'last_change': last_change,
        }

        return response
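A minimal sketch of the request and response shapes for this handler, following the keys used above; the package name and NEVRA are hypothetical placeholders:

request = {"package_name_list": ["kernel"]}  # hypothetical package name

# Shape of the response built by process_list:
# {
#     "package_name_list": {
#         "kernel": [
#             {"nevra": "kernel-0:4.18.0-80.el8.x86_64",  # hypothetical NEVRA
#              "first_published": "...",
#              "repositories": [...],
#              "errata": [...]}
#         ]
#     },
#     "last_change": "..."  # timestamp of the last change in the VMaaS DB
# }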
Example #6
    def _get_pkg_errata_updates(self, update_pkg_id: int, errata_id: int,
                                module_ids: set, available_repo_ids: set,
                                valid_releasevers: set, nevra: str,
                                security_only: bool,
                                third_party: bool) -> list:
        errata_name = self.db_cache.errataid2name[errata_id]
        errata_detail = self.db_cache.errata_detail[errata_name]

        # Filter out non-security updates
        if filter_non_security(errata_detail, security_only):
            return []

        # If we don't want third party content, and current advisory is third party, skip it
        if not third_party and errata_detail[ERRATA_THIRD_PARTY]:
            return []

        if ((update_pkg_id, errata_id) in self.db_cache.pkgerrata2module and not \
                self.db_cache.pkgerrata2module[(update_pkg_id, errata_id)].intersection(module_ids)):
            return []
        repo_ids = self._get_repositories(update_pkg_id, [errata_id],
                                          available_repo_ids,
                                          valid_releasevers)
        pkg_errata_updates = []
        for repo_id in repo_ids:
            repo_details = self.db_cache.repo_detail[repo_id]
            pkg_errata_updates.append({
                'package': nevra,
                'erratum': errata_name,
                'repository': repo_details[REPO_LABEL],
                'basearch': none2empty(repo_details[REPO_BASEARCH]),
                'releasever': none2empty(repo_details[REPO_RELEASEVER])
            })
        return pkg_errata_updates
Example #7
 def _get_erratas(self, api_version: int, pkg_id: int, modified_since: datetime.datetime,
                  third_party: bool) -> tuple:
     erratas = []
     modified_found = False
     if pkg_id in self.cache.pkgid2errataids:
         errata_ids = self.cache.pkgid2errataids[pkg_id]
         for err_id in errata_ids:
             name = self.cache.errataid2name[err_id]
             detail = self.cache.errata_detail[name]
             if detail[ERRATA_THIRD_PARTY] and not third_party:
                 continue
             issued = detail[ERRATA_ISSUED]
             errata = {'name': name,
                       'issued': none2empty(format_datetime(issued))}
             if api_version >= 3:
                 updated_ts = detail[ERRATA_UPDATED]
                 errata['updated'] = none2empty(format_datetime(updated_ts))
                 modified_found = self._update_modified_found(modified_found, modified_since, updated_ts)
             cves = detail[ERRATA_CVE]
             if cves:
                 errata['cve_list'] = natsorted(cves)
             erratas.append(errata)
     erratas = natsorted(erratas, key=lambda err_dict: err_dict['name'])
     return erratas, modified_found
Example #8
 def _get_erratas(self, pkg_id):
     erratas = []
     if pkg_id in self.cache.pkgid2errataids:
         errata_ids = self.cache.pkgid2errataids[pkg_id]
         for err_id in errata_ids:
             name = self.cache.errataid2name[err_id]
             issued = self.cache.errata_detail[name][ERRATA_ISSUED]
             cves = self.cache.errata_detail[name][ERRATA_CVE]
             errata = {
                 'name': name,
                 'issued': none2empty(format_datetime(issued))
             }
             if cves:
                 errata['cve_list'] = natsorted(cves)
             erratas.append(errata)
     return natsorted(erratas, key=lambda err_dict: err_dict['name'])
Example #9
File: cve.py Project: eherget/vmaas
    def process_list(self, api_version, data): # pylint: disable=unused-argument
        """
        This method returns details for a given set of CVEs.

        :param data: data obtained from api, we're interested in data["cve_list"]

        :returns: dictionary containing detailed information for the given CVE list

        """
        validate(data, JSON_SCHEMA)

        cves_to_process = data.get("cve_list", None)
        modified_since = data.get("modified_since", None)
        published_since = data.get("published_since", None)
        rh_only = data.get('rh_only', False)
        modified_since_dt = parse_datetime(modified_since)
        published_since_dt = parse_datetime(published_since)
        page = data.get("page", None)
        page_size = data.get("page_size", None)

        answer = {}
        if not cves_to_process:
            return answer

        cves_to_process = list(filter(None, cves_to_process))
        if len(cves_to_process) == 1:
            # treat single-label like a regex, get all matching names
            cves_to_process = self.find_cves_by_regex(cves_to_process[0])

        filters = [(filter_item_if_exists, [self.cache.cve_detail])]
        if rh_only:
            filters.append((self._filter_redhat_only, []))
        # if we have information about modified/published dates and receive "modified_since"
        # or "published_since" in request,
        # compare the dates
        if modified_since:
            filters.append((self._filter_modified_since, [modified_since_dt]))

        if published_since:
            filters.append((self._filter_published_since, [published_since_dt]))

        cve_list = {}
        cve_page_to_process, pagination_response = paginate(cves_to_process, page, page_size, filters=filters)
        for cve in cve_page_to_process:
            cve_detail = self.cache.cve_detail.get(cve, None)
            if not cve_detail:
                continue

            bin_pkg_list, src_pkg_list = pkgidlist2packages(self.cache, cve_detail[CVE_PID])
            cve_list[cve] = {
                "redhat_url": none2empty(cve_detail[CVE_REDHAT_URL]),
                "secondary_url": none2empty(cve_detail[CVE_SECONDARY_URL]),
                "synopsis": cve,
                "impact": none2empty(cve_detail[CVE_IMPACT]),
                "public_date": none2empty(format_datetime(cve_detail[CVE_PUBLISHED_DATE])),
                "modified_date": none2empty(format_datetime(cve_detail[CVE_MODIFIED_DATE])),
                "cwe_list": none2empty(cve_detail[CVE_CWE]),
                "cvss3_score": str(none2empty(cve_detail[CVE_CVSS3_SCORE])),
                "cvss3_metrics": str(none2empty(cve_detail[CVE_CVSS3_METRICS])),
                "cvss2_score": str(none2empty(cve_detail[CVE_CVSS2_SCORE])),
                "cvss2_metrics": str(none2empty(cve_detail[CVE_CVSS2_METRICS])),
                "description": none2empty(cve_detail[CVE_DESCRIPTION]),
                "package_list": bin_pkg_list,
                "source_package_list": src_pkg_list,
                "errata_list": [self.cache.errataid2name[eid] for eid in cve_detail[CVE_EID]],

            }
        response = {"cve_list": cve_list}
        response.update(pagination_response)
        if modified_since:
            response["modified_since"] = modified_since
        if published_since:
            response["published_since"] = published_since
        return response
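For orientation, a hedged sketch of a request accepted by this handler and the per-CVE keys it returns; the CVE identifier and date are illustrative placeholders:

request = {
    "cve_list": ["CVE-2021-1234"],  # hypothetical CVE; a single item is treated as a regex
    "modified_since": "2021-01-01T00:00:00+00:00",  # illustrative timestamp
    "rh_only": False,
    "page": 1,
    "page_size": 10,
}

# Each matched CVE in response["cve_list"] carries: redhat_url, secondary_url,
# synopsis, impact, public_date, modified_date, cwe_list, cvss3_score,
# cvss3_metrics, cvss2_score, cvss2_metrics, description, package_list,
# source_package_list and errata_list.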
Example #10
    def process_list(self, api_version, data): # pylint: disable=unused-argument
        """
        This method returns details for a given set of Errata.

        :param data: data obtained from api, we're interested in data["errata_list"]

        :returns: dictionary containing detailed information for the given errata list

        """
        validate(data, JSON_SCHEMA)

        modified_since = data.get("modified_since", None)
        modified_since_dt = parse_datetime(modified_since)
        errata_to_process = data.get("errata_list", None)
        page = data.get("page", None)
        page_size = data.get("page_size", None)

        response = {"errata_list": {}}
        if modified_since:
            response["modified_since"] = modified_since

        if not errata_to_process:
            return response

        if len(errata_to_process) == 1:
            # treat single-label like a regex, get all matching names
            errata_to_process = self.find_errata_by_regex(errata_to_process[0])

        filters = [(filter_item_if_exists, [self.cache.errata_detail])]
        # if we have information about modified/published dates and receive "modified_since" in request,
        # compare the dates
        if modified_since:
            filters.append((self._filter_modified_since, [modified_since_dt]))

        errata_list = {}
        errata_page_to_process, pagination_response = paginate(errata_to_process, page, page_size, filters=filters)
        for errata in errata_page_to_process:
            errata_detail = self.cache.errata_detail.get(errata, None)
            if not errata_detail:
                continue

            bin_pkg_list, src_pkg_list = pkgidlist2packages(self.cache, errata_detail[ERRATA_PKGIDS])

            if errata_detail[ERRATA_MODULE]:
                for index, module_update in enumerate(errata_detail[ERRATA_MODULE]):
                    if all(str(elem).isdigit() for elem in errata_detail[ERRATA_MODULE][index]["package_list"]):
                        module_pkg_list, module_src_pkg_list = pkgidlist2packages(
                            self.cache, module_update["package_list"])
                        errata_detail[ERRATA_MODULE][index]["package_list"] = module_pkg_list
                        errata_detail[ERRATA_MODULE][index]["source_package_list"] = module_src_pkg_list

            errata_list[errata] = {
                "synopsis": none2empty(errata_detail[ERRATA_SYNOPSIS]),
                "summary": none2empty(errata_detail[ERRATA_SUMMARY]),
                "type": none2empty(errata_detail[ERRATA_TYPE]),
                "severity": none2empty(errata_detail[ERRATA_SEVERITY]),
                "description": none2empty(errata_detail[ERRATA_DESCRIPTION]),
                "solution": none2empty(errata_detail[ERRATA_SOLUTION]),
                "issued": none2empty(format_datetime(errata_detail[ERRATA_ISSUED])),
                "updated": none2empty(format_datetime(errata_detail[ERRATA_UPDATED])),
                "cve_list": errata_detail[ERRATA_CVE],
                "package_list": bin_pkg_list,
                "source_package_list": src_pkg_list,
                "bugzilla_list": errata_detail[ERRATA_BUGZILLA],
                "reference_list": errata_detail[ERRATA_REFERENCE],
                "modules_list": errata_detail[ERRATA_MODULE],
                "url": none2empty(errata_detail[ERRATA_URL])
            }
        response["errata_list"] = errata_list
        response.update(pagination_response)
        return response
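A minimal sketch of the request shape and the per-advisory keys returned, based on the code above; the advisory name is a hypothetical placeholder:

request = {"errata_list": ["RHSA-2021:1234"]}  # hypothetical advisory; a single item is treated as a regex

# Each matched advisory in response["errata_list"] carries: synopsis, summary,
# type, severity, description, solution, issued, updated, cve_list,
# package_list, source_package_list, bugzilla_list, reference_list,
# modules_list and url.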
Example #11
    def process_list(self, api_version, data):  # pylint: disable=unused-argument
        """
        Returns package details.

        :param data: json request parsed into data structure

        :returns: json response with package details
        """
        packages = data.get('package_list', None)
        # By default, don't include third party data
        want_third_party = data.get('third_party', False)

        packagelist = {}
        if not packages:
            return packagelist

        for pkg in packages:
            packagedata = packagelist.setdefault(pkg, {})
            is_third_party = False

            name, epoch, ver, rel, arch = parse_rpm_name(pkg,
                                                         default_epoch='0')
            if name in self.cache.packagename2id \
                    and (epoch, ver, rel) in self.cache.evr2id \
                    and arch in self.cache.arch2id:
                name_id = self.cache.packagename2id[name]
                evr_id = self.cache.evr2id[(epoch, ver, rel)]
                arch_id = self.cache.arch2id[arch]
                pkg_id = self.cache.nevra2pkgid.get((name_id, evr_id, arch_id),
                                                    None)
                if pkg_id:
                    pkg_detail = self.cache.package_details[pkg_id]
                    packagedata['summary'] = self.cache.strings.get(
                        pkg_detail[PKG_SUMMARY_ID], None)
                    packagedata['description'] = self.cache.strings.get(
                        pkg_detail[PKG_DESC_ID], None)
                    packagedata['source_package'] = self._get_source_package(
                        pkg_detail)
                    packagedata['repositories'] = []
                    packagedata['package_list'] = self._get_built_binary_packages(
                        pkg_id)
                    if pkg_id in self.cache.pkgid2repoids:
                        for repo_id in self.cache.pkgid2repoids[pkg_id]:
                            repodetail = self.cache.repo_detail[repo_id]
                            is_third_party = is_third_party or bool(
                                repodetail[REPO_THIRD_PARTY])
                            repodata = {
                                'label': repodetail[REPO_LABEL],
                                'name': repodetail[REPO_NAME],
                                'basearch': utils.none2empty(repodetail[REPO_BASEARCH]),
                                'releasever': utils.none2empty(repodetail[REPO_RELEASEVER]),
                            }
                            packagedata['repositories'].append(repodata)

            # If the package is third party, then remove it from result
            if not want_third_party and is_third_party:
                del packagelist[pkg]

        response = {'package_list': packagelist}

        return response
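Compared with Example #1, this variant honours a third_party flag and drops packages served only from third-party repositories unless it is set. A hedged request sketch; the NEVRA is a hypothetical placeholder:

request = {
    "package_list": ["bash-0:4.4.19-14.el8.x86_64"],  # hypothetical NEVRA
    "third_party": True,  # include packages from third-party repositories
}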
Example #12
 def _update_repositories(self, pkg_id: int, opts: dict) -> dict:
     if opts["return_repositories"]:
         repositories = self._get_repositories(pkg_id)
         return dict(repositories=none2empty(repositories))
     return dict()
Example #13
    def process_list(self, api_version, data):  # pylint: disable=unused-argument
        """
        Returns repository details.

        :param data: json request parsed into data structure

        :returns: json response with repository details
        """
        repos = data.get('repository_list', None)
        modified_since = data.get('modified_since', None)
        modified_since_dt = parse_datetime(modified_since)
        page = data.get("page", None)
        page_size = data.get("page_size", None)

        # By default, don't include third party data
        want_third_party = data.get('third_party', False)

        repolist = {}
        if not repos:
            return repolist

        filters = []
        if modified_since:
            filters.append((self._filter_modified_since, [modified_since_dt]))

        filters.append((self._filter_third_party, [want_third_party]))

        repos = self.try_expand_by_regex(repos)

        repos = list(set(repos))

        repo_details = {}
        for label in repos:
            for repo_id in self.cache.repolabel2ids.get(label, []):
                repo_details[label] = self.cache.repo_detail[repo_id]
        filters.append((filter_item_if_exists, [repo_details]))

        actual_page_size = 0
        repo_page_to_process, pagination_response = paginate(repos,
                                                             page,
                                                             page_size,
                                                             filters=filters)
        for label in repo_page_to_process:
            cs_id = self.cache.label2content_set_id[label]
            for repo_id in self.cache.repolabel2ids.get(label, []):
                repo_detail = self.cache.repo_detail[repo_id]
                if not modified_since_dt or self._modified_since(
                        repo_detail, modified_since_dt):
                    repolist.setdefault(label, []).append({
                        "label": label,
                        "name": repo_detail[REPO_NAME],
                        "url": repo_detail[REPO_URL],
                        "basearch": none2empty(repo_detail[REPO_BASEARCH]),
                        "releasever": none2empty(repo_detail[REPO_RELEASEVER]),
                        "product": repo_detail[REPO_PRODUCT],
                        "revision": repo_detail[REPO_REVISION],
                        "cpes": [
                            self.cache.cpe_id2label[cpe_id] for cpe_id in
                            self.cache.content_set_id2cpe_ids.get(cs_id, [])
                        ],
                        "third_party": repo_detail[REPO_THIRD_PARTY]
                    })
            actual_page_size += len(repolist[label])

        response = {
            'repository_list': repolist,
        }

        pagination_response['page_size'] = actual_page_size
        response.update(pagination_response)
        if modified_since:
            response["modified_since"] = modified_since

        return response
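A hedged sketch of the request shape and per-repository keys for this handler; the repository label is a hypothetical placeholder:

request = {
    "repository_list": ["rhel-8-for-x86_64-baseos-rpms"],  # hypothetical label
    "third_party": False,  # default: exclude third-party repositories
}

# Each entry in response["repository_list"][label] carries: label, name, url,
# basearch, releasever, product, revision, cpes and third_party.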
Example #14
    def process_list(self, api_version, data):  # pylint: disable=unused-argument
        """
        This method returns details for a given set of Errata.

        :param data: data obtained from api, we're interested in data["errata_list"]

        :returns: dictionary containing detailed information for the given errata list
        """
        modified_since = data.get("modified_since", None)
        modified_since_dt = parse_datetime(modified_since)
        third_party = data.get("third_party", False)
        errata_to_process = data.get("errata_list", None)
        page = data.get("page", None)
        page_size = data.get("page_size", None)
        errata_type = data.get("type", None)
        severity = data.get("severity", [])

        response = {"errata_list": {}}
        filters = [(filter_item_if_exists, [self.cache.errata_detail]),
                   (self._filter_third_party, [third_party])]
        if modified_since:
            response["modified_since"] = modified_since
            # if we have information about modified/published dates and receive "modified_since" in request,
            # compare the dates
            filters.append((self._filter_modified_since, [modified_since_dt]))
        if errata_type:
            errata_type = [t.lower() for t in set(errata_type)] \
                if isinstance(errata_type, list) else [errata_type.lower()]
            response["type"] = errata_type
            filters.append((self._filter_errata_by_prop, ["type",
                                                          errata_type]))

        if severity is None or len(severity) != 0:
            severity = self._prepare_severity(severity)
            response["severity"] = severity
            filters.append(
                (self._filter_errata_by_prop, ["severity", severity]))

        if not errata_to_process:
            return response

        errata_to_process = self.try_expand_by_regex(errata_to_process)

        errata_list = {}
        errata_page_to_process, pagination_response = paginate(
            errata_to_process, page, page_size, filters=filters)
        for errata in errata_page_to_process:
            errata_detail = self.cache.errata_detail.get(errata, None)
            if not errata_detail:
                continue

            bin_pkg_list, src_pkg_list = pkgidlist2packages(
                self.cache, errata_detail[ERRATA_PKGIDS])

            if errata_detail[ERRATA_MODULE]:
                for index, module_update in enumerate(errata_detail[ERRATA_MODULE]):
                    if all(str(elem).isdigit() for elem in
                           errata_detail[ERRATA_MODULE][index]["package_list"]):
                        module_pkg_list, module_src_pkg_list = pkgidlist2packages(
                            self.cache, module_update["package_list"])
                        errata_detail[ERRATA_MODULE][index]["package_list"] = module_pkg_list
                        errata_detail[ERRATA_MODULE][index]["source_package_list"] = module_src_pkg_list

            errata_list[errata] = {
                "synopsis": none2empty(errata_detail[ERRATA_SYNOPSIS]),
                "summary": none2empty(errata_detail[ERRATA_SUMMARY]),
                "type": none2empty(errata_detail[ERRATA_TYPE]),
                "severity": errata_detail[ERRATA_SEVERITY],
                "description": none2empty(errata_detail[ERRATA_DESCRIPTION]),
                "solution": none2empty(errata_detail[ERRATA_SOLUTION]),
                "issued": none2empty(format_datetime(errata_detail[ERRATA_ISSUED])),
                "updated": none2empty(format_datetime(errata_detail[ERRATA_UPDATED])),
                "cve_list": errata_detail[ERRATA_CVE],
                "package_list": bin_pkg_list,
                "source_package_list": src_pkg_list,
                "bugzilla_list": errata_detail[ERRATA_BUGZILLA],
                "reference_list": errata_detail[ERRATA_REFERENCE],
                "modules_list": errata_detail[ERRATA_MODULE],
                "url": none2empty(errata_detail[ERRATA_URL]),
                "third_party": errata_detail[ERRATA_THIRD_PARTY]
            }
        response["errata_list"] = errata_list
        response.update(pagination_response)
        return response
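This variant additionally filters by advisory type and severity. A hedged request sketch; the pattern and filter values are illustrative, not taken from the source:

request = {
    "errata_list": ["RHSA-.*"],  # hypothetical pattern, expanded via try_expand_by_regex
    "type": ["security"],  # illustrative type filter
    "severity": ["Important", "Critical"],  # illustrative severity filter
    "third_party": False,
}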
Example #15
    def _process_updates(self, packages_to_process, api_version,
                         available_repo_ids, repo_ids_key, response,
                         module_ids):
        # pylint: disable=too-many-branches
        module_filter = module_ids is not None
        for pkg, pkg_dict in packages_to_process.items():
            name, epoch, ver, rel, arch = pkg_dict['parsed_nevra']
            name_id = self.db_cache.packagename2id[name]
            evr_id = self.db_cache.evr2id.get((epoch, ver, rel), None)
            arch_id = self.db_cache.arch2id.get(arch, None)
            current_evr_indexes = self.db_cache.updates_index[name_id].get(
                evr_id, [])

            # Package with given NEVRA not found in cache/DB
            if not current_evr_indexes:
                continue

            current_nevra_pkg_id = None
            for current_evr_index in current_evr_indexes:
                pkg_id = self.db_cache.updates[name_id][current_evr_index]
                current_nevra_arch_id = self.db_cache.package_details[pkg_id][2]
                if current_nevra_arch_id == arch_id:
                    current_nevra_pkg_id = pkg_id
                    break

            # Package with given NEVRA not found in cache/DB
            if not current_nevra_pkg_id:
                continue

            if api_version == 1:
                sum_id = self.db_cache.package_details[current_nevra_pkg_id][PKG_SUMMARY_ID]
                response['update_list'][pkg]['summary'] = self.db_cache.strings.get(sum_id, None)

                desc_id = self.db_cache.package_details[current_nevra_pkg_id][PKG_DESC_ID]
                response['update_list'][pkg]['description'] = self.db_cache.strings.get(desc_id, None)

            response['update_list'][pkg]['available_updates'] = []

            # No updates found for given NEVRA
            last_version_pkg_id = self.db_cache.updates[name_id][-1]
            if last_version_pkg_id == current_nevra_pkg_id:
                continue

            # Get associated product IDs
            original_package_repo_ids = set()
            original_package_repo_ids.update(
                self.db_cache.pkgid2repoids.get(current_nevra_pkg_id, []))
            product_ids = self._get_related_products(original_package_repo_ids)
            valid_releasevers = self._get_valid_releasevers(
                original_package_repo_ids)

            # Get candidate package IDs
            update_pkg_ids = self.db_cache.updates[name_id][
                current_evr_indexes[-1] + 1:]

            for update_pkg_id in update_pkg_ids:
                # Filter out packages without errata
                if update_pkg_id not in self.db_cache.pkgid2errataids:
                    continue

                # Filter arch compatibility
                updated_nevra_arch_id = self.db_cache.package_details[update_pkg_id][2]
                if (updated_nevra_arch_id != arch_id and updated_nevra_arch_id
                        not in self.db_cache.arch_compat[arch_id]):
                    continue

                errata_ids = self.db_cache.pkgid2errataids.get(
                    update_pkg_id, set())
                nevra = self._build_nevra(update_pkg_id)
                for errata_id in errata_ids:
                    if (module_filter and (update_pkg_id, errata_id)
                            in self.db_cache.pkgerrata2module
                            and not self.db_cache.pkgerrata2module[
                                (update_pkg_id,
                                 errata_id)].intersection(module_ids)):
                        continue
                    repo_ids = self._get_repositories(product_ids,
                                                      update_pkg_id,
                                                      [errata_id],
                                                      available_repo_ids,
                                                      valid_releasevers)
                    for repo_id in repo_ids:
                        repo_details = self.db_cache.repo_detail[repo_id]
                        response['update_list'][pkg]['available_updates'].append({
                            'package': nevra,
                            'erratum': self.db_cache.errataid2name[errata_id],
                            'repository': repo_details[REPO_LABEL],
                            'basearch': none2empty(repo_details[REPO_BASEARCH]),
                            'releasever': none2empty(repo_details[REPO_RELEASEVER])
                        })

            if self.use_hot_cache.upper() == "YES":
                HOT_CACHE_INSERTS.inc()
                self.hot_cache.insert(repo_ids_key + pkg,
                                      response['update_list'][pkg])
Example #16
    def process_list(self, api_version, data):  # pylint: disable=unused-argument
        """
        Returns repository details.

        :param data: json request parsed into data structure

        :returns: json response with repository details
        """
        repos = data.get('repository_list', None)
        modified_since = data.get('modified_since', None)
        modified_since_dt = parse_datetime(modified_since)
        page = data.get("page", None)
        page_size = data.get("page_size", None)
        repolist = {}
        if not repos:
            return repolist

        filters = []
        if modified_since:
            filters.append((self._filter_modified_since, [modified_since_dt]))

        if len(repos) == 1:
            # treat single-label like a regex, get all matching names
            repos = self.find_repos_by_regex(repos[0])

        repos = list(set(repos))

        repo_details = {}
        for label in repos:
            for repo_id in self.cache.repolabel2ids.get(label, []):
                repo_details[label] = self.cache.repo_detail[repo_id]
        filters.append((filter_item_if_exists, [repo_details]))

        actual_page_size = 0
        repo_page_to_process, pagination_response = paginate(repos,
                                                             page,
                                                             page_size,
                                                             filters=filters)
        for label in repo_page_to_process:
            for repo_id in self.cache.repolabel2ids.get(label, []):
                repo_detail = self.cache.repo_detail[repo_id]
                if not modified_since_dt or self._modified_since(
                        repo_detail, modified_since_dt):
                    repolist.setdefault(label, []).append({
                        "label": label,
                        "name": repo_detail[REPO_NAME],
                        "url": repo_detail[REPO_URL],
                        "basearch": none2empty(repo_detail[REPO_BASEARCH]),
                        "releasever": none2empty(repo_detail[REPO_RELEASEVER]),
                        "product": repo_detail[REPO_PRODUCT],
                        "revision": repo_detail[REPO_REVISION]
                    })
            actual_page_size += len(repolist[label])

        response = {
            'repository_list': repolist,
        }

        pagination_response['page_size'] = actual_page_size
        response.update(pagination_response)
        if modified_since:
            response["modified_since"] = modified_since

        return response
Example #17
 def test_none2empty(self):
     """Test 'None' to "" conversion."""
     assert utils.none2empty(None) == ""
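The handlers above use none2empty() to substitute an empty string for missing values. A short companion sketch, assuming non-None values pass through unchanged (an assumption, not verified against the source):

 def test_none2empty_passthrough(self):
     """Assumed behavior: non-None values are returned unchanged."""
     assert utils.none2empty("x86_64") == "x86_64"  # assumed pass-through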