Example #1
    def test_xml_parser_repomd_repo01_nowarningcb(self):

        repomd = cr.Repomd()
        cr.xml_parse_repomd(REPO_01_REPOMD, repomd)
        self.assertEqual(repomd.revision, "1334667230")
        self.assertEqual(repomd.repo_tags, [])
        self.assertEqual(repomd.distro_tags, [])
        self.assertEqual(repomd.content_tags, [])
        self.assertEqual(len(repomd.records), 3)
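The test above relies on fixtures this page does not show. A minimal, hypothetical setup might look like the following (the import name createrepo_c is the library's real one; the constant and path are placeholders):

import os
import unittest

import createrepo_c as cr

# Hypothetical fixture: path to the repomd.xml of a small test repository.
REPO_01_REPOMD = os.path.join("testdata", "repo_01", "repodata", "repomd.xml")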
def third_method():
    """Parsing main metadata types (primary, filelists, other) at the same time.
    This approach significantly reduces memory footprint because we don't need
    to keep all the packages in memory, user can handle them one by one.

    The API reflects xml_parse_primary/filelists/other except that it handles
    all of them at the same time.

    """
    def warningcb(warning_type, message):
        print("PARSER WARNING: %s" % message)
        return True

    repomd = cr.Repomd()
    cr.xml_parse_repomd(os.path.join(REPO_PATH, "repodata/repomd.xml"), repomd,
                        warningcb)

    primary_xml_path = None
    filelists_xml_path = None
    other_xml_path = None
    for record in repomd.records:
        if record.type == "primary":
            primary_xml_path = os.path.join(REPO_PATH, record.location_href)
        elif record.type == "filelists":
            filelists_xml_path = os.path.join(REPO_PATH, record.location_href)
        elif record.type == "other":
            other_xml_path = os.path.join(REPO_PATH, record.location_href)

    #
    # Main XML metadata parsing (primary, filelists, other)
    #

    def pkgcb(pkg):
        # Called when a whole package entry from all 3 metadata XML files has been parsed
        print_package_info(pkg)

    cr.xml_parse_main_metadata_together(primary_xml_path, filelists_xml_path,
                                        other_xml_path, None, pkgcb, warningcb,
                                        False)
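print_package_info is referenced but not defined anywhere on this page. A minimal, hypothetical stand-in (the attributes used are real cr.Package attributes) could be:

def print_package_info(pkg):
    # Print NEVRA plus the package checksum; cr.Package exposes
    # name, epoch, version, release, arch and pkgId directly.
    print("%s-%s:%s-%s.%s (pkgId: %s)" % (
        pkg.name, pkg.epoch, pkg.version, pkg.release, pkg.arch, pkg.pkgId))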
def streaming_iterator():
    """Parsing main metadata types (primary, filelists, other) at the same time.
    This approach significantly reduces memory footprint because we don't need
    to keep all the packages in memory, user can handle them one by one.

    This is the most flexible method, and the recommended one if you need all of the
    RPM metadata. If you only need to parse one file it might not be the most efficient.
    """
    def warningcb(warning_type, message):
        print("PARSER WARNING: %s" % message)
        return True

    repomd = cr.Repomd()
    cr.xml_parse_repomd(os.path.join(REPO_PATH, "repodata/repomd.xml"), repomd,
                        warningcb)

    primary_xml_path = None
    filelists_xml_path = None
    other_xml_path = None
    for record in repomd.records:
        if record.type == "primary":
            primary_xml_path = os.path.join(REPO_PATH, record.location_href)
        elif record.type == "filelists":
            filelists_xml_path = os.path.join(REPO_PATH, record.location_href)
        elif record.type == "other":
            other_xml_path = os.path.join(REPO_PATH, record.location_href)

    #
    # Main XML metadata parsing (primary, filelists, other)
    #
    package_iterator = cr.PackageIterator(primary_path=primary_xml_path,
                                          filelists_path=filelists_xml_path,
                                          other_path=other_xml_path,
                                          warningcb=warningcb)

    for pkg in package_iterator:
        # Each pkg is a whole package entry assembled from all 3 metadata XML files
        print_package_info(pkg)
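Because cr.PackageIterator is an ordinary Python iterator, it can also be consumed lazily instead of with the for loop above; for example, stopping at the first match (the predicate is illustrative):

# Grab the first noarch package and stop pulling from the parser early.
first_noarch = next((pkg for pkg in package_iterator if pkg.arch == "noarch"),
                    None)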
Example #5
    def test_xml_parser_repomd_repo01(self):

        warnings = []

        def warningcb(warn_type, msg):
            warnings.append((warn_type, msg))

        repomd = cr.Repomd()

        cr.xml_parse_repomd(REPO_01_REPOMD, repomd, warningcb)

        self.assertEqual(warnings, [])

        self.assertEqual(repomd.revision, "1334667230")
        self.assertEqual(repomd.repo_tags, [])
        self.assertEqual(repomd.distro_tags, [])
        self.assertEqual(repomd.content_tags, [])
        self.assertEqual(len(repomd.records), 3)

        self.assertEqual(repomd.records[0].type, "filelists")
        self.assertEqual(repomd.records[0].location_real, None)
        self.assertEqual(
            repomd.records[0].location_href,
            "repodata/c7db035d0e6f1b2e883a7fa3229e2d2be70c05a8b8d2b57dbb5f9c1a67483b6c-filelists.xml.gz"
        )
        self.assertEqual(
            repomd.records[0].checksum,
            "c7db035d0e6f1b2e883a7fa3229e2d2be70c05a8b8d2b57dbb5f9c1a67483b6c")
        self.assertEqual(repomd.records[0].checksum_type, "sha256")
        self.assertEqual(
            repomd.records[0].checksum_open,
            "85bc611be5d81ac8da2fe01e98ef741d243d1518fcc46ada70660020803fbf09")
        self.assertEqual(repomd.records[0].checksum_open_type, "sha256")
        self.assertEqual(repomd.records[0].timestamp, 1334667230)
        self.assertEqual(repomd.records[0].size, 273)
        self.assertEqual(repomd.records[0].size_open, 389)
        self.assertEqual(repomd.records[0].db_ver, 0)

        self.assertEqual(repomd.records[1].type, "other")
        self.assertEqual(repomd.records[1].location_real, None)
        self.assertEqual(
            repomd.records[1].location_href,
            "repodata/b752a73d9efd4006d740f943db5fb7c2dd77a8324bd99da92e86bd55a2c126ef-other.xml.gz"
        )
        self.assertEqual(
            repomd.records[1].checksum,
            "b752a73d9efd4006d740f943db5fb7c2dd77a8324bd99da92e86bd55a2c126ef")
        self.assertEqual(repomd.records[1].checksum_type, "sha256")
        self.assertEqual(
            repomd.records[1].checksum_open,
            "da6096c924349af0c326224a33be0cdb26897fbe3d25477ac217261652449445")
        self.assertEqual(repomd.records[1].checksum_open_type, "sha256")
        self.assertEqual(repomd.records[1].timestamp, 1334667230)
        self.assertEqual(repomd.records[1].size, 332)
        self.assertEqual(repomd.records[1].size_open, 530)
        self.assertEqual(repomd.records[1].db_ver, 0)

        self.assertEqual(repomd.records[2].type, "primary")
        self.assertEqual(repomd.records[2].location_real, None)
        self.assertEqual(
            repomd.records[2].location_href,
            "repodata/6c662d665c24de9a0f62c17d8fa50622307739d7376f0d19097ca96c6d7f5e3e-primary.xml.gz"
        )
        self.assertEqual(
            repomd.records[2].checksum,
            "6c662d665c24de9a0f62c17d8fa50622307739d7376f0d19097ca96c6d7f5e3e")
        self.assertEqual(repomd.records[2].checksum_type, "sha256")
        self.assertEqual(
            repomd.records[2].checksum_open,
            "0fc6cadf97d515e87491d24dc9712d8ddaf2226a21ae7f131ff42d71a877c496")
        self.assertEqual(repomd.records[2].checksum_open_type, "sha256")
        self.assertEqual(repomd.records[2].timestamp, 1334667230)
        self.assertEqual(repomd.records[2].size, 782)
        self.assertEqual(repomd.records[2].size_open, 2085)
        self.assertEqual(repomd.records[2].db_ver, 0)
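The assertions above depend on the record order inside repomd.xml. When that order is not guaranteed, a lookup by type (plain Python over the same attributes) is more robust:

records_by_type = {rec.type: rec for rec in repomd.records}
filelists_href = records_by_type["filelists"].location_href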
Example #7
def second_method():
    """Prefered method for repodata parsing.

    Important callbacks for repodata parsing:

    newpkgcb
    --------
    Via newpkgcb (Package callback) you could directly
    affect if the current package element shoud be parsed
    or not. This decision could be based on
    three values that are available as attributtes
    in the <package> element. This values are:
     - pkgId (package checksum)
     - name (package name)
     - arch (package architecture)
    (Note: This is applicable only for filelists.xml and other.xml,
     primary.xml doesn't contain this information in <package> element)

    If newpkgcb returns a package object, the parsed data
    will be loaded to this package object. If it returns a None,
    package element is skiped.

    This could help you to reduce a memory requirements because
    non wanted packages could be skiped without need to
    store them into the memory.

    If no newpkgcb is specified, default callback returning
    a new package object is used.

    pkgcb
    -----
    Callback called when a <package> element parsing is done.
    Its argument is a package object that has been previously
    returned by the newpkgcb.
    This function should return True if parsing should continue
    or False if parsing should be interrupted.

    Note: Both callbacks are optional, BUT at least one
          MUST be used (newpkgcb or pkgcb)!

    warningcb
    ---------
    Warning callbacks is called when a non-fatal oddity of prased XML
    is detected.
    If True is returned, parsing continues. If return value is False,
    parsing is terminated.
    This callback is optional.
    """

    primary_xml_path   = None
    filelists_xml_path = None
    other_xml_path     = None

    #
    # repomd.xml parsing
    #

    # Parse repomd.xml to get paths (Method 1 - Repomd object based)
    #   Pros: easy to use
    repomd = cr.Repomd(os.path.join(REPO_PATH, "repodata/repomd.xml"))

    # Parse repomd.xml (Method 2 - parser based)
    #   Pros: a warning callback can be specified
    def warningcb(warning_type, message):
        """Optional callback for warnings about
        wierd stuff and formatting in XML.

        :param warning_type: Integer value. One from
                             the XML_WARNING_* constants.
        :param message: String message.
        """
        print "PARSER WARNING: %s" % message
        return True

    repomd2 = cr.Repomd()
    cr.xml_parse_repomd(os.path.join(REPO_PATH, "repodata/repomd.xml"),
                                     repomd2, warningcb)

    # Get the paths we need
    #   (either repomd or repomd2 can be used; both hold the same values)
    for record in repomd.records:
        if record.type == "primary":
            primary_xml_path = record.location_href
        elif record.type == "filelists":
            filelists_xml_path = record.location_href
        elif record.type == "other":
            other_xml_path = record.location_href


    #
    # Main XML metadata parsing (primary, filelists, other)
    #

    packages = {}

    def pkgcb(pkg):
        # Called when a whole package entry in the XML has been parsed
        packages[pkg.pkgId] = pkg

    def newpkgcb(pkgId, name, arch):
        # Called when a new package entry is encountered and only the
        # opening <package> element has been parsed. This function has to
        # return a package to which the parsed data will be added, or
        # None if this package should be skipped.
        return packages.get(pkgId, None)

    # The do_files option tells the primary parser to skip the <file>
    # elements of packages. If you plan to parse filelists.xml after
    # primary.xml, always set do_files to False.
    cr.xml_parse_primary(os.path.join(REPO_PATH, primary_xml_path),
                         pkgcb=pkgcb,
                         do_files=False,
                         warningcb=warningcb)

    cr.xml_parse_filelists(os.path.join(REPO_PATH, filelists_xml_path),
                           newpkgcb=newpkgcb,
                           warningcb=warningcb)

    cr.xml_parse_other(os.path.join(REPO_PATH, other_xml_path),
                       newpkgcb=newpkgcb,
                       warningcb=warningcb)

    for pkg in packages.values():
        print_package_info(pkg)
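As the docstring describes, newpkgcb can reject packages before they are stored. A minimal sketch that keeps only a fixed set of package names while parsing filelists.xml (the set contents are hypothetical; cr.Package() is assumed to create an empty package, as the default callback does):

wanted_names = {"bash", "glibc"}

def filtering_newpkgcb(pkgId, name, arch):
    # Returning None makes the parser skip the whole <package> element,
    # so unwanted packages never occupy memory.
    return cr.Package() if name in wanted_names else None

cr.xml_parse_filelists(os.path.join(REPO_PATH, filelists_xml_path),
                       newpkgcb=filtering_newpkgcb)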
def oneshot_callback():
    """Parse one file at a time into a set of packages.

    Use of this method is discouraged.

    newpkgcb
    --------
    Via newpkgcb (the package callback) you can directly
    control whether the current package element should be
    parsed or not. This decision can be based on
    three values that are available as attributes
    of the <package> element. These values are:
     - pkgId (package checksum)
     - name (package name)
     - arch (package architecture)
    (Note: This is applicable only to filelists.xml and other.xml;
     primary.xml doesn't contain this information in the <package> element.)

    If newpkgcb returns a package object, the parsed data
    will be loaded into this package object. If it returns None,
    the package element is skipped.

    This can help you reduce memory requirements because
    unwanted packages can be skipped without having to
    be stored in memory.

    If no newpkgcb is specified, a default callback returning
    a new package object is used.

    pkgcb
    -----
    Callback called when parsing of a <package> element is done.
    Its argument is the package object that was previously
    returned by the newpkgcb.
    This function should return True if parsing should continue
    or False if parsing should be interrupted.

    Note: Both callbacks are optional, BUT at least one
          MUST be used (newpkgcb or pkgcb)!

    warningcb
    ---------
    The warning callback is called when a non-fatal oddity is
    detected in the parsed XML.
    If True is returned, parsing continues. If the return value is False,
    parsing is terminated.
    This callback is optional.
    """

    primary_xml_path = None
    filelists_xml_path = None
    other_xml_path = None

    #
    # repomd.xml parsing
    #

    # Parse repomd.xml to get paths (Method 1 - Repomd object based)
    #   Pros: easy to use
    repomd = cr.Repomd(os.path.join(REPO_PATH, "repodata/repomd.xml"))

    # Parse repomd.xml (Method 2 - parser based)
    #   Pros: a warning callback can be specified
    def warningcb(warning_type, message):
        """Optional callback for warnings about
        wierd stuff and formatting in XML.

        :param warning_type: Integer value. One from
                             the XML_WARNING_* constants.
        :param message: String message.
        """
        print("PARSER WARNING: %s" % message)
        return True

    repomd2 = cr.Repomd()
    cr.xml_parse_repomd(os.path.join(REPO_PATH, "repodata/repomd.xml"),
                        repomd2, warningcb)

    # Get the paths we need
    #   (either repomd or repomd2 can be used; both hold the same values)
    for record in repomd.records:
        if record.type == "primary":
            primary_xml_path = record.location_href
        elif record.type == "filelists":
            filelists_xml_path = record.location_href
        elif record.type == "other":
            other_xml_path = record.location_href

    #
    # Main XML metadata parsing (primary, filelists, other)
    #

    packages = {}

    def pkgcb(pkg):
        # Called when a whole package entry in the XML has been parsed
        packages[pkg.pkgId] = pkg

    def newpkgcb(pkgId, name, arch):
        # Called when a new package entry is encountered and only the
        # opening <package> element has been parsed. This function has to
        # return a package to which the parsed data will be added, or
        # None if this package should be skipped.
        return packages.get(pkgId, None)

    # The do_files option tells the primary parser to skip the <file>
    # elements of packages. If you plan to parse filelists.xml after
    # primary.xml, always set do_files to False.
    cr.xml_parse_primary(os.path.join(REPO_PATH, primary_xml_path),
                         pkgcb=pkgcb,
                         do_files=False,
                         warningcb=warningcb)

    cr.xml_parse_filelists(os.path.join(REPO_PATH, filelists_xml_path),
                           newpkgcb=newpkgcb,
                           warningcb=warningcb)

    cr.xml_parse_other(os.path.join(REPO_PATH, other_xml_path),
                       newpkgcb=newpkgcb,
                       warningcb=warningcb)

    for pkg in packages.values():
        print_package_info(pkg)
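The docstring also notes that pkgcb may return False to interrupt parsing. A sketch that stops after the first ten packages (illustrative only; an interrupted parse may surface as an error in the bindings):

seen = []

def limited_pkgcb(pkg):
    seen.append(pkg)
    # Per the docstring: True continues parsing, False interrupts it.
    return len(seen) < 10

cr.xml_parse_primary(os.path.join(REPO_PATH, primary_xml_path),
                     pkgcb=limited_pkgcb,
                     do_files=False)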
Example #9
    def _load_cached_updateinfo(self):
        """
        Load the cached updateinfo.xml from '../{tag}.repocache/repodata'
        """
        seen_ids = set()
        from_cache = set()
        existing_ids = set()

        # Parse the updateinfo out of the repomd
        updateinfo = None
        repomd_xml = os.path.join(self.cached_repodata, 'repomd.xml')
        repomd = cr.Repomd()
        cr.xml_parse_repomd(repomd_xml, repomd)
        for record in repomd.records:
            if record.type == 'updateinfo':
                updateinfo = os.path.join(os.path.dirname(
                    os.path.dirname(self.cached_repodata)),
                    record.location_href)
                break

        assert updateinfo, 'Unable to find updateinfo'

        # Load the metadata with createrepo_c
        log.info('Loading cached updateinfo: %s', updateinfo)
        uinfo = cr.UpdateInfo(updateinfo)

        # Determine which updates are present in the cache
        for update in uinfo.updates:
            existing_ids.add(update.id)

        # Generate metadata for any new builds
        for update in self.updates:
            seen_ids.add(update.alias)
            if not update.alias:
                self.missing_ids.append(update.title)
                continue
            if update.alias in existing_ids:
                notice = None
                for value in uinfo.updates:
                    if value.title == update.title:
                        notice = value
                        break
                if not notice:
                    log.warning('%s ID in cache but notice cannot be found', update.title)
                    self.add_update(update)
                    continue
                if notice.updated_date:
                    if notice.updated_date < update.date_modified:
                        log.debug('Update modified, generating new notice: %s',
                                  update.title)
                        self.add_update(update)
                    else:
                        log.debug('Loading updated %s from cache', update.title)
                        from_cache.add(update.alias)
                elif update.date_modified:
                    log.debug('Update modified, generating new notice: %s',
                              update.title)
                    self.add_update(update)
                else:
                    log.debug('Loading %s from cache', update.title)
                    from_cache.add(update.alias)
            else:
                log.debug('Adding new update notice: %s', update.title)
                self.add_update(update)

        # Add all relevant notices from the cache to this document
        for notice in uinfo.updates:
            if notice.id in from_cache:
                log.debug('Keeping existing notice: %s', notice.title)
                self.uinfo.append(notice)
            else:
                # Keep all security notices in the stable repo
                if self.request is not UpdateRequest.testing:
                    if notice.type == 'security':
                        if notice.id not in seen_ids:
                            log.debug('Keeping existing security notice: %s',
                                      notice.title)
                            self.uinfo.append(notice)
                        else:
                            log.debug('%s already added?', notice.title)
                    else:
                        log.debug('Purging cached stable notice %s', notice.title)
                else:
                    log.debug('Purging cached testing update %s', notice.title)
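For context, self.uinfo in this method is presumably a cr.UpdateInfo document being assembled. A minimal sketch of creating one, appending a notice, and serializing it (assuming the xml_dump method the bindings provide):

uinfo = cr.UpdateInfo()        # start an empty updateinfo document
uinfo.append(notice)           # add a cr.UpdateRecord obtained earlier
xml_string = uinfo.xml_dump()  # serialize back to updateinfo.xml markup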