Example #1
    def get_package_real(self, obs_name, upstream_name):
        m = Message()
        f = Fetcher()

        base = "%s/%s" % (self.base_url, upstream_name)
        url = "%s/cache.json" % base
        fp = f.get(url)
        j = json.load(fp)

        stable = []
        unstable = []

        if upstream_name in PackageSource.no_odd_even_rule:
            versions = j[2][upstream_name]
        else:
            # GNOME-style versioning: an even minor number marks a stable
            # release, an odd one a development release.
            for version in j[2][upstream_name]:
                v = version.split(".")
                major = int(v[0])
                minor = int(v[1])

                # Packages pinned to GNOME 2 skip everything outside the
                # 2.x series, including the 2.9x development snapshots.
                if obs_name in self.gnome2 and (major != 2 or minor >= 90):
                    continue

                if minor % 2 == 0:
                    stable.append(version)
                else:
                    unstable.append(version)

            versions = stable

        if len(versions) == 0:
            m.warn("could not parse json data: %s" % j[2])
            return None

        return Package(obs_name, versions)
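
The else branch above implements the GNOME odd/even versioning convention: an even minor number marks a stable release, an odd one a development release. A minimal self-contained sketch of that rule with a hypothetical version list (Message, Fetcher, Package, and PackageSource belong to the surrounding codebase and are not needed here):

    def split_stable_unstable(versions):
        stable, unstable = [], []
        for version in versions:
            # Even minor -> stable series, odd minor -> development series.
            minor = int(version.split(".")[1])
            if minor % 2 == 0:
                stable.append(version)
            else:
                unstable.append(version)
        return stable, unstable

    print(split_stable_unstable(["3.12.1", "3.13.0", "3.14.2"]))
    # -> (['3.12.1', '3.14.2'], ['3.13.0'])
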
Example #2
    def expand_subdirs(self, url, glob_char="*"):
        """ Expand glob_char in the given URL with the latest dir at that level
            Example URL: http://www.example.com/foo/*/

            The globbing char needs to be enclosed by slashes like "/*/".
        """
        f = Fetcher()

        glob_pattern = "/%s/" % glob_char
        glob_pos = url.find(glob_pattern)

        # url until first slash before glob_char
        url_prefix = url[0:glob_pos + 1]

        # everything after the slash after glob_char
        url_suffix = url[glob_pos + len(glob_pattern):]

        if url_prefix != "":
            dir_listing = f.get(url_prefix).read()
            if not dir_listing:
                return url
            subdirs = []
            regex = Index.__text_regex if url.startswith("ftp://") \
                    else Index.__html_regex
            for match in regex.finditer(dir_listing):
                subdir = match.group(1)
                if subdir not in (".", ".."):
                    subdirs.append(subdir)
            if not subdirs:
                return url
            latest = upstream_max(subdirs)

            url = "%s%s/%s" % (url_prefix, latest, url_suffix)
            return self.expand_subdirs(url, glob_char)
        return url
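
The heart of expand_subdirs is the slicing around the "/*/" pattern: everything up to and including the slash before the glob char becomes the prefix, everything after the slash behind it the suffix. A self-contained sketch with a hypothetical URL and no network access:

    url = "http://www.example.com/foo/*/bar.tar.gz"
    glob_pattern = "/*/"
    glob_pos = url.find(glob_pattern)

    url_prefix = url[0:glob_pos + 1]                 # "http://www.example.com/foo/"
    url_suffix = url[glob_pos + len(glob_pattern):]  # "bar.tar.gz"

    # With a (hypothetical) latest subdirectory "1.2" picked from the
    # directory listing, the rebuilt URL would be:
    print("%s%s/%s" % (url_prefix, "1.2", url_suffix))
    # -> http://www.example.com/foo/1.2/bar.tar.gz
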
Example #3
    def parse_repo_data(self):
        f = Fetcher()

        url = self.get_repomd_path()
        md = f.get(url)
        tree = etree.ElementTree()
        tree.parse(md)
        ns = "http://linux.duke.edu/metadata/repo"
        configs = tree.findall('{%s}data' % ns)
        for c in configs:
            if c.attrib.get("type") == "primary":
                loc = c.find('{%s}location' % ns)
                dbfile = loc.attrib.get("href")
                fpgz = f.get("%s/%s" % (self.url, dbfile))
                # The payload is gzip-compressed bytes, so write in binary mode.
                with open("primary.xml.gz", "wb") as local:
                    local.write(fpgz.read())
                root = etree.parse("primary.xml.gz")
                ns2 = "http://linux.duke.edu/metadata/common"
                for s in root.findall('{%s}package' % ns2):
                    arch = s.find('{%s}arch' % ns2).text
                    if arch == 'src':
                        name = s.find('{%s}name' % ns2)
                        version = s.find('{%s}version' % ns2)
                        ver = version.attrib.get("ver")
                        self.packages[name.text] = ver
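
The lookups above rely on ElementTree's Clark notation, where the namespace URI is spelled out in braces before the tag name. A self-contained sketch using an inline repomd.xml fragment instead of a fetched file (the fragment is illustrative, not taken from a real repository):

    import xml.etree.ElementTree as etree

    repomd = """<repomd xmlns="http://linux.duke.edu/metadata/repo">
      <data type="primary">
        <location href="repodata/primary.xml.gz"/>
      </data>
    </repomd>"""

    ns = "http://linux.duke.edu/metadata/repo"
    root = etree.fromstring(repomd)
    for data in root.findall("{%s}data" % ns):
        if data.attrib.get("type") == "primary":
            loc = data.find("{%s}location" % ns)
            print(loc.attrib.get("href"))  # -> repodata/primary.xml.gz
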
Example #4
    def get_package_real(self, obs_name, upstream_name):
        m = Message()
        f = Fetcher()

        regex = self.urldb[obs_name][0]
        url = self.urldb[obs_name][1]

        url = self.expand_subdirs(url)
        page = f.get(url)
        upstream_versions = re.findall(regex, page.read())
        if not upstream_versions:
            m.warn("could not parse upstream versions for %s" % obs_name)
            return None
        return Package(obs_name, upstream_versions)
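
This variant scrapes version numbers from a download page with a per-package regex stored in self.urldb. A self-contained sketch against an inline listing (both the listing and the pattern are hypothetical):

    import re

    page = """
    foo-1.0.tar.gz
    foo-1.2.tar.gz
    """

    regex = r"foo-([\d.]+)\.tar\.gz"
    upstream_versions = re.findall(regex, page)
    print(upstream_versions)  # -> ['1.0', '1.2']
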
Example #5
    def parse_repo_data(self):
        # Simple, yet effective caching...
        if os.path.exists("./primary.xml.gz"):
            self.parse_packages("primary.xml.gz")
            return

        f = Fetcher()
        url = self.get_repomd_path()
        md = f.get(url)
        tree = etree.ElementTree()
        tree.parse(md)
        ns = "http://linux.duke.edu/metadata/repo"
        configs = tree.findall('{%s}data' % ns)
        for c in configs:
            if c.attrib.get("type") == "primary":
                loc = c.find('{%s}location' % ns)
                dbfile = loc.attrib.get("href")
                fpgz = f.get("%s/%s" % (self.url, dbfile))
                # The payload is gzip-compressed bytes, so write in binary mode.
                with open("primary.xml.gz", "wb") as local:
                    local.write(fpgz.read())
                self.parse_packages("primary.xml.gz")
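
The caching here is simply "download once, then reuse the local file". The same pattern as a small standalone helper (fetch is a hypothetical callable standing in for Fetcher().get(url).read()):

    import os

    def fetch_cached(url, local_path, fetch):
        # Only hit the network if the local copy does not exist yet.
        if not os.path.exists(local_path):
            with open(local_path, "wb") as fp:
                fp.write(fetch(url))
        return local_path
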