Example No. 1
def sources():
    """Return an ordered dictionary of the sources as BuildsURL objects.
       Only return sources which are relevant for the system.
       The GUI will show the sources in the order defined here.
    """
    _sources = OrderedDict()
    if openelec.OS_RELEASE['NAME'] == "OpenELEC":
        builds_url = BuildsURL("http://snapshots.openelec.tv",
                            info_extractors=[CommitInfoExtractor()])
        _sources["Official Snapshot Builds"] = builds_url

        if arch.startswith("RPi"):
            builds_url = BuildsURL("http://resources.pichimney.com/OpenELEC/dev_builds",
                                   info_extractors=[CommitInfoExtractor()])
            _sources["Chris Swan RPi Builds"] = builds_url

        _sources["Official Releases"] = BuildsURL(
            "http://{dist}.mirrors.uk2.net".format(dist=openelec.dist()),
            extractor=OfficialReleaseLinkExtractor)

    _sources["Official Archive"] = BuildsURL(
        "http://archive.{dist}.tv".format(dist=openelec.dist()), extractor=ReleaseLinkExtractor)

    _sources["Milhouse Builds"] = MilhouseBuildsURL()

    if openelec.debug_system_partition():
        _sources["Milhouse Builds (debug)"] = MilhouseBuildsURL(subdir="debug")

    return _sources
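
A minimal usage sketch for the mapping returned above, assuming only the ordering behaviour the docstring describes; the caller shown here is hypothetical:

# Hypothetical caller: walk the sources in the order the GUI will present them.
for name, builds_url in sources().items():
    print(name, builds_url)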
Example No. 2
def get_milhouse_build_info_extractors():
    # Forum thread ids of the Milhouse build threads, chosen per distribution and architecture.
    if openelec.dist() == "openelec":
        if arch.startswith("RPi"):
            threads = [224025, 231092, 250817]
        else:
            threads = [238393]
    elif openelec.dist() == "libreelec":
        if arch.startswith("RPi"):
            threads = [269814, 298461]
        else:
            threads = [269815, 298462]
    else:
        threads = []  # No known Milhouse threads for other distributions.

    for thread_id in threads:
        yield MilhouseBuildInfoExtractor.from_thread_id(thread_id)
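
A short usage sketch: the generator is typically materialised into a list before being handed to a BuildsURL, as Example No. 5 does:

# Hypothetical: collect the per-thread info extractors for the current dist/arch.
info_extractors = list(get_milhouse_build_info_extractors())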
Example No. 3
    def __iter__(self):
        html = self._text()
        # Parse only anchor tags, optionally restricted to a CSS class.
        args = ['a']
        if self.CSS_CLASS is not None:
            args.append(self.CSS_CLASS)

        # Keep only anchors whose href matches the build pattern for this dist/arch.
        self.build_re = re.compile(self.BUILD_RE.format(dist=openelec.dist(), arch=arch), re.I)

        soup = BeautifulSoup(html, 'html.parser',
                             parse_only=SoupStrainer(*args, href=self.build_re))

        for link in soup.contents:
            l = self._create_link(link)
            if l:
                yield l
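
Because __iter__ yields the matching links lazily, callers can loop over an extractor directly. A sketch, assuming the concrete subclasses are constructed with the URL of the page to scrape:

# Hypothetical caller: print every build link found on the page.
for build_link in ReleaseLinkExtractor("http://archive.openelec.tv"):
    print(build_link)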
Example No. 4
    @classmethod
    def maybe_get_tags(cls):
        if cls.tags is None:
            cls.tags = {}
            releases_url = "http://github.com/{dist}/{dist}.tv/tags".format(
                dist=openelec.dist())
            html = requests.get(releases_url).text
            while True:
                cls.tags.update(cls.get_tags_page_dict(html))
                # Follow GitHub's "Next" pagination links until they run out
                # or the tag versions fall below the minimum supported version.
                soup = BeautifulSoup(html, 'html.parser',
                                     parse_only=SoupStrainer(cls.pagination_match))
                next_page_link = soup.find('a', text='Next')
                if next_page_link:
                    href = next_page_link['href']
                    version = [int(p) for p in href.split('=')[-1].split('.')]
                    if version < cls.MIN_VERSION:
                        break
                    html = requests.get(href).text
                else:
                    break
Example No. 5
    def __init__(self, subdir="master"):
        self.subdir = subdir
        url = "http://milhouse.{dist}.tv/builds/".format(dist=openelec.dist().lower())
        super(MilhouseBuildsURL, self).__init__(
            url, os.path.join(subdir, arch.split('.')[0]),
            MilhouseBuildLinkExtractor, list(get_milhouse_build_info_extractors()))
Example No. 6
class OfficialReleaseLinkExtractor(ReleaseLinkExtractor):
    BASE_URL = "http://releases.{dist}.tv".format(dist=openelec.dist())