Пример #1
0
 def test_defaults(self):
     """A Torrent built with only name and tracker gets the documented defaults."""
     expected = torrent.Torrent(
         "name",
         "tracker",
         torrent=None,
         magnet=None,
         size="Unknown",
         seeds=-1,
         leeches=None,
         date=None,
     )
     assert torrent.Torrent("name", "tracker") == expected
Пример #2
0
    def _parse(self, raw: str) -> Iterator[torrent.Torrent]:  # pylint: disable=too-many-locals
        """Parse a bt.etree.org result page.

        Args:
          raw: Raw HTML results to parse.

        Returns:
            Iterator of torrents with download URLs and stats.
        """
        soup = bs4.BeautifulSoup(raw, "lxml")
        tracker = self.__class__.__name__
        try:
            # Results live in the sixth table; the first row is the header.
            for row in soup.select("table")[5].select("tr")[1:]:
                cells = row.select("td")
                name = str(cells[1].a.string)
                stub = cells[2].a.get("href")
                yield torrent.Torrent(
                    name,
                    tracker,
                    torrent=f"http://bt.etree.org/{stub}",
                    size=str(cells[6].string),
                    date=str(cells[5].string),
                    seeds=parse.number(cells[8].a.string),
                    leeches=parse.number(cells[9].a.string),
                )
        except IndexError:
            # Page layout did not match expectations: end the iteration.
            return
Пример #3
0
    def _parse(self, raw: str) -> Iterator[torrent.Torrent]:  # pylint: disable=too-many-locals
        """Parse a linuxtracker.org result page.

        Args:
          raw: Raw HTML results page to parse.

        Returns:
            Iterator of torrents with magnet links and download URLs.
        """
        soup = bs4.BeautifulSoup(raw, "lxml")
        tables = soup.find_all("table", {"class": "lista", "width": "100%"})
        tracker = self.__class__.__name__
        # Result entries sit in the fourth matching table.
        for cell in tables[3].select("td.lista"):
            try:
                name = str(cell.font.a.string)

                anchors = cell.find_all("td", {"align": "right"})[0].find_all("a")
                magnet = anchors[0]["href"]
                url = f"http://linuxtracker.org/{anchors[1]['href']}"

                rows = cell.find_all("tr")
                date = rows[0].get_text().strip().split()[2]
                size = " ".join(rows[1].get_text().split()[1:])
                seeds = parse.number(rows[2].get_text().split()[1])
                leeches = parse.number(rows[3].get_text().split()[1])

                yield torrent.Torrent(
                    name,
                    tracker,
                    torrent=url,
                    magnet=magnet,
                    date=date,
                    size=size,
                    seeds=seeds,
                    leeches=leeches,
                )
            except AttributeError:
                # Cell is not a torrent entry (e.g. layout filler): skip it.
                continue
Пример #4
0
    def _parse(self, raw: str) -> Iterator[torrent.Torrent]:  # pylint: disable=too-many-locals
        """Parse a nyaa.si result page.

        Args:
          raw: Raw HTML results to parse.

        Returns:
            Iterator of torrents with magnet links and download URLs.
        """
        soup = bs4.BeautifulSoup(raw, "lxml")
        tracker = self.__class__.__name__
        try:
            for row in soup.select("tr.default"):
                cells = row.select("td")
                # The title may be preceded by comment links, so take the last <a>.
                name = str(cells[1].select("a")[-1].string)
                anchors = cells[2].select("a")
                url = f"https://nyaa.si{anchors[0].get('href')}"
                yield torrent.Torrent(
                    name,
                    tracker,
                    magnet=anchors[1].get("href"),
                    torrent=url,
                    size=str(cells[3].string),
                    date=str(cells[4].string),
                    seeds=parse.number(cells[5].string),
                    leeches=parse.number(cells[6].string),
                )
        except IndexError:
            # Page layout did not match expectations: end the iteration.
            return
Пример #5
0
    def _parse(self, raw: str) -> Iterator[torrent.Torrent]:  # pylint: disable=too-many-locals
        """Parse an archive.org result page.

        Args:
          raw: Raw HTML results to parse.

        Returns:
            Iterator of torrents with download URLs.
        """
        soup = bs4.BeautifulSoup(raw, "lxml")
        tracker = self.__class__.__name__
        try:
            # Skip the first item-ia div: it is a layout placeholder, not a result.
            for item in soup.select("div.results")[0].select("div.item-ia")[1:]:
                name = str(item.select("div.ttl")[0].string.strip())
                identifier = item.get("data-id")
                url = f"https://archive.org/download/{identifier}/{identifier}_archive.torrent"
                yield torrent.Torrent(name, tracker, torrent=url)
        except IndexError:
            # Page layout did not match expectations: end the iteration.
            return
Пример #6
0
    def _parse(self, raw: str) -> Iterator[torrent.Torrent]:
        """Parse a distrowatch.com result page.

        Args:
          raw: Raw HTML page.

        Returns:
            Iterator of torrents with download URLs and dates.
        """
        soup = bs4.BeautifulSoup(raw, "lxml")
        tracker = self.__class__.__name__
        try:
            # First row of the cellpadding="5" table is the header.
            for row in soup.find("table", cellpadding="5").find_all("tr")[1:]:
                cell = row.select("td.torrent")[1]
                name = str(cell.a.string.lower())
                url = f"https://distrowatch.com/{cell.a.get('href')}"
                date = str(row.select("td.torrentdate")[0].string)
                yield torrent.Torrent(name, tracker, torrent=url, date=date)
        except AttributeError:
            # Table not found (find() returned None): end the iteration.
            return
Пример #7
0
    def _parse(self, raw: str) -> Iterator[torrent.Torrent]:  # pylint: disable=too-many-locals
        """Parse a result page.

        Args:
          raw: Raw HTML results to parse.

        Returns:
            Iterator of torrents with magnet links and download URLs.
        """
        soup = bs4.BeautifulSoup(raw, "lxml")
        tracker = self.__class__.__name__
        try:
            for row in soup.select("tbody#torrentListResults tr"):
                cells = row.select("td")
                name = str(cells[1].select("a")[0].string).strip()

                anchors = cells[2].select("a")
                magnet = anchors[0].get("href")
                url = anchors[1].get("href")

                yield torrent.Torrent(
                    name,
                    tracker,
                    magnet=magnet,
                    torrent=url,
                    size=str(cells[3].string).strip(),
                    seeds=parse.number(cells[4].text),
                    leeches=parse.number(cells[5].text),
                    date=str(cells[7].string),
                )
        except IndexError:
            # Page layout did not match expectations: end the iteration.
            return
Пример #8
0
    def _parse(self, raw: str) -> Iterator[torrent.Torrent]:  # pylint: disable=too-many-locals
        """Parse a TokyoTosho result page.

        Results arrive as row pairs: an "even" description row carrying the
        name plus magnet/torrent links, followed by an "odd" details row
        carrying size, date, seeds and leeches.  A torrent is yielded only
        after its details row has been merged in.

        Args:
          raw: Raw HTML results to parse.

        Returns:
            Iterator of torrents with magnet links and URLs.
        """
        soup = bs4.BeautifulSoup(raw, "lxml")
        tracker = self.__class__.__name__
        result = None  # Description row seen most recently, awaiting details.
        for content in soup.select("tr.category_0"):
            try:
                # Even (description) rows contain a .desc-top cell.
                torrent_ = content.select(".desc-top")[0]
                links = torrent_.select("a")
                magnet = links[0].get("href")
                url = links[1].get("href")
                name = TokyoTosho._parse_name(links[1].contents)
                result = torrent.Torrent(name, tracker, magnet=magnet, torrent=url)
            except IndexError:
                # Odd (details) rows lack .desc-top, so selection raises.
                if result is None:
                    # Details row with no preceding description row: the
                    # original code raised NameError here; skip it instead.
                    continue
                details = content.select("td.desc-bot")[0].get_text().split("|")
                result.size = details[1].split(":")[1].strip()
                result.date = details[2].split()[1]

                stats = content.select("td.stats")[0].select("span")
                result.seeds = parse.number(stats[0].string)
                result.leeches = parse.number(stats[1].string)
                yield result
                # Reset so a duplicate details row cannot mutate and
                # re-yield the torrent that was already emitted.
                result = None
Пример #9
0
 def torrents(self):
     """Two torrents that differ only in date and seed count."""
     first = torrent.Torrent("name", "tracker", date="today", seeds=1, leeches=10)
     second = torrent.Torrent("name", "tracker", date="2018-01-01", seeds=2, leeches=10)
     return [first, second]
Пример #10
0
 def torrent1(self, request):
     """Torrent whose seed count comes from the fixture parameter."""
     seeds = request.param
     return torrent.Torrent("name", "tracker", seeds=seeds)
Пример #11
0
 def torrent2(self):
     """Torrent with a fixed seed count of 20."""
     result = torrent.Torrent("name", "tracker", seeds=20)
     return result