Example #1
def test_update_download(mock_download_driver: mock.Mock):
    """TODO: mock HTTP requests in this test"""
    name = "hello world"
    mock_website = mock.Mock()
    mock_website.get_maximum_episode = mock.Mock(
        return_value=(
            4,
            [
                Episode(episode=3, download="magnet:mm", title="t 720p", name=name),
                Episode(episode=4, download="magnet:4", title="t 1080p", name=name),
            ],
        )
    )

    Bangumi(name=name, subtitle_group="", keyword=name, cover="").save()
    Followed(bangumi_name=name, episode=2).save()

    with mock.patch("bgmi.lib.controllers.website", mock_website):
        update([], download=True, not_ignore=False)

    mock_download_driver.add_download.assert_has_calls(
        [
            mock.call(url="magnet:mm", save_path=os.path.join(SAVE_PATH, name, "3")),
            mock.call(url="magnet:4", save_path=os.path.join(SAVE_PATH, name, "4")),
        ]
    )
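
A minimal sketch of the behaviour the test above asserts (update_one is a hypothetical helper and the get_maximum_episode call signature is assumed; the real update controller in bgmi.lib.controllers does more): a bangumi followed at episode 2 gets every newer episode handed to the download driver.

def update_one(followed, website, download_driver):
    # get_maximum_episode is mocked above to return (4, [episode 3, episode 4]).
    _, episodes = website.get_maximum_episode(followed.bangumi_name)
    for ep in episodes:
        if ep.episode > followed.episode:
            download_driver.add_download(
                url=ep.download,
                save_path=os.path.join(SAVE_PATH, ep.name, str(ep.episode)),
            )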
Example #2
File: mikan.py Project: scjtqs/BGmi
    def search_by_keyword(self, keyword, count=None):
        result = []
        r = get_text(server_root + "Home/Search", params={"searchstr": keyword})
        s = BeautifulSoup(r, "html.parser")
        # The search page lists one <tr> per result.
        tr_list = s.find_all("tr", attrs={"class": "js-search-results-row"})
        for tr in tr_list:
            title = tr.find("a", class_="magnet-link-wrap").text
            time_string = tr.find_all("td")[2].string
            result.append(
                Episode(
                    download=tr.find("a", class_="magnet-link").attrs.get("data-clipboard-text", ""),
                    name=keyword,
                    title=title,
                    episode=self.parse_episode(title),
                    time=int(time.mktime(time.strptime(time_string, "%Y/%m/%d %H:%M"))),
                )
            )
        return result
Example #3
def test_download_torrent(call: mock.Mock):
    call.return_value = {"result": "rr"}

    DelugeRPC(
        download_obj=Episode(name="n", title="t", download="d.torrent"),
        save_path="save_path_1",
    ).download()

    call.assert_has_calls([
        mock.call("auth.login", [_token]),
        mock.call("web.download_torrent_from_url", ["d.torrent"]),
        mock.call(
            "web.add_torrents",
            [[{
                "path": "rr",
                "options": {
                    "add_paused": False,
                    "compact_allocation": False,
                    "move_completed": False,
                    "download_location": "save_path_1",
                    "max_connections": -1,
                    "max_download_speed": -1,
                    "max_upload_slots": -1,
                    "max_upload_speed": -1,
                },
            }]],
        ),
    ])
Example #4
def test_search_with_filter(mock_download_driver: mock.Mock):
    mock_website = mock.Mock()
    mock_website.search_by_keyword = mock.Mock(
        return_value=[
            Episode(episode=3, download="magnet:mm", title="t 720p", name="海贼王"),
            Episode(episode=4, download="magnet:4", title="t 1080p", name="海贼王"),
        ]
    )

    with mock.patch("bgmi.lib.controllers.website", mock_website):
        main("search 海贼王 --download --regex .*720.*".split())

    mock_website.search_by_keyword.assert_called_once_with("海贼王", count=MAX_PAGE)

    mock_download_driver.add_download.assert_called_once_with(
        url="magnet:mm", save_path=os.path.join(SAVE_PATH, "海贼王", "3")
    )
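
The test implies the search pipeline: query the website, keep only titles matching the --regex argument, then enqueue each surviving episode. A hypothetical sketch of that flow (the names here are illustrative, not the real controller):

def search_and_download(keyword, regex):
    episodes = website.search_by_keyword(keyword, count=MAX_PAGE)
    episodes = episode_filter_regex(episodes, regex)  # drops the 1080p hit above
    for ep in episodes:
        download_driver.add_download(
            url=ep.download,
            save_path=os.path.join(SAVE_PATH, ep.name, str(ep.episode)),
        )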
Example #5
def test_download(client_mock):
    add_torrent = mock.Mock()
    client_mock.return_value.add_torrent = add_torrent

    TransmissionRPC(
        download_obj=Episode(name="n", title="t", download="d"),
        save_path="save_path",
    ).download()

    client_mock.assert_called_once()
    add_torrent.assert_called_with("d", download_dir="save_path")
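
A minimal sketch of the class this test mocks, assuming the transmission-rpc package (Client() and add_torrent(..., download_dir=...) are real transmission-rpc calls; the class layout is an assumption):

import transmission_rpc

class TransmissionRPC:
    def __init__(self, download_obj, save_path):
        self.download_obj = download_obj
        self.save_path = save_path

    def download(self):
        # The test patches Client, so both the constructor call and the
        # add_torrent arguments are asserted without a live daemon.
        client = transmission_rpc.Client()
        client.add_torrent(self.download_obj.download, download_dir=self.save_path)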
Example #6
def test_download():
    with mock.patch("xmlrpc.client.ServerProxy") as m1:
        addUri = mock.Mock()
        m1.return_value.aria2.addUri = addUri
        m1.return_value.aria2.getVersion.return_value = {"version": "1.19.1"}

        Aria2DownloadRPC(
            download_obj=Episode(name="n", title="t", download="d"),
            save_path="save_path",
        ).download()

        addUri.assert_called_with(_token, ["d"], {"dir": "save_path"})
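
The asserted call matches aria2's real RPC method aria2.addUri(secret, uris, options). A sketch under that assumption (the class layout and endpoint URL are hypothetical; _token is the "token:..." RPC secret):

import xmlrpc.client

class Aria2DownloadRPC:
    def __init__(self, download_obj, save_path):
        self.download_obj = download_obj
        self.save_path = save_path
        self.server = xmlrpc.client.ServerProxy("http://127.0.0.1:6800/rpc")

    def download(self):
        # "dir" tells aria2 where to save the finished download.
        self.server.aria2.addUri(_token, [self.download_obj.download], {"dir": self.save_path})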
Example #7
def test_remove_dupe():
    e = Episode.remove_duplicated_bangumi([
        Episode(name="1", title="1", download="1", episode=1),
        Episode(name="1", title="1", download="1", episode=1),
        Episode(name="2", title="2", download="2", episode=2),
        Episode(name="2", title="2", download="2", episode=2),
        Episode(name="3", title="3", download="3", episode=3),
        Episode(name="5", title="5", download="5", episode=5),
    ])
    assert len(e) == 4, e
    assert {x.episode for x in e} == {1, 2, 3, 5}
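
A plausible sketch of the deduplication this test pins down (hypothetical body; the real classmethod on Episode may key on other fields): keep the first Episode seen for each episode number.

def remove_duplicated_bangumi(episodes):
    seen = set()
    unique = []
    for ep in episodes:
        if ep.episode not in seen:  # drop later duplicates of the same episode
            seen.add(ep.episode)
            unique.append(ep)
    return unique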
Example #8
def test_include():
    e = Filter(include="2,3,5").apply_on_episodes([
        Episode(name="1", title="1", download="1", episode=1),
        Episode(name="1", title="1", download="2", episode=1),
        Episode(name="2", title="2", download="3", episode=2),
        Episode(name="2", title="2", download="4", episode=2),
        Episode(name="3", title="3", download="5", episode=3),
        Episode(name="5", title="5", download="6", episode=5),
    ])
    assert len(e) == 4, e
    assert {x.download for x in e} == set("3456")
Example #9
def test_exclude():
    e = Filter(exclude="2,3,5").apply_on_episodes([
        Episode(title="1", download="1", episode=1),
        Episode(title="1", download="2", episode=2),
        Episode(title="2", download="3", episode=1),
        Episode(title="2", download="4", episode=2),
        Episode(title="3", download="5", episode=3),
        Episode(title="5", download="6", episode=5),
    ])
    assert len(e) == 2, e
    assert {x.download for x in e} == {"1", "2"}
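
Together with Example #8, this fixes the semantics: include keeps episodes whose title contains any of the comma-separated keywords, exclude drops them. A minimal sketch of that behaviour (hypothetical; the real Filter model carries more fields):

def apply_keyword_filters(episodes, include=None, exclude=None):
    if include:
        keywords = include.split(",")
        episodes = [e for e in episodes if any(k in e.title for k in keywords)]
    if exclude:
        keywords = exclude.split(",")
        episodes = [e for e in episodes if not any(k in e.title for k in keywords)]
    return episodes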
Example #10
    def fetch_episode_of_bangumi(
        self,
        bangumi_id: str,
        max_page: int,
        subtitle_list: Optional[List[str]] = None,
    ) -> List[Episode]:
        response_data = []
        ret = []
        if subtitle_list:
            for subtitle_id in subtitle_list:
                data = {
                    "tag_id": [bangumi_id, subtitle_id, BANGUMI_TAG]
                }  # type: Dict[str, Any]
                response = get_response(DETAIL_URL, "POST", json=data)
                response_data.extend(response["torrents"])
        else:
            for i in range(max_page):
                if max_page > 1:
                    print_info(f"Fetch page {i + 1} ...")
                data = {
                    "tag_id": [bangumi_id, BANGUMI_TAG],
                    "p": i + 1,
                }
                response = get_response(DETAIL_URL, "POST", json=data)
                if response:
                    response_data.extend(response["torrents"])
        for index, bangumi in enumerate(response_data):
            ret.append(
                Episode(
                    download=TORRENT_URL + bangumi["_id"] + "/download.torrent",
                    subtitle_group=bangumi["team_id"],
                    title=bangumi["title"],
                    episode=self.parse_episode(bangumi["title"]),
                    time=int(
                        datetime.datetime.strptime(
                            bangumi["publish_time"].split(".")[0], "%Y-%m-%dT%H:%M:%S"
                        ).timestamp()
                    ),
                )
            )

            if os.environ.get("DEBUG"):
                print(ret[index].download)

        return ret
Example #11
def test_episode_regex():
    e = episode_filter_regex(
        [
            Episode(name="1", title="720", download="1", episode=1),
            Episode(name="2", title="1080", download="1", episode=1),
            Episode(name="2", title="23", download="2", episode=2),
            Episode(name="1", title="17202", download="2", episode=2),
            Episode(name="3", title="..71..", download="3", episode=3),
            Episode(name="5", title="no", download="5", episode=5),
        ],
        ".*720.*",
    )
    assert len(e) == 2, e
    assert {x.name for x in e} == {"1"}
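
A sketch consistent with this test (hypothetical implementation): keep the episodes whose title matches the pattern.

import re

def episode_filter_regex(episodes, regex):
    pattern = re.compile(regex)
    # ".*720.*" matches titles "720" and "17202" above, nothing else.
    return [e for e in episodes if pattern.match(e.title)]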
Example #12
    def search_by_keyword(self, keyword: str, count: Optional[int] = None) -> list:
        if not count:
            count = 3

        rows = []
        result = []

        for i in range(count):
            data = get_response(SEARCH_URL, "POST", json={"query": keyword, "p": i + 1})
            if "torrents" not in data:
                print_warning("No torrents in response data, please re-run")
                return []
            rows.extend(data["torrents"])

        for info in rows:
            result.append(
                Episode(
                    download=TORRENT_URL + info["_id"] + "/download.torrent",
                    name=keyword,
                    subtitle_group=info["team_id"],
                    title=info["title"],
                    episode=self.parse_episode(info["title"]),
                    time=int(
                        time.mktime(
                            datetime.datetime.strptime(
                                info["publish_time"].split(".")[0], "%Y-%m-%dT%H:%M:%S"
                            ).timetuple()
                        )
                    ),
                )
            )

        # Reverse the results to avoid preferring bangumi collections:
        # downloading a collection works, but it wastes traffic and bandwidth.
        result = result[::-1]
        return result
Example #13
def test_download_magnet(call):
    DelugeRPC(
        download_obj=Episode(name="n", title="t", download="magnet://233"),
        save_path="save_path_1",
    ).download()

    call.assert_called_with(
        "web.add_torrents",
        [[{
            "path": "magnet://233",
            "options": {
                "add_paused": False,
                "compact_allocation": False,
                "move_completed": False,
                "download_location": "save_path_1",
                "max_connections": -1,
                "max_download_speed": -1,
                "max_upload_slots": -1,
                "max_upload_speed": -1,
            },
        }]],
    )
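
Read together with Example #3, the two Deluge tests suggest a branch on the link type: magnets are added directly, while .torrent URLs are first fetched through web.download_torrent_from_url and re-added by the returned path. A hedged sketch (call() and self.options stand in for the real plumbing):

def download(self):
    url = self.download_obj.download
    if url.startswith("magnet:"):
        path = url
    else:
        # Deluge's web UI downloads the .torrent file and returns its local path.
        res = call("web.download_torrent_from_url", [url])
        path = res["result"]
    call("web.add_torrents", [[{"path": path, "options": self.options}]])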
Example #14
    def fetch_episode_of_bangumi(
        self, bangumi_id, max_page=MAX_PAGE, subtitle_list=None
    ):
        """
        get all episode by bangumi id
        example
        ```
            [
                {
                    "download": "magnet:?xt=urn:btih:e43b3b6b53dd9fd6af1199e112d3c7ff15",
                    "subtitle_group": "58a9c1c9f5dc363606ab42ec",
                    "title": "【喵萌奶茶屋】★七月新番★[来自深渊/Made in Abyss][07][GB][720P]",
                    "episode": 0,
                    "time": 1503301292
                },
            ]
        ```

        :param bangumi_id: bangumi_id
        :param subtitle_list: list of subtitle group
        :type subtitle_list: list
        :param max_page: how many page you want to crawl if there is no subtitle list
        :type max_page: int
        :return: list of bangumi
        :rtype: list[dict]
        """
        result = []
        keyword = bangumi_id
        search_url = base_url + "/topics/list/"
        for i in range(max_page):
            url = search_url + "?keyword=" + keyword + "&page=" + str(i + 1)

            if os.environ.get("DEBUG", False):  # pragma: no cover
                print(url)

            r = fetch_url(url)
            bs = BeautifulSoup(r, "html.parser")

            table = bs.find("table", {"id": "topic_list"})
            if table is None:
                break
            tr_list = table.tbody.find_all("tr", {"class": ""})
            for tr in tr_list:
                td_list = tr.find_all("td")

                if td_list[1].a["class"][0] != "sort-2":
                    continue

                time_string = td_list[0].span.string
                name = keyword
                title = td_list[2].find("a", {"target": "_blank"}).get_text(strip=True)
                download = td_list[3].a["href"]
                episode = self.parse_episode(title)
                time = int(Time.mktime(Time.strptime(time_string, "%Y/%m/%d %H:%M")))
                subtitle_group = ""

                tag_list = td_list[2].find_all("span", {"class": "tag"})

                for tag in tag_list:
                    href = tag.a.get("href")
                    if href is None:
                        continue

                    team_id_raw = re.findall(r"team_id\/(.*)$", href)
                    if len(team_id_raw) == 0:
                        continue
                    subtitle_group = team_id_raw[0]

                if subtitle_list and subtitle_group not in subtitle_list:
                    continue

                if os.environ.get("DEBUG", False):  # pragma: no cover
                    print(name, title, subtitle_group, download, episode, time)

                result.append(
                    Episode(
                        title=title,
                        subtitle_group=subtitle_group,
                        download=download,
                        episode=episode,
                        time=time,
                    )
                )

        return result
Example #15
    def search_by_keyword(self, keyword, count=None):
        """
        return a list of dict with at least 4 key: download, name, title, episode
        example:
        ```
            [
                {
                    'name':"路人女主的养成方法",
                    'download': 'magnet:?xt=urn:btih:what ever',
                    'title': "[澄空学园] 路人女主的养成方法 第12话 MP4 720p  完",
                    'episode': 12
                },
            ]
        ```
        :param keyword: search key word
        :type keyword: str
        :param count: how many page to fetch from website
        :type count: int

        :return: list of episode search result
        :rtype: list[dict]
        """
        if count is None:
            count = 3

        result = []
        search_url = base_url + "/topics/list/"
        for i in range(count):
            params = {"keyword": keyword, "page": i + 1}

            if os.environ.get("DEBUG", False):  # pragma: no cover
                print(search_url, params)

            r = fetch_url(search_url, params=params)
            bs = BeautifulSoup(r, "html.parser")

            table = bs.find("table", {"id": "topic_list"})
            if table is None:
                break
            tr_list = table.tbody.find_all("tr", {"class": ""})
            for tr in tr_list:
                td_list = tr.find_all("td")

                if td_list[1].a["class"][0] != "sort-2":
                    continue

                time_string = td_list[0].span.string
                name = keyword
                title = td_list[2].find("a", {"target": "_blank"}).get_text(strip=True)
                download = td_list[3].a["href"]
                episode = self.parse_episode(title)
                time = int(Time.mktime(Time.strptime(time_string, "%Y/%m/%d %H:%M")))

                result.append(
                    Episode(
                        name=name,
                        title=title,
                        download=download,
                        episode=episode,
                        time=time,
                    )
                )

        return result
Example #16
File: mikan.py Project: scjtqs/BGmi
def parse_episodes(content, bangumi_id, subtitle_list=None) -> List[Episode]:
    result = []
    soup = BeautifulSoup(content, "html.parser")
    container = soup.find("div", class_="central-container")  # type:bs4.Tag
    episode_container_list = {}
    expand_subtitle_map = {}
    for tag in container.contents:
        if not hasattr(tag, "attrs"):
            continue

        class_names = tag.attrs.get("class")
        if class_names is not None and "episode-expand" in class_names:
            expand_subtitle_map[tag.attrs.get("data-subtitlegroupid", None)] = True

        subtitle_id = tag.attrs.get("id", False)
        if subtitle_list:
            if subtitle_id in subtitle_list:
                episode_container_list[subtitle_id] = tag.find_next_sibling("table")
        elif subtitle_id:
            episode_container_list[subtitle_id] = tag.find_next_sibling("table")

    for subtitle_id, container in episode_container_list.items():
        _container = container
        if subtitle_id in expand_subtitle_map:
            expand_r = requests.get(
                bangumi_episode_expand_api,
                params={
                    "bangumiId": bangumi_id,
                    "subtitleGroupId": subtitle_id,
                    "take": 200,
                },
            ).text
            expand_soup = BeautifulSoup(expand_r, "html.parser")
            _container = expand_soup.find("table")

        for tr in _container.find_all("tr")[1:]:
            title = tr.find("a", class_="magnet-link-wrap").text
            time_string = tr.find_all("td")[2].string
            result.append(
                Episode(
                    download=server_root[:-1] + tr.find_all("td")[-1].find("a").attrs.get("href", ""),
                    subtitle_group=str(subtitle_id),
                    title=title,
                    episode=parse_episode(title),
                    time=int(time.mktime(time.strptime(time_string, "%Y/%m/%d %H:%M"))),
                )
            )

    return result
Example #17
def test_episode_exclude_word():
    assert Episode(title="a b c", download="").contains_any_words(["a"])
    assert Episode(title="A B c", download="").contains_any_words(["a", "b"])
    assert not Episode(title="a b c", download="").contains_any_words(
        ["d", "ab"])
Example #18
def test_init(call):
    DelugeRPC(
        download_obj=Episode(name="n", title="t", download="d"),
        save_path="save_path",
    )
    call.assert_called_with("auth.login", [_token])
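
A minimal sketch of what the test checks (hypothetical constructor; call() stands in for the real JSON-RPC helper): the client authenticates against the Deluge web UI as soon as it is constructed.

def __init__(self, download_obj, save_path):
    self.download_obj = download_obj
    self.save_path = save_path
    call("auth.login", [_token])  # asserted by the test above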