Example No. 1
    def get_download_url(self):
        # fetch and return dict
        # ignore that these titles are not from the same bangumi.
        resp = [
            {
                "title":
                "[c.c动漫][4月新番][影之诗][ShadowVerse][01][简日][HEVC][1080P][MP4]",
                "link": "http://example.com/Bangumi/1/1.torrent",
            },
            {
                "title":
                "[YMDR][慕留人 -火影忍者新时代-][2017][2][AVC][JAP][BIG5][MP4][1080P]",
                "link": "http://example.com/Bangumi/1/2.torrent",
            },
            {
                "title": "[ZXSUB仲夏动漫字幕组][博人传-火影忍者次世代][03][720P繁体][MP4]",
                "link": "magnet:?xt=urn:btih:233",
            },
        ]

        ret = {}
        for item in resp:
            e = parse_episode(item["title"])
            if e:
                ret[e] = item["link"]

        return ret
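A minimal sketch of consuming the returned mapping; the variable name source and the printed values are assumptions for illustration, not output produced by the library.

# Hypothetical usage; 'source' stands for an instance of the class this method belongs to.
downloads = source.get_download_url()
for episode, link in sorted(downloads.items()):
    # each key is the episode number parsed from a title, each value the torrent/magnet link
    print(episode, link)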
Example No. 2
    def post_fetch(cls, target, data):
        # A title containing the year "2019" can be mis-parsed so that the
        # episode comes back as 2019; strip the year and parse again.
        for i in data:
            if i['episode'] != 2019:
                continue
            title = i['title'].replace('2019', '')
            i['episode'] = parse_episode(title)
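The "!= 2019" guard above suggests that a year embedded in a release title can be picked up as the episode number. A quick trace of the clean-up with a made-up title; it shows only the string manipulation, not what parse_episode is guaranteed to return.

# Made-up title; '2019' here is the year, not the episode.
title = '[SomeGroup][Some Show][2019][05][1080P][MP4]'
cleaned = title.replace('2019', '')   # -> '[SomeGroup][Some Show][][05][1080P][MP4]'
episode = parse_episode(cleaned)      # the year can no longer be mistaken for the episode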
Example No. 3
    def get_download_url(self):
        # fetch and return dict
        resp = requests.get('http://www.kirikiri.tv/?m=vod-play-id-4414-src-1-num-2.html').text
        data = re.findall(r"mac_url=unescape\('(.*)?'\)", resp)
        if not data:
            print_error('No data found, maybe the script is out-of-date.', exit_=False)
            return {}

        data = unquote(json.loads('["{}"]'.format(data[0].replace('%u', '\\u')))[0])

        ret = {}
        for i in data.split('#'):
            title, url = i.split('$')
            ret[parse_episode(title)] = url

        return ret
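The decoding line above first rewrites the non-standard '%uXXXX' escapes as JSON '\uXXXX' escapes, lets json.loads resolve them, and then URL-unquotes the result. A self-contained sketch of that step with a made-up payload, assuming unquote is urllib.parse.unquote:

import json
from urllib.parse import unquote

# Made-up sample in the same '%u'-escaped shape as the mac_url payload.
raw = '%u7b2c01%u8bdd$http%3A%2F%2Fexample.com%2Fep01.m3u8'
decoded = unquote(json.loads('["{}"]'.format(raw.replace('%u', '\\u')))[0])
print(decoded)  # -> 第01话$http://example.com/ep01.m3u8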
Example No. 4
    def get_download_url(self):
        # fetch and return dict
        resp = requests.get(
            'http://www.kirikiri.tv/?m=vod-play-id-4414-src-1-num-2.html').text
        data = re.findall(r"mac_url=unescape\('(.*)?'\)", resp)
        if not data:
            print_error('No data found, maybe the script is out-of-date.',
                        exit_=False)
            return {}

        data = unquote(
            json.loads('["{}"]'.format(data[0].replace('%u', '\\u')))[0])

        ret = {}
        for i in data.split('#'):
            title, url = i.split('$')
            ret[parse_episode(title)] = url

        return ret
Example No. 5
    def post_fetch(cls, target, data):
        # For entries whose episode could not be parsed (episode == 0), insert
        # a space before every run of digits in the title, then parse again.
        for i in data:
            title = i['title']
            if i['episode'] != 0:
                continue
            matches = re.findall(r'\d+', title)
            if not matches:
                continue
            for match in matches:
                index = title.index(match)
                if index == 0:
                    continue
                title_list = list(title)
                title_list.insert(index, ' ')
                title = ''.join(title_list)
            i['episode'] = parse_episode(title)
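A short trace of the space-insertion step above, using a made-up title in which the digits are glued to the preceding text; it demonstrates only the string manipulation, not parse_episode's result.

import re

# Made-up title; the episode digits are stuck to the title text.
title = '某某动画05'
for match in re.findall(r'\d+', title):
    index = title.index(match)
    if index != 0:
        title = title[:index] + ' ' + title[index:]
print(title)  # -> '某某动画 05'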
Example No. 6
    def get_download_url(self) -> Dict[int, str]:
        # fetch and return dict
        resp = [
            {
                "title":
                "[YMDR][慕留人 -火影忍者新时代-][2017][2][AVC][JAP][BIG5][MP4][1080P]",
                "link": "http://example.com/Bangumi/1/2.torrent",
            },
            {
                "title": "[ZXSUB仲夏动漫字幕组][博人传-火影忍者次世代][03][720P繁体][MP4]",
                "link": "magnet:?xt=urn:btih:233",
            },
        ]

        ret = {}
        for item in resp:
            e = parse_episode(item["title"])
            if e:
                ret[e] = item["link"]

        return ret
Example No. 7
File: mikan.py Project: scjtqs/BGmi
def parse_episodes(content, bangumi_id, subtitle_list=None) -> List[Episode]:
    result = []
    soup = BeautifulSoup(content, "html.parser")
    container = soup.find("div", class_="central-container")  # type:bs4.Tag
    episode_container_list = {}
    expand_subtitle_map = {}
    for tag in container.contents:
        if not hasattr(tag, "attrs"):
            continue

        class_names = tag.attrs.get("class")
        if class_names is not None and "episode-expand" in class_names:
            expand_subtitle_map[tag.attrs.get("data-subtitlegroupid", None)] = True

        subtitle_id = tag.attrs.get("id", False)
        if subtitle_list:
            if subtitle_id in subtitle_list:
                episode_container_list[tag.attrs.get("id", None)] = tag.find_next_sibling("table")
        else:
            if subtitle_id:
                episode_container_list[tag.attrs.get("id", None)] = tag.find_next_sibling("table")

    for subtitle_id, container in episode_container_list.items():
        _container = container
        if subtitle_id in expand_subtitle_map.keys():
            expand_r = requests.get(
                bangumi_episode_expand_api,
                params={
                    "bangumiId": bangumi_id,
                    "subtitleGroupId": subtitle_id,
                    "take": 200,
                },
            ).text
            expand_soup = BeautifulSoup(expand_r, "html.parser")
            _container = expand_soup.find("table")

        for tr in _container.find_all("tr")[1:]:
            title = tr.find("a", class_="magnet-link-wrap").text
            time_string = tr.find_all("td")[2].string
            result.append(
                Episode(
                    download=server_root[:-1]
                    + tr.find_all("td")[-1].find("a").attrs.get("href", ""),
                    subtitle_group=str(subtitle_id),
                    title=title,
                    episode=parse_episode(title),
                    time=int(time.mktime(time.strptime(time_string, "%Y/%m/%d %H:%M"))),
                )
            )

    return result
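A hedged sketch of driving parse_episodes; the page URL pattern, the bangumi id, and the Episode attributes accessed below are assumptions for illustration, not values taken from the example.

import requests

# Hypothetical bangumi id and page URL; adjust to the actual Mikan site layout.
bangumi_id = "2353"
content = requests.get(f"https://mikanani.me/Home/Bangumi/{bangumi_id}").text
for ep in parse_episodes(content, bangumi_id, subtitle_list=None):
    print(ep.episode, ep.subtitle_group, ep.download)  # assuming Episode exposes these fields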
Example No. 8
def test_episode_parse(title, episode):
    assert (
        parse_episode(title) == episode
    ), f"\ntitle: {title!r}\nepisode: {episode}\nparsed episode: {parse_episode(title)}"
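The (title, episode) signature suggests this test is driven by pytest parametrization. A minimal sketch of how such a case table might look; the decorator and the sample row are assumptions about the surrounding test file (the expected value 1 matches the assertion in the examples below).

import pytest

@pytest.mark.parametrize(
    "title,episode",
    [
        ("[YMDR][哥布林殺手][Goblin Slayer][2018][01][1080p][AVC][JAP][BIG5][MP4-AAC][繁中]", 1),
    ],
)
def test_episode_parse(title, episode):
    assert parse_episode(title) == episode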
Example No. 9
    def test_print_config(self):
        title = (
            "[YMDR][哥布林殺手][Goblin Slayer][2018][01][1080p][AVC][JAP][BIG5][MP4-AAC][繁中]"
        )
        self.assertEqual(1, parse_episode(title))
Example No. 10
def test_print_config():
    title = "[YMDR][哥布林殺手][Goblin Slayer][2018][01][1080p][AVC][JAP][BIG5][MP4-AAC][繁中]"
    episode = parse_episode(title)
    assert episode == 1, episode