    def __init__(self,
                 url,
                 start_episode,
                 end_episode,
                 session,
                 gui=None,
                 resolution="720",
                 is_dub=False):
        super().__init__(url, start_episode, end_episode, session, gui)
        self.is_dub = is_dub
        self.resolution = resolution
        self.base_url = "https://www1.animeultima.to"
        self.extractor = JWPlayerExtractor(None, None)
Example #2
    def __init__(self,
                 url,
                 start_episode,
                 end_episode,
                 session,
                 gui=None,
                 resolution="480"):
        super().__init__(url, start_episode, end_episode, session, gui)
        self.resolution = resolution
        self.extractor = JWPlayerExtractor(None, self.session)
        self.anime_id = None
        self.api_link_bases = [
            'https://ajax.gogocdn.net/ajax/load-list-episode',
            'https://ajax.apimovie.xyz/ajax/load-list-episode'
        ]

        self.__set_anime_id()
Example #3
    def __init__(self,
                 url,
                 start_episode,
                 end_episode,
                 session,
                 gui=None,
                 resolution="720",
                 is_dub=False):
        super().__init__(url, start_episode, end_episode, session, gui)
        self.resolution = resolution
        self.is_dub = is_dub
        url_data = re.search("(.*)/shows/(.*)", self.url)
        self.url_base = url_data.group(1)
        self.slug = url_data.group(2).split("/")[0]
        self.extractor = JWPlayerExtractor(None, None)

        self.anime_id = None
        self.__set_anime_id()
Example #4
class JsUnpacker:
    def __init__(self):
        self.jwp_extractor = JWPlayerExtractor(None, None)

    def eval(self, func):
        # `js` is the jsbeautifier module (the class's module presumably does
        # `import jsbeautifier as js`); beautify() also unpacks packed eval()
        # payloads like the one in the commented-out example below.
        val = js.beautify(func)
        return val

    def extract_link(self, func):
        src = ""
        data = self.eval(func)
        # print(data)
        if "jwplayer" in data:
            print("jwplayer source will be returned")
            links = self.jwp_extractor.extract_sources(data)
            if links is not None and len(links) > 0:
                src = links[0]
            else:
                print("no sources found")
                return None

        else:
            print("Any anchor href will be returned")
            anch = BeautifulSoup(data, "html.parser").find("a")
            if anch is not None:
                src = anch['href'].replace('\"',
                                           '').replace('\'',
                                                       '').replace('\\', '')
            else:
                print("No anchor links found")
                return None

        # print(src)
        return src


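A small runnable sketch of JsUnpacker in isolation (the commented-out block below drives it with a real packed jwplayer payload). It assumes the class's module already imports its dependencies, e.g. `import jsbeautifier as js` and `from bs4 import BeautifulSoup`, plus the project's JWPlayerExtractor; the payload and URL here are toy placeholders.

import jsbeautifier as js  # assumed alias for the `js` name used in eval() above
from bs4 import BeautifulSoup

unpacker = JsUnpacker()

# Toy payload, not a real packed script: "jwplayer" is absent, so
# extract_link() falls through to the anchor-href branch.
payload = 'var x = 1; // <a href="https://example.com/video.mp4">link</a>'
print(unpacker.extract_link(payload))  # -> https://example.com/video.mp4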
# if __name__ == "__main__":
#     fun = '''
#     eval(function(p,a,c,k,e,d){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--){d[e(c)]=k[c]||e(c)}k=[function(e){return d[e]}];e=function(){return'\\w+'};c=1};while(c--){if(k[c]){p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c])}}return p}('3 9="5://1d.r.2/i.o.2/f/8/e.j?u=y%v.F.z.2&A=B&C=D%x%E%G%H%I%J%K%L%M%w%k%l%m%a%a";3 c="5://p-q-h.i.2/s/f/8/e.j/O.15";3 4=[{b:9},{b:c}];!6 t(){17{!6 t(n){1===(""+n/n).N&&n%19!=0||6(){}.1a("1b")(),t(++n)}(0)}1c(n){1e(t,1f)}}();3 7=1g(\'7\');7.1h({1i:\'16 14 (8)<P /><g 13="12-11:10;">Z 1</g>\',Y:\'d%\',X:\'d%\',W:\'5://V.U.T/h-S/R.Q\',18:{},4:4,});',62,81,'||com|var|sources|https|function|player|dub|fone|3D|file|ftwo|100|1_1807|116|span|vod|auengine|mp4|2Fo|2B1m9o4E21cFRaGmn6sqip5a0cGoab1lPjNlUB7s07TRdZ|2BVFXLfoXNCRLQ||appspot|s1|na|googleapis|hls2||GoogleAccessId|40auengine|2Bdb6DVVQ7nTjJ9jgqnmJmEHSdEmv6019e0YBwh|2BsspInuB|auevod|gserviceaccount|Expires|1586640060|Signature|SO9JeiA|2FZljXmaCZFHiwn1miyMU|iam|2FYBkDGoICGeUlaWqpe20SqAHBnillgfl03rc|2FxeI3MoCQn4Eps3gaxHdgp7GjXPIRWxHVAr1uMzuX5RiNvLGarTrndZ|2BxL9B0kpK2rgHf5BEQoQgwuAOhwltn30ikKP0nNYMQj64mErafXWkmvP90t8Qhom|2BUOhgjXpJcZqW9fOly|2BDqttGgU96b|2F0Z3u0m1rYGvtpYJ8M9QcBZM|2F|length|playlist|br|jpg|vs3b1Z2XacICszjz|thumbnails|tv|animeultima|cdn|image|height|width|Episode|12px|size|font|style|Piece|m3u8|One|try|cast|20|constructor|debugger|catch|storage|setTimeout|1e3|jwplayer|setup|title'.split('|'),0,{}))
#     '''
#
#     print(JsUnpacker().extract_link(fun))
Example #5
class AnimeUltimaScraper(BaseScraper):
    def __init__(self,
                 url,
                 start_episode,
                 end_episode,
                 session,
                 gui=None,
                 resolution="720",
                 is_dub=False):
        super().__init__(url, start_episode, end_episode, session, gui)
        self.is_dub = is_dub
        self.resolution = resolution
        self.base_url = "https://www1.animeultima.to"
        self.extractor = JWPlayerExtractor(None, None)

    def get_anime_id(self):
        page = self.session.get(self.url).content
        soup_html = BeautifulSoup(page, "html.parser")

        # print(soup_html)

        button_with_id = soup_html.find("button", attrs={"class": "button"})

        if button_with_id:
            return button_with_id["data-id"]

        else:
            meta_tag = soup_html.find("meta", attrs={"property": "og:image"})
            if meta_tag:
                content_data = meta_tag["content"].split("/")
                return content_data[-2]

        return None

    def get_start_and_end_page(self, anime_id):
        # The episodeList API serves 50 episodes per page, apparently ordered
        # newest-first, so both page bounds are computed from the back of the
        # list: start_page from end_episode and end_page from start_episode.
        start_page = 0
        end_page = 0

        data = self.session.get(
            "https://www1.animeultima.to/api/episodeList?animeId=" +
            anime_id).json()

        # print("start end data")
        # print(data)
        last_page = data["last_page"]
        max_total_epis = last_page * 50

        if self.end_episode >= max_total_epis:
            start_page = 0
        elif (max_total_epis - self.end_episode) % 50 == 0:
            start_page = round((max_total_epis - self.end_episode) / 50) - 1
        else:
            start_page = round((max_total_epis - self.end_episode) / 50)

        if (max_total_epis - self.start_episode) % 50 == 0:
            end_page = round((max_total_epis - self.start_episode) / 50) - 1
        else:
            end_page = round((max_total_epis - self.start_episode) / 50)

        return start_page, end_page

    def get_page_url(self, url):
        # print("get page url")
        page = self.session.get(url).content

        soup_html = BeautifulSoup(page, "html.parser")
        iframe = soup_html.find("iframe")

        if iframe:
            return self.base_url + iframe["src"]

        return None

    def set_stream_url(self, episode):
        # print("set stream")
        self.extractor.url = episode.page_url
        stream_url = self.extractor.extract_stream_link(self.resolution)
        print("Stream URL : " + stream_url)
        episode.download_url = stream_url

    def set_direct_url(self, episode, page_url):
        page = self.session.get(page_url).text
        func = re.search(r"eval\(.*\)", page).group(0)
        eval_data = JsUnpacker().eval(func)
        link = re.search(r'fone\s+=\s+"(.*)"', eval_data).group(1)
        # print(link)
        episode.download_url = link

    def collect_episodes(self, anime_id, start_page, end_page):
        # print("collect epis")
        base_url = "https://www1.animeultima.to/api/episodeList?animeId=" + anime_id + "&page="
        page_counter = start_page

        while page_counter <= end_page:
            url = base_url + str(page_counter)

            data = self.session.get(url).json()
            # print("data")
            # print(data)

            has_dub = data["anime"]["hasDub"]
            epis = data["episodes"]

            for epi in epis:
                epi_no = int(epi["episode_num"])
                # print(str(epi_no))

                if epi_no < self.start_episode or epi_no > self.end_episode:
                    continue

                title = epi["title"]
                page_url = None
                if not self.is_dub:
                    # print("sub")
                    page_url = epi["urls"]["sub"]
                elif has_dub:
                    page_url = epi["urls"]["dub"]
                else:
                    print("Dubbed episodes not available")

                if page_url is not None:
                    page_url = self.get_page_url(page_url)

                if page_url is None:
                    # No usable player page for this episode (e.g. dub
                    # missing), so skip it instead of crashing below.
                    continue

                episode = Episode(title, "Episode - " + str(epi_no))
                episode.page_url = page_url
                # print(episode.page_url)
                if "animeultima.to/e/" not in page_url:
                    episode.is_direct = False
                    self.set_stream_url(episode)
                else:
                    print(
                        "Only direct url found, will use default resolution to download"
                    )
                    self.set_direct_url(episode, page_url)

                self.episodes.append(episode)

                print("Episode -", str(epi_no), "-", title)

            page_counter += 1

    def get_direct_links(self):
        # print("direct links")
        # Retry the anime id lookup (up to three attempts in total), since
        # the lookup can come back empty on a first request.
        anime_id = self.get_anime_id()
        for _ in range(2):
            if anime_id is not None:
                break
            anime_id = self.get_anime_id()

        try:
            # print(anime_id)
            start_page, end_page = self.get_start_and_end_page(anime_id)

            # print(start_page, end_page)
            self.collect_episodes(anime_id, start_page, end_page)

            return self.episodes
        except Exception:
            trace = traceback.format_exc()
            print(trace)
            return None
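A minimal usage sketch for this scraper, assuming the surrounding project supplies BaseScraper, Episode and JWPlayerExtractor, and that a requests session is used; the show URL and episode range below are hypothetical placeholders.

import requests

session = requests.Session()
scraper = AnimeUltimaScraper(
    "https://www1.animeultima.to/a/example-anime",  # hypothetical show URL
    start_episode=1,
    end_episode=12,
    session=session,
    resolution="720",
    is_dub=False)

episodes = scraper.get_direct_links()
if episodes:
    for epi in episodes:
        print(epi.title, epi.download_url)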
Example #6
class AnimeFlixScraper(BaseScraper):
    def __init__(self,
                 url,
                 start_episode,
                 end_episode,
                 session,
                 gui=None,
                 resolution="720",
                 is_dub=False):
        super().__init__(url, start_episode, end_episode, session, gui)
        self.resolution = resolution
        self.is_dub = is_dub
        url_data = re.search("(.*)/shows/(.*)", self.url)
        self.url_base = url_data.group(1)
        self.slug = url_data.group(2).split("/")[0]
        self.extractor = JWPlayerExtractor(None, None)

        self.anime_id = None
        self.__set_anime_id()

    def __set_anime_id(self):
        api_url = "{base}/api/anime/detail?slug={slug}".format(
            base=self.url_base, slug=self.slug)
        data = self.session.get(api_url).json()
        self.anime_id = data["data"]["id"]

    def __get_start_end_page(self):
        limit = 50

        api_url = "{base}/api/episodes?anime_id={id}&limit={limit}".format(
            base=self.url_base, id=self.anime_id, limit=str(limit))
        data = self.session.get(api_url).json()

        last_page = data["meta"]["last_page"]

        start_page = ((self.start_episode - 1) // limit) + 1
        end_page = ((self.end_episode - 1) // limit) + 1

        if end_page > last_page:
            end_page = last_page

        return start_page, end_page, limit

    def __set_download_link(self, episode):
        api_url = "{base}/api/videos?episode_id={id}".format(
            base=self.url_base, id=str(episode.id))
        url_data = self.session.get(api_url).json()
        for src_data in url_data:
            if self.is_dub:
                if src_data["lang"] == "dub" and src_data["type"] != "hls":
                    episode.download_url = src_data["file"]
                    return
            else:
                if (src_data["lang"] == "sub" and src_data["hardsub"]
                        and src_data["type"] == "hls"):
                    master = src_data["file"]
                    # print("master")
                    # print(master)
                    res_stream_link = self.extractor.get_resolution_link(
                        master, self.resolution)
                    episode.download_url = res_stream_link
                    episode.is_direct = False
                    return

    def __collect_episodes(self):
        if self.anime_id is None:
            return None

        episodes = []

        start_page, end_page, limit = self.__get_start_end_page()
        curr_page = start_page
        while curr_page <= end_page:
            api_url = "{base}/api/episodes?anime_id={id}&limit={limit}&page={page}".format(
                base=self.url_base,
                id=self.anime_id,
                limit=str(limit),
                page=str(curr_page))
            curr_page += 1

            api_data = self.session.get(api_url).json()
            for epi in api_data["data"]:
                epi_no = int(epi["episode_num"])

                if epi_no < self.start_episode or epi_no > self.end_episode:
                    continue

                if self.is_dub and epi["dub"] == 0:
                    print("No dubbed version for Episode - {epi}".format(
                        epi=str(epi_no)))
                    continue

                title = epi["title"]
                id = epi["id"]
                episode = Episode(title,
                                  "Episode - {epi}".format(epi=str(epi_no)))
                episode.id = id

                self.__set_download_link(episode)

                episodes.append(episode)

        return episodes

    def get_direct_links(self):
        try:
            episodes = self.__collect_episodes()
            return episodes
        except Exception:
            trace = traceback.format_exc()
            print(trace)
            return None
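The same pattern works for AnimeFlixScraper; note the URL must match the "(.*)/shows/(.*)" pattern parsed in __init__. The domain and slug below are hypothetical placeholders.

import requests

session = requests.Session()
scraper = AnimeFlixScraper(
    "https://animeflix.example/shows/example-anime",  # hypothetical /shows/ URL
    start_episode=1,
    end_episode=3,
    session=session,
    resolution="720",
    is_dub=True)  # dub requests resolve to a direct (non-HLS) file

episodes = scraper.get_direct_links() or []
for epi in episodes:
    print(epi.title, epi.download_url)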
Example #7
class AnimeUltimaScraper(BaseScraper):
    def __init__(self,
                 url,
                 start_episode,
                 end_episode,
                 session,
                 gui=None,
                 resolution="720",
                 is_dub=False):
        super().__init__(url, start_episode, end_episode, session, gui)
        self.is_dub = is_dub
        self.resolution = resolution
        self.base_url = "https://www1.animeultima.to"
        self.extractor = JWPlayerExtractor(None, self.session)

    def get_anime_id(self):
        page = self.session.get(self.url).content
        soup_html = BeautifulSoup(page, "html.parser")

        # print(soup_html)

        button_with_id = soup_html.find("button", attrs={"class": "button"})

        if button_with_id:
            return button_with_id["data-id"]

        else:
            meta_tag = soup_html.find("meta", attrs={"property": "og:image"})
            if meta_tag:
                content_data = meta_tag["content"].split("/")
                return content_data[-2]

    def get_start_and_end_page(self, anime_id):
        # print("start end page")
        start_page = 0
        end_page = 0

        data = self.session.get(
            "https://www1.animeultima.to/api/episodeList?animeId=" +
            anime_id).json()

        last_page = data["last_page"]
        max_total_epis = last_page * 50

        if self.end_episode >= max_total_epis:
            start_page = 0
        elif (max_total_epis - self.end_episode) % 50 == 0:
            start_page = round((max_total_epis - self.end_episode) / 50) - 1
        else:
            start_page = round((max_total_epis - self.end_episode) / 50)

        if (max_total_epis - self.start_episode) % 50 == 0:
            end_page = round((max_total_epis - self.start_episode) / 50) - 1
        else:
            end_page = round((max_total_epis - self.start_episode) / 50)

        return start_page, end_page

    def get_page_url(self, url):
        # print("get page url")
        page = self.session.get(url).content

        soup_html = BeautifulSoup(page, "html.parser")
        iframe = soup_html.find("iframe")

        if iframe:
            return self.base_url + iframe["src"]

        return None

    def collect_episodes(self, anime_id, start_page, end_page):
        # print("collect epis")
        base_url = "https://www1.animeultima.to/api/episodeList?animeId=" + anime_id + "&page="
        page_counter = start_page

        while page_counter <= end_page:
            url = base_url + str(page_counter)

            data = self.session.get(url).json()
            has_dub = data["anime"]["hasDub"]
            epis = data["episodes"]

            for epi in epis:
                epi_no = int(epi["episode_num"])

                if epi_no < self.start_episode or epi_no > self.end_episode:
                    continue

                title = epi["title"]
                page_url = None
                if not self.is_dub:
                    page_url = epi["urls"]["sub"]
                elif has_dub:
                    page_url = epi["urls"]["dub"]
                else:
                    print("Dubbed episodes not available")

                if page_url:
                    page_url = self.get_page_url(page_url)

                if page_url is None:
                    # No usable player page; skip instead of crashing in
                    # set_stream_url().
                    continue

                episode = Episode(title, "Episode - " + str(epi_no))
                episode.page_url = page_url
                episode.is_direct = False
                self.set_stream_url(episode)

                self.episodes.append(episode)

                print("Episode -", str(epi_no), "-", title)

            page_counter += 1

    def set_stream_url(self, episode):
        # print("set stream")
        self.extractor.url = episode.page_url
        stream_url = self.extractor.extract_stream_link(self.resolution)
        print("Stream URL : " + stream_url)
        episode.download_url = stream_url

    def set_stream_urls(self):
        extractor = JWPlayerExtractor(None, self.session)
        for episode in self.episodes:
            extractor.url = episode.page_url
            stream_url = extractor.extract_stream_link(self.resolution)
            episode.download_url = stream_url

    def get_direct_links(self):
        # print("direct links")
        anime_id = self.get_anime_id()
        start_page, end_page = self.get_start_and_end_page(anime_id)

        # print(anime_id)
        # print(start_page, end_page)

        try:
            self.collect_episodes(anime_id, start_page, end_page)

            return self.episodes
        except Exception as ex:
            print(ex)
            return None
Example #10
class GoGoAnimeScraper(BaseScraper):
    def __init__(self,
                 url,
                 start_episode,
                 end_episode,
                 session,
                 gui=None,
                 resolution="480"):
        super().__init__(url, start_episode, end_episode, session, gui)
        self.resolution = resolution
        self.extractor = JWPlayerExtractor(None, self.session)
        self.anime_id = None
        self.api_link_bases = [
            'https://ajax.gogocdn.net/ajax/load-list-episode',
            'https://ajax.apimovie.xyz/ajax/load-list-episode'
        ]

        self.__set_anime_id()

    def __set_anime_id(self):
        response = self.session.get(self.url)
        if response.status_code == 200:
            soup_html = BeautifulSoup(response.content, "html.parser")
            movie_id_tag = soup_html.find("input", attrs={"id": "movie_id"})
            if movie_id_tag is not None:
                self.anime_id = movie_id_tag["value"]

    def __get_episode_data(self):
        for base_link in self.api_link_bases:
            api_link = "{base}?ep_start={start}&ep_end={end}&id={id}".format(
                base=base_link,
                start=self.start_episode,
                end=self.end_episode,
                id=self.anime_id)
            response = self.session.get(api_link)
            if response.status_code == 200:
                return response.content

        return None

    def __get_page_url(self, href):
        base_url = re.search("(.*)/category/", self.url).group(1)
        # print(base_url)
        src = base_url + href
        # print(src)

        return src

    def __set_stream_url(self, episode):
        response = self.session.get(episode.page_url)
        if response.status_code == 200:
            soup_html = BeautifulSoup(response.content, "html.parser")
            item_tag = soup_html.find("li", attrs={"class": "anime"}).find("a")
            streamer_url = item_tag["data-video"]
            if "https" not in streamer_url:
                streamer_url = "https:" + streamer_url

            streamer_resp = self.session.get(streamer_url)
            if streamer_resp.status_code == 200:
                sources = self.extractor.extract_sources(streamer_resp.text)
                src = ""
                for source in sources:
                    if "m3u8" in source:
                        src = source
                        break

                if src != "":
                    res_link_id = self.extractor.get_resolution_link(
                        src, self.resolution)
                    stream_base = re.search(r"(.*)/[\S]+\.m3u8", src).group(1)
                    episode.download_url = stream_base + "/" + res_link_id
                    print("stream url:", episode.download_url)

                    return True

        return False

    def __collect_episodes(self):
        printer("INFO", "Extracting page URLs...", self.gui)
        episodes = []
        if self.anime_id is not None:
            data = self.__get_episode_data()
            if data is not None:
                soup_html = BeautifulSoup(data, "html.parser")
                anchor_tags = soup_html.find_all("a", href=True)
                for anchor in anchor_tags:
                    href = anchor["href"].strip()
                    epi_no = int(href.split("-")[-1])

                    if epi_no < self.start_episode or epi_no > self.end_episode:
                        continue

                    episode = Episode("Episode - " + str(epi_no),
                                      "Episode - " + str(epi_no))
                    episode.is_direct = False
                    episode.page_url = self.__get_page_url(href)
                    val = self.__set_stream_url(episode)
                    if val:
                        episodes.append(episode)
                    else:
                        printer(
                            "ERROR", "Failed to collect download link for " +
                            episode.title, self.gui)

        return episodes

    def get_direct_links(self):
        try:
            episodes = self.__collect_episodes()
            if len(episodes) > 0:
                return episodes
            else:
                return None

        except Exception as ex:
            printer("ERROR", str(ex), self.gui)
            return None
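And a corresponding sketch for GoGoAnimeScraper; the URL must contain "/category/" (see __get_page_url), and the domain and slug below are hypothetical placeholders.

import requests

session = requests.Session()
scraper = GoGoAnimeScraper(
    "https://gogoanime.example/category/example-anime",  # hypothetical URL
    start_episode=1,
    end_episode=5,
    session=session,
    resolution="480")

episodes = scraper.get_direct_links()
if episodes:
    for epi in episodes:
        print(epi.title, epi.download_url)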