Code Example #1
    def __collect_episodes(self):
        printer("INFO", "Collecting episodes...", self.gui)

        page_count = self.start_page
        while page_count <= self.end_page:
            api_url = "https://animepahe.com/api?m=release&id=" + self.id + "&sort=episode_asc&page=" + str(page_count)
            api_data = self.__get_page_data(api_url)["data"]

            for data in api_data:
                epi_no = data["episode"]
                if epi_no < self.start_episode or epi_no > self.end_episode:
                    continue

                is_canon = data["filler"] == 0

                # AnimePahe does not provide a valid filler flag (it is always 0); kept for completeness
                if not self.is_filler and not is_canon:
                    printer("INFO", "Episode " + str(epi_no) + " is filler... skipping...", self.gui)
                    continue

                episode = Episode("Episode - " + str(epi_no), "Episode - " + str(epi_no))
                episode.id = data["session"]
                self.episodes.append(episode)

            page_count += 1
Code Example #2
    def __collect_episodes(self):
        printer("INFO", "Extracting page URLs...", self.gui)
        episodes = []
        if self.anime_id is not None:
            data = self.__get_episode_data()
            if data is not None:
                soup_html = BeautifulSoup(data, "html.parser")
                anchor_tags = soup_html.findAll("a", href=True)
                for anchor in anchor_tags:
                    href = anchor["href"].strip()
                    epi_no = int(href.split("-")[-1])

                    if epi_no < self.start_episode or epi_no > self.end_episode:
                        continue

                    episode = Episode("Episode - " + str(epi_no),
                                      "Episode - " + str(epi_no))
                    episode.is_direct = False
                    episode.page_url = self.__get_page_url(href)
                    val = self.__set_stream_url(episode)
                    if val:
                        episodes.append(episode)
                    else:
                        printer(
                            "ERROR", "Failed to collect download link for " +
                            episode.title, self.gui)

        return episodes
Code Example #3
    def get_direct_links(self):
        try:
            self.__collect_episodes()
            self.__set_kwik_links()

            return self.episodes
        except Exception as ex:
            printer("ERROR", ex, self.gui)
            return None
Code Example #4
    def get_direct_links(self):
        try:
            episodes = self.__collect_episodes()
            if len(episodes) > 0:
                return episodes
            else:
                return None

        except Exception as ex:
            printer("ERROR", str(ex), self.gui)
            return None
Code Example #5
    def __get_token(self, response):
        printer("INFO", "Collecting access token...", self.gui)
        page = response.text
        # print(page)
        try:
            # raw string so the escaped pipes reach the regex engine as intended
            token = re.search(r"value\|(.*)\|([a-zA-Z])",
                              page).group(1).split("|")[0]
            # print("TOKEN :", token)
            return token
        except Exception as ex:
            printer("ERROR", ex, self.gui)
            # print(page)
            return None
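
For reference, a minimal standalone sketch of the same regex-based token extraction; the page string below is a made-up stand-in for the packed JavaScript served by kwik pages, not a real response.

import re

# Hypothetical fragment of a packed-JS payload; real pages are much longer.
page = "'submit|_token|value|dGhpc2lzYXRva2Vu|POST|document|getElementById'"

match = re.search(r"value\|(.*)\|([a-zA-Z])", page)
token = match.group(1).split("|")[0] if match else None
print(token)  # dGhpc2lzYXRva2Vu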
Code Example #6
    def __get_cookie_and_response(self, episode):
        printer("INFO", "Collecting request header values...", self.gui)

        head = {
            "referer": episode.page_url,
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36 Edg/80.0.361.69"
        }
        response = self.session.get(episode.page_url, headers=head)
        cookie = []
        try:
            cookie.append(response.headers["set-cookie"])
            cookie.append(response)
        except Exception as ex:
            printer("ERROR", ex, self.gui)
            return None

        return cookie
Code Example #7
    def set_direct_link(self, episode):
        cookie = self.__get_cookie_and_response(episode)
        if cookie is None:
            printer("INFO", "Retrying header retrieval...", self.gui)
            sleep(2)
            cookie = self.__get_cookie_and_response(episode)

        if cookie is None:
            printer("ERROR", "Couldn't find headers needed ...", self.gui)
            return False

        # token = self.__get_token(cookie[1])

        if self.token is None:
            self.__set_token(cookie[1].text)

            if self.token is None:
                printer("ERROR", "No token found... skipping", self.gui)
                return False

        # print(cookie[0])
        head = {
            "origin": "https://kwik.cx",
            "referer": episode.page_url,
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36 Edg/80.0.361.69",
            "cookie": cookie[0]
        }

        payload = {
            "_token": self.token
        }

        post_url = "https://kwik.cx/d/" + episode.id

        # print(head)
        # print(payload)
        # print(post_url)

        resp_headers = self.session.post(post_url, data=payload, headers=head, allow_redirects=False)
        # print(resp_headers)
        try:
            episode.download_url = resp_headers.headers["location"]
            # print(resp_headers.headers["location"])
        except Exception as ex:
            # print(resp_headers)
            # printer("ERROR", ex, self.gui)
            self.token = None
            printer("ERROR", "Failed to retrieve direct url for " + episode.title, self.gui)
            return False

        return True
Code Example #8
def extract_episode_names(url, is_filler, start_epi, end_epi, gui=None):
    printer("INFO", "Collecting episode names...", gui)
    episodes = []

    session = requests.Session()

    page = session.get(url).content
    soup_html = BeautifulSoup(page, "html.parser")

    table = soup_html.find("table", attrs={
        "class": "EpisodeList"
    }).find("tbody")

    if is_filler:
        epis = table.findAll("tr")
    else:
        epis = table.findAll(
            "tr",
            attrs={
                "class": ["anime_canon", "mixed_canon/filler", "manga_canon"]
            })

    for epi in epis:
        epi_no = int(epi.find("td", attrs={"class": "Number"}).text)

        if epi_no < start_epi:
            continue

        if epi_no > end_epi:
            break

        title = epi.find("td", attrs={"class": "Title"}).find("a").text
        episode = Episode(title, "Episode - " + str(epi_no))

        episodes.append(episode)
        # print(episode.episode, ":", episode.title)

    printer("INFO", "Successfully collected episode names!")
    return episodes
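
A hedged usage sketch for the helper above; the animefillerlist-style URL, show slug, and episode range are illustrative assumptions rather than values taken from the project.

# Hypothetical call; the URL and range are placeholders for illustration.
names = extract_episode_names(
    "https://www.animefillerlist.com/shows/some-show",  # assumed source of the "EpisodeList" table
    is_filler=False,
    start_epi=1,
    end_epi=12)
for episode in names:
    print(episode.episode, ":", episode.title)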
Code Example #9
    def download(self):
        # print("FFMPEG", which("ffmpeg"))
        if self.episode.download_url is None:
            printer("ERROR", "Download URL is not set for " + self.episode.episode + ", skipping...", self.gui)
            return

        if which("ffmpeg") is None:
            printer("ERROR", "FFMPEG not found! Please install and add to system path to download!", self.gui)
            return

        printer("INFO", "Downloading " + self.episode.episode + "...", self.gui)

        if system() == "Windows":
            self.episode.title = self.__clean_file_name(self.episode.title)

        file_name = self.directory + self.episode.episode + " - " + self.episode.title + ".mp4"

        code = subprocess.call(
            ['ffmpeg', '-i', self.episode.download_url, '-c', 'copy', '-bsf:a', 'aac_adtstoasc', file_name])

        if code == 0:
            printer("INFO", self.episode.episode + " finished downloading...", self.gui)
        else:
            printer("ERROR", self.episode.episode + " failed to download!", self.gui)
Code Example #10
    def __collect_episodes(self):
        printer("INFO", "Extracting page URLs...", self.gui)
        episodes = []
        response = self.session.get(self.url)
        if response.status_code == 200:
            soup_html = BeautifulSoup(response.content, "html.parser")
            epi_tags = soup_html.findAll(
                "ul", attrs={"class": "check-list"})[1].findAll("a", href=True)

            for epi_tag in epi_tags:
                href = epi_tag["href"]
                # print(href)
                epi_no = int(href.split("-")[-1])
                # print(epi_no)

                if epi_no < self.start_episode or epi_no > self.end_episode:
                    continue

                episode = Episode("Episode - " + str(epi_no),
                                  "Episode - " + str(epi_no))
                episode.page_url = href

                try:
                    res = self.__set_download_link(episode)
                    if res:
                        episodes.append(episode)
                    else:
                        printer(
                            "ERROR", "Failed to collect download link for " +
                            episode.title, self.gui)

                except Exception as ex:
                    printer("ERROR", str(ex), self.gui)

        return episodes
Code Example #11
def download(anime_url,
             names_url,
             start_epi,
             end_epi,
             is_filler,
             is_titles,
             token,
             threads,
             directory,
             gui,
             resolution="720",
             is_dub=False):
    global max_val

    session = cloudscraper.create_scraper()
    scraper = None
    episodes = []

    anime_url = anime_url.lower()

    try:
        if "9anime.to" in anime_url:
            printer("INFO", "9Anime URL detected...", gui)
            scraper = NineAnimeScraper(anime_url, start_epi, end_epi, session,
                                       gui, token)

        elif "4anime.to" in anime_url:
            printer("INFO", "4Anime URL detected...", gui)
            scraper = FourAnimeScraper(anime_url, start_epi, end_epi, session,
                                       gui)

        elif "animeultima.to" in anime_url:
            printer("INFO", "AnimeUltima URL detected...", gui)
            scraper = AnimeUltimaScraper(anime_url, start_epi, end_epi,
                                         session, gui, resolution, is_dub)

        elif "animepahe.com" in anime_url:
            printer("INFO", "AnimePahe URL detected...", gui)
            scraper = AnimePaheScraper(anime_url, start_epi, end_epi, session,
                                       gui, resolution, is_filler)

        else:
            printer("ERROR", "Incorrect URL provided!", gui)
            return

        printer("INFO", "Collecting download links...", gui)
        episodes = scraper.get_direct_links()

        if episodes is None:
            printer("INFO", "Retrying to collect download links...", gui)
            sleep(5)
            episodes = scraper.get_direct_links()

        if episodes:
            if is_titles:
                printer("INFO", "Setting episode titles...", gui)
                episodes = EpisodeNamesCollector(
                    names_url, start_epi, end_epi, is_filler,
                    episodes).collect_episode_names()

        else:
            printer("ERROR", "Failed to retrieve download links!", gui)
            return

        max_val = len(episodes)
        # print("is titles", is_titles)
        downloader = Downloader(directory, episodes, threads, gui, is_titles)
        downloader.download()

    except Exception as ex:
        printer("ERROR", ex, gui)
        printer(
            "ERROR",
            "Something went wrong! Please close and restart Anime Downloader to retry!",
            gui)
Code Example #12
    def __set_kwik_links(self):
        printer("INFO", "Collecting kwik links...", self.gui)

        api_url = "https://animepahe.com/api?m=embed&p=kwik&id="
        for episode in self.episodes:
            temp_url = api_url + self.id + "&session=" + episode.id
            # print(temp_url)
            api_data = self.__get_page_data(temp_url)["data"]

            links = list(api_data.keys())

            # default to the 720p link
            link = api_data[links[0]]["720"]["url"]
            link_id = link.split("/")[-1]

            try:
                # switch to 1080p when requested and available
                if self.resolution == "1080":
                    link = api_data[links[1]]["1080"]["url"]
                    link_id = link.split("/")[-1]
            except Exception:
                printer("ERROR", "1080p not available!", self.gui)
                printer("INFO", "Continuing with 720p link...", self.gui)

            episode.id = link_id
            page_url = "https://kwik.cx/f/" + link_id
            episode.page_url = page_url

            if not self.extractor.set_direct_link(episode):  # try setting at retrieval
                printer("INFO", "Second download link retrieval attempt", self.gui)
                if not self.extractor.set_direct_link(episode):
                    printer("INFO", "Third download link retrieval attempt", self.gui)
                    if not self.extractor.set_direct_link(episode):
                        printer("ERROR", "Failed all attempts to retrieve download link for " + episode.title, self.gui)
Code Example #13
def download(anime_url,
             names_url,
             start_epi,
             end_epi,
             is_filler,
             is_titles,
             token,
             threads,
             directory,
             gui,
             resolution="720",
             is_dub=False):
    global max_val

    session = cloudscraper.create_scraper()
    scraper = None
    episodes = []

    anime_url = anime_url.lower()

    try:
        if "9anime.to" in anime_url:
            printer("INFO", "9Anime URL detected...", gui)
            scraper = NineAnimeScraper(anime_url, start_epi, end_epi, session,
                                       gui, token)

        elif "4anime.to" in anime_url:
            printer("INFO", "4Anime URL detected...", gui)
            scraper = FourAnimeScraper(anime_url, start_epi, end_epi, session,
                                       gui)

        elif "animeultima.to" in anime_url:
            printer("INFO", "AnimeUltima URL detected...", gui)
            scraper = AnimeUltimaScraper(anime_url, start_epi, end_epi,
                                         session, gui, resolution, is_dub)

        elif "animeflix" in anime_url:
            printer("INFO", "AnimeFlix URL detected...", gui)
            scraper = AnimeFlixScraper(anime_url, start_epi, end_epi, session,
                                       gui, resolution, is_dub)

        elif "gogoanime" in anime_url:
            printer("INFO", "GoGoAnime URL detected...", gui)
            if "gogoanime.pro" in anime_url:
                printer(
                    "ERROR",
                    "goganime.pro links are not supported yet try gogoanime.io or gogoanime.video",
                    gui)
                return

            scraper = GoGoAnimeScraper(anime_url, start_epi, end_epi, session,
                                       gui, resolution)

        elif "animefreak" in anime_url:
            printer("INFO", "AnimeFreak URL detected...", gui)
            scraper = AnimeFreakScraper(anime_url, start_epi, end_epi, session,
                                        gui, is_dub)

        elif "twist" in anime_url:
            printer("INFO", "Twist URL detected...", gui)
            scraper = TwistScraper(anime_url, start_epi, end_epi, session, gui)

        elif "animepahe.com" in anime_url:
            printer("INFO", "AnimePahe URL detected...", gui)
            api_key = ""
            try:
                with open("settings.json") as json_file:
                    data = json.load(json_file)
                    api_key = data["api_key"]
            except Exception:
                api_key = ""

            if api_key != "" and api_key != "insert_2captcha_api_key":
                session = cloudscraper.create_scraper(recaptcha={
                    'provider': '2captcha',
                    'api_key': api_key
                })

            else:
                printer(
                    "ERROR",
                    "You need 2captcha API key to download from AnimePahe!",
                    gui)
                printer(
                    "ERROR",
                    "Set 2captcha API key in 'settings.json' file to download from AnimePahe!",
                    gui)
                return

            scraper = AnimePaheScraper(anime_url, start_epi, end_epi, session,
                                       gui, resolution, is_filler)

        else:
            printer("ERROR", "Incorrect URL provided!", gui)
            return

        printer("INFO", "Collecting download links...", gui)
        episodes = scraper.get_direct_links()

        if episodes is None:
            printer("INFO", "Retrying to collect download links...", gui)
            sleep(5)
            episodes = scraper.get_direct_links()

        if episodes:
            if is_titles:
                printer("INFO", "Setting episode titles...", gui)
                episodes = EpisodeNamesCollector(
                    names_url, start_epi, end_epi, is_filler,
                    episodes).collect_episode_names()

        else:
            printer("ERROR", "Failed to retrieve download links!", gui)
            return

        max_val = len(episodes)
        # print("is titles", is_titles)
        downloader = Downloader(directory, episodes, threads, gui, is_titles)
        downloader.download()

    except Exception as ex:
        trace = traceback.format_exc()
        print(trace)
        printer("ERROR", ex, gui)
        printer(
            "ERROR",
            "Something went wrong! Please close and restart Anime Downloader to retry!",
            gui)
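
A hedged invocation sketch for the dispatcher above, assuming it is importable as download; every argument value shown is a placeholder for illustration, not a project default.

# Hypothetical call; all argument values are illustrative.
download(
    anime_url="https://www4.gogoanime.io/category/some-show",  # routes to the GoGoAnime branch
    names_url="https://www.animefillerlist.com/shows/some-show",
    start_epi=1,
    end_epi=12,
    is_filler=False,
    is_titles=True,
    token=None,
    threads=2,
    directory="downloads/",
    gui=None,
    resolution="720",
    is_dub=False)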
Code Example #14
    def __extract_direct_links(self):
        printer("INFO", "Collecting download links...", self.gui)

        for episode in self.episodes:
            cookie = self.__get_cookie_and_response(episode)
            if cookie is None:
                printer("INFO", "Retrying ...", self.gui)
                sleep(2)
                cookie = self.__get_cookie_and_response(episode)

            if cookie is None:
                printer("ERROR", "Skipping ...", self.gui)
                continue

            token = self.__get_token(cookie[1])

            if not token:
                printer("ERROR", "No token found... skipping", self.gui)
                continue

            head = {
                "origin": "https://kwik.cx",
                "referer": episode.page_url,
                "sec-fetch-dest": "document",
                "sec-fetch-mode": "navigate",
                "sec-fetch-site": "same-origin",
                "sec-fetch-user": "******",
                "upgrade-insecure-requests": "1",
                "user-agent":
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36 Edg/80.0.361.69",
                "cookie": cookie[0]
            }

            payload = {"_token": token}

            post_url = "https://kwik.cx/d/" + episode.id

            print(head)

            resp_headers = self.session.post(post_url,
                                             data=payload,
                                             headers=head,
                                             allow_redirects=False).headers
            try:
                episode.download_url = resp_headers["location"]
            except Exception as ex:
                print(resp_headers)
                printer("ERROR", ex, self.gui)
                printer("ERROR",
                        "Failed to retrieve direct url for " + episode.title,
                        self.gui)
                continue