def __init__(self, url, start_episode, end_episode, session, gui=None, token=None):
    super().__init__(url, start_episode, end_episode, session, gui)

    self.token = token
    self.api_key = None
    self.ts_no = None
    self.server_id = None
    self.site_key = "6LfEtpwUAAAAABoJ_595sf-Hh0psstoatwZpLex1"
    self.server_name = "Mp4upload"
    self.nine_anime_url = "https://9anime.to"
    self.episodes_url = "https://9anime.to/ajax/film/servers/" + url.split(".")[2].split("/")[0]

    # Without an explicit token, fall back to the 2captcha API key from settings.json.
    if not token:
        try:
            with open("settings.json") as json_file:
                data = json.load(json_file)
                self.api_key = data["api_key"]
        except Exception:
            Color.printer("ERROR", "Reading settings file failed! Continuing without API key...", self.gui)
            self.api_key = ""
def get_token(url):
    """Solve the invisible reCAPTCHA on the given page via the 2captcha API."""
    global session, site_key, api_key, gui
    try:
        # Submit the captcha job; the response body is "OK|<captcha_id>".
        captcha_id = session.post(
            "http://2captcha.com/in.php?key={}&method=userrecaptcha&googlekey={}&pageurl={}&invisible=1"
            .format(api_key, site_key, url)).text.split("|")[1]

        # Poll until a worker has solved it; the response body is "OK|<token>".
        recaptcha_answer = session.get(
            "http://2captcha.com/res.php?key={}&action=get&id={}".format(api_key, captcha_id)).text
        while "CAPCHA_NOT_READY" in recaptcha_answer:
            sleep(5)
            recaptcha_answer = session.get(
                "http://2captcha.com/res.php?key={}&action=get&id={}".format(api_key, captcha_id)).text

        return recaptcha_answer.split("|")[1]
    except Exception:
        Color.printer("ERROR", "Failed to solve ReCaptcha!", gui)
        return None
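# The class-based __extract_page_urls below calls
# TwoCaptchaSolver(url, site_key, api_key, session).solve(), but that class is
# not shown in this section. A minimal sketch, assuming it wraps the same
# in.php/res.php polling flow as get_token above; only the constructor
# signature and solve() are taken from the calling code.
from time import sleep


class TwoCaptchaSolver:
    def __init__(self, url, site_key, api_key, session):
        self.url = url
        self.site_key = site_key
        self.api_key = api_key
        self.session = session

    def solve(self):
        try:
            # Submit the job; the response body is "OK|<captcha_id>".
            captcha_id = self.session.post(
                "http://2captcha.com/in.php?key={}&method=userrecaptcha"
                "&googlekey={}&pageurl={}&invisible=1".format(
                    self.api_key, self.site_key, self.url)).text.split("|")[1]
            # Poll until a worker has solved it; the response body is "OK|<token>".
            while True:
                answer = self.session.get(
                    "http://2captcha.com/res.php?key={}&action=get&id={}".format(
                        self.api_key, captcha_id)).text
                if "CAPCHA_NOT_READY" not in answer:
                    break
                sleep(5)
            return answer.split("|")[1]
        except Exception:
            return None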
def __extract_page_urls(self):
    Color.printer("INFO", "Extracting page URLs...", self.gui)

    page = self.session.get(self.url).content
    soup_html = BeautifulSoup(page, "html.parser")

    try:
        server = soup_html.findAll("div", attrs={"class": "server"})[0]
        epi_ranges = server.findAll("ul", attrs={"class": "episodes"})
        for epi_range in epi_ranges:
            epi_tags = epi_range.findAll("a", href=True)
            for epi_tag in epi_tags:
                epi_number = int(epi_tag.text)
                if epi_number < self.start_episode or epi_number > self.end_episode:
                    continue
                episode = Episode(str(epi_number), "Episode - " + str(epi_number))
                episode.page_url = epi_tag["href"]
                self.episodes.append(episode)
    except Exception as ex:
        print(ex)
        return None

    return self.episodes
def extract_download_urls():
    global session, gui
    down_base = "https://9anime.to/ajax/episode/info?"
    Color.printer("INFO", "Extracting download URLs...", gui)
    for episode in episodes:
        if episode.id is None:
            episode.download_url = None
            continue
        # Resolve the episode's mp4upload target page, then extract the direct link.
        url = down_base + "ts=" + ts_no + "&id=" + episode.id + "&server=" + server_id
        target = session.get(url).json()["target"]
        episode.page_url = target
        episode.download_url = Mp4UploadExtractor(target, session).extract_direct_url()
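# Mp4UploadExtractor itself is not shown in this section. A minimal sketch of
# extract_direct_url, reconstructed from the regex approach this function once
# inlined; the "false|...|devicePixelRatio" and "video|...|282" patterns depend
# on mp4upload's packed player script and may have drifted.
import re


class Mp4UploadExtractor:
    def __init__(self, url, session):
        self.url = url
        self.session = session

    def extract_direct_url(self):
        video_page = self.session.get(self.url).content.decode("utf-8")
        # The host prefix and video id are embedded in the packed player script.
        www_base = re.search(r"false\|(.*?)\|devicePixelRatio", video_page).group(1)
        url_id = re.search(r"video\|(.*?)\|282", video_page).group(1)
        return "https://" + www_base + ".mp4upload.com:282/d/" + url_id + "/video.mp4"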
def run(self):
    while True:
        func, args, kwargs = self.tasks.get()
        try:
            func(*args, **kwargs)
        except Exception as ex:
            Color.printer("ERROR", ex, self.gui)
        finally:
            self.tasks.task_done()
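# This run() is a worker thread draining a shared queue. For context, a minimal
# sketch of the surrounding pool, assuming only the ThreadPool(threads, gui)
# constructor and the map()/wait_completion() calls seen in download() below;
# everything else (Worker, add_task, the plain print fallback for Color.printer)
# is illustrative.
from queue import Queue
from threading import Thread


class Worker(Thread):
    """Daemon thread that executes tasks pulled from a shared queue."""

    def __init__(self, tasks, gui=None):
        super().__init__(daemon=True)
        self.tasks = tasks
        self.gui = gui
        self.start()

    def run(self):
        while True:
            func, args, kwargs = self.tasks.get()
            try:
                func(*args, **kwargs)
            except Exception as ex:
                print("ERROR:", ex)  # the project routes this through Color.printer
            finally:
                self.tasks.task_done()


class ThreadPool:
    """Fixed-size pool matching the ThreadPool(threads, gui) usage in download()."""

    def __init__(self, num_threads, gui=None):
        self.tasks = Queue(num_threads)
        for _ in range(num_threads):
            Worker(self.tasks, gui)

    def add_task(self, func, *args, **kwargs):
        self.tasks.put((func, args, kwargs))

    def map(self, func, args_list):
        for args in args_list:
            self.add_task(func, args)

    def wait_completion(self):
        # Block until every queued task has been marked done.
        self.tasks.join()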
def write_data():
    global episodes, gui
    Color.printer("INFO", "Writing results to results.csv file...", gui)
    with open("results.csv", "w") as data_file:
        for episode in episodes:
            data_file.write(episode.episode + "," + episode.download_url + "\n")
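# Hand-rolled CSV writing breaks if a value ever contains a comma; the standard
# csv module quotes such fields automatically. A sketch of the same output with
# csv.writer; write_results and its parameters are illustrative names.
import csv


def write_results(episodes, path="results.csv"):
    with open(path, "w", newline="") as data_file:
        writer = csv.writer(data_file)
        for episode in episodes:
            writer.writerow([episode.episode, episode.download_url])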
def __download_episode(self, episode):
    if system() == "Windows":
        episode.title = self.__clean_file_name(episode.title)

    if episode.is_direct:
        if episode.download_url is None:
            Color.printer("ERROR", "Download URL is not set for " + episode.episode + ", skipping...", self.gui)
            return

        Color.printer("INFO", "Downloading " + episode.episode + "...", self.gui)

        if self.is_titles:
            file_name = self.directory + episode.episode + " - " + episode.title + ".mp4"
        else:
            file_name = self.directory + episode.episode + ".mp4"

        with requests.get(episode.download_url, headers=episode.request_headers, stream=True, verify=False) as r:
            with open(file_name, "wb") as f:
                shutil.copyfileobj(r.raw, f, length=16 * 1024 * 1024)

        Color.printer("INFO", episode.episode + " finished downloading...", self.gui)
    else:
        Color.printer("INFO", "HLS link found. Using custom HLSDownloader to download...", self.gui)
        try:
            HLSDownloader(episode, self.directory, requests.session(), self.gui).download()
        except Exception:
            print(traceback.format_exc())
            Color.printer("ERROR", "Custom HLS Downloader failed! Using FFMPEG to download...", self.gui)
            FFMPEGDownloader(episode, self.directory, self.gui).download()
def __extract_download_urls(self):
    down_base = "https://9anime.to/ajax/episode/info?"
    Color.printer("INFO", "Extracting download URLs...", self.gui)
    for episode in self.episodes:
        if episode.id is None:
            episode.download_url = None
            continue
        url = down_base + "ts=" + self.ts_no + "&id=" + episode.id + "&server=" + self.server_id
        target = self.session.get(url).json()["target"]
        episode.page_url = target
        episode.download_url = Mp4UploadExtractor(target, self.session).extract_direct_url()
def download(self):
    # Disable HTTPS certificate verification; some download hosts serve
    # certificates that fail validation.
    try:
        _create_unverified_https_context = ssl._create_unverified_context
    except AttributeError:
        # Legacy Python that doesn't verify HTTPS certificates by default
        pass
    else:
        # Handle target environment that doesn't support HTTPS verification
        ssl._create_default_https_context = _create_unverified_https_context

    Color.printer("INFO", "Downloading started...", self.gui)

    pool = ThreadPool(self.threads, self.gui)
    pool.map(self.__download_episode, self.episodes)
    pool.wait_completion()

    Color.printer("INFO", "Downloading finished!", self.gui)
def main(start_episode=-1, end_episode=-1, token=None):
    global episodes, download_9anime_url, episodes_url, api_key, gui

    start_episode = int(start_episode)
    end_episode = int(end_episode)

    if not token:
        with open("settings.json") as json_file:
            data = json.load(json_file)
            api_key = data["api_key"]

    if not download_9anime_url:
        download_9anime_url = input("Anime URL : ")
    if start_episode == -1:
        start_episode = int(input("Enter Start Episode : "))
    if end_episode == -1:
        end_episode = int(input("Enter End Episode : "))

    episodes_url = episodes_url + download_9anime_url.split(".")[2].split("/")[0]

    episodes = extract_page_urls(start_episode, end_episode, token)
    if episodes is None:
        return

    if title_url:
        set_titles(start_episode, end_episode)
    else:
        Color.printer("INFO", "animefiller.com URL not provided to collect episode names...", gui)
        Color.printer("INFO", "Skipping collecting episode names...", gui)

    extract_download_urls()
    write_data()
def __init__(self, url, start_episode, end_episode, session, gui=None, token=None):
    super().__init__(url, start_episode, end_episode, session, gui)

    self.token = token
    self.api_key = None
    self.ts_no = None
    self.server_id = None
    self.site_key = "6LfEtpwUAAAAABoJ_595sf-Hh0psstoatwZpLex1"
    self.server_name = "Mp4upload"
    self.nine_anime_url = "https://9anime.to"
    self.headers = {
        "origin": self.nine_anime_url,
        "referer": url,
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.162 Safari/537.36 Edg/80.0.361.109"
    }
    self.episodes_url = "https://9anime.to/ajax/film/servers/" + url.split(".")[2].split("/")[0]

    # Without an explicit token, fall back to the 2captcha API key from settings.json.
    if not token:
        try:
            with open("settings.json") as json_file:
                data = json.load(json_file)
                self.api_key = data["api_key"]
        except Exception:
            Color.printer("ERROR", "Reading settings file failed! Continuing without API key...", self.gui)
            self.api_key = ""
def __download_episode(self, episode):
    if episode.is_direct:
        if episode.download_url is None:
            Color.printer("ERROR", "Download URL is not set for " + episode.episode + ", skipping...", self.gui)
            return

        Color.printer("INFO", "Downloading " + episode.episode + "...", self.gui)

        if system() == "Windows":
            episode.title = self.__clean_file_name(episode.title)

        if self.is_titles:
            file_name = self.directory + episode.episode + " - " + episode.title + ".mp4"
        else:
            file_name = self.directory + episode.episode + ".mp4"

        with requests.get(episode.download_url, stream=True, verify=False) as r:
            with open(file_name, "wb") as f:
                shutil.copyfileobj(r.raw, f, length=16 * 1024 * 1024)

        Color.printer("INFO", episode.episode + " finished downloading...", self.gui)
    else:
        Color.printer("INFO", "HLS link found. Using FFMPEG to download...", self.gui)
        FFMPEGDownloader(episode, self.directory, self.gui).download()
def __extract_download_urls(self):
    Color.printer("INFO", "Extracting download URLs...", self.gui)
    success = True
    for episode in self.episodes:
        page = self.session.get(episode.page_url).content
        soup_html = BeautifulSoup(page, "html.parser")

        # The player element varies: try <video id="video1">, then
        # <div id="video1">, then any <video> tag.
        video_tag = soup_html.find("video", attrs={"id": "video1"})
        if video_tag is None:
            video_tag = soup_html.find("div", attrs={"id": "video1"})
        if video_tag is None:
            video_tag = soup_html.find("video")
        if video_tag is None:
            Color.printer("ERROR", "Download link not found for " + episode.episode, self.gui)
            success = False
            continue

        try:
            episode.download_url = video_tag["src"]
        except KeyError:
            Color.printer("ERROR", "Failed to retrieve download link for " + episode.episode, self.gui)
            success = False
    return success
def print_banner():
    # text2art comes from the "art" package.
    banner = text2art("Anime Downloader")
    Color.printer("BANNER", banner)
def extract_page_urls(start_episode, end_episode, token):
    global session, episodes, nine_anime_url, download_9anime_url, ts_no, api_key, cookies, gui

    session.headers.update({
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'
    })

    if token is None:
        if api_key is None:
            Color.printer("ERROR", "No API Key Provided!", gui)
            sys.exit(0)
        if api_key != "" and api_key != "insert_2captcha_api_key":
            Color.printer("INFO", "Solving recaptcha...", gui)
            token = get_token("https://9anime.to/waf-verify")
            if not token:
                Color.printer("ERROR", "Captcha solving failed!", gui)
                Color.printer("INFO", "Trying to continue ...", gui)

    if token:
        verify(token)
    else:
        Color.printer("INFO", "No API key or token given, trying to continue...", gui)

    Color.printer("INFO", "Extracting page URLs...", gui)

    anime_page = session.get(download_9anime_url).content
    soup_html = BeautifulSoup(anime_page, "html.parser")

    # The "data-ts" attribute on <html> is a timestamp token required by the
    # ajax endpoints.
    ts_no = soup_html.find("html")["data-ts"]
    eps_url = episodes_url + "?ts=" + ts_no

    epi_data = session.get(eps_url).json()["html"]
    soup = BeautifulSoup(epi_data, "html.parser")

    servers_container = soup.find("span", attrs={"class": "tabs"})
    mp4upload_index = get_mp4upload_index(servers_container)
    if mp4upload_index is None:
        return None

    mp4upload_server = soup.findAll("div", attrs={"class": "server"})[mp4upload_index]
    episode_ranges = mp4upload_server.findAll("ul", attrs={"class": "episodes"})

    for episode_range in episode_ranges:
        eps = episode_range.findAll("a", href=True)
        for episode in eps:
            epi_number = int(episode.text)
            if epi_number < start_episode or epi_number > end_episode:
                continue
            epi = Episode(str(epi_number), "Episode - " + str(epi_number))
            epi.page_url = nine_anime_url + episode["href"]
            epi.id = episode["data-id"]
            episodes.append(epi)

    return episodes
def __extract_download_urls(self):
    Color.printer("INFO", "Extracting download URLs...", self.gui)
    success = True
    for episode in self.episodes:
        page = self.session.get(episode.page_url).content
        soup_html = BeautifulSoup(page, "html.parser")

        # The player element varies: try <video id="video1">, then
        # <div id="video1">, then any <video> tag.
        video_tag = soup_html.find("video", attrs={"id": "video1"})
        if video_tag is None:
            video_tag = soup_html.find("div", attrs={"id": "video1"})
        if video_tag is None:
            video_tag = soup_html.find("video")

        if video_tag is None or video_tag.get("src", "") == "":
            # No usable tag; the link may be hidden inside packed
            # (eval-compressed) JavaScript.
            packed_funcs = self.__get_packed(page.decode("utf-8"))
            if len(packed_funcs) > 0:
                src = JsUnpacker().extract_link(packed_funcs[0])
                if src is not None:
                    episode.download_url = src
                    continue
                try:
                    src = JsUnpacker().extract_link(packed_funcs[1])
                    if src is not None:
                        episode.download_url = src
                        continue
                except Exception:
                    pass
            Color.printer("ERROR", "Download link not found for " + episode.episode, self.gui)
            success = False
            continue

        try:
            episode.download_url = video_tag["src"]
        except KeyError:
            Color.printer("ERROR", "Failed to retrieve download link for " + episode.episode, self.gui)
            success = False
    return success
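# Neither __get_packed nor JsUnpacker appears in this section. A minimal sketch
# of the detection half, assuming the pages use the common Dean Edwards
# eval(function(p,a,c,k,e,...)) packer; the regex is reconstructed for
# illustration and the unpacking itself (JsUnpacker.extract_link) is left to
# the project's implementation.
import re

# Matches Dean Edwards-style packed script bodies:
# eval(function(p,a,c,k,e,d){...}(...))
PACKED_RE = re.compile(r"eval\(function\(p,a,c,k,e,?[rd]?\).*?\)\)", re.DOTALL)


def get_packed(page_source: str):
    """Return every packed-JS snippet found in the page source."""
    return PACKED_RE.findall(page_source)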
def __extract_page_urls(self):
    # Hit the waf-verify endpoint first and reuse its cookies for later requests.
    d = self.session.get("https://9anime.to/waf-verify", headers=self.headers, allow_redirects=True)
    self.headers["cookie"] = d.headers["set-cookie"]

    if self.token is None:
        if self.api_key != "" and self.api_key != "insert_2captcha_api_key":
            Color.printer("INFO", "Solving recaptcha...", self.gui)
            captcha_solver = TwoCaptchaSolver("https://9anime.to/waf-verify", self.site_key, self.api_key, self.session)
            self.token = captcha_solver.solve()
            if not self.token:
                Color.printer("ERROR", "Captcha solving failed!", self.gui)
                Color.printer("INFO", "Trying to continue ...", self.gui)

    if self.token:
        self.__verify()
    else:
        Color.printer("INFO", "No API key or token given, trying to continue...", self.gui)

    Color.printer("INFO", "Extracting page URLs...", self.gui)

    data = self.session.get(self.url, headers=self.headers)
    anime_page = data.content
    soup_html = BeautifulSoup(anime_page, "html.parser")

    try:
        # The "data-ts" attribute on <html> is a timestamp token required by
        # the ajax endpoints.
        self.ts_no = soup_html.find("html")["data-ts"]
        eps_url = self.episodes_url + "?ts=" + self.ts_no
        self.headers["referer"] = eps_url

        resp = self.session.get(eps_url, headers=self.headers, allow_redirects=False)
        epi_data = resp.json()["html"]
        soup = BeautifulSoup(epi_data, "html.parser")

        servers_container = soup.find("span", attrs={"class": "tabs"})
        mp4upload_index = self.__get_mp4upload_index(servers_container)
        if mp4upload_index is None:
            return None

        mp4upload_server = soup.findAll("div", attrs={"class": "server"})[mp4upload_index]
        episode_ranges = mp4upload_server.findAll("ul", attrs={"class": "episodes"})

        for episode_range in episode_ranges:
            eps = episode_range.findAll("a", href=True)
            for episode in eps:
                epi_number = int(episode.text)
                if epi_number < self.start_episode or epi_number > self.end_episode:
                    continue
                epi = Episode(str(epi_number), "Episode - " + str(epi_number))
                epi.page_url = self.nine_anime_url + episode["href"]
                epi.id = episode["data-id"]
                self.episodes.append(epi)
    except Exception as ex:
        Color.printer("ERROR", ex, self.gui)
        return None

    return self.episodes