def streamtape(url: str) -> str:
    """Resolve a Streamtape page URL into a direct download link.

    Based on https://github.com/zevtyardt/lk21
    https://github.com/SlamDevs/slam-mirrorbot
    """
    return lk21.Bypass().bypass_streamtape(url)
def antfiles(url: str) -> str:
    """Resolve an Antfiles page URL into a direct download link.

    Based on https://github.com/zevtyardt/lk21
    https://github.com/SlamDevs/slam-mirrorbot
    """
    return lk21.Bypass().bypass_antfiles(url)
def anonfiles(url: str) -> str:
    """Resolve an Anonfiles page URL into a direct download link.

    Based on https://github.com/zevtyardt/lk21
    https://github.com/breakdowns/slam-aria-mirror-bot
    """
    return lk21.Bypass().bypass_anonfiles(url)
def hxfile(url: str) -> str:
    """Resolve a Hxfile page URL into a direct download link.

    Based on https://github.com/zevtyardt/lk21
    https://github.com/breakdowns/slam-mirrorbot
    """
    # Hxfile is served through lk21's filesIm bypass.
    return lk21.Bypass().bypass_filesIm(url)
def sbembed(link: str) -> str:
    """Sbembed direct link generator.

    Based on https://github.com/zevtyardt/lk21
    https://github.com/SlamDevs/slam-mirrorbot

    Returns the last entry of the mapping ``bypass_sbembed`` returns
    (presumably keyed by quality label — TODO confirm against lk21).
    """
    dl_url = lk21.Bypass().bypass_sbembed(link)
    # Idiom fix: the original built a list of the dict's values by hand and
    # indexed it with len-1; taking the last value directly is equivalent
    # (dict preserves insertion order) and raises the same IndexError when
    # the bypass returns an empty mapping.
    return list(dl_url.values())[-1]
def letsupload(url: str) -> str:
    """Extract a letsupload.io URL from *url* and bypass it.

    Raises:
        DirectDownloadLinkException: if no letsupload.io link is present.
    """
    matches = re.findall(r'\bhttps?://.*letsupload\.io\S+', url)
    if not matches:
        raise DirectDownloadLinkException("`No Letsupload links found`\n")
    return lk21.Bypass().bypass_url(matches[0])
def streamtape(url: str) -> str:
    """Extract a streamtape.com URL from *url* and bypass it.

    NOTE(review): this redefines ``streamtape`` from earlier in the file;
    this later definition is the one that stays bound at import time.

    Raises:
        DirectDownloadLinkException: if no streamtape.com link is present.
    """
    matches = re.findall(r'\bhttps?://.*streamtape\.com\S+', url)
    if not matches:
        raise DirectDownloadLinkException("`No Streamtape links found`\n")
    return lk21.Bypass().bypass_streamtape(matches[0])
def anon(url: str) -> str:
    """Extract an anonfiles.com URL from *url* and bypass it.

    Raises:
        DirectDownloadLinkException: if no anonfiles.com link is present.
    """
    matches = re.findall(r'\bhttps?://.*anonfiles\.com\S+', url)
    if not matches:
        raise DirectDownloadLinkException("`No anonfiles links found`\n")
    return lk21.Bypass().bypass_url(matches[0])
def nanibiz(url: str) -> str:
    """Extract a naniplay.biz URL from *url* and bypass it.

    NOTE(review): the error message says "Fembed" — presumably naniplay is
    a Fembed mirror; confirm before changing the user-visible text.

    Raises:
        DirectDownloadLinkException: if no naniplay.biz link is present.
    """
    matches = re.findall(r'\bhttps?://.*naniplay\.biz\S+', url)
    if not matches:
        raise DirectDownloadLinkException("`No Fembed links found`\n")
    return lk21.Bypass().bypass_url(matches[0])
def fembed720(url: str) -> str:
    """Extract a layarkacaxxi.icu URL from *url* and return its 720p link.

    Raises:
        DirectDownloadLinkException: if no layarkacaxxi.icu link is present.
        KeyError: if the bypass result has no "720p/mp4" entry.
    """
    matches = re.findall(r'\bhttps?://.*layarkacaxxi\.icu\S+', url)
    if not matches:
        raise DirectDownloadLinkException("`No Fembed links found`\n")
    resolved = lk21.Bypass().bypass_url(matches[0])
    # bypass_url returns a mapping here; pick the fixed 720p variant.
    return resolved["720p/mp4"]
def sbembed(link: str) -> str:
    """Sbembed direct link generator.

    Based on https://github.com/breakdowns/slam-mirrorbot

    Returns the last entry of the mapping ``bypass_sbembed`` returns
    (presumably keyed by quality label — TODO confirm against lk21).
    """
    dl_url = lk21.Bypass().bypass_sbembed(link)
    # Idiom fix: the original appended every dict value to a list and then
    # indexed it with len-1; taking the last value directly is equivalent
    # (dict preserves insertion order) and raises the same IndexError when
    # the bypass returns an empty mapping.
    return list(dl_url.values())[-1]
def letsupload(url: str) -> str:
    """Letsupload direct link generator.

    Based on https://github.com/breakdowns/slam-mirrorbot

    Raises:
        DirectDownloadLinkException: if no letsupload.io link is present.
    """
    matches = re.findall(r'\bhttps?://.*letsupload\.io\S+', url)
    if not matches:
        raise DirectDownloadLinkException("No Letsupload links found\n")
    return lk21.Bypass().bypass_url(matches[0])
def anon(url: str) -> str:
    """Anonfiles direct links generator.

    Based on https://github.com/breakdowns/slam-mirrorbot

    NOTE(review): redefines ``anon`` from earlier in the file; this later
    definition is the one that stays bound at import time.

    Raises:
        DirectDownloadLinkException: if no anonfiles.com link is present.
    """
    matches = re.findall(r'\bhttps?://.*anonfiles\.com\S+', url)
    if not matches:
        raise DirectDownloadLinkException("`No Anonfiles links found`\n")
    return lk21.Bypass().bypass_url(matches[0])
def fembed(link: str) -> str:
    """Fembed direct link generator.

    Based on https://github.com/zevtyardt/lk21
    https://github.com/SlamDevs/slam-mirrorbot

    Returns the last entry of the mapping ``bypass_fembed`` returns
    (presumably keyed by quality label — TODO confirm against lk21).
    """
    dl_url = lk21.Bypass().bypass_fembed(link)
    # Idiom fix: the original appended every dict value to a list and then
    # indexed it with len-1; taking the last value directly is equivalent
    # (dict preserves insertion order) and raises the same IndexError when
    # the bypass returns an empty mapping.
    return list(dl_url.values())[-1]
def fembed(url: str) -> str:
    """Extract a fembed.com URL from *url* and bypass it.

    NOTE(review): redefines ``fembed`` from earlier in the file; this later
    definition is the one that stays bound at import time.

    Raises:
        DirectDownloadLinkException: if no fembed.com link is present.
    """
    try:
        text_url = re.findall(r'\bhttps?://.*fembed\.com\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("`No Fembed links found`\n")
    dl_url = lk21.Bypass().bypass_fembed(text_url)
    # Idiom fix: the original appended every dict value to a list and then
    # indexed it with len-1; taking the last value directly is equivalent.
    return list(dl_url.values())[-1]
def layarkacaxxi(url: str) -> str:
    """Fembed direct links generator (layarkacaxxi.icu mirror).

    Based on https://github.com/breakdowns/slam-mirrorbot

    Raises:
        DirectDownloadLinkException: if no layarkacaxxi.icu link is present.
    """
    try:
        link = re.findall(r'\bhttps?://.*layarkacaxxi\.icu\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("No Fembed links found\n")
    dl_url = lk21.Bypass().bypass_fembed(link)
    # Idiom fix: the original appended every dict value to a list and then
    # indexed it with len-1; taking the last value directly is equivalent.
    return list(dl_url.values())[-1]
def racaty(url: str) -> str:
    """Racaty direct link generator.

    Based on https://github.com/breakdowns/slam-mirrorbot
    """
    # Racaty is served through lk21's filesIm bypass.
    return lk21.Bypass().bypass_filesIm(url)
def _split_link(link):
    # Helper: parse an optional "<url>|<filename>" pair and choose the
    # download destination path.
    if '|' in link:
        link, filename = link.split('|')
        link = link.strip()
        # FIX: the original called filename.strip() and discarded the result.
        filename = filename.strip()
        # FIX: the original never used the parsed filename for the path, so
        # "url|name" had no effect; join it onto the download directory.
        dl_path = os.path.join(Config.DOWNLOAD_DIRECTORY, filename)
    else:
        link = link.strip()
        filename = os.path.basename(link)
        dl_path = Config.DOWNLOAD_DIRECTORY
    return link, filename, dl_path


def _fetch_and_upload(sent_message, user_id, link, dl_path):
    # Helper: download `link` into `dl_path`, mirror the file to the user's
    # Google Drive, then delete the local copy.
    sent_message.edit(Messages.DOWNLOADING.format(link))
    result, file_path = download_file(link, dl_path)
    if result:
        sent_message.edit(
            Messages.DOWNLOADED_SUCCESSFULLY.format(
                os.path.basename(file_path),
                humanbytes(os.path.getsize(file_path))))
        msg = GoogleDrive(user_id).upload_file(file_path)
        sent_message.edit(msg)
        LOGGER.info(f'Deleting: {file_path}')
        os.remove(file_path)
    else:
        sent_message.edit(Messages.DOWNLOAD_ERROR.format(file_path, link))


def _download(client, message):
    """Command handler: resolve a pasted link (Google Drive, MediaFire,
    Zippyshare, Anonfiles/Bayfiles, Racaty, ouo, or a plain direct URL),
    download it and upload the result to the user's Google Drive.

    Only non-media (text) messages are handled.
    """
    user_id = message.from_user.id
    if message.media:
        return
    sent_message = message.reply_text('🕵️**Checking link...**', quote=True)
    link = message.command[1] if message.command else message.text

    if 'drive.google.com' in link:
        sent_message.edit(Messages.CLONING.format(link))
        LOGGER.info(f'Copy:{user_id}: {link}')
        msg = GoogleDrive(user_id).clone(link)
        sent_message.edit(msg)
    elif 'mediafire.com' in link:
        link, _filename, dl_path = _split_link(link)
        try:
            # FIX: the original searched the undefined name `url` (NameError).
            link = re.findall(r'\bhttps?://.*mediafire\.com\S+', link)[0]
        except IndexError:
            return sent_message.edit(
                "No MediaFire links found\nMight Be File/Files Deleted. \nOpen The Link And Check"
            )
        page = BeautifulSoup(requests.get(link).content, 'lxml')
        info = page.find('a', {'aria-label': 'Download file'})
        link = info.get('href')
        _fetch_and_upload(sent_message, user_id, link, dl_path)
    elif 'zippyshare.com' in link:
        link, _filename, dl_path = _split_link(link)
        try:
            # FIX: the original searched the undefined name `url` (NameError).
            link = re.findall(r'\bhttps?://.*zippyshare\.com\S+', link)[0]
        except IndexError:
            raise DirectDownloadLinkException("No Zippyshare links found")
        try:
            base_url = re.search('http.+.zippyshare.com', link).group()
            pages = BeautifulSoup(requests.get(link).content, "lxml")
            # The direct URL is assembled by an inline script; try the
            # "center" layout first, then fall back to the "right" layout.
            try:
                js_script = pages.find("div", {
                    "class": "center"
                }).find_all("script")[1]
            except IndexError:
                js_script = pages.find("div", {
                    "class": "right"
                }).find_all("script")[0]
            js_content = re.findall(r'\.href.=."/(.*?)";', str(js_script))
            js_content = 'var x = "/' + js_content[0] + '"'
            evaljs = EvalJs()
            setattr(evaljs, "x", None)
            evaljs.execute(js_content)
            link = base_url + getattr(evaljs, "x")
        except IndexError:
            # FIX: the original did `raise sent_message.edit(...)`, which
            # tries to raise a Message object (TypeError) instead of
            # reporting the failure to the user.
            return sent_message.edit("ERROR: Can't find download button")
        _fetch_and_upload(sent_message, user_id, link, dl_path)
    elif 'anonfiles.com' in link or 'bayfiles.com' in link:
        # Bayfiles uses the same bypass as Anonfiles; the original duplicated
        # the branch (and silently dropped download errors for Bayfiles).
        link, _filename, dl_path = _split_link(link)
        link = lk21.Bypass().bypass_anonfiles(link)
        _fetch_and_upload(sent_message, user_id, link, dl_path)
    elif 'racaty.net' in link:
        link, _filename, dl_path = _split_link(link)
        try:
            # FIX: the original searched the undefined name `url` (NameError).
            link = re.findall(r'\bhttps?://.*racaty\.net\S+', link)[0]
        except IndexError:
            # FIX: the original raised a plain str, which is a TypeError;
            # raise the project's exception like the other generators.
            raise DirectDownloadLinkException("No Racaty links found\n")
        scraper = cfscrape.create_scraper()
        # FIX: the original fetched/posted the undefined name `url`.
        r = scraper.get(link)
        soup = BeautifulSoup(r.text, "lxml")
        op = soup.find("input", {"name": "op"})["value"]
        ids = soup.find("input", {"name": "id"})["value"]
        rpost = scraper.post(link, data={"op": op, "id": ids})
        rsoup = BeautifulSoup(rpost.text, "lxml")
        link = rsoup.find("a", {"id": "uniqueExpirylink"})["href"].replace(
            " ", "%20")
        _fetch_and_upload(sent_message, user_id, link, dl_path)
    # TODO: 1fichier.com and gdtot handling were commented out in the
    # original and remain unimplemented.
    elif 'ouo.press' in link or 'ouo.io' in link:
        link = lk21.Bypass().bypass_ouo(link)
        sent_message.edit(
            f"Bypassed The Ouo Link That U Provided.\n\nLink = `{link}`")
    else:
        link, _filename, dl_path = _split_link(link)
        LOGGER.info(f'Download:{user_id}: {link}')
        _fetch_and_upload(sent_message, user_id, link, dl_path)