def racaty(url: str) -> str:
    """Racaty direct link generator.

    Scrapes the hidden form fields on the racaty.net page and submits
    them to obtain the expiring direct download URL.

    :param url: text that should contain a racaty.net link
    :return: direct download URL
    :raises DirectDownloadLinkException: when no racaty.net link is found
    """
    try:
        link = re.findall(r"\bhttps?://.*racaty\.net\S+", url)[0]
    except IndexError:
        raise DirectDownloadLinkException("`No Racaty links found`\n")
    reqs = requests.get(link)
    bss = BeautifulSoup(reqs.text, "html.parser")
    op = bss.find("input", {"name": "op"})["value"]
    # Renamed from `id` so the builtin is not shadowed.
    file_id = bss.find("input", {"name": "id"})["value"]
    rep = requests.post(link, data={"op": op, "id": file_id})
    bss2 = BeautifulSoup(rep.text, "html.parser")
    return bss2.find("a", {"id": "uniqueExpirylink"})["href"]
def yandex_disk(url: str) -> str:
    """Yandex.Disk direct links generator.

    Based on https://github.com/wldhx/yadisk-direct

    :param url: text that should contain a yadi.sk link
    :return: direct download URL resolved via the Yandex cloud API
    :raises DirectDownloadLinkException: when no yadi.sk link is found,
        or the API response has no ``href`` (file missing / limit hit)
    """
    try:
        link = re.findall(r'\bhttps?://.*yadi\.sk\S+', url)[0]
    except IndexError:
        # Bug fix: previously *returned* the error text, so callers
        # would treat the message itself as a download link. Raise,
        # consistent with every other generator in this module.
        raise DirectDownloadLinkException("ERROR: No Yandex.Disk links found\n")
    api = 'https://cloud-api.yandex.net/v1/disk/public/resources/download?public_key={}'
    try:
        return requests.get(api.format(link)).json()['href']
    except KeyError:
        raise DirectDownloadLinkException("ERROR: File not found/Download limit reached\n")
def direct_link_generator(link: str):
    """ direct links generator """
    if not link:
        raise DirectDownloadLinkException("`No links found!`")
    # Ordered domain -> resolver table; first match wins, mirroring the
    # original elif chain exactly.
    routes = (
        ('zippyshare.com', zippy_share),
        ('yadi.sk', yandex_disk),
        ('cloud.mail.ru', cm_ru),
        ('mediafire.com', mediafire),
        ('uptobox.com', uptobox),
        ('osdn.net', osdn),
        ('github.com', github),
    )
    for needle, resolver in routes:
        if needle in link:
            return resolver(link)
    raise DirectDownloadLinkException(
        f'No Direct link function found for {link}')
def letsupload(url: str) -> str:
    """ Letsupload direct link generator
    Based on https://github.com/zevtyardt/lk21
             https://github.com/SlamDevs/slam-mirrorbot """
    matches = re.findall(r'\bhttps?://.*letsupload\.io\S+', url)
    if not matches:
        raise DirectDownloadLinkException("No Letsupload links found\n")
    # Delegate the actual bypass to the lk21 helper library.
    return lk21.Bypass().bypass_url(matches[0])
def pixeldrain(url: str) -> str:
    """Pixeldrain direct link generator.

    Based on https://github.com/yash-dk/TorToolkit-Telegram

    :param url: pixeldrain share link (file id is the last path segment)
    :return: pixeldrain API download URL for the file
    :raises DirectDownloadLinkException: when the info API reports failure
    """
    url = url.strip("/ ")
    file_id = url.split("/")[-1]
    info_link = f"https://pixeldrain.com/api/file/{file_id}/info"
    dl_link = f"https://pixeldrain.com/api/file/{file_id}"
    resp = requests.get(info_link).json()
    if resp["success"]:
        return dl_link
    # Bug fix: `resp` is already the decoded JSON dict, so the previous
    # `resp.text["value"]` raised AttributeError; index the dict directly.
    # (Also fixed the "Cant't" typo in the message.)
    raise DirectDownloadLinkException(
        "ERROR: Can't download due to {}.".format(resp["value"]))
def direct_link_generator(link: str):
    """ direct links generator """
    if not link:
        raise DirectDownloadLinkException("`No links found!`")
    # Domain -> handler pairs, checked in the original elif order.
    dispatch = (
        ("zippyshare.com", zippy_share),
        ("yadi.sk", yandex_disk),
        ("cloud.mail.ru", cm_ru),
        ("mediafire.com", mediafire),
        ("osdn.net", osdn),
        ("github.com", github),
        ("racaty.net", racaty),
    )
    for domain, handler in dispatch:
        if domain in link:
            return handler(link)
    raise DirectDownloadLinkException(
        f"No Direct link function found for {link}")
def krakenfiles(page_link: str) -> str:
    """krakenfiles direct link generator.

    Based on https://github.com/tha23rd/py-kraken
    By https://github.com/junedkh

    :param page_link: krakenfiles.com page URL
    :return: direct download URL from the krakenfiles download endpoint
    :raises DirectDownloadLinkException: on a bad page, missing file
        hash, or a download response without a ``url`` field
    """
    page_resp = requests.session().get(page_link)
    soup = BeautifulSoup(page_resp.text, "lxml")
    try:
        token = soup.find("input", id="dl-token")["value"]
    except TypeError:
        # Narrowed from a bare `except`: find() returned None (no
        # dl-token input), so subscripting raises TypeError.
        raise DirectDownloadLinkException(f"Page link is wrong: {page_link}")
    hashes = [
        item["data-file-hash"]
        for item in soup.find_all("div", attrs={"data-file-hash": True})
    ]
    if not hashes:
        raise DirectDownloadLinkException(f"Hash not found for : {page_link}")
    dl_hash = hashes[0]
    payload = f'------WebKitFormBoundary7MA4YWxkTrZu0gW\r\nContent-Disposition: form-data; name="token"\r\n\r\n{token}\r\n------WebKitFormBoundary7MA4YWxkTrZu0gW--'
    headers = {
        "content-type": "multipart/form-data; boundary=----WebKitFormBoundary7MA4YWxkTrZu0gW",
        "cache-control": "no-cache",
        "hash": dl_hash,
    }
    # Bug fix: the URL previously interpolated the *builtin* `hash`
    # function (rendering as "<built-in function hash>") instead of the
    # scraped file hash.
    dl_link_resp = requests.session().post(
        f"https://krakenfiles.com/download/{dl_hash}",
        data=payload, headers=headers)
    dl_link_json = dl_link_resp.json()
    if "url" in dl_link_json:
        return dl_link_json["url"]
    raise DirectDownloadLinkException(
        f"Failed to acquire download URL from kraken for : {page_link}")
def gdtot(url: str) -> str:
    """Gdtot google drive link generator.

    By https://github.com/xcscxr

    :param url: gdtot share link
    :return: ``drive.google.com/open`` URL for the decoded file id
    :raises DirectDownloadLinkException: when the CRYPT cookie is
        missing, or the file id cannot be extracted / base64-decoded
    """
    if CRYPT is None:
        raise DirectDownloadLinkException("ERROR: CRYPT cookie not provided")
    match = re.findall(r'https?://(.+)\.gdtot\.(.+)\/\S+\/\S+', url)[0]
    with requests.Session() as client:
        client.cookies.update({'crypt': CRYPT})
        res = client.get(url)
        res = client.get(
            f"https://{match[0]}.gdtot.{match[1]}/dld?id={url.split('/')[-1]}")
    matches = re.findall('gd=(.*?)&', res.text)
    try:
        decoded_id = b64decode(str(matches[0])).decode('utf-8')
    except (IndexError, ValueError, UnicodeDecodeError):
        # Narrowed from a bare `except`: missing gd= param (IndexError),
        # bad base64 (binascii.Error subclasses ValueError), or bad UTF-8.
        # (Also fixed the "broswer" typo in the message.)
        raise DirectDownloadLinkException(
            "ERROR: Try in your browser, mostly file not found or user limit exceeded!"
        )
    return f'https://drive.google.com/open?id={decoded_id}'
def yandex_disk(url: str) -> str:
    """ Yandex.Disk direct links generator
    Based on https://github.com/wldhx/yadisk-direct"""
    matches = re.findall(r'\bhttps?://.*yadi\.sk\S+', url)
    if not matches:
        # No yadi.sk link in the input: hand back the error text.
        return "`link Yandex.Disk ngk ada/hangus`\n"
    api = 'https://cloud-api.yandex.net/v1/disk/public/resources/download?public_key={}'
    try:
        return requests.get(api.format(matches[0])).json()['href']
    except KeyError:
        raise DirectDownloadLinkException("`Error: File ngk ada/kena limit`\n")
def racaty(url: str) -> str:
    """Racaty direct links generator.

    :param url: text that should contain a racaty.net link
    :return: direct download URL from the page's download form
    :raises DirectDownloadLinkException: when no racaty.net link is found
    """
    try:
        link = re.findall(r'\bhttps?://.*racaty\.net\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("`No Racaty links found`\n")
    reqs = requests.get(link)
    bss = BeautifulSoup(reqs.text, 'html.parser')
    op = bss.find('input', {'name': 'op'})['value']
    # Renamed from `id` so the builtin is not shadowed.
    file_id = bss.find('input', {'name': 'id'})['value']
    rep = requests.post(link, data={'op': op, 'id': file_id})
    bss2 = BeautifulSoup(rep.text, 'html.parser')
    return bss2.find('a', {'id': 'uniqueExpirylink'})['href']
def zippy_share(url: str) -> str:
    """ ZippyShare direct link generator
    Based on https://github.com/KenHV/Mirror-Bot
             https://github.com/jovanzers/WinTenCermin

    Re-implements the obfuscated download-button JavaScript on the
    page: the final URL is assembled from a path template plus an
    arithmetic result (n % .. + b % .. + z - 3) embedded in the script.

    :param url: text that should contain a zippyshare.com link
    :return: direct download URL
    :raises DirectDownloadLinkException: when no link is found or the
        page layout does not match the expected script structure
    """
    try:
        link = re.findall(r'\bhttps?://.*zippyshare\.com\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("ERROR: No Zippyshare links found")
    try:
        base_url = re.search('http.+.zippyshare.com/', link).group()
        response = requests.get(link).content
        pages = BeautifulSoup(response, "lxml")
        try:
            # Newer page layout: script lives in the "center" div.
            js_script = pages.find("div", {"class": "center"})
            if js_script is not None:
                js_script = js_script.find_all("script")[1]
            else:
                raise DirectDownloadLinkException(
                    "ERROR: No Zippyshare links found")
        except IndexError:
            # Fallback layout: script lives in the "right" div.
            js_script = pages.find("div", {"class": "right"})
            if js_script is not None:
                js_script = js_script.find_all("script")[0]
            else:
                raise DirectDownloadLinkException(
                    "ERROR: No Zippyshare links found")
        # Extract the href path template, then split it at the quotes so
        # the computed number can be spliced between the two halves.
        js_content = re.findall(r'\.href.=."/(.*?)";', str(js_script))
        js_content = str(js_content[0]).split('"')
        # Reproduce the page's "var n = A % B" / "var b = C % D" math.
        n = str(js_script).split('var n = ')[1].split(';')[0].split('%')
        n = int(n[0]) % int(n[1])
        b = str(js_script).split('var b = ')[1].split(';')[0].split('%')
        b = int(b[0]) % int(b[1])
        z = int(str(js_script).split('var z = ')[1].split(';')[0])
        math_ = str(n + b + z - 3)
        return base_url + str(js_content[0]) + math_ + str(js_content[2])
    except IndexError:
        raise DirectDownloadLinkException("ERROR: Can't find download button")
def onedrive(link: str) -> str:
    """Onedrive direct link generator.

    Based on https://github.com/UsergeTeam/Userge

    Encodes the share URL into the OneDrive share-id form and lets the
    API redirect to the real download location.

    :param link: 1drv.ms / OneDrive share link
    :return: the resolved direct download URL (the 302 redirect target)
    :raises DirectDownloadLinkException: when the share API does not
        answer with a redirect (private/unauthorized link)
    """
    link_without_query = urlparse(link)._replace(query=None).geturl()
    direct_link_encoded = str(
        standard_b64encode(bytes(link_without_query, "utf-8")), "utf-8")
    direct_link1 = f"https://api.onedrive.com/v1.0/shares/u!{direct_link_encoded}/root/content"
    resp = requests.head(direct_link1)
    if resp.status_code != 302:
        raise DirectDownloadLinkException(
            "ERROR: Unauthorized link, the link may be private")
    # Dead code removed: the unused file-name parse and the redundant
    # second HEAD request (`resp2`) served no purpose.
    return resp.next.url
def sourceforge(url: str) -> str:
    """ SourceForge direct links generator """
    found = re.findall(r'\bhttps?://.*sourceforge\.net\S+', url)
    if not found:
        raise DirectDownloadLinkException("`No SourceForge links found`\n")
    link = found[0]
    file_path = re.findall(r'files(.*)/download', link)[0]
    project = re.findall(r'projects?/(.*?)/files', link)[0]
    mirrors = (f'https://sourceforge.net/settings/mirror_choices?'
               f'projectname={project}&filename={file_path}')
    page = BeautifulSoup(requests.get(mirrors).content, 'html.parser')
    info = page.find('ul', {'id': 'mirrorList'}).findAll('li')
    # Skip the header entry; the first real mirror wins.
    for mirror in info[1:]:
        return f'https://{mirror["id"]}.dl.sourceforge.net/project/{project}/{file_path}'
def layarkacaxxi(url: str) -> str:
    """ Fembed direct links generator
    based on https://github.com/breakdowns/slam-mirrorbot """
    found = re.findall(r'\bhttps?://.*layarkacaxxi\.icu\S+', url)
    if not found:
        raise DirectDownloadLinkException("No Fembed links found\n")
    qualities = lk21.Bypass().bypass_fembed(found[0])
    # The bypass returns a mapping; keep only the value of its final
    # key (the last quality listed).
    values = [qualities[key] for key in qualities]
    return values[-1]
def yandex_disk(url: str) -> str:
    """Yandex.Disk direct links generator
    Based on https://github.com/wldhx/yadisk-direct"""
    found = re.findall(r"\bhttps?://.*yadi\.sk\S+", url)
    if not found:
        # Nothing matched: report via the error string, as callers expect.
        return "`No Yandex.Disk links found`\n"
    api = "https://cloud-api.yandex.net/v1/disk/public/resources/download?public_key={}"
    try:
        return requests.get(api.format(found[0])).json()["href"]
    except KeyError:
        raise DirectDownloadLinkException(
            "`Error: File not found / Download limit reached`\n")
def osdn(url: str) -> str:
    """OSDN direct links generator"""
    osdn_link = "https://osdn.net"
    found = re.findall(r"\bhttps?://.*osdn\.net\S+", url)
    if not found:
        raise DirectDownloadLinkException("`No OSDN links found`\n")
    page = BeautifulSoup(
        requests.get(found[0], allow_redirects=True).content, "lxml")
    info = page.find("a", {"class": "mirror_link"})
    link = urllib.parse.unquote(osdn_link + info["href"])
    mirrors = page.find("form", {"id": "mirror-select-form"}).findAll("tr")
    # Substitute each mirror id into the m= parameter; keep the first.
    urls = [
        re.sub(r"m=(.*)&f", f"m={row.find('input')['value']}&f", link)
        for row in mirrors[1:]
    ]
    return urls[0]
def osdn(url: str) -> str:
    """ OSDN direct links generator """
    osdn_link = 'https://osdn.net'
    try:
        link = re.findall(r'\bhttps?://.*osdn\.net\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("`No OSDN links found`\n")
    page = BeautifulSoup(
        requests.get(link, allow_redirects=True).content, 'lxml')
    info = page.find('a', {'class': 'mirror_link'})
    link = urllib.parse.unquote(osdn_link + info['href'])
    rows = page.find('form', {'id': 'mirror-select-form'}).findAll('tr')[1:]
    # Only the first mirror row is ever used for the final URL.
    first_mirror = rows[0].find('input')['value']
    return re.sub(r'm=(.*)&f', f'm={first_mirror}&f', link)
def racaty(url: str) -> str:
    """ Racaty direct links generator
    based on https://github.com/SlamDevs/slam-mirrorbot """
    if not re.findall(r'\bhttps?://.*racaty\.net\S+', url):
        raise DirectDownloadLinkException("No Racaty links found\n")
    scraper = cfscrape.create_scraper()
    # First request: read the hidden form fields off the landing page.
    landing = BeautifulSoup(scraper.get(url).text, "lxml")
    form = {
        "op": landing.find("input", {"name": "op"})["value"],
        "id": landing.find("input", {"name": "id"})["value"],
    }
    # Second request: submit the form to reveal the expiring link.
    result = BeautifulSoup(scraper.post(url, data=form).text, "lxml")
    href = result.find("a", {"id": "uniqueExpirylink"})["href"]
    return href.replace(" ", "%20")
def uptobox(url: str) -> str:
    """Uptobox direct link generator (premium-aware).

    Uses ``UPTOBOX_TOKEN`` with the Uptobox API; non-premium accounts
    go through the waiting-token flow via ``_countdown``.

    :param url: text that should contain an uptobox.com link
    :return: direct download URL (the raw URL when no token is set or
        the input is already a /dl direct link)
    :raises DirectDownloadLinkException: when no uptobox link is found
    """
    try:
        link = re.findall(r'\bhttps?://.*uptobox\.com\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("`No Uptobox links found`\n")
    if UPTOBOX_TOKEN is None:
        logging.error('UPTOBOX_TOKEN not provided!')
        # Bug fix: falling through to `return dl_url` previously raised
        # UnboundLocalError because dl_url was never assigned here.
        return url
    check = 'https://uptobox.com/api/user/me?token=%s' % (UPTOBOX_TOKEN)
    info = requests.get(check).json()
    premium = info["data"]["premium"]
    try:
        # Bug fix: the pattern was `\bhttp?://` ("htt" + optional "p"),
        # which can never match an https:// direct link; use `https?`.
        re.findall(r'\bhttps?://.*uptobox\.com/dl\S+', url)[0]
        logging.info('Uptobox direct link')
        return url
    except IndexError:
        pass
    file_id = re.findall(r'\bhttps?://.*uptobox\.com/(\w+)', url)[0]
    file_link = 'https://uptobox.com/api/link?token=%s&file_code=%s' % (
        UPTOBOX_TOKEN, file_id)
    result = requests.get(file_link).json()
    if premium == 1:
        return result['data']['dlLink']
    # Free account: wait out the server-imposed delay, then retry with
    # the waiting token to unlock the link.
    waiting_time = result["data"]["waiting"] + 1
    waiting_token = result["data"]["waitingToken"]
    _countdown(waiting_time)
    file_link = 'https://uptobox.com/api/link?token=%s&file_code=%s&waitingToken=%s' % (
        UPTOBOX_TOKEN, file_id, waiting_token)
    result = requests.get(file_link).json()
    return result['data']['dlLink']
def uptobox(url: str) -> str:
    """Uptobox direct link generator.

    :param url: text that should contain an uptobox.com link
    :return: direct download URL (the raw URL when no token is set or
        the input is already a /dl direct link)
    :raises DirectDownloadLinkException: when no uptobox link is found
    """
    try:
        link = re.findall(r'\bhttps?://.*uptobox\.com\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("`No Uptobox links found`\n")
    if UPTOBOX_TOKEN is None:
        logging.error('UPTOBOX_TOKEN not provided!')
        dl_url = url
    else:
        try:
            # Bug fix: the pattern was `\bhttp?://` ("htt" + optional
            # "p"), which can never match an https:// direct link.
            link = re.findall(r'\bhttps?://.*uptobox\.com/dl\S+', url)[0]
            logging.info('Uptobox direct link')
            dl_url = url
        except IndexError:
            # Narrowed from a bare `except`: only the findall lookup
            # can fail here.
            file_id = re.findall(r'\bhttps?://.*uptobox\.com/(\w+)', url)[0]
            file_link = 'https://uptobox.com/api/link?token=%s&file_code=%s' % (
                UPTOBOX_TOKEN, file_id)
            req = requests.get(file_link)
            result = req.json()
            dl_url = result['data']['dlLink']
    return dl_url
def uptobox(url: str) -> str:
    """ Uptobox direct links generator
    based on https://github.com/jovanzers/WinTenCermin

    :param url: text that should contain an uptobox.com link
    :return: direct download URL (the matched link when no token is set
        or the input is already a /dl direct link)
    :raises DirectDownloadLinkException: when no uptobox link is found
    """
    try:
        link = re.findall(r'\bhttps?://.*uptobox\.com\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("No Uptobox links found\n")
    if UPTOBOX_TOKEN is None:
        LOGGER.error('UPTOBOX_TOKEN not provided!')
        dl_url = link
    else:
        try:
            # Bug fix: the pattern was `\bhttp?://` ("htt" + optional
            # "p"), which can never match an https:// direct link.
            link = re.findall(r'\bhttps?://.*uptobox\.com/dl\S+', url)[0]
            dl_url = link
        except IndexError:
            # Narrowed from a bare `except`: only the findall lookup
            # can fail here.
            file_id = re.findall(r'\bhttps?://.*uptobox\.com/(\w+)', url)[0]
            file_link = 'https://uptobox.com/api/link?token=%s&file_code=%s' % (UPTOBOX_TOKEN, file_id)
            req = requests.get(file_link)
            result = req.json()
            dl_url = result['data']['dlLink']
    return dl_url
def zippy_share(url: str) -> str:
    """ ZippyShare direct links generator
    Based on https://github.com/LameLemon/ziggy

    Finds the dlbutton script on the page, evaluates its embedded
    arithmetic, and reassembles the direct URL.

    :param url: text that should contain a zippyshare.com link
    :return: direct download URL
    :raises DirectDownloadLinkException: when no zippyshare link is found
    """
    dl_url = ''
    try:
        link = re.findall(r'\bhttps?://.*zippyshare\.com\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("`No ZippyShare links found`\n")
    session = requests.Session()
    base_url = re.search('http.+.com', link).group()
    response = session.get(link)
    page_soup = BeautifulSoup(response.content, "lxml")
    scripts = page_soup.find_all("script", {"type": "text/javascript"})
    for script in scripts:
        if "getElementById('dlbutton')" in script.text:
            url_raw = re.search(r'= (?P<url>\".+\" \+ (?P<math>\(.+\)) .+);',
                                script.text).group('url')
            math = re.search(r'= (?P<url>\".+\" \+ (?P<math>\(.+\)) .+);',
                             script.text).group('math')
            dl_url = url_raw.replace(math, '"' + str(eval(math)) + '"')
            break
    # SECURITY: eval() here executes expressions scraped from a remote
    # page; a hostile or changed page could run arbitrary code. Consider
    # replacing with ast.literal_eval or a dedicated arithmetic parser.
    dl_url = base_url + eval(dl_url)
    # Dead code removed: the unquoted file name was computed but unused.
    return dl_url
def fichier(link: str) -> str:
    """1Fichier direct links generator.

    Based on https://github.com/Maujar
             https://github.com/SlamDevs/slam-mirrorbot

    A password-protected link is given as ``<url>::<password>``.

    :param link: 1fichier.com link, optionally suffixed with ``::password``
    :return: direct download URL
    :raises DirectDownloadLinkException: on a malformed link, wrong
        password, download limit, or unreachable server
    """
    # Bug fix: the old pattern `[http:\/\/|https:\/\/]+` was a character
    # *class* (a set of letters), not an alternation of URL schemes.
    regex = r"^(https?:\/\/)?.*1fichier\.com\/\?.+"
    gan = re.match(regex, link)
    if not gan:
        raise DirectDownloadLinkException("ERROR: The link you entered is wrong!")
    if "::" in link:
        pswd = link.split("::")[-1]
        url = link.split("::")[-2]
    else:
        pswd = None
        url = link
    try:
        if pswd is None:
            req = requests.post(url)
        else:
            pw = {"pass": pswd}
            req = requests.post(url, data=pw)
    except requests.exceptions.RequestException:
        # Narrowed from a bare `except` to network-level failures only.
        raise DirectDownloadLinkException("ERROR: Unable to reach 1fichier server!")
    if req.status_code == 404:
        raise DirectDownloadLinkException("ERROR: File not found/The link you entered is wrong!")
    soup = BeautifulSoup(req.content, 'lxml')
    if soup.find("a", {"class": "ok btn-general btn-orange"}) is not None:
        dl_url = soup.find("a", {"class": "ok btn-general btn-orange"})["href"]
        if dl_url is None:
            raise DirectDownloadLinkException("ERROR: Unable to generate Direct Link 1fichier!")
        else:
            return dl_url
    elif len(soup.find_all("div", {"class": "ct_warn"})) == 2:
        str_2 = soup.find_all("div", {"class": "ct_warn"})[-1]
        if "you must wait" in str(str_2).lower():
            numbers = [int(word) for word in str(str_2).split() if word.isdigit()]
            if not numbers:
                raise DirectDownloadLinkException("ERROR: 1fichier is on a limit. Please wait a few minutes/hour.")
            else:
                # Repaired: this message was broken across a line in the
                # original source; reassembled onto one line.
                raise DirectDownloadLinkException(f"ERROR: 1fichier is on a limit. Please wait {numbers[0]} minute.")
        elif "protect access" in str(str_2).lower():
            raise DirectDownloadLinkException("ERROR: This link requires a password!\n\n<b>This link requires a password!</b>\n- Insert sign <b>::</b> after the link and write the password after the sign.\n\n<b>Example:</b>\n<code>/mirror https://1fichier.com/?smmtd8twfpm66awbqz04::love you</code>\n\n* No spaces between the signs <b>::</b>\n* For the password, you can use a space!")
        else:
            raise DirectDownloadLinkException("ERROR: Error trying to generate Direct Link from 1fichier!")
    elif len(soup.find_all("div", {"class": "ct_warn"})) == 3:
        str_1 = soup.find_all("div", {"class": "ct_warn"})[-2]
        str_3 = soup.find_all("div", {"class": "ct_warn"})[-1]
        if "you must wait" in str(str_1).lower():
            numbers = [int(word) for word in str(str_1).split() if word.isdigit()]
            if not numbers:
                raise DirectDownloadLinkException("ERROR: 1fichier is on a limit. Please wait a few minutes/hour.")
            else:
                raise DirectDownloadLinkException(f"ERROR: 1fichier is on a limit. Please wait {numbers[0]} minute.")
        elif "bad password" in str(str_3).lower():
            raise DirectDownloadLinkException("ERROR: The password you entered is wrong!")
        else:
            raise DirectDownloadLinkException("ERROR: Error trying to generate Direct Link from 1fichier!")
    else:
        raise DirectDownloadLinkException("ERROR: Error trying to generate Direct Link from 1fichier!")
def direct_link_generator(link: str):
    """ direct links generator """
    if not link:
        raise DirectDownloadLinkException("No links found!")
    if 'youtube.com' in link or 'youtu.be' in link:
        raise DirectDownloadLinkException(f"Use /{BotCommands.WatchCommand} to mirror Youtube link\nUse /{BotCommands.TarWatchCommand} to make tar of Youtube playlist")
    # Ordered domain -> resolver table; the first substring match wins,
    # exactly mirroring the original elif chain.
    dispatch = (
        ('zippyshare.com', zippy_share),
        ('yadi.sk', yandex_disk),
        ('mediafire.com', mediafire),
        ('uptobox.com', uptobox),
        ('osdn.net', osdn),
        ('github.com', github),
        ('hxfile.co', hxfile),
        ('anonfiles.com', anonfiles),
        ('letsupload.io', letsupload),
        ('fembed.net', fembed),
        ('fembed.com', fembed),
        ('femax20.com', fembed),
        ('fcdn.stream', fembed),
        ('feurl.com', fembed),
        ('naniplay.nanime.in', fembed),
        ('naniplay.nanime.biz', fembed),
        ('naniplay.com', fembed),
        ('layarkacaxxi.icu', fembed),
        ('sbembed.com', sbembed),
        ('streamsb.net', sbembed),
        ('sbplay.org', sbembed),
        ('1drv.ms', onedrive),
        ('pixeldrain.com', pixeldrain),
        ('antfiles.com', antfiles),
        ('streamtape.com', streamtape),
        ('bayfiles.com', anonfiles),
        ('racaty.net', racaty),
        ('1fichier.com', fichier),
        ('solidfiles.com', solidfiles),
    )
    for domain, resolver in dispatch:
        if domain in link:
            return resolver(link)
    raise DirectDownloadLinkException(f'No Direct link function found for {link}')