def direct_link_generator(text_url: str):
    """Route *text_url* to the matching site-specific direct-link generator.

    Raises DirectDownloadLinkException when the url is empty or no handler
    matches.
    """
    if not text_url:
        raise DirectDownloadLinkException("`No links found!`")
    # Ordered (marker, handler) pairs — first substring match wins.
    handlers = (
        ('zippyshare.com', zippy_share),
        ('yadi.sk', yandex_disk),
        ('cloud.mail.ru', cm_ru),
        ('mediafire.com', mediafire),
        ('osdn.net', osdn),
        ('github.com', github),
        ('racaty.net', racaty),
        ('letsupload.io', letsupload),
        ('hxfile.co', hxfile),
        ('layarkacaxxi.icu', fembed720),
        ('femax20.com', fembed480),
        ('anonfiles.com', anon),
    )
    for marker, handler in handlers:
        if marker in text_url:
            return handler(text_url)
    raise DirectDownloadLinkException(f'No Direct link function found for {text_url}')
def github(url: str) -> str:
    """GitHub direct links generator.

    Extracts a release-asset url from *url* and resolves GitHub's redirect
    to the actual download location.

    Raises DirectDownloadLinkException when no release link is present or
    no redirect Location header comes back.
    """
    try:
        text_url = re.findall(r'\bhttps?://.*github\.com.*releases\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("`No GitHub Releases links found`\n")
    # stream=True keeps the asset body undownloaded — only the redirect
    # headers are needed; the `with` block closes the connection (the
    # original leaked the streamed response).
    with requests.get(text_url, stream=True, allow_redirects=False) as download:
        try:
            return download.headers["location"]
        except KeyError:
            raise DirectDownloadLinkException("`Error: Can't extract the link`\n")
def cm_ru(url: str) -> str:
    """cloud.mail.ru direct links generator.

    Shells out to the bundled cmrudl helper
    (https://github.com/JrMasterModelBuilder/cmrudl.py) and parses the
    JSON it prints on its last output line.

    Raises DirectDownloadLinkException when no link matches or the helper
    output cannot be parsed.
    """
    import subprocess  # local import: only this generator shells out
    try:
        text_url = re.findall(r'\bhttps?://.*cloud\.mail\.ru\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("`No cloud.mail.ru links found`\n")
    # Argument list with shell=False so a crafted url cannot inject shell
    # commands (the original interpolated the url into a shell string).
    result = subprocess.run(
        ['vendor/cmrudl.py/cmrudl', '-s', text_url],
        capture_output=True, text=True).stdout
    lines = result.splitlines()
    if not lines:
        # Helper produced nothing — the original crashed with IndexError here.
        raise DirectDownloadLinkException("`Error: Can't extract the link`\n")
    try:
        data = json.loads(lines[-1])
    except json.JSONDecodeError:
        raise DirectDownloadLinkException("`Error: Can't extract the link`\n")
    return data['download']
def nanibiz(url: str) -> str:
    """naniplay.biz direct link generator (via the lk21 bypasser)."""
    matches = re.findall(r'\bhttps?://.*naniplay\.biz\S+', url)
    if not matches:
        raise DirectDownloadLinkException("`No Fembed links found`\n")
    return lk21.Bypass().bypass_url(matches[0])
def fembed720(url: str) -> str:
    """layarkacaxxi.icu (Fembed mirror) generator — 720p/mp4 stream."""
    found = re.findall(r'\bhttps?://.*layarkacaxxi\.icu\S+', url)
    if not found:
        raise DirectDownloadLinkException("`No Fembed links found`\n")
    streams = lk21.Bypass().bypass_url(found[0])
    return streams["720p/mp4"]
def fembed480(url: str) -> str:
    """femax20.com (Fembed mirror) generator — 480p/mp4 stream."""
    found = re.findall(r'\bhttps?://.*femax20\.com\S+', url)
    if not found:
        raise DirectDownloadLinkException("`No Fembed links found`\n")
    streams = lk21.Bypass().bypass_url(found[0])
    return streams["480p/mp4"]
def hxfile(url: str) -> str:
    """hxfile.co direct link generator (via the lk21 bypasser)."""
    candidates = re.findall(r'\bhttps?://.*hxfile\.co\S+', url)
    if not candidates:
        raise DirectDownloadLinkException("`No HXFile links found`\n")
    return lk21.Bypass().bypass_url(candidates[0])
def streamtape(url: str) -> str:
    """streamtape.com direct link generator (via the lk21 bypasser)."""
    hits = re.findall(r'\bhttps?://.*streamtape\.com\S+', url)
    if not hits:
        raise DirectDownloadLinkException("`No Streamtape links found`\n")
    return lk21.Bypass().bypass_streamtape(hits[0])
def anon(url: str) -> str:
    """anonfiles.com direct link generator (via the lk21 bypasser)."""
    hits = re.findall(r'\bhttps?://.*anonfiles\.com\S+', url)
    if not hits:
        raise DirectDownloadLinkException("`No anonfiles links found`\n")
    return lk21.Bypass().bypass_url(hits[0])
def letsupload(url: str) -> str:
    """letsupload.io direct link generator (via the lk21 bypasser)."""
    hits = re.findall(r'\bhttps?://.*letsupload\.io\S+', url)
    if not hits:
        raise DirectDownloadLinkException("`No Letsupload links found`\n")
    return lk21.Bypass().bypass_url(hits[0])
def direct_link_generator(text_url: str):
    """Route *text_url* to a site-specific generator (reduced site list).

    NOTE(review): this is a second definition of direct_link_generator;
    being later in the module it shadows the earlier, fuller version —
    confirm which one is intended and delete the other.
    """
    if not text_url:
        raise DirectDownloadLinkException("`No links found!`")
    for marker, handler in (
            ('zippyshare.com', zippy_share),
            ('yadi.sk', yandex_disk),
            ('cloud.mail.ru', cm_ru),
            ('mediafire.com', mediafire),
            ('osdn.net', osdn),
            ('github.com', github),
    ):
        if marker in text_url:
            return handler(text_url)
    raise DirectDownloadLinkException(
        f'No Direct link function found for {text_url}')
def mediafire(url: str) -> str:
    """MediaFire direct links generator.

    Scrapes the download-button href from the file page.

    Raises DirectDownloadLinkException when no MediaFire link matches or
    the download button is absent from the page.
    """
    try:
        text_url = re.findall(r'\bhttps?://.*mediafire\.com\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("`No MediaFire links found`\n")
    page = BeautifulSoup(requests.get(text_url).content, 'lxml')
    info = page.find('a', {'aria-label': 'Download file'})
    # The button is missing for removed/private files; fail with the
    # module's exception instead of AttributeError on None.
    if info is None:
        raise DirectDownloadLinkException("`Error: Can't extract the link`\n")
    return info.get('href')
def fembed(url: str) -> str:
    """fembed.com direct link generator.

    The bypasser returns a mapping of stream variants to urls; the last
    entry (in insertion order) is returned, matching the original's
    collect-then-index behavior.
    """
    links = re.findall(r'\bhttps?://.*fembed\.com\S+', url)
    if not links:
        raise DirectDownloadLinkException("`No Fembed links found`\n")
    variants = lk21.Bypass().bypass_fembed(links[0])
    return list(variants.values())[-1]
def racaty(url: str) -> str:
    """racaty.net direct link generator.

    Submits the page's op/id form and scrapes the expiring link.

    NOTE(review): racaty is defined again later in this module with a
    cfscrape-based implementation that shadows this one — confirm which
    is intended.
    """
    matched = re.findall(r'\bhttps?://.*racaty\.net\S+', url)
    if not matched:
        raise DirectDownloadLinkException("`No Racaty links found`\n")
    text_url = matched[0]
    page = BeautifulSoup(requests.get(text_url).text, 'html.parser')
    form = {
        'op': page.find('input', {'name': 'op'})['value'],
        'id': page.find('input', {'name': 'id'})['value'],
    }
    confirm = BeautifulSoup(requests.post(text_url, data=form).text, 'html.parser')
    return confirm.find('a', {'id': 'uniqueExpirylink'})['href']
def yandex_disk(url: str) -> str:
    """Yandex.Disk direct links generator.

    Based on https://github.com/wldhx/yadisk-direct

    Raises DirectDownloadLinkException when no yadi.sk link matches or
    the API response lacks an 'href' (file gone / limit reached).
    """
    try:
        text_url = re.findall(r'\bhttps?://.*yadi\.sk\S+', url)[0]
    except IndexError:
        # Raise like every other generator — the original *returned* the
        # error text, which callers would mistake for a download url.
        raise DirectDownloadLinkException("`No Yandex.Disk links found`\n")
    api = 'https://cloud-api.yandex.net/v1/disk/public/resources/download?public_key={}'
    try:
        return requests.get(api.format(text_url)).json()['href']
    except KeyError:
        raise DirectDownloadLinkException("`Error: File not found / Download limit reached`\n")
def racaty(url: str) -> str:
    """racaty.net direct link generator (Cloudflare-aware via cfscrape).

    Submits the page's op/id form and scrapes the expiring link.

    Raises DirectDownloadLinkException when no racaty.net link matches.
    """
    try:
        text_url = re.findall(r'\bhttps?://.*racaty\.net\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("`No Racaty links found`\n")
    scraper = cfscrape.create_scraper()
    # Use the extracted link for both requests — the original extracted
    # text_url but then fetched the raw input url, ignoring the extraction.
    soup = BeautifulSoup(scraper.get(text_url).text, "lxml")
    op = soup.find("input", {"name": "op"})["value"]
    ids = soup.find("input", {"name": "id"})["value"]
    rpost = scraper.post(text_url, data={"op": op, "id": ids})
    rsoup = BeautifulSoup(rpost.text, "lxml")
    return rsoup.find("a", {"id": "uniqueExpirylink"})["href"].replace(" ", "%20")
def osdn(url: str) -> str:
    """OSDN direct links generator.

    Builds a per-mirror link list from the mirror-select form and returns
    the first mirror's url.
    """
    osdn_base = 'https://osdn.net'
    matches = re.findall(r'\bhttps?://.*osdn\.net\S+', url)
    if not matches:
        raise DirectDownloadLinkException("`No OSDN links found`\n")
    page = BeautifulSoup(
        requests.get(matches[0], allow_redirects=True).content, 'lxml')
    anchor = page.find('a', {'class': 'mirror_link'})
    link = urllib.parse.unquote(osdn_base + anchor['href'])
    rows = page.find('form', {'id': 'mirror-select-form'}).findAll('tr')
    # Skip the header row; substitute each mirror id into the link's m= param.
    mirror_links = [
        re.sub(r'm=(.*)&f', f"m={row.find('input')['value']}&f", link)
        for row in rows[1:]
    ]
    return mirror_links[0]
def onedrive(url: str) -> str:
    """OneDrive direct link generator.

    Based on https://github.com/UsergeTeam/Userge

    Encodes the share url for the OneDrive shares API and follows its
    302 redirect to the direct download location.

    Raises DirectDownloadLinkException when no 1drv.ms link matches or
    the API does not redirect (private/unauthorized link).
    """
    try:
        text_url = re.findall(r'\bhttps?://.*1drv\.ms\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("`No OneDrive links found`\n")
    # The shares API keys on the url minus its query string, base64-encoded.
    link_without_query = urlparse(text_url)._replace(query=None).geturl()
    encoded = str(standard_b64encode(bytes(link_without_query, "utf-8")), "utf-8")
    share_api = f"https://api.onedrive.com/v1.0/shares/u!{encoded}/root/content"
    resp = requests.head(share_api)
    if resp.status_code != 302:
        # Raise like the other generators — the original *returned* the
        # error text, which callers would mistake for a download url.
        raise DirectDownloadLinkException(
            "`Error: Unauthorized link, the link may be private`")
    # The redirect target is the direct download url. (The original also
    # issued a second, unused HEAD request and computed an unused filename.)
    return resp.next.url
def zippy_share(url: str) -> str:
    """ZippyShare direct links generator.

    Based on https://github.com/LameLemon/ziggy

    Scrapes the page's dlbutton script, evaluates the arithmetic that the
    page's JavaScript would perform, and assembles the direct url.

    Raises DirectDownloadLinkException when no zippyshare.com link is found.
    """
    dl_url = ''
    try:
        text_url = re.findall(r'\bhttps?://.*zippyshare\.com\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("`No ZippyShare links found`\n")
    session = requests.Session()
    # Scheme + host prefix, e.g. "https://www123.zippyshare.com".
    base_url = re.search('http.+.com', text_url).group()
    response = session.get(text_url)
    page_soup = BeautifulSoup(response.content, "lxml")
    scripts = page_soup.find_all("script", {"type": "text/javascript"})
    for script in scripts:
        if "getElementById('dlbutton')" in script.text:
            # The script assigns "<prefix>" + (arithmetic) + "<suffix>";
            # capture the full expression and the arithmetic sub-expression.
            url_raw = re.search(r'= (?P<url>\".+\" \+ (?P<math>\(.+\)) .+);',
                                script.text).group('url')
            math = re.search(r'= (?P<url>\".+\" \+ (?P<math>\(.+\)) .+);',
                             script.text).group('math')
            # Substitute the evaluated arithmetic back into the expression.
            dl_url = url_raw.replace(math, '"' + str(eval(math)) + '"')
            break
    # SECURITY NOTE(review): eval() runs expressions scraped from a remote
    # page — a hostile or changed page could execute arbitrary code here.
    # Also: if no matching script was found, dl_url is still '' and this
    # eval raises SyntaxError — TODO confirm intended failure mode.
    dl_url = base_url + eval(dl_url)
    name = urllib.parse.unquote(dl_url.split('/')[-1])  # NOTE(review): unused
    return dl_url