def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    # Meta-search against tse-api.gianlu.xyz. One worker thread per search
    # hit fetches the torrent via the getTorrent endpoint; workers
    # (get_results) presumably append into the shared module-level
    # `global_var` list — verify against the helper. Blocks (polling) until
    # all workers finish or `stop_all` is raised, then returns `global_var`.
    global global_var, stop_all
    import xbmc
    global_var = []
    if tv_movie == 'movie':
        # Movie query: "<title>%20<year>" (URL-encoded spaces).
        search_url = [
            clean_name(original_title, 1).replace(' ', '%20') + '%20' +
            show_original_year
        ]
        s_type = 'movies'
    else:
        if Addon.getSetting('debrid_select') == '0':
            # No debrid account: also try season-pack and "season N"
            # phrasings to maximise hits.
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' +
                season_n + 'e' + episode_n,
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' +
                season_n,
                clean_name(original_title, 1).replace(' ', '%20') +
                '%20season%20' + season
            ]
        else:
            # Debrid enabled: a single sNNeNN query is enough.
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' +
                season_n + 'e' + episode_n
            ]
        s_type = 'tv'
    all_links = []
    thread = []
    for itt in search_url:
        x = requests.get('http://tse-api.gianlu.xyz/search?q=' + (itt),
                         headers=base_header, timeout=10).json()
        for items in x['result']:
            if stop_all == 1:
                break
            # NOTE(review): str.encode('base64') is Python 2 only — on
            # Python 3 this raises; confirm the addon's target interpreter.
            ur = ('http://tse-api.gianlu.xyz/getTorrent?e=%s&url=%s' %
                  (items['engine'], items['url'].encode('base64'))).replace(
                      ' ', '').replace('\n', '').replace('\r', '').replace(
                          '\t', '')
            # `Thread` is a project wrapper taking (target, *args).
            thread.append(Thread(get_results, ur, all_links))
            thread[len(thread) - 1].setName('fill_table')
            thread[len(thread) - 1].start()
    # Poll every 100 ms until no worker is alive (or a global stop).
    still_alive = True
    while (still_alive):
        still_alive = False
        for trd in thread:
            if trd.isAlive():
                still_alive = True
            if stop_all == 1:
                break
        xbmc.sleep(100)
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """Query the solidtorrents.net JSON API (4 pages, seeders-first) and
    return (title, magnet, size_gb, resolution) tuples under the configured
    size cap; the list is also published in the module-level `global_var`."""
    global global_var, stop_all
    collected = []
    base_name = clean_name(original_title, 1)
    if tv_movie == 'movie':
        query = ('%s %s' % (base_name, show_original_year)).lower()
    else:
        query = ('%s s%se%s' % (base_name, season_n, episode_n)).lower()
    for page_no in range(0, 4):
        page_params = (
            ('sort', 'seeders'),
            ('q', query),
            ('category', 'all'),
            ('skip', str(page_no * 40)),
            ('fuv', 'yes'),
        )
        reply = get_html('https://solidtorrents.net/api/v1/search',
                         headers=base_header, params=page_params,
                         timeout=10).json()
        for hit in reply['results']:
            if stop_all == 1:
                break
            name = hit['title']
            gigabytes = float(hit['size']) / (1024 * 1024 * 1024)
            # Tag with the first resolution marker found; 'HD' otherwise.
            quality = 'HD'
            for marker, tag in (('4k', '2160'), ('2160', '2160'),
                                ('1080', '1080'), ('720', '720'),
                                ('480', '480'), ('360', '360')):
                if marker in name:
                    quality = tag
                    break
            if gigabytes < int(Addon.getSetting("size_limit")):
                collected.append((name, hit['magnet'], str(gigabytes), quality))
    global_var = collected
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """Movie-only scraper for yumovfreemov.com: find the channel matching the
    cleaned title and year, then emit every listed stream as a direct link
    (with a best-effort HEAD-probe for its size in GB)."""
    global global_var, stop_all
    found = []
    if tv_movie == 'tv':
        return []
    api_headers = {
        'User-Agent':
        'Dalvik/2.1.0 (Linux; U; Android 9.0.1; samsung Build/AXXXXXXX)',
        'Connection': 'Keep-Alive'
    }
    wanted = clean_name(original_title, 1).lower()
    listing_url = (
        'https://yumovfreemov.com/salam/hangat.php?cai=%s&tadondo=com.yumovies.mushdomovidev'
        % clean_name(original_title, 1).replace(' ', '%20'))
    listing = get_html(listing_url, headers=api_headers).json()
    logging.warning(listing)
    for channel in listing['STREAME']:
        channel_title = channel['channel_title']
        if wanted not in channel_title.lower():
            continue
        if show_original_year not in channel_title:
            continue
        detail_url = (
            'https://yumovfreemov.com/salam/hangat.php?channel_id=%s&tadondo=com.yumovies.mushdomovidev'
            % channel['id'])
        detail = get_html(detail_url, headers=api_headers).json()
        for stream in detail['STREAME']:
            gigabytes = 0
            try:
                # Probe the stream for Content-Length; anything over 1 MB
                # is converted to GB (rounded to 2 decimals).
                probe = get_html(stream['channel_url'], headers=base_header,
                                 stream=True, verify=False, timeout=3)
                length = probe.headers.get('Content-Length')
                if length is not None and int(length) > (1024 * 1024):
                    gigabytes = round(float(length) / (1024 * 1024 * 1024), 2)
            except:
                gigabytes = 0
            found.append((clean_name(original_title, 1),
                          'Direct_link$$$' + stream['channel_url'],
                          str(gigabytes), '720'))
    global_var = found
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """TV-only scraper for watchserieshd: collect every data-video embed on
    the episode page and resolve each through resolveurl into a direct
    link tuple (title, Direct_link$$$<url>, '0', 'HD')."""
    global global_var, stop_all
    import xbmc, sys
    # Make the bundled resolver add-on modules importable.
    for addon_lib in ('special://home/addons/script.module.resolveurl/lib',
                      'special://home/addons/script.module.six/lib',
                      'special://home/addons/script.module.kodi-six/libs'):
        sys.path.append(xbmc.translatePath(addon_lib))
    import resolveurl
    resolved_links = []
    if tv_movie == 'movie':
        return []
    page = get_html(
        'http://www1.watchserieshd.tv/series/%s-season-%s-episode-%s' %
        (clean_name(original_title, 1).replace(' ', '-'), season, episode),
        headers=base_header).content()
    embeds = re.compile('data-video="(.+?)"', re.DOTALL).findall(page)
    for embed in embeds:
        # Hosts resolveurl handles poorly are skipped outright.
        if 'vev.io' in embed.lower() or 'vidup.me' in embed.lower():
            continue
        direct = False
        try:
            if '#caption=' in embed:
                embed = embed.split('#caption=')[0]
            direct = resolveurl.resolve(embed)
        except:
            pass
        label = clean_name(original_title, 1) + '.S%sE%s' % (season_n,
                                                             episode_n)
        if direct:
            resolved_links.append(
                (label, 'Direct_link$$$' + direct, str(0), 'HD'))
    global_var = resolved_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """List ready video files from the user's Furk.net account that match the
    requested movie/episode; gated on the `provider.furk` setting."""
    global global_var, stop_all
    if Addon.getSetting("provider.furk") == 'false':
        return []
    matches = []
    query = _search_name(tv_movie, str(show_original_year), season, episode,
                         clean_name(original_title, 1))
    files = Furk.search(query)
    # Kept only for the disabled header-appending code below.
    headers = {
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36',
        'Accept':
        'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Sec-Fetch-Site': 'none',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-User': '******',
        'Sec-Fetch-Dest': 'document',
        'Accept-Language': 'en-US,en;q=0.9',
    }
    for entry in files:
        if 'is_ready' not in entry:
            continue
        if entry['is_ready'] != '1':
            continue
        if stop_all == 1:
            break
        name = entry['name']
        if entry['type'] != 'video':
            continue
        # First matching resolution marker wins; 'HD' otherwise.
        quality = 'HD'
        for marker, tag in (('4k', '2160'), ('2160', '2160'),
                            ('1080', '1080'), ('720', '720'),
                            ('480', '480'), ('360', '360')):
            if marker in name:
                quality = tag
                break
        stream_url = entry['url_pls']
        #head=urllib.urlencode(headers)
        #lk=lk+"|"+head
        gigabytes = float(int(entry['size'])) / 1073741824
        if gigabytes < int(Addon.getSetting("size_limit")):
            matches.append((name, stream_url, str(gigabytes), quality))
    global_var = matches
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """Search the api.magsearch.net magnet index.

    Builds "<title> sNNeNN" for TV or "<title> <year>" for movies, fetches
    up to 200 video hits sorted by popularity, and returns
    (title, magnet_url, size_gb, resolution) tuples under the size cap.
    The result is also published in the module-level `global_var`.
    """
    global global_var, stop_all
    all_links = []
    if tv_movie == 'tv':
        search_url = ('{0}%20s{1}e{2}'.format(
            clean_name(original_title, 1).replace(' ', '%20'), season_n,
            episode_n)).lower()
    else:
        search_url = clean_name(original_title, 1).replace(
            ' ', '%20') + '%20' + show_original_year
    x = get_html(
        'https://api.magsearch.net/search?keywords=%s&itemn=200&start=0&filetype=video&sortby=hot&userid=99999999999999999999999999999999'
        % (search_url.replace(' ', '%20')),
        headers=base_header, timeout=10).json()
    max_size = int(Addon.getSetting("size_limit"))
    dev_num = 1024 * 1024 * 1024  # bytes per GB
    for items in x:
        title = items['name']
        lk = items['url']
        size = float(items['length']) / dev_num
        if int(size) < max_size:
            # Fix: the original tested '1080' with a fresh `if` after the
            # '2160' test, so a 2160-only title fell into the second chain's
            # `else` and was mislabelled 'HD'. Single elif chain instead,
            # highest resolution first.
            if '2160' in title:
                res = '2160'
            elif '1080' in title:
                res = '1080'
            elif '720' in title:
                res = '720'
            elif '480' in title:
                res = '480'
            elif '360' in title:
                res = '360'
            else:
                res = 'HD'
            all_links.append((title, lk, str(size), res))
    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """TV-only: query the eztv.re JSON API by IMDB id (3 pages x 100 items).

    Keeps torrents whose filename contains the "sNNeNN." tag for the
    requested episode and returns (filename, magnet, size_gb, resolution)
    tuples under the configured size cap; also published in `global_var`.
    """
    global global_var, stop_all
    all_links = []
    if tv_movie == 'movie':
        return []
    # IMDB id is resolved once and memoised in the 'pages' cache table.
    imdb_id = cache.get(get_imdb, 999, tv_movie, id, table='pages')
    episode_tag = 's%se%s.' % (season_n, episode_n)
    # Hoisted loop invariants (were recomputed per page).
    max_size = int(Addon.getSetting("size_limit"))
    dev_num = 1024 * 1024 * 1024  # bytes per GB
    for pages in range(0, 3):
        x = get_html(
            'https://eztv.re/api/get-torrents?imdb_id=%s&limit=100&page=%s' %
            (imdb_id.replace('tt', ''), str(pages)),
            headers=base_header, timeout=10).json()
        for items in x['torrents']:
            title = items['filename']
            if episode_tag not in title.lower():
                continue
            lk = items['magnet_url']
            size = float(items['size_bytes']) / dev_num
            if int(size) < max_size:
                # Fix: '1080' was tested with `if` after the '2160' test, so
                # a 2160-only title was overwritten to 'HD' by the second
                # chain's else. Single elif chain instead.
                if '2160' in title:
                    res = '2160'
                elif '1080' in title:
                    res = '1080'
                elif '720' in title:
                    res = '720'
                elif '480' in title:
                    res = '480'
                elif '360' in title:
                    res = '360'
                else:
                    res = 'HD'
                all_links.append((title, lk, str(size), res))
    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    # TV-only scraper for sezonlukdizi.vip (Turkish series site).
    # Flow: AJAX title search -> episode-list page -> episode page ->
    # alternative-player list -> embedded ok.ru player -> direct stream URLs
    # read from the player's flashvars metadata.
    global global_var, stop_all
    if tv_movie == 'movie':
        return []
    all_links = []
    search_string = clean_name(original_title, 1)
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0',
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Accept-Language': 'en-US,en;q=0.5',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'X-Requested-With': 'XMLHttpRequest',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
        'TE': 'Trailers',
    }
    data = {'q': search_string}
    response = get_html('https://sezonlukdizi.vip/ajax/arama.asp',
                        headers=headers, data=data).json()
    new_url = None
    regex = '<tr>(.+?)</tr>'
    regex1 = re.compile(regex, re.DOTALL)  # episode table rows
    regex2 = re.compile('src="(.+?)"')     # embed iframe src
    regex = 'data-options="(.+?)" data-player-container'
    regex3 = re.compile(regex)             # ok.ru player options blob
    for itt in response['results']['diziler']['results']:
        # Exact title match; a trailing " (…)" suffix is stripped first.
        c_title = itt['title']
        if '(' in itt['title']:
            c_title = itt['title'].split(' (')[0]
        if not c_title.lower() == clean_name(original_title, 1).lower():
            continue
        # /diziler/<slug> -> /bolumler/<slug> is the episode-list page.
        next_add = 'https://sezonlukdizi.vip' + itt['url'].replace(
            'diziler', 'bolumler')
        x = get_html(next_add, headers=headers).content()
        m = regex1.findall(x)
        for item in m:
            regex = "a href='(.+?)'"
            m2 = re.compile(regex, re.DOTALL).findall(item)
            if len(m2) > 0:
                # Episode URLs look like .../<season>-sezon-<episode>-...
                if '/%s-sezon-%s-' % (season, episode) in m2[0]:
                    new_url = 'https://sezonlukdizi.vip' + m2[0]
                    break
    if new_url:
        y = get_html(new_url, headers=headers).content()
        regex = 'div bid="(.+?)"'
        idd = re.compile(regex).findall(y)[0]  # internal episode id
        headers = {
            'User-Agent':
            'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0',
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Accept-Language': 'en-US,en;q=0.5',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'X-Requested-With': 'XMLHttpRequest',
            'Origin': 'https://sezonlukdizi.vip',
            'Connection': 'keep-alive',
            'Referer': new_url,
            'Pragma': 'no-cache',
            'Cache-Control': 'no-cache',
            'TE': 'Trailers',
        }
        # 'dil' presumably selects the audio/subtitle language — TODO confirm.
        data = {'bid': idd, 'dil': '1'}
        response = get_html('https://sezonlukdizi.vip/ajax/dataAlternatif.asp',
                            headers=headers, data=data).json()
        for itt in response['data']:
            data = {'id': itt['id']}
            response = get_html('https://sezonlukdizi.vip/ajax/dataEmbed.asp',
                                headers=headers, data=data).content()
            e_url = regex2.findall(response)
            e_url = e_url[0].replace('odnoklassniki', 'ok')
            if 'http' not in e_url:
                e_url = 'http:' + e_url  # protocol-relative embed URLs
            if 'ok.ru' not in e_url:
                continue  # only the ok.ru player is parsed below
            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0',
                'Accept': 'application/json, text/javascript, */*; q=0.01',
                'Accept-Language': 'en-US,en;q=0.5',
                'Content-Type':
                'application/x-www-form-urlencoded; charset=UTF-8',
                'X-Requested-With': 'XMLHttpRequest',
                'Connection': 'keep-alive',
                'Pragma': 'no-cache',
                'Cache-Control': 'no-cache',
                'TE': 'Trailers',
            }
            z = get_html(e_url, headers=headers).content()
            # The player config is an HTML-escaped JSON blob; its
            # flashvars.metadata field is itself JSON with the stream list.
            sHtmlContent = regex3.findall(z)[0]
            sHtmlContent = removeHtmlTags(sHtmlContent)
            sHtmlContent = unescape(sHtmlContent)  #.decode('utf-8'))
            page = json.loads(sHtmlContent)
            page = json.loads(page['flashvars']['metadata'])
            url = []  # collected but never returned
            qua = []  # collected but never returned
            title = clean_name(original_title,
                               1) + '.S%sE%s' % (season_n, episode_n)
            HEADERS = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0',
                'Accept':
                'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
            }
            for x in page['videos']:
                # Kodi-style "url|Header=..&Header=.." so the player sends
                # the headers ok.ru expects.
                api_call = '%s|User-Agent=%s&Accept=%s' % (
                    x['url'], HEADERS['User-Agent'], HEADERS['Accept'])
                api_call = api_call + '&Referer=' + e_url + '&Origin=http://ok.ru'
                url.append(api_call)
                qua.append(x['name'])
                # Map ok.ru quality names onto numeric tags.
                res = x['name'].replace('mobile', '480').replace(
                    'lowest', '480').replace('low', '480').replace(
                        'sd', '480').replace('hd', '720')
                '''
                try_head = requests.head(x['url'],headers=base_header, stream=True,verify=False,timeout=15)
                f_size2=0
                if 'Content-Length' in try_head.headers:
                    if int(try_head.headers['Content-Length'])>(1024*1024):
                        f_size2=str(round(float(try_head.headers['Content-Length'])/(1024*1024*1024), 2))
                '''
                all_links.append(
                    (title, 'Direct_link$$$' + api_call, str(0), res))
    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """Scrape eztv.show search results for the requested episode.

    Opens each matching post, extracts the torrent download link and the
    advertised filesize, and returns (title, link, size_gb, resolution)
    tuples under the configured size cap; also published in `global_var`.
    """
    global global_var, stop_all
    all_links = []
    search_url = ('%s s%se%s' %
                  (clean_name(original_title, 1), season_n, episode_n)).lower()
    x = get_html('http://eztv.show/?s=' + search_url.replace(' ', '%20'),
                 headers=base_header, timeout=10).content()
    logging.warning('http://eztv.show/?s=' + search_url)
    macth_pre = re.compile(
        'h2 class="entry-title"><a href="(.+?)".+?>(.+?)<').findall(x)
    regex1 = re.compile('strong>Download Torrent: </strong> <a href="(.+?)"')
    for link, title in macth_pre:
        if clean_name(original_title, 1).lower() in title.lower() and \
                's%se%s' % (season_n, episode_n) in title.lower():
            y = get_html(link, headers=base_header, timeout=10).content()
            lk = regex1.findall(y)
            # Fix: the original appended the raw empty list when the post
            # had no torrent link, producing an unusable entry. Skip it.
            if not lk:
                continue
            lk = lk[0]
            if stop_all == 1:
                break
            if '4k' in title:
                res = '2160'
            elif '2160' in title:
                res = '2160'
            elif '1080' in title:
                res = '1080'
            elif '720' in title:
                res = '720'
            elif '480' in title:
                res = '480'
            elif '360' in title:
                res = '360'
            else:
                res = 'HD'
            try:
                # e.g. "Filesize: 1.2 GB" / "Filesize: 734 MB"
                o_size = re.compile('Filesize:(.+?)<').findall(y)[0]
                size = float(
                    o_size.replace('GB', '').replace('MB', '').replace(
                        ",", '').strip())
                if 'MB' in o_size:
                    size = size / 1000  # megabytes -> gigabytes
            except:
                size = 0
            if size < int(Addon.getSetting("size_limit")):
                all_links.append((title, lk, str(size), res))
    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    # TV-only scraper for eztv.io HTML search results.
    # Parses each "hover" row for title / detail link / size / seeders,
    # filters by title match, telesync exclusion and the size cap, and
    # returns (title, link, size_gb, resolution) tuples (also in global_var).
    global global_var, stop_all
    all_links = []
    if tv_movie == 'movie':
        return []
    allow_debrid = True  # unused; the debrid branch below is disabled (if 0)
    search_url = ('%s-s%se%s' % (clean_name(original_title, 1).replace(
        ' ', '-'), season_n, episode_n)).lower()
    x = requests.get('https://eztv.io/search/{0}'.format(search_url),
                     headers=base_header, timeout=10).content
    regex_pre = '<tr name="hover"(.+?)</tr>'
    m_pre = re.compile(regex_pre, re.DOTALL).findall(x)
    for items in m_pre:
        # Primary row layout: seeders wrapped in a green <font> tag.
        regex = '<td class="forum_thread_post".+?class="epinfo">(.+?)<.+?a href="(.+?)".+?<td align="center" class="forum_thread_post">(.+?)<.+?<td align="center" class="forum_thread_post_end"><font color="green">(.+?)<'
        m2 = re.compile(regex, re.DOTALL).findall(items)
        if len(m2) == 0:
            # Fallback layout: seeders cell without the <font> wrapper.
            regex = '<td class="forum_thread_post".+?class="epinfo">(.+?)<.+?a href="(.+?)".+?<td align="center" class="forum_thread_post">(.+?)<.+?<td align="center" class="forum_thread_post_end">(.+?)<'
            m2 = re.compile(regex, re.DOTALL).findall(items)
        for title, links, size, seed in m2:
            seed = seed.replace('-', '0')
            peer = 0  # site does not expose leechers here
            if stop_all == 1:
                break
            # Presumably normalises a non-breaking space in "1.2 GB" —
            # TODO confirm the original byte values.
            size = size.replace(' ', " ")
            try:
                o_size = size
                size = float(
                    o_size.replace('GiB', '').replace('MiB', '').replace(
                        'GB', '').replace('MB', '').replace(",", '').strip())
                if 'MB' in o_size or 'MiB' in o_size:
                    size = size / 1000  # megabytes -> gigabytes
            except:
                size = 0
            regex = 'dn=(.+?)&'  # leftover; not used below
            nam = title
            max_size = int(Addon.getSetting("size_limit"))
            if '.TS.' in nam:
                continue  # skip telesync releases
            if int(size) < max_size:
                if '1080' in nam:
                    res = '1080'
                elif '720' in nam:
                    res = '720'
                elif '480' in nam:
                    res = '480'
                elif '360' in nam:
                    res = '360'
                else:
                    res = 'HD'
                if clean_name(original_title, 1).lower() not in title.lower():
                    continue
                if 0:  # allow_debrid: disabled — would fetch the magnet from the detail page
                    x = requests.get('https://eztv.io' + links,
                                     headers=base_header, timeout=10).content
                    regex = '"magnet(.+?)"'
                    mm = re.compile(regex).findall(x)
                    if len(mm) == 0:
                        continue
                    lk = 'magnet' + mm[0]
                else:
                    lk = links
                all_links.append((title, lk, str(size), res))
    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    # Scraper for the magno.netlify.app serverless torrent-search function.
    # Each hit's link is followed so redirect shorteners resolve; when
    # get_html raises on the magnet: scheme, the magnet URI is recovered
    # from the exception message text.
    global global_var, stop_all
    if tv_movie == 'movie':
        search_url = [
            clean_name(original_title, 1).replace(' ', '%20') + '%20'
        ]
        s_type = 'Movies'  # NOTE(review): s_type/type/type2 are never used here
        type = '207'       # NOTE(review): shadows builtin `type`
        type2 = '201'
    else:
        if Addon.getSetting('debrid_select') == '0':
            # No debrid: episode, season-pack and "season N" queries.
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' +
                season_n + 'e' + episode_n,
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' +
                season_n,
                clean_name(original_title, 1).replace(' ', '%20') +
                '%20season%20' + season
            ]
        else:
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' +
                season_n + 'e' + episode_n
            ]
        s_type = 'TV'
        type = '208'
        type2 = '205'
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0',
        'Accept': '*/*',
        'Accept-Language': 'en-US,en;q=0.5',
        'Referer': 'https://magno.netlify.app/',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }
    all_links = []
    all_l = []  # unused
    regex = "magnet(.+?)'"
    regex1 = re.compile(regex)  # pulls the magnet URI out of exception text
    for itt in search_url:
        if stop_all == 1:
            break
        params = (
            ('term', itt),
        )
        try:
            x = get_html('https://magno.netlify.app/.netlify/functions/x',
                         headers=headers, params=params).json()
            #x=get_html('https://magno.netlify.app/.netlify/functions/api?keyword=%s'%(itt),headers=base_header,timeout=10).json()
        except Exception as e:
            continue
        logging.warning(x)
        div_size = 1024 * 1024 * 1024  # bytes per GB
        for items in x:
            title = items['title']
            link = items['link']
            if 'magnet' not in str(link):
                continue
            try:
                # Follow redirects to the final magnet target.
                link = get_html(link, stream=True).url
            except Exception as e:
                # magnet: is not an HTTP scheme, so the request raises with
                # the target URL embedded in the error text — recover it.
                regex = "magnet(.+?)'"
                link = 'magnet' + regex1.findall(str(e))[0]
            size = items['size']
            if stop_all == 1:
                break
            if '4k' in title:
                res = '2160'
            elif '2160' in title:
                res = '2160'
            elif '1080' in title:
                res = '1080'
            elif '720' in title:
                res = '720'
            elif '480' in title:
                res = '480'
            elif '360' in title:
                res = '360'
            else:
                res = 'HD'
            o_link = link  # unused
            size = (float(size) / (div_size))
            max_size = int(Addon.getSetting("size_limit"))
            if size < max_size:
                all_links.append((title, link, str(size), res))
    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    # RSS scraper for torrentdownloads mirrors: probe the domain list for a
    # live mirror, query its search RSS feed, and build magnet links from
    # each item's <info_hash>.
    global global_var, stop_all
    try:
        que = urllib.quote_plus        # Python 2
    except:
        que = urllib.parse.quote_plus  # Python 3
    all_links = []
    domains = ['torrentdownloads.me', 'torrentdownloads.info']
    search = '{0}/rss.xml?new=1&type=search&cid={1}&search={2}'
    for domain in domains:
        try:
            url = 'https://%s' % domain
            result = client.request(url, timeout='10')
            # The site logo only appears on a working mirror; an IndexError
            # here is caught below and moves on to the next domain.
            search_n = re.findall('alt="Torrent Downloads"', result,
                                  re.DOTALL)[0]
            if search_n:
                break
        except Exception:
            pass
    if tv_movie == 'tv':
        cid = '8'  # site category id: TV
        if Addon.getSetting('debrid_select') == '0':
            # No debrid: episode, season-pack and "season N" queries.
            search_sting = [
                clean_name(original_title, 1).replace(' ', '+') + '+s%se%s' %
                (season_n, episode_n),
                clean_name(original_title, 1).replace(' ', '+') + '+s%s' %
                (season_n),
                clean_name(original_title, 1).replace(' ', '+') +
                '+season+%s' % (season)
            ]
        else:
            search_sting = [
                clean_name(original_title, 1).replace(' ', '+') + '+s%se%s' %
                (season_n, episode_n)
            ]
    else:
        cid = '4'  # site category id: movies
        search_sting = [
            clean_name(original_title, 1).replace(' ', '+') + '+%s' %
            (show_original_year)
        ]
    regex = '<item>(.+?)</item'
    data_regex = re.compile(regex, re.DOTALL)
    regex = '<title>(.+?)<.+?<size>(.+?)<.+?<info_hash>(.+?)<'
    data_regex2 = re.compile(regex, re.DOTALL)
    for itt in search_sting:
        # `url` is whichever mirror survived the probe loop above.
        url_f = search.format(url, cid, itt)
        x = get_html(url_f, headers=base_header).content()
        m_pre = data_regex.findall(x)
        count = 0
        for items in m_pre:
            count += 1
            m = data_regex2.findall(items)
            for title, size, hash in m:
                size = float(int(size)) / 1073741824  # bytes -> GB
                # Build a magnet from the feed's info_hash + display name.
                lk = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, que(title))
                if stop_all == 1:
                    break
                if '4k' in title:
                    res = '2160'
                elif '2160' in title:
                    res = '2160'
                elif '1080' in title:
                    res = '1080'
                elif '720' in title:
                    res = '720'
                elif '480' in title:
                    res = '480'
                elif '360' in title:
                    res = '360'
                else:
                    res = 'HD'
                max_size = int(Addon.getSetting("size_limit"))
                if size < max_size:
                    all_links.append((title, lk, str(size), res))
    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    # Scraper for torrenthood.net. Movies: parse each release block on the
    # detail page for name/size/magnet. TV: locate the season page and read
    # the per-episode onClick magnet targets.
    global global_var, stop_all
    all_links = []
    if tv_movie == 'movie':
        search_url = ('%s+%s' % (clean_name(original_title, 1).replace(
            ' ', '+'), show_original_year)).lower()
        type = 'load'  # NOTE(review): shadows builtin `type`
    else:
        search_url = '%s' % (clean_name(original_title, 1).replace(
            ' ', '+')).lower()
        type = 'publ'
    if 1:
        x = get_html('https://torrenthood.net/search/?q=%s&m=%s&t=0' %
                     (search_url, type),
                     headers=base_header, timeout=10).content()
        regex = '<tr>(.+?)</tr>'
        macth_pre = re.compile(regex, re.DOTALL).findall(x)
        regex = ' href="(.+?)"'
        regex1 = re.compile(regex)        # result-row link
        regex = '<div class="table-row-equal"(.+?)</form>'
        regex2 = re.compile(regex, re.DOTALL)   # movie release blocks
        regex = '<span class="info2">(.+?)<.+?Size: <b>(.+?)<.+?magnet:(.+?)\''
        regex3 = re.compile(regex, re.DOTALL)   # name / size / magnet tail
        regex = '<div class="table-row-equal">(.+?)</form></div>'
        regex4 = re.compile(regex, re.DOTALL)   # season release blocks
        regex = '<span class="info2" id="episode-(.+?)".+?onClick="(.+?)"'
        regex5 = re.compile(regex, re.DOTALL)   # episode id / onClick target
        regex = '<span id="blue">Full Season.+?span class="info4">(.+?)<'
        regex6 = re.compile(regex, re.DOTALL)   # season-pack quality label
        for itm in macth_pre:
            regex = ' href="(.+?)"'
            ittm = regex1.findall(itm)[0]
            # Keep only result rows whose URL slug contains the title.
            if clean_name(original_title,
                          1).lower().replace(' ', '-') not in ittm:
                continue
            y = get_html(ittm, headers=base_header, timeout=10).content()
            if tv_movie == 'movie':
                regex = '<div class="table-row-equal"(.+?)</form>'
                m_p = regex2.findall(y)
                for mpp in m_p:
                    regex = '<span class="info2">(.+?)<.+?Size: <b>(.+?)<.+?magnet:(.+?)\''
                    m_pre = regex3.findall(mpp)
                    if len(m_pre) == 0:
                        continue
                    nm = m_pre[0][0]
                    title = nm
                    size = m_pre[0][1]
                    lk = 'magnet:' + m_pre[0][2]
                    if len(title) > 0:
                        if '4k' in title:
                            res = '2160'
                        elif '2160' in title:
                            res = '2160'
                        elif '1080' in title:
                            res = '1080'
                        elif '720' in title:
                            res = '720'
                        elif '480' in title:
                            res = '480'
                        elif '360' in title:
                            res = '360'
                        else:
                            res = 'HD'
                    else:
                        res = '720'
                    try:
                        o_size = size
                        size = float(
                            o_size.replace('GB', '').replace(
                                'MB', '').replace(",", '').strip())
                        if 'MB' in o_size:
                            size = size / 1000  # megabytes -> gigabytes
                    except:
                        size = 0
                    max_size = int(Addon.getSetting("size_limit"))
                    if size < max_size:
                        all_links.append((nm, lk, str(size), res))
                        global_var = all_links  # publish incrementally
            else:
                # The season page slug must reference this season.
                if 'season-%s-' % season not in ittm and '-s%s' % season_n not in ittm:
                    continue
                regex = '<div class="table-row-equal">(.+?)</form></div>'
                m_pre = regex4.findall(y)
                for ittm2 in m_pre:
                    regex = '<span class="info2" id="episode-(.+?)".+?onClick="(.+?)"'
                    m_in = regex5.findall(ittm2)
                    for idd, lk in m_in:
                        if stop_all == 1:
                            break
                        if tv_movie == 'tv':
                            if episode != idd:
                                continue
                        if 'magnet' not in lk:
                            continue
                        regex = '<span id="blue">Full Season.+?span class="info4">(.+?)<'
                        title = regex6.findall(y)
                        # NOTE(review): `title` is a LIST here, so the
                        # "'4k' in title" tests below are list-membership
                        # checks, not substring checks — res effectively
                        # falls through to 'HD' whenever the list is
                        # non-empty. Confirm intent before changing.
                        lk = lk.replace("self.location='", '').replace(
                            "'", '')
                        nm = clean_name(original_title, 1)
                        if len(title) > 0:
                            if '4k' in title:
                                res = '2160'
                            elif '2160' in title:
                                res = '2160'
                            elif '1080' in title:
                                res = '1080'
                            elif '720' in title:
                                res = '720'
                            elif '480' in title:
                                res = '480'
                            elif '360' in title:
                                res = '360'
                            else:
                                res = 'HD'
                        else:
                            res = '720'
                        regex = 'Download Size\: <b>(.+?) \(per episode\)'
                        size = re.compile(regex).findall(y)
                        if len(size) > 0:
                            try:
                                o_size = size[0]
                                size = float(
                                    o_size.replace('GB', '').replace(
                                        'MB', '').replace(",", '').strip())
                                if 'MB' in o_size:
                                    size = size / 1000
                            except:
                                size = 0
                        else:
                            size = 0
                        max_size = int(Addon.getSetting("size_limit"))
                        if size < max_size:
                            all_links.append((nm, lk, str(size), res))
                            global_var = all_links  # publish incrementally
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """Scrape glodls.to search results (3 pages per query, seeders-first)
    and return (title, magnet, size_gb, resolution) tuples under the size
    cap; also published in the module-level `global_var`."""
    global global_var, stop_all
    gathered = []
    plus_name = clean_name(original_title, 1).replace(' ', '+')
    if tv_movie == 'movie':
        queries = [('%s+%s' % (plus_name, show_original_year)).lower()]
        category = '1'
    else:
        category = '41'
        if Addon.getSetting('debrid_select') == '0':
            # No debrid: also try season-pack phrasings.
            queries = [
                ('%s+s%se%s' % (plus_name, season_n, episode_n)).lower(),
                ('%s+s%s' % (plus_name, season_n)).lower(),
                ('%s+season+%s' % (plus_name, season)).lower()
            ]
        else:
            queries = [
                ('%s+s%se%s' % (plus_name, season_n, episode_n)).lower()
            ]
    row_rx = re.compile(
        'class="showthecross".+?<b>(.+?)<.+?"magnet(.+?)".+?align=\'center\'>(.+?)</td>',
        re.DOTALL)
    for query in queries:
        for page in range(0, 3):
            html = get_html(
                'http://glodls.to/search_results.php?cat=%s&search=%s&sort=seeders&order=desc&page=%s'
                % (category, query, page),
                headers=base_header, timeout=10).content()
            for name, magnet_tail, raw_size in row_rx.findall(html):
                if stop_all == 1:
                    break
                # First matching resolution marker wins; 'HD' otherwise.
                quality = 'HD'
                for marker, tag in (('4k', '2160'), ('2160', '2160'),
                                    ('1080', '1080'), ('720', '720'),
                                    ('480', '480'), ('360', '360')):
                    if marker in name:
                        quality = tag
                        break
                try:
                    decoded = raw_size.decode('utf8', 'ignore')
                    gigabytes = float(
                        decoded.replace('GB', '').replace('MB', '').replace(
                            ",", '').strip())
                    if 'MB' in decoded:
                        gigabytes = gigabytes / 1000
                except Exception as e:
                    gigabytes = 0
                if gigabytes < int(Addon.getSetting("size_limit")):
                    gathered.append((name, 'magnet' + magnet_tail,
                                     str(gigabytes), quality))
    global_var = gathered
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    # Scraper for gowatchseries.tv. Finds the matching show/movie in the
    # search listing, drills down to the requested episode (or the latest
    # episode for movies), then resolves every data-video embed with
    # resolveurl into a direct link.
    global global_var, stop_all
    import xbmc, sys
    # Make the bundled resolver add-on modules importable.
    path = xbmc.translatePath(
        'special://home/addons/script.module.resolveurl/lib')
    sys.path.append(path)
    path = xbmc.translatePath('special://home/addons/script.module.six/lib')
    sys.path.append(path)
    import resolveurl
    all_links = []
    if tv_movie == 'movie':
        search_string = clean_name(original_title, 1).replace(
            ' ', '%20') + '%20' + show_original_year
    else:
        search_string = clean_name(original_title, 1).replace(
            ' ', '%20') + '%20season%20' + season
    response = get_html('https://gowatchseries.tv/search.html?keyword=' +
                        search_string,
                        headers=base_header).content()
    regex = '<li>(.+?)</li>'
    m_pre = re.compile(regex, re.DOTALL).findall(response)
    for itt in m_pre:
        regex = 'a href="(.+?)".+?<div class="name">(.+?)<'
        m = re.compile(regex, re.DOTALL).findall(itt)
        for lk, nm in m:
            check = False
            if tv_movie == 'movie':
                if clean_name(original_title, 1).lower() in nm.lower(
                ) and show_original_year in nm.lower():
                    check = True
            else:
                # '$$$' is appended as an end-of-string sentinel so
                # " season 1" does not also match " season 10".
                if clean_name(original_title, 1).lower() in nm.lower(
                ) and ' season %s$$$' % season in (nm.lower() + '$$$'):
                    check = True
            if check:
                x = get_html('https://gowatchseries.tv' + lk,
                             headers=base_header).content()
                if tv_movie == 'movie':
                    title = clean_name(original_title, 1)
                    # Movies expose a single "Latest Episode" link.
                    regex = '<span>Latest Episode: </span>.+?<a href="(.+?)"'
                    m2 = re.compile(regex, re.DOTALL).findall(x)[0]
                else:
                    title = clean_name(original_title,
                                       1) + '.S%sE%s' % (season_n, episode_n)
                    regex = '<li class="child_episode">.+?a href="(.+?)"'
                    m2_pre = re.compile(regex, re.DOTALL).findall(x)
                    found = False
                    for m2 in m2_pre:
                        # Same '$$$' sentinel trick for exact episode match.
                        if 'season-%s-episode-%s$$$' % (
                                season, episode) in m2.lower() + '$$$':
                            found = True
                            break
                    # NOTE(review): returns [] for the whole call as soon as
                    # one matched show lacks the episode — later search hits
                    # are never tried. Confirm before changing.
                    if not found:
                        return []
                y = get_html('https://gowatchseries.tv' + m2,
                             headers=base_header).content()
                regex = 'data-video="(.+?)"'
                lk_pre = re.compile(regex, re.DOTALL).findall(y)
                for f_lk in lk_pre:
                    f_lk_r = False
                    try:
                        if '#caption=' in f_lk:
                            f_lk = f_lk.split('#caption=')[0]
                        f_lk_r = resolveurl.resolve(f_lk)
                    except:
                        pass
                    size = 0  # no size information on this site
                    if f_lk_r:
                        all_links.append((title, 'Direct_link$$$' + f_lk_r,
                                          str(size), 'HD'))
    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    # Scraper for the extratorrent2.unblockninja.com mirror (pages 1-3 per
    # query). Each result row yields magnet-tail / title / seeders /
    # leechers; the size is grabbed separately from the row text.
    global global_var, stop_all
    all_links = []
    if tv_movie == 'movie':
        cat = 'movies'
        search_url = [('%s+%s' % (clean_name(original_title, 1).replace(
            ' ', '+'), show_original_year)).lower()]
    else:
        cat = 'tv'
        if Addon.getSetting('debrid_select') == '0':
            # No debrid: episode, season-pack and "season N" queries.
            search_url = [
                ('%s+s%se%s' % (clean_name(original_title, 1).replace(
                    ' ', '+'), season_n, episode_n)).lower(),
                ('%s+s%s' % (clean_name(original_title, 1).replace(
                    ' ', '+'), season_n)).lower(),
                ('%s-season-%s' % (clean_name(original_title, 1).replace(
                    ' ', '+'), season)).lower()
            ]
        else:
            search_url = [
                ('%s+s%se%s' % (clean_name(original_title, 1).replace(
                    ' ', '+'), season_n, episode_n)).lower()
            ]
    regex = '<tr class(.+?)</tr>'
    regex1 = re.compile(regex, re.DOTALL)  # result rows
    regex = 'a href="magnet(.+?)".+?td class="tli".+?title="(.+?)".+?td class="sy">(.+?)<.+?td class="ly">(.+?)<'
    regex2 = re.compile(regex, re.DOTALL)  # magnet / title / seed / peer
    for itt in search_url:
        for page in range(1, 4):
            x = get_html(
                'https://extratorrent2.unblockninja.com/search/?search=%s&x=0&y=0&category=%s&page=%s'
                % (itt, cat, str(page)),
                headers=base_header).content()
            regex = '<tr class(.+?)</tr>'
            macth_pre = regex1.findall(x)
            for items in macth_pre:
                if stop_all == 1:
                    break
                regex = 'a href="magnet(.+?)".+?td class="tli".+?title="(.+?)".+?td class="sy">(.+?)<.+?td class="ly">(.+?)<'
                match = regex2.findall(items)
                try:
                    # e.g. "1.2 GiB" / "734 MB" somewhere in the row text.
                    size = re.findall(
                        '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        items)[0]
                except:
                    size = 0  # int sentinel; the parse below then yields 0 too
                for link, title, seed, peer in match:
                    if stop_all == 1:
                        break
                    title = title.replace('view ', '')
                    if '4k' in title:
                        res = '2160'
                    elif '2160' in title:
                        res = '2160'
                    elif '1080' in title:
                        res = '1080'
                    elif '720' in title:
                        res = '720'
                    elif '480' in title:
                        res = '480'
                    elif '360' in title:
                        res = '360'
                    else:
                        res = 'HD'
                    o_link = link
                    try:
                        o_size = size
                        # NOTE(review): 'GiB'/'MiB' units are not stripped
                        # here, so those sizes raise and fall back to 0.
                        # Also, `size` is reused across iterations, so after
                        # the first parse o_size is a float and .replace
                        # raises — size becomes 0 for later rows. Confirm
                        # before fixing.
                        size = float(
                            o_size.replace('GB', '').replace(
                                'MB', '').replace(",", '').strip())
                        if 'MB' in o_size:
                            size = size / 1000  # megabytes -> gigabytes
                    except:
                        size = 0
                    max_size = int(Addon.getSetting("size_limit"))
                    if size < max_size:
                        all_links.append(
                            (title, 'magnet' + o_link, str(size), res))
    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """Resolve Google Drive streams from the qazwsxedcrfvtgb.info catalogue,
    looked up by IMDB id; TV requests are narrowed to the exact episode."""
    global global_var, stop_all
    # Kept for parity with the sibling scrapers; this endpoint keys on the
    # IMDB id, not the title.
    _query = clean_name(original_title, 1).replace(' ', '%20')
    imdb_id = cache.get(get_imdb, 999, tv_movie, id, table='pages')
    drive_links = []
    catalogue = get_html('https://qazwsxedcrfvtgb.info/show/' + (imdb_id),
                         headers=base_header, timeout=10,
                         verify=False).json()
    logging.warning(catalogue)
    for entry in catalogue['episodes']:
        label = clean_name(original_title, 1)
        if tv_movie == 'tv':
            base_res = '720'
            label += '.S%sE%s' % (season_n, episode_n)
            if not (episode == str(entry['episode'])
                    and season == str(entry['season'])):
                continue
        else:
            base_res = '1080'
        if 'mb_stream' not in entry:
            continue
        # The last stream id in the mapping wins, matching the original
        # dict-iteration behaviour.
        for key in entry['mb_stream']:
            file_id = entry['mb_stream'][key]
        link = 'https://drive.google.com/file/d/' + file_id + '/view'
        # base_res is only ever '720' or '1080', both of which map to
        # themselves through the original resolution-marker chain.
        quality = base_res
        drive_links.append((label, link, str(0), quality))
    global_var = drive_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """Scrape torrent magnets from a kickass-torrents mirror (kick4ss.net).

    Fills the module-global ``global_var`` with (title, magnet, size_gb,
    resolution) tuples and returns it.  Results at or above the
    "size_limit" addon setting are dropped.
    """
    global global_var, stop_all
    # py2/py3 compatibility shim for unquote_plus
    try:
        unque = urllib.unquote_plus
    except:
        unque = urllib.parse.unquote_plus
    if tv_movie == 'movie':
        search_url = [
            clean_name(original_title, 1).replace(' ', '%20') + '%20' +
            show_original_year
        ]
        s_type = 'movies'  # NOTE(review): unused below
    else:
        if Addon.getSetting('debrid_select') == '0':
            # debrid mode 0 also searches season packs
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' +
                season_n + 'e' + episode_n,
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' +
                season_n,
                clean_name(original_title, 1).replace(' ', '%20') +
                '%20season%20' + season
            ]
        else:
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' +
                season_n + 'e' + episode_n
            ]
        s_type = 'tv'  # NOTE(review): unused below
    all_links = []
    # regex1: results region; regex2: one <tr> per row; regex3: fields
    # (redirect link, title, size, seeders, leechers) within a row
    regex = '-- Start of Loop -->(.+?)-- End of Loop -->'
    regex1 = re.compile(regex, re.DOTALL)
    regex_pre = '<tr (.+?)</tr>'
    regex2 = re.compile(regex_pre, re.DOTALL)
    regex = 'title="Torrent magnet link" href="(.+?)".+?class="cellMainLink">(.+?)<.+?class="nobr center">(.+?)<.+?lass="green center">(.+?)<.+?class="red lasttd center">(.+?)<'
    regex3 = re.compile(regex, re.DOTALL)
    for itt in search_url:
        x = get_html('https://kick4ss.net/usearch/{0}/'.format(itt),
                     headers=base_header,
                     timeout=10).content()
        regex = '-- Start of Loop -->(.+?)-- End of Loop -->'  # dead reassignment, kept as-is
        m = regex1.findall(x)
        regex_pre = '<tr (.+?)</tr>'  # dead reassignment, kept as-is
        m_pre = regex2.findall(m[0])
        for items in m_pre:
            if stop_all == 1:
                break
            regex = 'title="Torrent magnet link" href="(.+?)".+?class="cellMainLink">(.+?)<.+?class="nobr center">(.+?)<.+?lass="green center">(.+?)<.+?class="red lasttd center">(.+?)<'  # dead reassignment, kept as-is
            macth_pre = regex3.findall(items)
            for link, title, size, seed, peer in macth_pre:
                if stop_all == 1:
                    break
                seed = seed.replace('N/A', '0')
                peer = peer.replace('N/A', '0')
                if '4k' in title:
                    res = '2160'
                elif '2160' in title:
                    res = '2160'
                elif '1080' in title:
                    res = '1080'
                elif '720' in title:
                    res = '720'
                elif '480' in title:
                    res = '480'
                elif '360' in title:
                    res = '360'
                else:
                    res = 'HD'
                o_link = link
                try:
                    # parse "<n> GB"/"<n> MB" into GB; anything else -> 0
                    o_size = size
                    size = float(
                        o_size.replace('GB', '').replace('MB', '').replace(
                            ",", '').strip())
                    if 'MB' in o_size:
                        size = size / 1000
                except Exception as e:
                    size = 0
                max_size = int(Addon.getSetting("size_limit"))
                if size < max_size:
                    # the row link is a redirect wrapper; the real magnet is
                    # url-encoded in its ?url= parameter
                    f_link = unque(o_link.split('url=')[1])
                    all_links.append((title, f_link, str(size), res))
    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """Query the bitlordsearch.com JSON API for torrents.

    Fetches a per-session request token plus cookies, POSTs the search
    form, normalises the reported sizes (the API mixes units per source),
    and stores (name, magnet, size, resolution) tuples in the
    module-global ``global_var``, which is returned.
    """
    global global_var, stop_all
    all_links = []
    url = 'https://bitlordsearch.com'
    token, cookies = _get_token_and_cookies(url)
    headers = {
        'authority': 'bitlordsearch.com',
        'pragma': 'no-cache',
        'cache-control': 'no-cache',
        'accept': '*/*',
        'dnt': '1',
        'x-request-token': token,
        'x-requested-with': 'XMLHttpRequest',
        'user-agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.193 Safari/537.36',
        'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'origin': 'https://bitlordsearch.com',
        'sec-fetch-site': 'same-origin',
        'sec-fetch-mode': 'cors',
        'sec-fetch-dest': 'empty',
        'cookie': cookies,
    }
    if tv_movie == 'tv':
        if Addon.getSetting('debrid_select') == '0':
            # NOTE: an earlier three-query list here was dead code — it was
            # immediately overwritten by this season query, so only the
            # season query is kept.
            query = [clean_name(original_title, 1) + '+season+' + season]
        else:
            query = [
                clean_name(original_title, 1) +
                '+s%se%s' % (season_n, episode_n)
            ]
    else:
        query = [clean_name(original_title, 1) + ' ' + show_original_year]
    max_size = int(Addon.getSetting("size_limit"))
    for qrr in query:
        data = {
            'query': qrr,
            'offset': 0,
            'limit': 99,
            'filters[field]': 'seeds',
            'filters[sort]': 'desc',
            'filters[time]': 4,
            'filters[category]': 3 if tv_movie == 'movie' else 4,
            'filters[adult]': False,
            'filters[risky]': False
        }
        response = get_html("https://bitlordsearch.com" + "/get_list",
                            data=data,
                            headers=headers,
                            timeout=10).json()
        for el in response['content']:
            try:
                size = int(el['size'])
                if size == 0:
                    continue
                else:
                    # per-source unit heuristics — presumably normalising
                    # MB/KB mixes; TODO confirm against API responses
                    if size < 120 and el['source'] == 'thePirateBay':
                        size = size * 1024
                    elif size > 122880:
                        size = int(size / 1024)
                    elif size < 120:
                        continue
                    size = size / 1000
            except Exception:
                # BUG FIX: the original `except: pass` left `size` unbound
                # (NameError on the first entry) or stale from the previous
                # entry when parsing failed; treat the size as unknown.
                size = 0
            title = el['name']
            if '4k' in title:
                res = '2160'
            elif '2160' in title:
                res = '2160'
            elif '1080' in title:
                res = '1080'
            elif '720' in title:
                res = '720'
            elif '480' in title:
                res = '480'
            elif '360' in title:
                res = '360'
            else:
                res = 'HD'
            if size < max_size:
                all_links.append((el['name'], el['magnet'], str(size), res))
    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """Scrape torrent magnets from a Zooqle mirror (zooqle.torrentbay.to).

    Optionally filters/annotates by seeder count when plain-torrent
    playback is enabled without debrid.  Fills the module-global
    ``global_var`` with (title, magnet, size_gb, resolution) tuples.
    """
    global global_var, stop_all
    all_links = []
    # Zooqle category ids — NOTE(review): computed but never interpolated
    # into the search URL below
    if tv_movie == 'tv':
        cat = '205'
    elif tv_movie == 'movie':
        cat = '201'
    else:
        cat = '0'
    if tv_movie == 'movie':
        search_url = [('%s+%s' % (clean_name(original_title, 1).replace(
            ' ', '+'), show_original_year)).lower()]
    elif tv_movie == 'tv':
        if Addon.getSetting('debrid_select') == '0':
            # debrid mode 0 also searches season packs
            search_url = [
                ('%s+s%se%s' % (clean_name(original_title, 1).replace(
                    ' ', '+'), season_n, episode_n)).lower(),
                ('%s+s%s' % (clean_name(original_title, 1).replace(
                    ' ', '+'), season_n)).lower(),
                ('%s+season+%s' % (clean_name(original_title, 1).replace(
                    ' ', '+'), season)).lower()
            ]
        else:
            search_url = [
                ('%s+s%se%s' % (clean_name(original_title, 1).replace(
                    ' ', '+'), season_n, episode_n)).lower()
            ]
    regex_pre = '<tr (.+?)</tr>'
    regex1 = re.compile(regex_pre, re.DOTALL)
    seed_t = ''
    f_seeds = False
    use_debrid = Addon.getSetting('debrid_use') == 'true'
    # seeder filtering only applies to plain torrent playback (no debrid)
    if (Addon.getSetting('torrents') == 'true' and use_debrid == False):
        f_seeds = True
        seed_t = 'S: >>'
    # fields per row: magnet link, size text, seeders, leechers
    regex = '<a title="Magnet link".+?href="(.+?)">.+?class="progress-bar prog-blue prog-l.+?>(.+?).+?title="Seeders: (.+?) \| Leechers: (.+?)"'
    regex2 = re.compile(regex, re.DOTALL)
    for itt in search_url:
        # NOTE(review): the second .format() argument (cat) is unused — the
        # URL has only one placeholder
        x = get_html(
            'https://zooqle.torrentbay.to//search?q={0}+%2Blang%3Aen'.format(
                itt, cat),
            headers=base_header,
            timeout=10).content()
        regex_pre = '<tr (.+?)</tr>'  # dead reassignment, kept as-is
        m_pre = regex1.findall(x)
        for items in m_pre:
            match = regex2.findall(items)
            for links, size, seed, peer in match:
                if f_seeds:
                    # drop results below the configured minimum seeders
                    if int(Addon.getSetting('min_seed')) > int(seed):
                        continue
                    seed_t = 'S:%s>>,' % str(seed)
                size = size.replace(' ', " ")
                if stop_all == 1:
                    break
                try:
                    # strip GiB/MiB/GB/MB and commas, convert MB-ish to GB
                    o_size = size
                    size = float(
                        o_size.replace('GiB', '').replace('MiB', '').replace(
                            'GB', '').replace('MB', '').replace(",",
                                                                '').strip())
                    if 'MB' in o_size or 'MiB' in o_size:
                        size = size / 1000
                except:
                    size = 0
                # display name comes from the magnet's dn= parameter
                regex = 'dn=(.+?)&'
                nam = re.compile(regex).findall(links)[0]
                max_size = int(Addon.getSetting("size_limit"))
                if '.TS.' in nam:
                    # skip telesync-tagged releases — presumably cam rips
                    continue
                if int(size) < max_size:
                    if '4k' in nam:
                        res = '2160'
                    elif '2160' in nam:
                        res = '2160'
                    elif '1080' in nam:
                        res = '1080'
                    elif '720' in nam:
                        res = '720'
                    elif '480' in nam:
                        res = '480'
                    elif '360' in nam:
                        res = '360'
                    else:
                        res = 'HD'
                    try:
                        # py2-only: urllib.unquote_plus; on py3 this raises
                        # and the raw name is kept
                        nam = urllib.unquote_plus(nam).replace(
                            '[zooqle.com]', '').strip()
                    except:
                        pass
                    all_links.append((seed_t + nam, links, str(size), res))
    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """Scrape torrents from two Pirate Bay mirrors.

    Queries thepiratebay0.org (HD sub-category) and a thepiratebay.com
    search proxy (top-level category), de-duplicating results by link
    across both mirrors, and stores (title, magnet, size_gb, resolution)
    tuples in the module-global ``global_var``, which is returned.
    """
    global global_var, stop_all
    if tv_movie == 'movie':
        # NOTE: movie search is title-only; the year is not appended here
        search_url = [
            clean_name(original_title, 1).replace(' ', '%20') + '%20'
        ]
        sub_cat = '207'   # TPB sub-category used on the first mirror
        top_cat = '201'   # TPB category used on the proxy mirror
    else:
        if Addon.getSetting('debrid_select') == '0':
            # debrid mode 0 also searches season packs
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' +
                season_n + 'e' + episode_n,
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' +
                season_n,
                clean_name(original_title, 1).replace(' ', '%20') +
                '%20season%20' + season
            ]
        else:
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' +
                season_n + 'e' + episode_n
            ]
        sub_cat = '208'
        top_cat = '205'
    all_links = []
    all_l = []  # links already seen (cross-mirror de-duplication)
    row_re = re.compile(
        r'class="detLink" title=".+?">(.+?)<.+?a href="(.+?)".+?Size (.+?)\,'
        r'.+?<td align="right">(.+?)<.+?<td align="right">(.+?)<', re.DOTALL)
    max_size = int(Addon.getSetting("size_limit"))

    def _collect(page_html):
        """Parse one result page into all_links; False when the page is empty."""
        rows = row_re.findall(page_html)
        if len(rows) == 0:
            return False
        for title, link, size, seed, peer in rows:
            if stop_all == 1:
                break
            if link in all_l:
                continue
            all_l.append(link)
            size = size.replace(' ', ' ')
            size = size.replace('GiB', 'GB')
            size = size.replace('MiB', 'MB')
            res = 'HD'
            for marker, tag in (('4k', '2160'), ('2160', '2160'),
                                ('1080', '1080'), ('720', '720'),
                                ('480', '480'), ('360', '360')):
                if marker in title:
                    res = tag
                    break
            try:
                o_size = size
                if isinstance(o_size, bytes):
                    # BUG FIX: the original unconditionally called
                    # str.decode, which raises on py3 and forced every size
                    # to 0; decode only actual byte strings.
                    o_size = o_size.decode('utf8', 'ignore')
                size_gb = float(
                    o_size.replace('GB', '').replace('MB', '')
                    .replace(',', '').strip())
                if 'MB' in o_size:
                    size_gb = size_gb / 1000
            except Exception:
                size_gb = 0
            if size_gb < max_size:
                all_links.append((title, link, str(size_gb), res))
        return True

    # mirror 1: thepiratebay0.org, sub-category search
    for itt in search_url:
        for page in range(0, 7):
            if stop_all == 1:
                break
            x = get_html('https://thepiratebay0.org/search/%s/%s/99/%s' %
                         (itt, str(page), sub_cat),
                         headers=base_header,
                         timeout=10).content()
            if not _collect(x):
                break
    # mirror 2: thepiratebay.com proxy.  BUG FIX: the original interpolated
    # the whole search_url *list* into this URL, so the proxy query never
    # matched anything; iterate the individual queries like mirror 1.
    for itt in search_url:
        for page in range(0, 7):
            if stop_all == 1:
                break
            x = get_html(
                'https://www.thepiratebay.com/proxy/go.php?url=search/%s/%s/99/%s'
                % (itt, str(page), top_cat),
                headers=base_header,
                timeout=10).content()
            if not _collect(x):
                break
    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id): global global_var, stop_all all_links = [] if tv_movie == 'movie': search_string = clean_name(original_title, 1).replace( ' ', '+') + '+' + show_original_year else: search_string = clean_name(original_title, 1).replace( ' ', '+') + '+s%se%s' % (season_n, episode_n) ur = 'https://torrz.techpeg.in/torrent?q=rampage&cat=all&inc_wout_cat=1&exc_adult_res=0' headers = { 'x-timestamp': '1620256156', 'x-hash': 'HgyZDm%2Fby2Ec2oEjM2U7OwBIf07IZiqsfT4gATySmsQ%3D', 'x-app-version': '20', 'Connection': 'Keep-Alive', 'Accept-Encoding': 'utf-8', 'User-Agent': 'okhttp/4.2.2', 'X-NewRelic-ID': 'VwYEWVVXABAJVlhRAAIPVV0=' } if 1: y = get_html(ur, headers=headers, timeout=10).json() logging.warning(y) for results in y: if stop_all == 1: break nam = results['name'] o_size = results['size'] try: size = float( o_size.replace('GB', '').replace('MB', '').replace(",", '').strip()) if 'MB' in o_size: size = size / 1000 except Exception as e: size = 0 lk = results['magnet'] if '4k' in nam: res = '2160' elif '2160' in nam: res = '2160' elif '1080' in nam: res = '1080' elif '720' in nam: res = '720' elif '480' in nam: res = '480' elif '360' in nam: res = '360' else: res = 'HD' max_size = int(Addon.getSetting("size_limit")) if (size) < max_size: all_links.append((nam, lk, str(size), res)) global_var = all_links return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """Query torrentapi.org (pubapi_v2) by IMDB id plus a search string.

    Fetches a short-lived API token first, then runs one search per query
    variant, collecting (title, download-link, size_gb, resolution) tuples
    into the module-global ``global_var``, which is returned.
    """
    global global_var, stop_all
    all_links = []
    # IMDB id lookup cached via the addon's page cache
    imdb_id = cache.get(get_imdb, 999, tv_movie, id, table='pages')
    # every pubapi_v2 call requires a token obtained up-front
    x = get_html(
        "https://torrentapi.org/pubapi_v2.php?app_id=me&get_token=get_token",
        headers=base_header,
        timeout=10).json()
    token = x['token']
    if tv_movie == 'movie':
        search_url = [((clean_name(original_title, 1).replace(' ', '%20') +
                        '%20' + show_original_year)).lower()]
    elif tv_movie == 'tv':
        if Addon.getSetting('debrid_select') == '0':
            # debrid mode 0 also searches season packs
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20' +
                'S' + season_n,
                clean_name(original_title, 1).replace(' ', '%20') + '%20' +
                's' + season_n + 'e' + episode_n,
                clean_name(original_title, 1).replace(' ', '%20') + '%20' +
                'season ' + season
            ]
        else:
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20' +
                's' + season_n + 'e' + episode_n
            ]
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0',
        'Accept':
        'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    for itt in search_url:
        # small delay between calls — presumably to respect the API's
        # rate limit; TODO confirm the required interval
        time.sleep(0.4)
        ur = 'https://torrentapi.org/pubapi_v2.php?app_id=Torapi&mode=search&search_imdb=%s&token=%s&sort=seeders&ranked=0&limit=100&format=json_extended&search_string=%s' % (
            imdb_id, token, itt)
        y = get_html(ur, headers=headers, timeout=10).json()
        # error/rate-limit responses carry no 'torrent_results' key
        if 'torrent_results' not in y:
            continue
        for results in y['torrent_results']:
            if stop_all == 1:
                break
            nam = results['title']
            # API reports size in bytes; convert to GB
            size = (float(results['size']) / (1024 * 1024 * 1024))
            peer = results['leechers']
            seed = results['seeders']
            links = results['download']
            if '4k' in nam:
                res = '2160'
            elif '2160' in nam:
                res = '2160'
            elif '1080' in nam:
                res = '1080'
            elif '720' in nam:
                res = '720'
            elif '480' in nam:
                res = '480'
            elif '360' in nam:
                res = '360'
            else:
                res = 'HD'
            max_size = int(Addon.getSetting("size_limit"))
            if (size) < max_size:
                all_links.append((nam, links, str(size), res))
    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """Scrape direct streaming links from soap2day.to (Cloudflare-protected).

    Matches the search results against the exact title (and year), walks to
    the episode page for TV, then POSTs the page's hidden id to the site's
    info AJAX endpoint to obtain the stream URL.  Appends
    (title, 'Direct_link$$$<url>', size_gb, '720') to ``global_var``.
    """
    global global_var, stop_all
    all_links = []
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0',
        'Accept':
        'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'TE': 'Trailers',
    }
    # Cloudflare bypass returns (page_html, (cookies, headers))
    from resources.modules.general import cloudflare_request
    uri = 'https://soap2day.to/search/keyword/%s/' % clean_name(
        original_title, 1).replace(' ', '%20')
    a, cook = cloudflare_request(uri, headers=headers)
    # one block per search-result card
    res = '<div class="img-group">(.+?)></h5>'
    m_pre = re.compile(res, re.DOTALL).findall(a)
    for items in m_pre:
        # fields per card: year text, detail link, display name
        regex = 'style="padding\:3">(.+?)<.+?<h5><a href=(?:"|\')(.+?)(?:"|\')>(.+?)<'
        m = re.compile(regex, re.DOTALL).findall(items)
        for yr, lk, nm in m:
            yr = yr.replace('-', '')
            # exact title match (':' is percent-encoded on the site)
            if show_original_year in yr and clean_name(
                    original_title,
                    1).lower() == nm.replace(':', '%3a').lower():
                x = get_html('https://soap2day.to' + lk,
                             headers=cook[1],
                             cookies=cook[0]).content()
                y = x
                if tv_movie == 'tv':
                    # find the requested episode link inside the season block
                    regex = '<h4>Season%s .+?a href="(.+?)">%s.' % (season,
                                                                    episode)
                    mm = re.compile(regex).findall(x)[0]
                    mm = mm.split(
                        '<div class="col-sm-12 col-md-6 col-lg-4 myp1"><a href="'
                    )
                    llk = mm[len(mm) - 1]
                    y = get_html('https://soap2day.to' + llk,
                                 headers=cook[1],
                                 cookies=cook[0]).content()
                # hidden page id required by the info AJAX endpoint
                regex = 'type="hidden" id="hId" value="(.+?)"'
                did = re.compile(regex).findall(y)[0]
                cook[1]['Referer'] = 'https://soap2day.to' + lk
                data = {'pass': did}
                if tv_movie == 'tv':
                    added = 'GetEInfoAjax'
                else:
                    added = 'GetMInfoAjax'
                response = get_html('https://soap2day.to/home/index/' + added,
                                    headers=cook[1],
                                    cookies=cook[0],
                                    data=data).content()
                j_res = json.loads(response)
                f_link = j_res['val']
                if tv_movie == 'tv':
                    title = clean_name(original_title,
                                       1) + '.S%sE%s' % (season_n, episode_n)
                else:
                    title = clean_name(original_title, 1)
                # prefer the subtitle file name as the display title when
                # available (carries the release name)
                if 'subs' in j_res:
                    if (j_res['subs']) != None:
                        title = j_res['subs'][0]['source_file_name'].replace(
                            '.srt', '')
                # HEAD request to read the stream's Content-Length for size
                try_head = requests.head(f_link,
                                         headers=base_header,
                                         stream=True,
                                         verify=False,
                                         timeout=15)
                f_size2 = 0
                if 'Content-Length' in try_head.headers:
                    if int(try_head.headers['Content-Length']) > (1024 *
                                                                  1024):
                        f_size2 = str(
                            round(
                                float(try_head.headers['Content-Length']) /
                                (1024 * 1024 * 1024), 2))
                # resolution hard-coded to '720' — TODO confirm the site
                # really serves 720p only
                all_links.append(
                    (title, 'Direct_link$$$' + f_link, str(f_size2), '720'))
    global_var = all_links
    return global_var
def get_links(tv_movie,original_title,season_n,episode_n,season,episode,show_original_year,id): global global_var,stop_all if tv_movie=='movie': search_url=[clean_name(original_title,1).replace(' ','+')+'+'+show_original_year] type='movie' else: if Addon.getSetting('debrid_select')=='0' : search_url=[clean_name(original_title,1).replace(' ','+')+'+s'+season_n+'e'+episode_n,clean_name(original_title,1).replace(' ','+')+'+s'+season_n,clean_name(original_title,1).replace(' ','+')+'+season+'+season] else: search_url=[clean_name(original_title,1).replace(' ','+')+'+s'+season_n+'e'+episode_n] type='television' all_links=[] regex='<tr(.+?)</tr>' regex1=re.compile(regex,re.DOTALL) regex='href="magnet(.+?)".+?title="(.+?)".+?class="is-hidden-touch">(.+?)<.+?green.+?>(.+?)<.+?red.+?>(.+?)<' regex2=re.compile(regex,re.DOTALL) for itt in search_url: for page in range(1,4): if stop_all==1: break x=get_html('https://skytorrents.net/?search=%s&page=%s'%(itt,str(page)),headers=base_header).content() macth_pre=regex1.findall(x) for items in macth_pre: regex='href="magnet(.+?)".+?title="(.+?)".+?class="is-hidden-touch">(.+?)<.+?green.+?>(.+?)<.+?red.+?>(.+?)<' macth=regex2.findall(items) if stop_all==1: break for link,title,size,seed,peer in macth: if stop_all==1: break if '4k' in title: res='2160' elif '2160' in title: res='2160' elif '1080' in title: res='1080' elif '720' in title: res='720' elif '480' in title: res='480' elif '360' in title: res='360' else: res='HD' o_link=link try: o_size=size size=float(o_size.replace('GB','').replace('MB','').replace(",",'').strip()) if 'MB' in o_size: size=size/1000 except: size=0 max_size=int(Addon.getSetting("size_limit")) if size<max_size: all_links.append((title.replace('using magnet link','').strip(),'magnet'+link,str(size),res)) global_var=all_links return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id): global global_var, stop_all all_links = [] allow_debrid = True search_url = ('%s-s%se%s' % (clean_name(original_title, 1).replace( ' ', '-'), season_n, episode_n)).lower() x = get_html('https://showrss.info/browse', headers=base_header, timeout=10).content() regex_pre = 'option value="(.+?)">(.+?)<' m_pre = re.compile(regex_pre, re.DOTALL).findall(x) found = False for idd, title in m_pre: if title.lower() == clean_name( original_title, 1).lower() or title.lower() == (clean_name( original_title, 1).lower() + ' (%s)' % show_original_year): found = True break if found: x = get_html('https://showrss.info/browse/' + idd, headers=base_header, timeout=10).content() regex = '<li><a href="(.+?)".+?title="(.+?)"' m_pre = re.compile(regex, re.DOTALL).findall(x) for lk, ti in m_pre: seed = '0' peer = '0' if stop_all == 1: break size = '0' nam = ti if '1080' in nam: res = '1080' elif '720' in nam: res = '720' elif '480' in nam: res = '480' elif '360' in nam: res = '360' else: res = 'HD' if 's%se%s ' % (season_n, episode_n) not in ti.lower( ) and 's%se%s.' % (season_n, episode_n) not in ti.lower(): continue if 'upcoming' in lk: continue all_links.append((ti, lk, str(size), res)) global_var = all_links return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """Aggregate torrents from a remote multi-provider scraper service.

    Runs every query variant against each provider id understood by the
    remote endpoint and fills ``global_var`` with
    (seed-prefix + title, magnet, size_gb, resolution) tuples.
    """
    global global_var, stop_all
    if tv_movie == 'movie':
        search_url = [
            clean_name(original_title, 1).replace(' ', '%20') + '%20'
        ]
        s_type = 'Movies'  # NOTE(review): s_type/type/type2 unused below
        type = '207'
        type2 = '201'
    else:
        if Addon.getSetting('debrid_select') == '0':
            # debrid mode 0 also searches season packs
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' +
                season_n + 'e' + episode_n,
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' +
                season_n,
                clean_name(original_title, 1).replace(' ', '%20') +
                '%20season%20' + season
            ]
        else:
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' +
                season_n + 'e' + episode_n
            ]
        s_type = 'TV'
        type = '208'
        type2 = '205'
    seed = ''
    f_seeds = False
    use_debrid = Addon.getSetting('debrid_use') == 'true'
    # seeder filtering only applies to plain torrent playback (no debrid)
    if (Addon.getSetting('torrents') == 'true' and use_debrid == False):
        f_seeds = True
        seed = 'S: >>'
    all_links = []
    all_l = []  # NOTE(review): unused accumulator
    # provider ids understood by the remote scraper endpoint
    id_table = ['20', '4', '7', '24', '2', '32', '13']
    for idd in id_table:
        for itt in search_url:
            if stop_all == 1:
                break
            try:
                x = get_html(
                    'http://157.230.67.147/t_api/simplehtmldom_1_5/my_parsers/scraping/my_scraper3.php?query=%s&sort=0&category=0&page=0&adult=0&key=halyoa&concurrent=0&provider_ids[]=%s'
                    % (itt, idd),
                    headers=base_header,
                    timeout=10).json()
            except:
                # best-effort: skip providers that error or time out
                continue
            for items in x['results']:
                title = items['title']
                link = items['magnet']
                size = items['size']
                if f_seeds:
                    # drop results below the configured minimum seeders,
                    # otherwise prefix the title with the seed count
                    seed = items['seeds'].replace(",", "")
                    if int(Addon.getSetting('min_seed')) > int(seed):
                        continue
                    seed = 'S:%s>>,' % str(seed)
                if stop_all == 1:
                    break
                if '4k' in title:
                    res = '2160'
                elif '2160' in title:
                    res = '2160'
                elif '1080' in title:
                    res = '1080'
                elif '720' in title:
                    res = '720'
                elif '480' in title:
                    res = '480'
                elif '360' in title:
                    res = '360'
                else:
                    res = 'HD'
                o_link = link
                try:
                    # NOTE(review): str.decode is py2-only; on py3 this
                    # raises and every size becomes 0 — confirm runtime
                    o_size = size.decode('utf8', 'ignore')
                    size = float(
                        o_size.replace('GB', '').replace('MB', '').replace(
                            ",", '').strip())
                    if 'MB' in o_size:
                        size = float(
                            o_size.replace('GB', '').replace('MB',
                                                             '').replace(
                                                                 ",",
                                                                 '').strip()) / 1000
                except Exception as e:
                    size = 0
                max_size = int(Addon.getSetting("size_limit"))
                if size < max_size:
                    all_links.append((seed + title, link, str(size), res))
    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """Scrape magnet links from btdb.eu, skipping adult-looking titles.

    Walks up to three result pages per query variant and fills the
    module-global ``global_var`` with (title, magnet, size_gb, resolution)
    tuples, which is also returned.
    """
    global global_var, stop_all
    if tv_movie == 'movie':
        search_url = [
            clean_name(original_title, 1).replace(' ', '+') + '+' +
            show_original_year
        ]
        type = 'movie'  # NOTE(review): unused below
    else:
        if Addon.getSetting('debrid_select') == '0':
            # debrid mode 0 also searches season packs
            search_url = [
                clean_name(original_title, 1).replace(' ', '+') + '+s' +
                season_n + 'e' + episode_n,
                clean_name(original_title, 1).replace(' ', '+') + '+s' +
                season_n,
                clean_name(original_title, 1).replace(' ', '+') + '+season+' +
                season
            ]
        else:
            search_url = [
                clean_name(original_title, 1).replace(' ', '+') + '+s' +
                season_n + 'e' + episode_n
            ]
        type = 'television'  # NOTE(review): unused below
    all_links = []
    for itt in search_url:
        for page in range(1, 4):
            if stop_all == 1:
                break
            logging.warning('Start Sky')
            x = get_html('https://btdb.eu/search/%s/?sort=popular&page=%s' %
                         (itt, str(page)),
                         headers=base_header).content()
            logging.warning('got Sky')
            # one block per result item
            regex = '<li class="recent-item">(.+?)</i></a>'
            macth_pre = re.compile(regex, re.DOTALL).findall(x)
            for items in macth_pre:
                # fields: title, size text, seeders, leechers, magnet tail
                regex = 'title="(.+?)".+?Size.+?>(.+?)<.+?Seeders.+?>(.+?)<.+?Leechers.+?>(.+?)<.+?href="magnet(.+?)"'
                macth = re.compile(regex, re.DOTALL).findall(items)
                if stop_all == 1:
                    break
                for title, size, seed, peer, link in macth:
                    if stop_all == 1:
                        break
                    if '4k' in title:
                        res = '2160'
                    elif '2160' in title:
                        res = '2160'
                    elif '1080' in title:
                        res = '1080'
                    elif '720' in title:
                        res = '720'
                    elif '480' in title:
                        res = '480'
                    elif '360' in title:
                        res = '360'
                    else:
                        res = 'HD'
                    # crude adult-content keyword filter
                    if ' anal ' in title.lower(
                    ) or 'deepthroat' in title.lower(
                    ) or 'f**k' in title.lower() or 'p**n' in title.lower(
                    ) or 'sex' in title.lower() or 'xxx' in title.lower():
                        continue
                    o_link = link
                    try:
                        # "<n> GB"/"<n> MB" -> float GB; unparseable -> 0
                        o_size = size
                        size = float(
                            o_size.replace('GB', '').replace('MB',
                                                             '').replace(
                                                                 ",",
                                                                 '').strip())
                        if 'MB' in o_size:
                            size = size / 1000
                    except:
                        size = 0
                    max_size = int(Addon.getSetting("size_limit"))
                    if size < max_size:
                        all_links.append(
                            (title.replace('using magnet link', '').strip(),
                             'magnet' + link, str(size), res))
    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """Scrape torrent links from magnetdl.com.

    magnetdl needs a hidden anti-bot token ("m" field) and organises
    result pages under a first-letter path segment, so the function first
    probes the site to obtain both, then walks up to three result pages
    per query variant.  Rows whose category matches ``tv_movie`` are
    size-filtered, tagged with a resolution, and stored in the
    module-global ``global_var``, which is returned.
    """
    global global_var, stop_all
    all_links = []
    # fetch the hidden anti-bot token needed by the search endpoint
    x = get_html('http://www.magnetdl.com/',
                 headers=base_header,
                 timeout=10).content()
    token = re.compile('type="hidden" name="m" value="(.+?)"').findall(x)[0]
    if tv_movie == 'movie':
        search_url = [('%s-%s' % (clean_name(original_title, 1).replace(
            ' ', '-'), show_original_year)).lower()]
    else:
        if Addon.getSetting('debrid_select') == '0':
            # debrid mode 0 also searches season packs
            search_url = [
                ('%s-s%se%s' % (clean_name(original_title, 1).replace(
                    ' ', '-'), season_n, episode_n)).lower(),
                ('%s-s%s' % (clean_name(original_title, 1).replace(
                    ' ', '-'), season_n)).lower(),
                ('%s-season-%s' % (clean_name(original_title, 1).replace(
                    ' ', '-'), season)).lower()
            ]
        else:
            search_url = [
                ('%s-s%se%s' % (clean_name(original_title, 1).replace(
                    ' ', '-'), season_n, episode_n)).lower()
            ]
    # BUG FIX: the original interpolated the whole search_url *list* into
    # the probe URL (q=['...']); use the first query so the redirect yields
    # the real letter segment.  All query variants share the same first
    # letter, so one probe is enough.
    x = get_html('http://www.magnetdl.com/search/?q=%s&m=%s' %
                 (search_url[0], token),
                 headers=base_header).geturl()
    letter = re.compile('//www.magnetdl.com/(.+?)/').findall(x)[0]
    row_re = re.compile('<tr>(.+?)</tr>')
    field_re = re.compile(
        '<td class="m"><a href="(.+?)".+?a href.+?title="(.+?)".+?class=".+?">(.+?)</td><td>.+?</td><td>(.+?)</td><td class="s">(.+?)</td><td class="l">(.+?)<'
    )
    max_size = int(Addon.getSetting("size_limit"))
    for itt in search_url:
        for page in range(1, 4):
            x = get_html('http://www.magnetdl.com/%s/%s/se/desc/%s/' %
                         (letter, itt, str(page)),
                         headers=base_header,
                         timeout=10).content()
            for items in row_re.findall(x):
                if stop_all == 1:
                    break
                # fields: magnet, title, category, size, seeders, leechers
                for link, title, cat, size, seed, peer in field_re.findall(
                        items):
                    if stop_all == 1:
                        break
                    # keep only rows whose category matches the request
                    if cat.lower() == tv_movie.lower():
                        res = 'HD'
                        for marker, tag in (('4k', '2160'), ('2160', '2160'),
                                            ('1080', '1080'), ('720', '720'),
                                            ('480', '480'), ('360', '360')):
                            if marker in title:
                                res = tag
                                break
                        try:
                            # "<n> GB"/"<n> MB" -> float GB; else 0
                            o_size = size
                            size = float(
                                o_size.replace('GB', '').replace(
                                    'MB', '').replace(',', '').strip())
                            if 'MB' in o_size:
                                size = size / 1000
                        except Exception:
                            size = 0
                        if size < max_size:
                            all_links.append((title, link, str(size), res))
    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id): global global_var, stop_all, all_links all_links = [] if tv_movie == 'tv': url = domain_s + 'solarmovie.id/search/%s' % (original_title.replace( "Marvel's ", '').replace("%20", "+").replace(" ", "+") + '+season+' + season) else: url = domain_s + 'solarmovie.id/search/%s' % (original_title.replace( "%20", "+").replace(" ", "+") + '+' + show_original_year) headers = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'en-US,en;q=0.5', 'Cache-Control': 'no-cache', 'Connection': 'keep-alive', #'Host': 'solarmovie.id', 'Pragma': 'no-cache', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0', } html = get_html(url, headers=headers).content() regex = 'data-id="(.+?)".+?data-href="(.+?)".+?data-name="(.+?)"' match = re.compile(regex, re.DOTALL).findall(html) for id, link, name in match: logging.warning(name) check = False if tv_movie == 'tv': if 'Season ' + season in name: x = get_html('https://solarmovie.id' + link, headers=headers).content() regex = 'data-ep-id="(.+?)".+?href="(.+?)".+?title="(.+?)"' match_ep = re.compile(regex, re.DOTALL).findall(x) check = False for id_ep, link_ep, name_ep in match_ep: if 'Episode %s:' % episode_n in name_ep: id_s = id_ep check = True else: check = True if clean_name(original_title, 1).lower() in name.lower() and check == True: link = 'https://solarmovie.id' + link logging.warning(link) if tv_movie == 'movie': resolve_solaris(link, 'getmovie', '0', '0', original_title, id, '0') else: resolve_solaris(link, 'getEpisodeEmb', season, episode_n, original_title, id, id_s) return global_var