def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """Furk.net provider: list ready cached video files matching the title.

    Appends ``(title, url, size_gb, resolution)`` tuples to the shared
    ``global_var`` list (polled by the UI) and returns it. Honors the
    module-wide ``stop_all`` abort flag.
    """
    global global_var, stop_all
    # Provider can be switched off from the addon settings.
    if Addon.getSetting("provider.furk") == 'false':
        return []
    all_links = []
    search_name = _search_name(tv_movie, str(show_original_year), season, episode, clean_name(original_title, 1))
    files = Furk.search(search_name)
    # Browser-like headers; currently unused (see commented-out code below).
    headers = {
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Sec-Fetch-Site': 'none',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-User': '******',
        'Sec-Fetch-Dest': 'document',
        'Accept-Language': 'en-US,en;q=0.9',
    }
    for items in files:
        # Only files Furk has finished caching ('is_ready' == '1') are usable.
        if 'is_ready' in items:
            if items['is_ready'] == '1':
                if stop_all == 1:
                    break
                title = items['name']
                if items['type'] == 'video':
                    # Infer resolution label from markers in the release name.
                    if '4k' in title:
                        res = '2160'
                    elif '2160' in title:
                        res = '2160'
                    elif '1080' in title:
                        res = '1080'
                    elif '720' in title:
                        res = '720'
                    elif '480' in title:
                        res = '480'
                    elif '360' in title:
                        res = '360'
                    else:
                        res = 'HD'
                    lk = items['url_pls']
                    #head=urllib.urlencode(headers)
                    #lk=lk+"|"+head
                    size = float(int(items['size'])) / 1073741824  # bytes -> GiB
                    max_size = int(Addon.getSetting("size_limit"))
                    if size < max_size:
                        all_links.append((title, lk, str(size), res))
                        # Publish incrementally so a polling caller sees partial results.
                        global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """tse-api meta-search provider: fan out one worker thread per result.

    Each worker (``get_results``) fetches a getTorrent endpoint and appends
    into ``all_links`` / republishes ``global_var``. This function blocks,
    polling every 100 ms, until all workers finish or ``stop_all`` is set.
    """
    global global_var, stop_all
    import xbmc  # local import: only needed for the polling sleep below
    global_var = []
    if tv_movie == 'movie':
        search_url = [
            clean_name(original_title, 1).replace(' ', '%20') + '%20' + show_original_year
        ]
        s_type = 'movies'
    else:
        # When 'debrid_select' is '0', also queue season-level queries in
        # addition to the exact-episode query.
        if Addon.getSetting('debrid_select') == '0':
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' + season_n + 'e' + episode_n,
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' + season_n,
                clean_name(original_title, 1).replace(' ', '%20') + '%20season%20' + season
            ]
        else:
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' + season_n + 'e' + episode_n
            ]
        s_type = 'tv'
    all_links = []
    thread = []
    for itt in search_url:
        x = requests.get('http://tse-api.gianlu.xyz/search?q=' + (itt), headers=base_header, timeout=10).json()
        for items in x['result']:
            if stop_all == 1:
                break
            # NOTE(review): str.encode('base64') is Python-2-only — confirm
            # the target runtime before porting.
            ur = ('http://tse-api.gianlu.xyz/getTorrent?e=%s&url=%s' % (items['engine'], items['url'].encode('base64'))).replace(' ', '').replace('\n', '').replace('\r', '').replace('\t', '')
            thread.append(Thread(get_results, ur, all_links))
            thread[len(thread) - 1].setName('fill_table')
            thread[len(thread) - 1].start()
    # Busy-wait until every worker thread has finished (or a stop was requested).
    still_alive = True
    while (still_alive):
        still_alive = False
        for trd in thread:
            if trd.isAlive():
                still_alive = True
                if stop_all == 1:
                    break
        xbmc.sleep(100)
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """solidtorrents.net provider: page through the JSON search API.

    Collects ``(title, magnet, size_gb, resolution)`` tuples, publishing
    them through the shared ``global_var`` list as they are found.
    """
    global global_var, stop_all
    collected = []
    cleaned = clean_name(original_title, 1)
    if tv_movie == 'movie':
        query = ('%s %s' % (cleaned, show_original_year)).lower()
    else:
        query = ('%s s%se%s' % (cleaned, season_n, episode_n)).lower()
    gib = 1024 * 1024 * 1024
    # Resolution markers, checked in priority order.
    markers = (('4k', '2160'), ('2160', '2160'), ('1080', '1080'),
               ('720', '720'), ('480', '480'), ('360', '360'))
    for page_idx in range(4):
        page_params = (
            ('sort', 'seeders'),
            ('q', query),
            ('category', 'all'),
            ('skip', str(page_idx * 40)),  # 40 results per page
            ('fuv', 'yes'),
        )
        response = get_html('https://solidtorrents.net/api/v1/search',
                            headers=base_header, params=page_params, timeout=10).json()
        for entry in response['results']:
            if stop_all == 1:
                break
            magnet = entry['magnet']
            size_gb = float(entry['size']) / gib  # bytes -> GB
            name = entry['title']
            res = 'HD'
            for token, label in markers:
                if token in name:
                    res = label
                    break
            limit = int(Addon.getSetting("size_limit"))
            if size_gb < limit:
                collected.append((name, magnet, str(size_gb), res))
                # Publish incrementally for the polling caller.
                global_var = collected
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """api.magsearch.net provider.

    Fix: resolution detection previously used ``if '2160' ...`` followed by
    an *independent* ``if '1080' ... else: res='HD'`` chain, so any 2160p
    title that did not also contain a lower-res marker was re-labelled 'HD'.
    The checks are now a single if/elif chain.

    Appends ``(title, magnet, size_gb, resolution)`` tuples and publishes
    them through the shared ``global_var`` list.
    """
    global global_var, stop_all
    all_links = []
    if tv_movie == 'tv':
        search_url = ('{0}%20s{1}e{2}'.format(clean_name(original_title, 1).replace(' ', '%20'), season_n, episode_n)).lower()
    else:
        search_url = clean_name(original_title, 1).replace(' ', '%20') + '%20' + show_original_year
    x = get_html('https://api.magsearch.net/search?keywords=%s&itemn=200&start=0&filetype=video&sortby=hot&userid=99999999999999999999999999999999' % (search_url.replace(' ', '%20')), headers=base_header, timeout=10).json()
    max_size = int(Addon.getSetting("size_limit"))
    dev_num = 1024 * 1024 * 1024
    for items in x:
        title = items['name']
        lk = items['url']
        size = (float(items['length']) / dev_num)  # bytes -> GB
        if int(size) < max_size:
            # Infer resolution label from markers in the release name.
            if '2160' in title:
                res = '2160'
            elif '1080' in title:
                res = '1080'
            elif '720' in title:
                res = '720'
            elif '480' in title:
                res = '480'
            elif '360' in title:
                res = '360'
            else:
                res = 'HD'
            all_links.append((title, lk, str(size), res))
            # Publish incrementally for the polling caller.
            global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """eztv.re JSON API provider (TV episodes only).

    Fix: resolution detection used ``if '2160' ...`` followed by an
    independent ``if '1080' ... else: res='HD'`` chain, so 2160p titles were
    re-labelled 'HD'; now a single if/elif chain. Also removed the unused
    ``search_url`` / ``allow_debrid`` locals.
    """
    global global_var, stop_all
    all_links = []
    if tv_movie == 'movie':
        return []  # eztv indexes TV episodes only
    imdb_id = cache.get(get_imdb, 999, tv_movie, id, table='pages')
    for pages in range(0, 3):
        x = get_html(
            'https://eztv.re/api/get-torrents?imdb_id=%s&limit=100&page=%s'
            % (imdb_id.replace('tt', ''), str(pages)),
            headers=base_header, timeout=10).json()
        max_size = int(Addon.getSetting("size_limit"))
        dev_num = 1024 * 1024 * 1024
        for items in x['torrents']:
            title = items['filename']
            # Keep only the exact requested episode (API returns the whole show).
            if 's%se%s.' % (season_n, episode_n) not in title.lower():
                continue
            lk = items['magnet_url']
            size = (float(items['size_bytes']) / dev_num)  # bytes -> GB
            if int(size) < max_size:
                # Infer resolution label from markers in the release name.
                if '2160' in title:
                    res = '2160'
                elif '1080' in title:
                    res = '1080'
                elif '720' in title:
                    res = '720'
                elif '480' in title:
                    res = '480'
                elif '360' in title:
                    res = '360'
                else:
                    res = 'HD'
                all_links.append((title, lk, str(size), res))
                # Publish incrementally for the polling caller.
                global_var = all_links
    return global_var
def get_results(ur, all_links):
    """Worker: fetch one getTorrent endpoint and record the parsed link.

    Appends ``(title, magnet, size_gb, resolution)`` to the shared
    ``all_links`` list and republishes it through ``global_var``. Silently
    returns on unparseable JSON or when ``stop_all`` is raised.
    """
    global global_var, stop_all
    raw = requests.get(ur, headers=base_header, timeout=10).content
    try:
        payload = json.loads(raw)
    except:
        return
    if stop_all == 1:
        return
    name = payload['title']
    magnet = payload['magnet']
    size_gb = float(payload['size']) / (1024 * 1024 * 1024)  # bytes -> GB
    # Resolution markers, checked in priority order.
    res = 'HD'
    for marker, label in (('4k', '2160'), ('2160', '2160'), ('1080', '1080'),
                          ('720', '720'), ('480', '480'), ('360', '360')):
        if marker in name:
            res = label
            break
    limit = int(Addon.getSetting("size_limit"))
    if size_gb < limit:
        all_links.append((name, magnet, str(size_gb), res))
        global_var = all_links
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """zooqle (torrentbay mirror) HTML scraper.

    Parses the search results table for magnet links, size and seed counts;
    optionally filters by minimum seeders when torrents are played directly
    (no debrid). Publishes results via the shared ``global_var`` list.
    """
    global global_var, stop_all
    all_links = []
    if tv_movie == 'tv':
        cat = '205'
    elif tv_movie == 'movie':
        cat = '201'
    else:
        cat = '0'
    if tv_movie == 'movie':
        search_url = [('%s+%s' % (clean_name(original_title, 1).replace(' ', '+'), show_original_year)).lower()]
    elif tv_movie == 'tv':
        # When 'debrid_select' is '0', also search season-level queries.
        if Addon.getSetting('debrid_select') == '0':
            search_url = [
                ('%s+s%se%s' % (clean_name(original_title, 1).replace(' ', '+'), season_n, episode_n)).lower(),
                ('%s+s%s' % (clean_name(original_title, 1).replace(' ', '+'), season_n)).lower(),
                ('%s+season+%s' % (clean_name(original_title, 1).replace(' ', '+'), season)).lower()
            ]
        else:
            search_url = [
                ('%s+s%se%s' % (clean_name(original_title, 1).replace(' ', '+'), season_n, episode_n)).lower()
            ]
    regex_pre = '<tr (.+?)</tr>'
    regex1 = re.compile(regex_pre, re.DOTALL)
    seed_t = ''
    f_seeds = False
    use_debrid = Addon.getSetting('debrid_use') == 'true'
    # Only enforce/display seed counts when playing raw torrents (no debrid).
    if (Addon.getSetting('torrents') == 'true' and use_debrid == False):
        f_seeds = True
        seed_t = 'S: >>'
    regex = '<a title="Magnet link".+?href="(.+?)">.+?class="progress-bar prog-blue prog-l.+?>(.+?).+?title="Seeders: (.+?) \| Leechers: (.+?)"'
    regex2 = re.compile(regex, re.DOTALL)
    for itt in search_url:
        # NOTE(review): the format string has one placeholder but two args;
        # ``cat`` is silently ignored here.
        x = get_html('https://zooqle.torrentbay.to//search?q={0}+%2Blang%3Aen'.format(itt, cat), headers=base_header, timeout=10).content()
        regex_pre = '<tr (.+?)</tr>'
        m_pre = regex1.findall(x)
        for items in m_pre:
            match = regex2.findall(items)
            for links, size, seed, peer in match:
                if f_seeds:
                    if int(Addon.getSetting('min_seed')) > int(seed):
                        continue
                    seed_t = 'S:%s>>,' % str(seed)
                # Normalize whitespace in the size cell — presumably the
                # first argument was a non-breaking space; TODO confirm.
                size = size.replace(' ', " ")
                if stop_all == 1:
                    break
                try:
                    o_size = size
                    size = float(o_size.replace('GiB', '').replace('MiB', '').replace('GB', '').replace('MB', '').replace(",", '').strip())
                    if 'MB' in o_size or 'MiB' in o_size:
                        size = size / 1000  # MB -> GB
                except:
                    size = 0
                # Torrent display name comes from the magnet's dn= parameter.
                regex = 'dn=(.+?)&'
                nam = re.compile(regex).findall(links)[0]
                max_size = int(Addon.getSetting("size_limit"))
                if '.TS.' in nam:  # skip telesync releases
                    continue
                if int(size) < max_size:
                    # Infer resolution label from markers in the name.
                    if '4k' in nam:
                        res = '2160'
                    elif '2160' in nam:
                        res = '2160'
                    elif '1080' in nam:
                        res = '1080'
                    elif '720' in nam:
                        res = '720'
                    elif '480' in nam:
                        res = '480'
                    elif '360' in nam:
                        res = '360'
                    else:
                        res = 'HD'
                    try:
                        nam = urllib.unquote_plus(nam).replace('[zooqle.com]', '').strip()
                    except:
                        pass
                    all_links.append((seed_t + nam, links, str(size), res))
                    # Publish incrementally for the polling caller.
                    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """torrentdownloads RSS provider: probe mirrors, then search the feed.

    Builds magnet links from each RSS item's ``info_hash`` and publishes
    results via the shared ``global_var`` list.
    """
    global global_var, stop_all
    # quote_plus moved to urllib.parse in Python 3; pick whichever exists.
    try:
        que = urllib.quote_plus
    except:
        que = urllib.parse.quote_plus
    all_links = []
    domains = ['torrentdownloads.me', 'torrentdownloads.info']
    search = '{0}/rss.xml?new=1&type=search&cid={1}&search={2}'
    # Keep the first mirror whose front page contains the expected marker.
    for domain in domains:
        try:
            url = 'https://%s' % domain
            result = client.request(url, timeout='10')
            search_n = re.findall('alt="Torrent Downloads"', result, re.DOTALL)[0]
            if search_n:
                break
        except Exception:
            pass
    if tv_movie == 'tv':
        cid = '8'  # site category id used for TV searches
        # When 'debrid_select' is '0', also search season-level queries.
        if Addon.getSetting('debrid_select') == '0':
            search_sting = [
                clean_name(original_title, 1).replace(' ', '+') + '+s%se%s' % (season_n, episode_n),
                clean_name(original_title, 1).replace(' ', '+') + '+s%s' % (season_n),
                clean_name(original_title, 1).replace(' ', '+') + '+season+%s' % (season)
            ]
        else:
            search_sting = [
                clean_name(original_title, 1).replace(' ', '+') + '+s%se%s' % (season_n, episode_n)
            ]
    else:
        cid = '4'  # site category id used for movie searches
        search_sting = [
            clean_name(original_title, 1).replace(' ', '+') + '+%s' % (show_original_year)
        ]
    regex = '<item>(.+?)</item'
    data_regex = re.compile(regex, re.DOTALL)
    regex = '<title>(.+?)<.+?<size>(.+?)<.+?<info_hash>(.+?)<'
    data_regex2 = re.compile(regex, re.DOTALL)
    for itt in search_sting:
        url_f = search.format(url, cid, itt)
        x = get_html(url_f, headers=base_header).content()
        m_pre = data_regex.findall(x)
        count = 0
        for items in m_pre:
            count += 1
            m = data_regex2.findall(items)
            for title, size, hash in m:
                size = float(int(size)) / 1073741824  # bytes -> GiB
                # Build a magnet URI from the feed's info hash.
                lk = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, que(title))
                if stop_all == 1:
                    break
                # Infer resolution label from markers in the title.
                if '4k' in title:
                    res = '2160'
                elif '2160' in title:
                    res = '2160'
                elif '1080' in title:
                    res = '1080'
                elif '720' in title:
                    res = '720'
                elif '480' in title:
                    res = '480'
                elif '360' in title:
                    res = '360'
                else:
                    res = 'HD'
                max_size = int(Addon.getSetting("size_limit"))
                if size < max_size:
                    all_links.append((title, lk, str(size), res))
                    # Publish incrementally for the polling caller.
                    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """eztv.io HTML scraper (TV episodes only).

    Scrapes the episode table rows for title, link, size and seeds and
    publishes results via the shared ``global_var`` list.
    """
    global global_var, stop_all
    all_links = []
    if tv_movie == 'movie':
        return []  # eztv indexes TV episodes only
    allow_debrid = True
    search_url = ('%s-s%se%s' % (clean_name(original_title, 1).replace(' ', '-'), season_n, episode_n)).lower()
    x = requests.get('https://eztv.io/search/{0}'.format(search_url), headers=base_header, timeout=10).content
    regex_pre = '<tr name="hover"(.+?)</tr>'
    m_pre = re.compile(regex_pre, re.DOTALL).findall(x)
    for items in m_pre:
        regex = '<td class="forum_thread_post".+?class="epinfo">(.+?)<.+?a href="(.+?)".+?<td align="center" class="forum_thread_post">(.+?)<.+?<td align="center" class="forum_thread_post_end"><font color="green">(.+?)<'
        m2 = re.compile(regex, re.DOTALL).findall(items)
        if len(m2) == 0:
            # Fallback pattern for rows without the green seed-count markup.
            regex = '<td class="forum_thread_post".+?class="epinfo">(.+?)<.+?a href="(.+?)".+?<td align="center" class="forum_thread_post">(.+?)<.+?<td align="center" class="forum_thread_post_end">(.+?)<'
            m2 = re.compile(regex, re.DOTALL).findall(items)
        for title, links, size, seed in m2:
            seed = seed.replace('-', '0')  # '-' means no seeds reported
            peer = 0
            if stop_all == 1:
                break
            # Normalize whitespace in the size cell — presumably the first
            # argument was a non-breaking space; TODO confirm.
            size = size.replace(' ', " ")
            try:
                o_size = size
                size = float(o_size.replace('GiB', '').replace('MiB', '').replace('GB', '').replace('MB', '').replace(",", '').strip())
                if 'MB' in o_size or 'MiB' in o_size:
                    size = size / 1000  # MB -> GB
            except:
                size = 0
            regex = 'dn=(.+?)&'
            nam = title
            max_size = int(Addon.getSetting("size_limit"))
            if '.TS.' in nam:  # skip telesync releases
                continue
            if int(size) < max_size:
                # Infer resolution label from markers in the name.
                if '1080' in nam:
                    res = '1080'
                elif '720' in nam:
                    res = '720'
                elif '480' in nam:
                    res = '480'
                elif '360' in nam:
                    res = '360'
                else:
                    res = 'HD'
                if clean_name(original_title, 1).lower() not in title.lower():
                    continue
                if 0:  # allow_debrid: deliberately disabled magnet-resolution branch
                    x = requests.get('https://eztv.io' + links, headers=base_header, timeout=10).content
                    regex = '"magnet(.+?)"'
                    mm = re.compile(regex).findall(x)
                    if len(mm) == 0:
                        continue
                    lk = 'magnet' + mm[0]
                else:
                    lk = links
                all_links.append((title, lk, str(size), res))
                # Publish incrementally for the polling caller.
                global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """torrmonk.in JSON API provider.

    Queries two site search indexes (IDs '6' and '8') for each search
    string and publishes results via the shared ``global_var`` list.
    """
    global global_var, stop_all
    if tv_movie == 'movie':
        search_url = [clean_name(original_title, 1).replace(' ', '%20')]
        s_type = 'Movies'
        type = '207'
        type2 = '201'
    else:
        # When 'debrid_select' is '0', also search season-level queries.
        if Addon.getSetting('debrid_select') == '0':
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' + season_n + 'e' + episode_n,
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' + season_n,
                clean_name(original_title, 1).replace(' ', '%20') + '%20season%20' + season
            ]
        else:
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' + season_n + 'e' + episode_n
            ]
        s_type = 'TV'
        type = '208'
        type2 = '205'
    all_links = []
    all_l = []
    idd_table = ['6', '8']
    for idd in idd_table:
        for itt in search_url:
            if stop_all == 1:
                break
            x = get_html('https://torrmonk.in/api/torrent/search?ID=%s&query=%s' % (str(idd), itt), headers=base_header, timeout=10).json()
            # Defensive walk of the response envelope; skip on any gap.
            if 'errorCode' in x:
                if x['errorCode'] == 404:
                    continue
                continue
            if 'data' not in x:
                continue
            if len(x['data']) == 0:
                continue
            if 'result' not in x['data']:
                continue
            if len(x['data']['result']) == 0:
                continue
            for items in x['data']['result']:
                title = items['name']
                link = items['magnetLink']
                size = items['size']
                if stop_all == 1:
                    break
                # Infer resolution label from markers in the title.
                if '4k' in title:
                    res = '2160'
                elif '2160' in title:
                    res = '2160'
                elif '1080' in title:
                    res = '1080'
                elif '720' in title:
                    res = '720'
                elif '480' in title:
                    res = '480'
                elif '360' in title:
                    res = '360'
                else:
                    res = 'HD'
                o_link = link
                try:
                    # NOTE(review): str.decode is Python-2-only — confirm runtime.
                    o_size = size.decode('utf8', 'ignore')
                    size = float(o_size.replace('GiB', '').replace('MiB', '').replace(",", '').strip())
                    if 'MiB' in o_size:
                        size = size / 1000  # MiB -> GiB
                except Exception as e:
                    size = 0
                max_size = int(Addon.getSetting("size_limit"))
                if size < max_size:
                    all_links.append((title, link, str(size), res))
                    # Publish incrementally for the polling caller.
                    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """Popcorn-Time style api-fetch provider (movies and shows by IMDB id).

    Shows expose per-episode torrent maps keyed by quality ('1080p', …);
    movies expose a language-keyed torrent map. Results are published via
    the shared ``global_var`` list.
    """
    global global_var, stop_all
    all_links = []
    imdb_id = cache.get(get_imdb, 999, tv_movie, id, table='pages')
    if 1:
        x = get_html('http://movies-v2.api-fetch.sh/%s/%s' % (tv_movie.replace('tv', 'show'), imdb_id), headers=base_header).json()
        if 'episodes' in x:
            # TV branch: find the requested season/episode entry.
            for items in x['episodes']:
                if int(season) != int(items['season']) or int(episode) != int(items['episode']):
                    continue
                for items2 in items['torrents']:
                    if stop_all == 1:
                        break
                    link = items['torrents'][items2]['url']
                    if link == None:
                        continue
                    name = original_title
                    seed = items['torrents'][items2]['seeds']
                    peer = items['torrents'][items2]['peers']
                    size = 0  # API gives no per-episode size
                    if stop_all == 1:
                        break
                    # ``items2`` is the quality key; use it for the res label.
                    if '4k' in items2:
                        res = '2160'
                    elif '2160' in items2:
                        res = '2160'
                    elif '1080' in items2:
                        res = '1080'
                    elif '720' in items2:
                        res = '720'
                    elif '480' in items2:
                        res = '480'
                    elif '360' in items2:
                        res = '360'
                    else:
                        res = 'HD'
                    try:
                        size = (float(size) / (1024 * 1024 * 1024))  # bytes -> GB
                    except:
                        size = 0
                    max_size = int(Addon.getSetting("size_limit"))
                    if size < max_size:
                        all_links.append((name, link, str(size), res))
                        # Publish incrementally for the polling caller.
                        global_var = all_links
        else:
            # Movie branch: English torrent map keyed by quality.
            for items in x['torrents']['en']:
                if stop_all == 1:
                    break
                link = x['torrents']['en'][items]['url']
                if link == None:
                    continue
                name = original_title
                seed = x['torrents']['en'][items]['seed']
                peer = x['torrents']['en'][items]['peer']
                size = x['torrents']['en'][items]['size']
                if stop_all == 1:
                    break
                if '4k' in items:
                    res = '2160'
                elif '2160' in items:
                    res = '2160'
                elif '1080' in items:
                    res = '1080'
                elif '720' in items:
                    res = '720'
                elif '480' in items:
                    res = '480'
                elif '360' in items:
                    res = '360'
                else:
                    res = 'HD'
                try:
                    size = (float(size) / (1024 * 1024 * 1024))  # bytes -> GB
                except:
                    size = 0
                max_size = int(Addon.getSetting("size_limit"))
                if size < max_size:
                    all_links.append((name, link, str(size), res))
                    # Publish incrementally for the polling caller.
                    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """torrz.techpeg.in JSON provider.

    Fix: the request URL hard-coded the debug query ``q=rampage``, so the
    computed ``search_string`` was never used and every lookup returned the
    same results. The query now carries the actual search string.
    NOTE(review): the 'x-hash'/'x-timestamp' headers look like a request
    signature — confirm they are not bound to the query string.
    """
    global global_var, stop_all
    all_links = []
    if tv_movie == 'movie':
        search_string = clean_name(original_title, 1).replace(' ', '+') + '+' + show_original_year
    else:
        search_string = clean_name(original_title, 1).replace(' ', '+') + '+s%se%s' % (season_n, episode_n)
    ur = 'https://torrz.techpeg.in/torrent?q=%s&cat=all&inc_wout_cat=1&exc_adult_res=0' % search_string
    headers = {
        'x-timestamp': '1620256156',
        'x-hash': 'HgyZDm%2Fby2Ec2oEjM2U7OwBIf07IZiqsfT4gATySmsQ%3D',
        'x-app-version': '20',
        'Connection': 'Keep-Alive',
        'Accept-Encoding': 'utf-8',
        'User-Agent': 'okhttp/4.2.2',
        'X-NewRelic-ID': 'VwYEWVVXABAJVlhRAAIPVV0='
    }
    y = get_html(ur, headers=headers, timeout=10).json()
    logging.warning(y)
    for results in y:
        if stop_all == 1:
            break
        nam = results['name']
        o_size = results['size']
        try:
            size = float(o_size.replace('GB', '').replace('MB', '').replace(",", '').strip())
            if 'MB' in o_size:
                size = size / 1000  # MB -> GB
        except Exception as e:
            size = 0
        lk = results['magnet']
        # Infer resolution label from markers in the name.
        if '4k' in nam:
            res = '2160'
        elif '2160' in nam:
            res = '2160'
        elif '1080' in nam:
            res = '1080'
        elif '720' in nam:
            res = '720'
        elif '480' in nam:
            res = '480'
        elif '360' in nam:
            res = '360'
        else:
            res = 'HD'
        max_size = int(Addon.getSetting("size_limit"))
        if (size) < max_size:
            all_links.append((nam, lk, str(size), res))
            # Publish incrementally for the polling caller.
            global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """yts.mx API provider (movies only).

    Builds magnet links from each listed torrent's info hash; resolution
    comes straight from the API's 'quality' field. Publishes results via
    the shared ``global_var`` list.
    """
    global global_var, stop_all
    # quote_plus moved to urllib.parse in Python 3; pick whichever exists.
    try:
        que = urllib.quote_plus
    except:
        que = urllib.parse.quote_plus
    if tv_movie == 'movie':
        search_url = clean_name(original_title, 1).replace(' ', '%20') + '%20'
        s_type = 'Movies'
        type = '207'
        type2 = '201'
    else:
        return []  # YTS hosts movies only
    all_links = []
    all_l = []
    idd_table = ['3', '7']
    if 1:
        x = get_html('https://yts.mx/api/v2/list_movies.json?query_term=%s&page=1&limit=300&order_by=desc&sort_by=rating' % (search_url), headers=base_header, timeout=10, verify=False).json()
        for items in x['data']['movies']:
            title = items['slug'].replace('-', '.')
            for te in items['torrents']:
                hash = te['hash']
                # Build a magnet URI from the torrent's info hash.
                link = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, que(title))
                size = te['size']
                res = te['quality'].replace('p', '')  # '1080p' -> '1080'
                o_link = link
                try:
                    # NOTE(review): str.decode is Python-2-only — confirm runtime.
                    o_size = size.decode('utf8', 'ignore')
                    size = float(o_size.replace('GB', '').replace('MB', '').replace(",", '').strip())
                    if 'MB' in o_size:
                        size = size / 1000  # MB -> GB
                except Exception as e:
                    size = 0
                max_size = int(Addon.getSetting("size_limit"))
                if size < max_size:
                    all_links.append((title, link, str(size), res))
                    # Publish incrementally for the polling caller.
                    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """torrentapi.org (RARBG public API) provider.

    Fetches a session token, then searches by IMDB id plus a free-text
    string. Publishes results via the shared ``global_var`` list.
    """
    global global_var, stop_all
    all_links = []
    imdb_id = cache.get(get_imdb, 999, tv_movie, id, table='pages')
    # The API requires a short-lived token before searching.
    x = get_html("https://torrentapi.org/pubapi_v2.php?app_id=me&get_token=get_token", headers=base_header, timeout=10).json()
    token = x['token']
    if tv_movie == 'movie':
        search_url = [((clean_name(original_title, 1).replace(' ', '%20') + '%20' + show_original_year)).lower()]
    elif tv_movie == 'tv':
        # When 'debrid_select' is '0', also search season-level queries.
        if Addon.getSetting('debrid_select') == '0':
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20' + 'S' + season_n,
                clean_name(original_title, 1).replace(' ', '%20') + '%20' + 's' + season_n + 'e' + episode_n,
                clean_name(original_title, 1).replace(' ', '%20') + '%20' + 'season ' + season
            ]
        else:
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20' + 's' + season_n + 'e' + episode_n
            ]
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    for itt in search_url:
        # The API rate-limits aggressively; space the calls out.
        time.sleep(0.4)
        ur = 'https://torrentapi.org/pubapi_v2.php?app_id=Torapi&mode=search&search_imdb=%s&token=%s&sort=seeders&ranked=0&limit=100&format=json_extended&search_string=%s' % (imdb_id, token, itt)
        y = get_html(ur, headers=headers, timeout=10).json()
        if 'torrent_results' not in y:
            continue
        for results in y['torrent_results']:
            if stop_all == 1:
                break
            nam = results['title']
            size = (float(results['size']) / (1024 * 1024 * 1024))  # bytes -> GB
            peer = results['leechers']
            seed = results['seeders']
            links = results['download']
            # Infer resolution label from markers in the title.
            if '4k' in nam:
                res = '2160'
            elif '2160' in nam:
                res = '2160'
            elif '1080' in nam:
                res = '1080'
            elif '720' in nam:
                res = '720'
            elif '480' in nam:
                res = '480'
            elif '360' in nam:
                res = '360'
            else:
                res = 'HD'
            max_size = int(Addon.getSetting("size_limit"))
            if (size) < max_size:
                all_links.append((nam, links, str(size), res))
                # Publish incrementally for the polling caller.
                global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """extratorrent (unblockninja mirror) HTML scraper.

    Pages 1-3 of search results are scanned per query; the size is pulled
    from the row text separately from the magnet/title regex. Publishes
    results via the shared ``global_var`` list.
    """
    global global_var, stop_all
    all_links = []
    if tv_movie == 'movie':
        cat = 'movies'
        search_url = [('%s+%s' % (clean_name(original_title, 1).replace(' ', '+'), show_original_year)).lower()]
    else:
        cat = 'tv'
        # When 'debrid_select' is '0', also search season-level queries.
        if Addon.getSetting('debrid_select') == '0':
            search_url = [
                ('%s+s%se%s' % (clean_name(original_title, 1).replace(' ', '+'), season_n, episode_n)).lower(),
                ('%s+s%s' % (clean_name(original_title, 1).replace(' ', '+'), season_n)).lower(),
                ('%s-season-%s' % (clean_name(original_title, 1).replace(' ', '+'), season)).lower()
            ]
        else:
            search_url = [
                ('%s+s%se%s' % (clean_name(original_title, 1).replace(' ', '+'), season_n, episode_n)).lower()
            ]
    regex = '<tr class(.+?)</tr>'
    regex1 = re.compile(regex, re.DOTALL)
    regex = 'a href="magnet(.+?)".+?td class="tli".+?title="(.+?)".+?td class="sy">(.+?)<.+?td class="ly">(.+?)<'
    regex2 = re.compile(regex, re.DOTALL)
    for itt in search_url:
        for page in range(1, 4):
            x = get_html('https://extratorrent2.unblockninja.com/search/?search=%s&x=0&y=0&category=%s&page=%s' % (itt, cat, str(page)), headers=base_header).content()
            regex = '<tr class(.+?)</tr>'
            macth_pre = regex1.findall(x)
            for items in macth_pre:
                if stop_all == 1:
                    break
                regex = 'a href="magnet(.+?)".+?td class="tli".+?title="(.+?)".+?td class="sy">(.+?)<.+?td class="ly">(.+?)<'
                match = regex2.findall(items)
                # Size is extracted from the row text independently of the
                # magnet/title regex.
                try:
                    size = re.findall(
                        '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        items)[0]
                except:
                    size = 0
                for link, title, seed, peer in match:
                    if stop_all == 1:
                        break
                    title = title.replace('view ', '')
                    # Infer resolution label from markers in the title.
                    if '4k' in title:
                        res = '2160'
                    elif '2160' in title:
                        res = '2160'
                    elif '1080' in title:
                        res = '1080'
                    elif '720' in title:
                        res = '720'
                    elif '480' in title:
                        res = '480'
                    elif '360' in title:
                        res = '360'
                    else:
                        res = 'HD'
                    o_link = link
                    try:
                        o_size = size
                        size = float(o_size.replace('GB', '').replace('MB', '').replace(",", '').strip())
                        if 'MB' in o_size:
                            size = size / 1000  # MB -> GB
                    except:
                        size = 0
                    max_size = int(Addon.getSetting("size_limit"))
                    if size < max_size:
                        # The regex captured everything after 'magnet'; re-prefix it.
                        all_links.append((title, 'magnet' + o_link, str(size), res))
                        # Publish incrementally for the polling caller.
                        global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """eztv.show blog-style scraper (TV episodes).

    Searches the site, opens each matching post, and extracts the download
    link and file size from the post body. Publishes results via the shared
    ``global_var`` list.
    """
    global global_var, stop_all
    all_links = []
    search_url = ('%s s%se%s' % (clean_name(original_title, 1), season_n, episode_n)).lower()
    if 1:
        x = get_html('http://eztv.show/?s=' + search_url.replace(' ', '%20'), headers=base_header, timeout=10).content()
        logging.warning('http://eztv.show/?s=' + search_url)
        regex = 'h2 class="entry-title"><a href="(.+?)".+?>(.+?)<'
        macth_pre = re.compile(regex).findall(x)
        regex = 'strong>Download Torrent: </strong> <a href="(.+?)"'
        regex1 = re.compile(regex)
        for link, title in macth_pre:
            # Only open posts whose title matches both show and episode tag.
            if clean_name(original_title, 1).lower() in title.lower() and 's%se%s' % (season_n, episode_n) in title.lower():
                y = get_html(link, headers=base_header, timeout=10).content()
                regex = 'strong>Download Torrent: </strong> <a href="(.+?)"'
                lk = regex1.findall(y)
                if len(lk) > 0:
                    lk = lk[0]
                    if stop_all == 1:
                        break
                    # Infer resolution label from markers in the title.
                    if '4k' in title:
                        res = '2160'
                    elif '2160' in title:
                        res = '2160'
                    elif '1080' in title:
                        res = '1080'
                    elif '720' in title:
                        res = '720'
                    elif '480' in title:
                        res = '480'
                    elif '360' in title:
                        res = '360'
                    else:
                        res = 'HD'
                    o_link = link
                    try:
                        regex = 'Filesize:(.+?)<'
                        size = re.compile(regex).findall(y)[0]
                        o_size = size
                        size = float(o_size.replace('GB', '').replace('MB', '').replace(",", '').strip())
                        if 'MB' in o_size:
                            size = size / 1000  # MB -> GB
                    except:
                        size = 0
                    max_size = int(Addon.getSetting("size_limit"))
                    if size < max_size:
                        all_links.append((title, lk, str(size), res))
                        # Publish incrementally for the polling caller.
                        global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """glodls.to HTML scraper.

    Scans pages 0-2 of the seeders-sorted search results per query and
    publishes results via the shared ``global_var`` list.
    """
    global global_var, stop_all
    all_links = []
    if tv_movie == 'movie':
        search_url = [('%s+%s' % (clean_name(original_title, 1).replace(' ', '+'), show_original_year)).lower()]
        type_s = '1'  # site category id for movie searches
    else:
        type_s = '41'  # site category id for TV searches
        # When 'debrid_select' is '0', also search season-level queries.
        if Addon.getSetting('debrid_select') == '0':
            search_url = [
                ('%s+s%se%s' % (clean_name(original_title, 1).replace(' ', '+'), season_n, episode_n)).lower(),
                ('%s+s%s' % (clean_name(original_title, 1).replace(' ', '+'), season_n)).lower(),
                ('%s+season+%s' % (clean_name(original_title, 1).replace(' ', '+'), season)).lower()
            ]
        else:
            search_url = [
                ('%s+s%se%s' % (clean_name(original_title, 1).replace(' ', '+'), season_n, episode_n)).lower()
            ]
    div = 1024 * 1024 * 1024
    regex = 'class="showthecross".+?<b>(.+?)<.+?"magnet(.+?)".+?align=\'center\'>(.+?)</td>'
    regex1 = re.compile(regex, re.DOTALL)
    for itt in search_url:
        for page in range(0, 3):
            x = get_html('http://glodls.to/search_results.php?cat=%s&search=%s&sort=seeders&order=desc&page=%s' % (type_s, itt, page), headers=base_header, timeout=10).content()
            m = regex1.findall(x)
            for title, link, size in m:
                if stop_all == 1:
                    break
                # Infer resolution label from markers in the title.
                if '4k' in title:
                    res = '2160'
                elif '2160' in title:
                    res = '2160'
                elif '1080' in title:
                    res = '1080'
                elif '720' in title:
                    res = '720'
                elif '480' in title:
                    res = '480'
                elif '360' in title:
                    res = '360'
                else:
                    res = 'HD'
                try:
                    # NOTE(review): str.decode is Python-2-only — confirm runtime.
                    o_size = size.decode('utf8', 'ignore')
                    size = float(o_size.replace('GB', '').replace('MB', '').replace(",", '').strip())
                    if 'MB' in o_size:
                        size = size / 1000  # MB -> GB
                except Exception as e:
                    size = 0
                max_size = int(Addon.getSetting("size_limit"))
                if size < max_size:
                    # The regex captured everything after 'magnet'; re-prefix it.
                    all_links.append((title, 'magnet' + link, str(size), res))
                    # Publish incrementally for the polling caller.
                    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """torrentio (Stremio addon) provider.

    Fixes:
    * The TV branch requested ``/stream/movie/<id:season:episode>.json`` —
      the Stremio addon protocol serves episodes under ``/stream/series/``,
      so TV lookups could never resolve. Endpoint corrected.
    * ``if 'MB' in s`` tested the *list* of regex matches instead of the
      matched size string, so the MB->GB conversion never ran; now tests
      ``s[0]``.
    """
    global global_var, stop_all
    all_links = []
    imdb_id = cache.get(get_imdb, 999, tv_movie, id, table='pages')
    # quote_plus moved to urllib.parse in Python 3; pick whichever exists.
    try:
        que = urllib.quote_plus
    except:
        que = urllib.parse.quote_plus
    seed = ''
    f_seeds = False
    use_debrid = Addon.getSetting('debrid_use') == 'true'
    # Only enforce/display seed counts when playing raw torrents (no debrid).
    if (Addon.getSetting('torrents') == 'true' and use_debrid == False):
        f_seeds = True
        seed = 'S: >>'
    if tv_movie == 'movie':
        ur = 'https://torrentio.strem.fun/stream/movie/%s.json' % imdb_id
    elif tv_movie == 'tv':
        # Episodes use the /stream/series/ endpoint with an id of the form
        # tt0000000:season:episode (':' URL-encoded as %3A).
        ur = 'https://torrentio.strem.fun/stream/series/{0}%3A{1}%3A{2}.json'.format(imdb_id, season, episode)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    logging.warning(ur)
    y = get_html(ur, headers=headers, timeout=10).json()
    for results in y['streams']:
        nam = results['title']
        if f_seeds:
            # Seed count is embedded in the stream title between emoji markers.
            regex = '👤 (.+?) 💾'
            seeds = re.compile(regex).findall(nam)
            if len(seeds) > 0:
                seed = seeds[0]
            else:
                continue
            if int(Addon.getSetting('min_seed')) > int(seed):
                continue
            seed = 'S:%s>>,' % str(seed)
        if stop_all == 1:
            break
        # Size is likewise embedded between emoji markers.
        regex = '💾(.+?)⚙️'
        s = re.compile(regex).findall(nam)
        size = 0
        if len(s) > 0:
            size = float(s[0].replace('GB', '').replace('MB', '').replace(",", '').strip())
            if 'MB' in s[0]:
                size = size / 1000  # MB -> GB
        links = results['infoHash']
        try:
            lk = 'magnet:?xt=urn:btih:%s&dn=%s' % (links, que(nam))
        except:
            lk = 'magnet:?xt=urn:btih:%s&dn=%s' % (links, que(nam.encode('utf-8')))
        # Infer resolution label from markers in the name.
        if '4k' in nam:
            res = '2160'
        elif '2160' in nam:
            res = '2160'
        elif '1080' in nam:
            res = '1080'
        elif '720' in nam:
            res = '720'
        elif '480' in nam:
            res = '480'
        elif '360' in nam:
            res = '360'
        else:
            res = 'HD'
        max_size = int(Addon.getSetting("size_limit"))
        if (size) < max_size:
            all_links.append((seed + nam, lk, str(size), res))
            # Publish incrementally for the polling caller.
            global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """torrenthood.net HTML scraper.

    Movies: parses per-release forms on the detail page. TV: parses
    per-episode spans whose onClick holds the magnet, matching the episode
    id against the requested episode. Publishes results via the shared
    ``global_var`` list.
    """
    global global_var, stop_all
    all_links = []
    if tv_movie == 'movie':
        search_url = ('%s+%s' % (clean_name(original_title, 1).replace(' ', '+'), show_original_year)).lower()
        type = 'load'  # site search mode for movies
    else:
        search_url = '%s' % (clean_name(original_title, 1).replace(' ', '+')).lower()
        type = 'publ'  # site search mode for TV
    if 1:
        x = get_html('https://torrenthood.net/search/?q=%s&m=%s&t=0' % (search_url, type), headers=base_header, timeout=10).content()
        regex = '<tr>(.+?)</tr>'
        macth_pre = re.compile(regex, re.DOTALL).findall(x)
        regex = ' href="(.+?)"'
        regex1 = re.compile(regex)
        regex = '<div class="table-row-equal"(.+?)</form>'
        regex2 = re.compile(regex, re.DOTALL)
        regex = '<span class="info2">(.+?)<.+?Size: <b>(.+?)<.+?magnet:(.+?)\''
        regex3 = re.compile(regex, re.DOTALL)
        regex = '<div class="table-row-equal">(.+?)</form></div>'
        regex4 = re.compile(regex, re.DOTALL)
        regex = '<span class="info2" id="episode-(.+?)".+?onClick="(.+?)"'
        regex5 = re.compile(regex, re.DOTALL)
        regex = '<span id="blue">Full Season.+?span class="info4">(.+?)<'
        regex6 = re.compile(regex, re.DOTALL)
        for itm in macth_pre:
            regex = ' href="(.+?)"'
            ittm = regex1.findall(itm)[0]
            # The detail-page URL must contain the hyphenated title.
            if clean_name(original_title, 1).lower().replace(' ', '-') not in ittm:
                continue
            y = get_html(ittm, headers=base_header, timeout=10).content()
            if tv_movie == 'movie':
                regex = '<div class="table-row-equal"(.+?)</form>'
                m_p = regex2.findall(y)
                for mpp in m_p:
                    regex = '<span class="info2">(.+?)<.+?Size: <b>(.+?)<.+?magnet:(.+?)\''
                    m_pre = regex3.findall(mpp)
                    if len(m_pre) == 0:
                        continue
                    nm = m_pre[0][0]
                    title = nm
                    size = m_pre[0][1]
                    lk = 'magnet:' + m_pre[0][2]
                    if len(title) > 0:
                        # Infer resolution label from markers in the title.
                        if '4k' in title:
                            res = '2160'
                        elif '2160' in title:
                            res = '2160'
                        elif '1080' in title:
                            res = '1080'
                        elif '720' in title:
                            res = '720'
                        elif '480' in title:
                            res = '480'
                        elif '360' in title:
                            res = '360'
                        else:
                            res = 'HD'
                    else:
                        res = '720'
                    try:
                        o_size = size
                        size = float(o_size.replace('GB', '').replace('MB', '').replace(",", '').strip())
                        if 'MB' in o_size:
                            size = size / 1000  # MB -> GB
                    except:
                        size = 0
                    max_size = int(Addon.getSetting("size_limit"))
                    if size < max_size:
                        all_links.append((nm, lk, str(size), res))
                        # Publish incrementally for the polling caller.
                        global_var = all_links
            else:
                # TV: page URL must reference the requested season.
                if 'season-%s-' % season not in ittm and '-s%s' % season_n not in ittm:
                    continue
                regex = '<div class="table-row-equal">(.+?)</form></div>'
                m_pre = regex4.findall(y)
                for ittm2 in m_pre:
                    regex = '<span class="info2" id="episode-(.+?)".+?onClick="(.+?)"'
                    m_in = regex5.findall(ittm2)
                    for idd, lk in m_in:
                        if stop_all == 1:
                            break
                        if tv_movie == 'tv':
                            if episode != idd:
                                continue
                        if 'magnet' not in lk:
                            continue
                        regex = '<span id="blue">Full Season.+?span class="info4">(.+?)<'
                        title = regex6.findall(y)
                        # onClick holds "self.location='magnet:...'" — strip the JS.
                        lk = lk.replace("self.location='", '').replace("'", '')
                        nm = clean_name(original_title, 1)
                        if len(title) > 0:
                            # NOTE(review): ``title`` is a *list* here, so these
                            # substring checks are list-membership tests and
                            # effectively always fall through to 'HD' — likely
                            # should test title[0]; confirm before changing.
                            if '4k' in title:
                                res = '2160'
                            elif '2160' in title:
                                res = '2160'
                            elif '1080' in title:
                                res = '1080'
                            elif '720' in title:
                                res = '720'
                            elif '480' in title:
                                res = '480'
                            elif '360' in title:
                                res = '360'
                            else:
                                res = 'HD'
                        else:
                            res = '720'
                        regex = 'Download Size\: <b>(.+?) \(per episode\)'
                        size = re.compile(regex).findall(y)
                        if len(size) > 0:
                            try:
                                o_size = size[0]
                                size = float(o_size.replace('GB', '').replace('MB', '').replace(",", '').strip())
                                if 'MB' in o_size:
                                    size = size / 1000  # MB -> GB
                            except:
                                size = 0
                        else:
                            size = 0
                        max_size = int(Addon.getSetting("size_limit"))
                        if size < max_size:
                            all_links.append((nm, lk, str(size), res))
                            # Publish incrementally for the polling caller.
                            global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """Query the magno.netlify.app serverless search API for magnet links.

    Returns a list of ``(title, magnet_link, size_gb_str, resolution)`` tuples
    and mirrors it into the module-level ``global_var``.

    Cleanup vs. previous revision: removed dead locals (``s_type``, ``type``,
    ``type2``, ``all_l``, ``o_link``) and the duplicated magnet regex literal.
    """
    global global_var, stop_all
    if tv_movie == 'movie':
        search_url = [clean_name(original_title, 1).replace(' ', '%20') + '%20']
    else:
        if Addon.getSetting('debrid_select') == '0':
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' + season_n + 'e' + episode_n,
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' + season_n,
                clean_name(original_title, 1).replace(' ', '%20') + '%20season%20' + season,
            ]
        else:
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' + season_n + 'e' + episode_n
            ]
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0',
        'Accept': '*/*',
        'Accept-Language': 'en-US,en;q=0.5',
        'Referer': 'https://magno.netlify.app/',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }
    all_links = []
    magnet_re = re.compile("magnet(.+?)'")
    div_size = 1024 * 1024 * 1024
    max_size = int(Addon.getSetting("size_limit"))
    for itt in search_url:
        if stop_all == 1:
            break
        params = (('term', itt),)
        try:
            x = get_html('https://magno.netlify.app/.netlify/functions/x',
                         headers=headers, params=params).json()
        except Exception:
            continue
        logging.warning(x)
        for items in x:
            title = items['title']
            link = items['link']
            if 'magnet' not in str(link):
                continue
            try:
                # Follow any redirect chain to the final magnet URI.
                link = get_html(link, stream=True).url
            except Exception as e:
                # requests rejects the magnet: scheme; the URI is embedded in
                # the exception message — recover it from there.
                link = 'magnet' + magnet_re.findall(str(e))[0]
            size = items['size']
            if stop_all == 1:
                break
            if '4k' in title:
                res = '2160'
            elif '2160' in title:
                res = '2160'
            elif '1080' in title:
                res = '1080'
            elif '720' in title:
                res = '720'
            elif '480' in title:
                res = '480'
            elif '360' in title:
                res = '360'
            else:
                res = 'HD'
            size = (float(size) / (div_size))  # bytes -> GB
            if size < max_size:
                all_links.append((title, link, str(size), res))
                global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """Scrape The Pirate Bay mirrors (direct + proxy) for magnet links.

    Returns ``(title, magnet_link, size_gb_str, resolution)`` tuples and
    mirrors them into the module-level ``global_var``.

    Bug fixes vs. previous revision:
    * ``size.decode('utf8', 'ignore')`` raises on Python 3 ``str`` values, so
      every size silently parsed to 0 — decode only when bytes;
    * the proxy scan interpolated the whole ``search_url`` *list* into the
      URL; it now iterates the individual search terms like the direct scan.
    """
    global global_var, stop_all
    if tv_movie == 'movie':
        search_url = [clean_name(original_title, 1).replace(' ', '%20') + '%20']
        cat, cat2 = '207', '201'  # TPB category ids — presumably HD movies; confirm
    else:
        if Addon.getSetting('debrid_select') == '0':
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' + season_n + 'e' + episode_n,
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' + season_n,
                clean_name(original_title, 1).replace(' ', '%20') + '%20season%20' + season,
            ]
        else:
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' + season_n + 'e' + episode_n
            ]
        cat, cat2 = '208', '205'
    all_links = []
    seen = []
    row_re = re.compile(
        'class="detLink" title=".+?">(.+?)<.+?a href="(.+?)".+?Size (.+?)\\,'
        '.+?<td align="right">(.+?)<.+?<td align="right">(.+?)<', re.DOTALL)
    max_size = int(Addon.getSetting("size_limit"))

    def _parse_size(raw):
        # "1.4 GB" / "700 MB" -> GB as float; 0 when unparsable.
        try:
            if isinstance(raw, bytes):
                raw = raw.decode('utf8', 'ignore')
            val = float(raw.replace('GB', '').replace('MB', '')
                        .replace(",", '').strip())
            if 'MB' in raw:
                val = val / 1000
            return val
        except Exception:
            return 0

    def _detect_res(title):
        for token, res in (('4k', '2160'), ('2160', '2160'), ('1080', '1080'),
                           ('720', '720'), ('480', '480'), ('360', '360')):
            if token in title:
                return res
        return 'HD'

    def _scrape(url_tmpl, term, cat_id):
        # Walk result pages until empty, a stop request, or page 7.
        global global_var
        for page in range(0, 7):
            if stop_all == 1:
                return
            x = get_html(url_tmpl % (term, str(page), cat_id),
                         headers=base_header, timeout=10).content()
            rows = row_re.findall(x)
            if len(rows) == 0:
                return
            for title, link, size, seed, peer in rows:
                if link in seen:
                    continue
                seen.append(link)
                if stop_all == 1:
                    return
                size = size.replace(' ', ' ')  # NOTE(review): looks like a lost NBSP replace — confirm
                size = size.replace('GiB', 'GB')
                size = size.replace('MiB', 'MB')
                gb = _parse_size(size)
                if gb < max_size:
                    all_links.append((title, link, str(gb), _detect_res(title)))
                    global_var = all_links

    for itt in search_url:
        _scrape('https://thepiratebay0.org/search/%s/%s/99/%s', itt, cat)
    # BUG FIX: iterate terms here too instead of formatting the list itself.
    for itt in search_url:
        _scrape('https://www.thepiratebay.com/proxy/go.php?url=search/%s/%s/99/%s',
                itt, cat2)
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """POST bitlordsearch.com ``/get_list`` and collect magnet links.

    Returns ``(name, magnet, size_gb_str, resolution)`` tuples and mirrors
    them into the module-level ``global_var``.

    Bug fixes vs. previous revision:
    * the TV/debrid multi-query list was immediately overwritten by a single
      season-only query, discarding the episode-specific searches;
    * ``size`` could be referenced while unbound after the bare
      ``except: pass`` — unparsable entries are now skipped.
    """
    global global_var, stop_all
    all_links = []
    url = 'https://bitlordsearch.com'
    token, cookies = _get_token_and_cookies(url)
    headers = {
        'authority': 'bitlordsearch.com',
        'pragma': 'no-cache',
        'cache-control': 'no-cache',
        'accept': '*/*',
        'dnt': '1',
        'x-request-token': token,
        'x-requested-with': 'XMLHttpRequest',
        'user-agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.193 Safari/537.36',
        'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'origin': 'https://bitlordsearch.com',
        'sec-fetch-site': 'same-origin',
        'sec-fetch-mode': 'cors',
        'sec-fetch-dest': 'empty',
        'cookie': cookies,
    }
    if tv_movie == 'tv':
        if Addon.getSetting('debrid_select') == '0':
            query = [
                clean_name(original_title, 1) + '+s%s' % (season_n),
                clean_name(original_title, 1) + '+s%se%s' % (season_n, episode_n),
                clean_name(original_title, 1) + '+season+' + season,
            ]
        else:
            query = [
                clean_name(original_title, 1) + '+s%se%s' % (season_n, episode_n)
            ]
    else:
        query = [clean_name(original_title, 1) + ' ' + show_original_year]
    max_size = int(Addon.getSetting("size_limit"))
    for qrr in query:
        data = {
            'query': qrr,
            'offset': 0,
            'limit': 99,
            'filters[field]': 'seeds',
            'filters[sort]': 'desc',
            'filters[time]': 4,
            'filters[category]': 3 if tv_movie == 'movie' else 4,
            'filters[adult]': False,
            'filters[risky]': False
        }
        response = get_html("https://bitlordsearch.com" + "/get_list",
                            data=data, headers=headers, timeout=10).json()
        for el in response['content']:
            try:
                size = int(el['size'])
            except Exception:
                # BUG FIX: skip instead of falling through with `size` unbound.
                continue
            if size == 0:
                continue
            # Normalize the mixed units the aggregator returns (presumably
            # MB, with TPB entries in GB and some sources in KB — confirm).
            if size < 120 and el['source'] == 'thePirateBay':
                size = size * 1024
            elif size > 122880:
                size = int(size / 1024)
            elif size < 120:
                continue
            size = size / 1000
            title = el['name']
            if '4k' in title:
                res = '2160'
            elif '2160' in title:
                res = '2160'
            elif '1080' in title:
                res = '1080'
            elif '720' in title:
                res = '720'
            elif '480' in title:
                res = '480'
            elif '360' in title:
                res = '360'
            else:
                res = 'HD'
            if size < max_size:
                all_links.append((el['name'], el['magnet'], str(size), res))
                global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """Resolve direct hoster links via the movies.org release API.

    Looks up the IMDB id on TMDB, fetches the movies.org release list, then
    resolves gounlimited.to / clipwatching.com pages into direct URLs
    (prefixed ``Direct_link$$$``). Results also mirrored into ``global_var``.

    Bug fixes vs. previous revision:
    * ``'Direct_link$$$' + link`` raised NameError on the unpack-success path
      (``link`` was only bound in the fallback) — now prefixes ``link_in``;
    * ``size`` was unbound when the HEAD probe succeeded without a
      Content-Length header — now initialised to 0;
    * ``data`` was unbound in the fallback when ``unpack`` itself raised.
    """
    global global_var, stop_all
    all_links = []
    tmdbKey = '653bb8af90162bd98fc7ee32bcbbfb3d'
    if tv_movie == 'tv':
        url2 = 'http://api.themoviedb.org/3/tv/%s?api_key=%s&append_to_response=external_ids' % (
            id, tmdbKey)
    else:
        url2 = 'http://api.themoviedb.org/3/movie/%s?api_key=%s&append_to_response=external_ids' % (
            id, tmdbKey)
    try:
        imdb_id = get_html(url2, timeout=10).json()['external_ids']['imdb_id']
    except Exception:
        imdb_id = " "
    if tv_movie == 'tv':
        x = get_html("https://movies.org/api/releases/tv/%s/%s000%s" %
                     (imdb_id, season, episode_n),
                     headers=base_header, timeout=10).json()
    else:
        x = get_html("https://movies.org/api/releases/movie/" + (imdb_id),
                     headers=base_header, timeout=10).json()
    check_rd = False
    if Addon.getSetting('debrid_use') == 'true' and Addon.getSetting(
            'debrid_select') == '0':
        # NOTE(review): `rd` is never used below — kept in case the
        # constructor performs required auth side effects; confirm.
        from resources.modules import real_debrid
        rd = real_debrid.RealDebrid()
        check_rd = True
    max_size = int(Addon.getSetting("size_limit"))
    for results in x:
        nam = results['Release']
        link_pre = results['Link']
        if '4k' in nam:
            res = '2160'
        elif '2160' in nam:
            res = '2160'
        elif '1080' in nam:
            res = '1080'
        elif '720' in nam:
            res = '720'
        elif '480' in nam:
            res = '480'
        elif '360' in nam:
            res = '360'
        else:
            res = 'HD'
        if 'gounlimited.to' in link_pre:
            y = get_html(link_pre, headers=base_header, timeout=10).content()
            m2 = re.compile("<script type='text/javascript'>(.+?)</script>",
                            re.DOTALL).findall(y)[0]
            from resources.modules.jsunpack import unpack
            data = m2  # BUG FIX: fallback target when unpack() itself raises
            try:
                data = unpack(m2)
                data = re.findall(r'sources:(\[\{.+?\}\])', data, re.DOTALL)[0]
                try:
                    data = json.loads(data)
                except Exception:
                    # Keys are unquoted in the packed source — quote and retry.
                    data = data.replace('file', '"file"').replace('label', '"label"')
                    data = json.loads(data)
                data = [(i['file']) for i in data if data]
            except Exception:
                link = re.compile('src:"(.+?)"').findall(data)[0]
                data = [link]
            for link_in in data:
                # BUG FIX: prefix the current candidate URL, not unbound `link`.
                link = 'Direct_link$$$' + link_in
                size = 0  # BUG FIX: default when no Content-Length is present
                try:
                    try_head = get_html(link_in, headers=base_header,
                                        stream=True, verify=False, timeout=3)
                    if 'Content-Length' in try_head.headers:
                        if int(try_head.headers['Content-Length']) > (1024 * 1024):
                            size = (round(
                                float(try_head.headers['Content-Length']) /
                                (1024 * 1024 * 1024), 2))
                except Exception:
                    size = 0
                if (size) < max_size:
                    all_links.append((nam, link, str(size), res))
                    global_var = all_links
        else:
            if 'clipwatching.com' in link_pre:
                y = get_html(link_pre, headers=base_header, timeout=10).content()
                m2 = re.compile('src: "(.+?)"', re.DOTALL).findall(y)[0]
                all_links.append((nam, 'Direct_link$$$' + m2, str(0), res))
                global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """Search kick4ss.net and collect magnet links for the requested title.

    Yields ``(title, magnet_link, size_gb_str, resolution)`` tuples, also
    mirrored into the module-level ``global_var``.
    """
    global global_var, stop_all
    try:
        unque = urllib.unquote_plus
    except:
        unque = urllib.parse.unquote_plus
    cleaned = clean_name(original_title, 1).replace(' ', '%20')
    if tv_movie == 'movie':
        search_url = [cleaned + '%20' + show_original_year]
        s_type = 'movies'
    else:
        s_type = 'tv'
        if Addon.getSetting('debrid_select') == '0':
            search_url = [
                cleaned + '%20s' + season_n + 'e' + episode_n,
                cleaned + '%20s' + season_n,
                cleaned + '%20season%20' + season,
            ]
        else:
            search_url = [cleaned + '%20s' + season_n + 'e' + episode_n]
    all_links = []
    loop_re = re.compile('-- Start of Loop -->(.+?)-- End of Loop -->', re.DOTALL)
    row_re = re.compile('<tr (.+?)</tr>', re.DOTALL)
    item_re = re.compile(
        'title="Torrent magnet link" href="(.+?)".+?class="cellMainLink">(.+?)<'
        '.+?class="nobr center">(.+?)<.+?lass="green center">(.+?)<'
        '.+?class="red lasttd center">(.+?)<', re.DOTALL)
    res_table = (('4k', '2160'), ('2160', '2160'), ('1080', '1080'),
                 ('720', '720'), ('480', '480'), ('360', '360'))
    for term in search_url:
        page = get_html('https://kick4ss.net/usearch/{0}/'.format(term),
                        headers=base_header, timeout=10).content()
        loop_body = loop_re.findall(page)
        for row in row_re.findall(loop_body[0]):
            if stop_all == 1:
                break
            for link, title, size, seed, peer in item_re.findall(row):
                if stop_all == 1:
                    break
                seed = seed.replace('N/A', '0')
                peer = peer.replace('N/A', '0')
                res = 'HD'
                for token, quality in res_table:
                    if token in title:
                        res = quality
                        break
                raw_size = size
                try:
                    size = float(raw_size.replace('GB', '').replace('MB', '')
                                 .replace(",", '').strip())
                    if 'MB' in raw_size:
                        size = size / 1000
                except Exception:
                    size = 0
                if size < int(Addon.getSetting("size_limit")):
                    # The href wraps the magnet URI in a redirect query param.
                    f_link = unque(link.split('url=')[1])
                    all_links.append((title, f_link, str(size), res))
                    global_var = all_links
    return global_var
def get_links(tv_movie,original_title,season_n,episode_n,season,episode,show_original_year,id): global global_var,stop_all if tv_movie=='movie': search_url=[clean_name(original_title,1).replace(' ','+')+'+'+show_original_year] type='movie' else: if Addon.getSetting('debrid_select')=='0' : search_url=[clean_name(original_title,1).replace(' ','+')+'+s'+season_n+'e'+episode_n,clean_name(original_title,1).replace(' ','+')+'+s'+season_n,clean_name(original_title,1).replace(' ','+')+'+season+'+season] else: search_url=[clean_name(original_title,1).replace(' ','+')+'+s'+season_n+'e'+episode_n] type='television' all_links=[] regex='<tr(.+?)</tr>' regex1=re.compile(regex,re.DOTALL) regex='href="magnet(.+?)".+?title="(.+?)".+?class="is-hidden-touch">(.+?)<.+?green.+?>(.+?)<.+?red.+?>(.+?)<' regex2=re.compile(regex,re.DOTALL) for itt in search_url: for page in range(1,4): if stop_all==1: break x=get_html('https://skytorrents.net/?search=%s&page=%s'%(itt,str(page)),headers=base_header).content() macth_pre=regex1.findall(x) for items in macth_pre: regex='href="magnet(.+?)".+?title="(.+?)".+?class="is-hidden-touch">(.+?)<.+?green.+?>(.+?)<.+?red.+?>(.+?)<' macth=regex2.findall(items) if stop_all==1: break for link,title,size,seed,peer in macth: if stop_all==1: break if '4k' in title: res='2160' elif '2160' in title: res='2160' elif '1080' in title: res='1080' elif '720' in title: res='720' elif '480' in title: res='480' elif '360' in title: res='360' else: res='HD' o_link=link try: o_size=size size=float(o_size.replace('GB','').replace('MB','').replace(",",'').strip()) if 'MB' in o_size: size=size/1000 except: size=0 max_size=int(Addon.getSetting("size_limit")) if size<max_size: all_links.append((title.replace('using magnet link','').strip(),'magnet'+link,str(size),res)) global_var=all_links return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id): global global_var, stop_all all_links = [] try: que = urllib.quote_plus except: que = urllib.parse.quote_plus if tv_movie == 'movie': ur = 'https://solidtorrents.net/api/v1/search?q=%s&category=video&sort=seeders' % ( original_title + '%20' + show_original_year) elif tv_movie == 'tv': ur = 'https://solidtorrents.net/api/v1/search?q=%s&category=video&sort=seeders' % ( original_title + '%20' + 'S%sE%s' % (season_n, episode_n)) headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Accept-Language': 'en-US,en;q=0.5', 'Connection': 'keep-alive', 'Upgrade-Insecure-Requests': '1', } if 1: y = get_html(ur, headers=headers, timeout=10).json() div_value = 1024 * 1024 * 1024 for results in y['results']: if stop_all == 1: break nam = results['title'] size = float(results['size']) / div_value links = results['magnet'] if '4k' in nam: res = '2160' elif '2160' in nam: res = '2160' elif '1080' in nam: res = '1080' elif '720' in nam: res = '720' elif '480' in nam: res = '480' elif '360' in nam: res = '360' else: res = 'HD' max_size = int(Addon.getSetting("size_limit")) if (size) < max_size: all_links.append((nam, links, str(size), res)) global_var = all_links return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """Query the self-hosted torrent aggregator API across provider ids.

    Returns ``(title, magnet, size_gb_str, resolution)`` tuples (title may be
    prefixed with a seed count when seed filtering is enabled), also mirrored
    into the module-level ``global_var``.

    Bug fix vs. previous revision: ``size.decode('utf8', 'ignore')`` raises on
    Python 3 ``str`` values, so every size silently parsed to 0 — decode only
    when the value is ``bytes``; the MB branch also no longer re-parses the
    string redundantly.
    """
    global global_var, stop_all
    if tv_movie == 'movie':
        search_url = [clean_name(original_title, 1).replace(' ', '%20') + '%20']
    else:
        if Addon.getSetting('debrid_select') == '0':
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' + season_n + 'e' + episode_n,
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' + season_n,
                clean_name(original_title, 1).replace(' ', '%20') + '%20season%20' + season,
            ]
        else:
            search_url = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' + season_n + 'e' + episode_n
            ]
    seed = ''
    f_seeds = False
    use_debrid = Addon.getSetting('debrid_use') == 'true'
    if (Addon.getSetting('torrents') == 'true' and use_debrid == False):
        # Seed filtering is only applied for plain torrent playback.
        f_seeds = True
        seed = 'S: >>'
    all_links = []
    max_size = int(Addon.getSetting("size_limit"))
    id_table = ['20', '4', '7', '24', '2', '32', '13']  # provider ids on the aggregator
    for idd in id_table:
        for itt in search_url:
            if stop_all == 1:
                break
            try:
                x = get_html(
                    'http://157.230.67.147/t_api/simplehtmldom_1_5/my_parsers/scraping/my_scraper3.php?query=%s&sort=0&category=0&page=0&adult=0&key=halyoa&concurrent=0&provider_ids[]=%s'
                    % (itt, idd),
                    headers=base_header, timeout=10).json()
            except Exception:
                continue
            for items in x['results']:
                title = items['title']
                link = items['magnet']
                size = items['size']
                if f_seeds:
                    seed = items['seeds'].replace(",", "")
                    if int(Addon.getSetting('min_seed')) > int(seed):
                        continue
                    seed = 'S:%s>>,' % str(seed)
                if stop_all == 1:
                    break
                if '4k' in title:
                    res = '2160'
                elif '2160' in title:
                    res = '2160'
                elif '1080' in title:
                    res = '1080'
                elif '720' in title:
                    res = '720'
                elif '480' in title:
                    res = '480'
                elif '360' in title:
                    res = '360'
                else:
                    res = 'HD'
                try:
                    # BUG FIX: decode only bytes; py3 str has no .decode().
                    o_size = size.decode('utf8', 'ignore') if isinstance(size, bytes) else size
                    size = float(o_size.replace('GB', '').replace('MB', '')
                                 .replace(",", '').strip())
                    if 'MB' in o_size:
                        size = size / 1000
                except Exception:
                    size = 0
                if size < max_size:
                    all_links.append((seed + title, link, str(size), res))
                    global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """Scrape btdb.eu popular-sorted result pages (1-3) for magnet links.

    Adult-looking results are filtered out. Returns
    ``(title, magnet_link, size_gb_str, resolution)`` tuples, also mirrored
    into the module-level ``global_var``.
    """
    global global_var, stop_all
    plus_name = clean_name(original_title, 1).replace(' ', '+')
    if tv_movie == 'movie':
        search_url = [plus_name + '+' + show_original_year]
        media_kind = 'movie'
    else:
        if Addon.getSetting('debrid_select') == '0':
            search_url = [
                plus_name + '+s' + season_n + 'e' + episode_n,
                plus_name + '+s' + season_n,
                plus_name + '+season+' + season,
            ]
        else:
            search_url = [plus_name + '+s' + season_n + 'e' + episode_n]
        media_kind = 'television'
    all_links = []
    item_block_re = re.compile('<li class="recent-item">(.+?)</i></a>', re.DOTALL)
    fields_re = re.compile(
        'title="(.+?)".+?Size.+?>(.+?)<.+?Seeders.+?>(.+?)<'
        '.+?Leechers.+?>(.+?)<.+?href="magnet(.+?)"', re.DOTALL)
    res_table = (('4k', '2160'), ('2160', '2160'), ('1080', '1080'),
                 ('720', '720'), ('480', '480'), ('360', '360'))
    blocked_words = (' anal ', 'deepthroat', 'f**k', 'p**n', 'sex', 'xxx')
    for term in search_url:
        for page in range(1, 4):
            if stop_all == 1:
                break
            logging.warning('Start Sky')
            html = get_html('https://btdb.eu/search/%s/?sort=popular&page=%s' %
                            (term, str(page)), headers=base_header).content()
            logging.warning('got Sky')
            for block in item_block_re.findall(html):
                entries = fields_re.findall(block)
                if stop_all == 1:
                    break
                for title, size, seed, peer, link in entries:
                    if stop_all == 1:
                        break
                    res = 'HD'
                    for token, quality in res_table:
                        if token in title:
                            res = quality
                            break
                    lowered = title.lower()
                    if any(word in lowered for word in blocked_words):
                        continue
                    raw_size = size
                    try:
                        size = float(raw_size.replace('GB', '').replace('MB', '')
                                     .replace(",", '').strip())
                        if 'MB' in raw_size:
                            size = size / 1000
                    except:
                        size = 0
                    if size < int(Addon.getSetting("size_limit")):
                        all_links.append(
                            (title.replace('using magnet link', '').strip(),
                             'magnet' + link, str(size), res))
                        global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode,
              show_original_year, id):
    """Scrape magnetdl.com for magnet links of a movie or TV episode.

    The site requires a hidden ``m`` token from the homepage and shards
    listings by first letter, which is discovered from the search redirect.
    Returns ``(title, link, size_gb_str, resolution)`` tuples, also mirrored
    into the module-level ``global_var``.

    Bug fix vs. previous revision: the search request interpolated the whole
    ``search_url`` *list* into the ``q=`` parameter; the first search term is
    now used. Loop variable ``type`` (builtin shadow) renamed.
    """
    global global_var, stop_all
    x = get_html('http://www.magnetdl.com/', headers=base_header,
                 timeout=10).content()
    token = re.compile('type="hidden" name="m" value="(.+?)"').findall(x)[0]
    all_links = []
    if tv_movie == 'movie':
        search_url = [('%s-%s' % (clean_name(original_title, 1).replace(' ', '-'),
                                  show_original_year)).lower()]
    else:
        if Addon.getSetting('debrid_select') == '0':
            search_url = [
                ('%s-s%se%s' % (clean_name(original_title, 1).replace(' ', '-'),
                                season_n, episode_n)).lower(),
                ('%s-s%s' % (clean_name(original_title, 1).replace(' ', '-'),
                             season_n)).lower(),
                ('%s-season-%s' % (clean_name(original_title, 1).replace(' ', '-'),
                                   season)).lower(),
            ]
        else:
            search_url = [
                ('%s-s%se%s' % (clean_name(original_title, 1).replace(' ', '-'),
                                season_n, episode_n)).lower()
            ]
    # BUG FIX: use a concrete search term, not the list's repr.
    x = get_html('http://www.magnetdl.com/search/?q=%s&m=%s' %
                 (search_url[0], token), headers=base_header).geturl()
    # The redirect URL reveals the one-letter shard the listing lives under.
    letter = re.compile('//www.magnetdl.com/(.+?)/').findall(x)[0]
    row_re = re.compile('<tr>(.+?)</tr>')
    fields_re = re.compile(
        '<td class="m"><a href="(.+?)".+?a href.+?title="(.+?)".+?class=".+?">(.+?)</td>'
        '<td>.+?</td><td>(.+?)</td><td class="s">(.+?)</td><td class="l">(.+?)<')
    max_size = int(Addon.getSetting("size_limit"))
    for itt in search_url:
        for page in range(1, 4):
            x = get_html('http://www.magnetdl.com/%s/%s/se/desc/%s/' %
                         (letter, itt, str(page)),
                         headers=base_header, timeout=10).content()
            for items in row_re.findall(x):
                if stop_all == 1:
                    break
                rows = fields_re.findall(items)
                for link, title, category, size, seed, peer in rows:
                    if stop_all == 1:
                        break
                    # Only keep rows whose category matches the request kind.
                    if category.lower() == tv_movie.lower():
                        if '4k' in title:
                            res = '2160'
                        elif '2160' in title:
                            res = '2160'
                        elif '1080' in title:
                            res = '1080'
                        elif '720' in title:
                            res = '720'
                        elif '480' in title:
                            res = '480'
                        elif '360' in title:
                            res = '360'
                        else:
                            res = 'HD'
                        o_link = link
                        try:
                            o_size = size
                            size = float(o_size.replace('GB', '').replace('MB', '')
                                         .replace(",", '').strip())
                            if 'MB' in o_size:
                                size = size / 1000
                        except Exception:
                            size = 0
                        if size < max_size:
                            all_links.append((title, o_link, str(size), res))
                            global_var = all_links
    return global_var
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id): global global_var, stop_all search_url = clean_name(original_title, 1).replace(' ', '%20') all_links = [] all_l = [] if 1: x = requests.get( 'https://theaterplus.xyz/api/get_search_results1/?api_key=dda11uT8cBLzm6a1YvsiUWOEgrFowk95K2DM3tHAPRCX4ypGjN&search=' + (search_url), headers=base_header, timeout=10, verify=False).json() for items in x['posts']: title = str(items['category_name']).replace('-', '.') link = items['channel_url'] if tv_movie == 'movie': res_c = items['channel_name'] if show_original_year not in items['channel_name']: continue else: res_c = link if 'Season %s - Episode %s$$' % ( season, episode) not in items['channel_name'] + '$$': continue title = title + '.S%sE%s' % (season_n, episode_n) if '4k' in res_c: res = '2160' elif '2160' in res_c: res = '2160' elif '1080' in res_c: res = '1080' elif '720' in res_c: res = '720' elif '480' in res_c: res = '480' elif '360' in res_c: res = '360' else: res = 'HD' try_head = requests.get(link, headers=base_header, stream=True, verify=False, timeout=15) size = 0 if 'Content-Length' in try_head.headers: if int(try_head.headers['Content-Length']) > (1024 * 1024): size = float(try_head.headers['Content-Length']) / ( 1024 * 1024 * 1024) max_size = int(Addon.getSetting("size_limit")) if size < max_size: all_links.append((title, link, str(size), res)) global_var = all_links return global_var