def yandex(url):
    """Resolve a yadi.sk share link to a direct file URL (None on failure)."""
    try:
        cookie = client.request(url, output='cookie')
        html = client.request(url, cookie=cookie)
        html = re.sub(r'[^\x00-\x7F]+', ' ', html)  # strip non-ASCII so the regexes behave

        sk = re.findall('"sk"\s*:\s*"([^"]+)', html)[0]
        file_id = re.findall('"id"\s*:\s*"([^"]+)', html)[0]
        client_id = binascii.b2a_hex(os.urandom(16))  # random per-request client id

        payload = urllib.urlencode({
            'idClient': client_id,
            'version': '3.9.2',
            'sk': sk,
            '_model.0': 'do-get-resource-url',
            'id.0': file_id
        })

        data = client.request('https://yadi.sk/models/?_m=do-get-resource-url',
                              post=payload, cookie=cookie)
        return json.loads(data)['models'][0]['data']['file']
    except:
        return
def play_list(self, url):
    """Append {'title','url','image'} dicts for a YouTube playlist feed to self.list.

    Follows up to four nextPageToken continuation pages. Items whose
    thumbnail is the generic '/default.jpg' placeholder are skipped.
    Returns self.list.
    """
    try:
        result = client.request(url)
        result = json.loads(result)
        items = result['items']
    except:
        # Fix: previously `items` stayed unbound here and the loop below
        # raised an uncaught NameError; now fail soft with what we have.
        return self.list
    for i in range(1, 5):
        try:
            if not 'nextPageToken' in result: raise Exception()
            next = url + '&pageToken=' + result['nextPageToken']
            result = client.request(next)
            result = json.loads(result)
            items += result['items']
        except:
            pass
    for item in items:
        try:
            title = item['snippet']['title']
            title = title.encode('utf-8')
            url = item['id']
            url = url.encode('utf-8')
            image = item['snippet']['thumbnails']['high']['url']
            # '/default.jpg' marks a missing/placeholder thumbnail
            if '/default.jpg' in image: raise Exception()
            image = image.encode('utf-8')
            self.list.append({'title': title, 'url': url, 'image': image})
        except:
            pass
    return self.list
def sourcesResolve(self, item, info=False):
    """Resolve a scraped source `item` to a final playable URL.

    Runs the owning provider's resolve(), pushes each part of a
    (possibly stack://) URL through debrid or urlresolver, rejects rar
    archives, probes http(s) links before accepting them, and caches
    the result on self.url. Returns the URL or None; on failure with
    info=True the provider error is reported via self.errorForSources().
    """
    try:
        self.url = None
        u = url = item['url']
        d = item['debrid'] ; direct = item['direct']
        provider = item['provider']
        # provider name -> scraper module lookup
        call = [i[1] for i in self.sourceDict if i[0] == provider][0]
        u = url = call.resolve(url)
        if url == None or not '://' in str(url): raise Exception()
        # 'stack://a , b' -> 'a , b' so each part can be resolved alone
        url = url[8:] if url.startswith('stack:') else url
        urls = []
        for part in url.split(' , '):
            u = part
            if not d == '': part = debrid.resolver(part, d)
            elif not direct == True:
                hmf = urlresolver.HostedMediaFile(url=u, include_disabled=True, include_universal=False)
                if hmf.valid_url() == True: part = hmf.resolve()
            urls.append(part)
        url = 'stack://' + ' , '.join(urls) if len(urls) > 1 else urls[0]
        if url == False or url == None: raise Exception()
        # extension taken from the path, ignoring query/header suffixes
        ext = url.split('?')[0].split('&')[0].split('|')[0].rsplit('.')[-1].replace('/', '').lower()
        if ext == 'rar': raise Exception()
        # headers may ride after a '|' separator as a query string
        try: headers = url.rsplit('|', 1)[1]
        except: headers = ''
        headers = urllib.quote_plus(headers).replace('%3D', '=') if ' ' in headers else headers
        headers = dict(urlparse.parse_qsl(headers))
        if url.startswith('http') and '.m3u8' in url:
            # m3u8: confirm the playlist URL actually resolves
            result = client.request(url.split('|')[0], headers=headers, output='geturl', timeout='20')
            if result == None: raise Exception()
        elif url.startswith('http'):
            # plain http: confirm a chunk of the file can be fetched
            result = client.request(url.split('|')[0], headers=headers, output='chunk', timeout='20')
            if result == None: raise Exception()
        self.url = url
        return url
    except:
        if info == True: self.errorForSources()
        return
def request(url, check, close=True, redirect=True, error=False, proxy=None, post=None, headers=None, mobile=False, XHR=False, limit=None, referer=None, cookie=None, timeout='30'):
    """Fetch `url` and verify the marker string `check` appears in the
    response; on mismatch retry through up to three randomly chosen
    proxies. Returns the response (or None)."""
    try:
        r = client.request(url, close=close, redirect=redirect, proxy=proxy,
                           post=post, headers=headers, mobile=mobile, XHR=XHR,
                           limit=limit, referer=referer, cookie=cookie,
                           timeout=timeout)
        # a dead response is only retried when the caller asked for errors
        if r == None and error == False:
            return r
        if check in str(r) or str(r) == '':
            return r

        pool = sorted(get(), key=lambda x: random.random())
        pool = sorted(pool, key=lambda x: random.random())  # shuffled twice
        for p in pool[:3]:
            p += urllib.quote_plus(url)
            if not post == None:
                p += urllib.quote_plus('?%s' % post)
            r = client.request(p, close=close, redirect=redirect, proxy=proxy,
                               headers=headers, mobile=mobile, XHR=XHR,
                               limit=limit, referer=referer, cookie=cookie,
                               timeout='20')
            if check in str(r) or str(r) == '':
                return r
    except:
        pass
def searchShow(self, title, season, year):
    """Resolve a show+season to the site's relative URL.

    First queries '<title> Season <n>'; if that yields no result
    headings, falls back to querying '<title> <year>'. Returns a utf-8
    path string, or None when nothing matches.
    """
    try:
        title = cleantitle.normalize(title)
        t = cleantitle.get(title)
        url = urlparse.urljoin(
            self.base_link,
            self.search_link % urllib.quote_plus(
                cleantitle.query('%s Season %01d' % (title.replace('\'', '-'), int(season)))))
        r = client.request(url, timeout='10')
        r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
        if r:
            # (href, title) pairs from each result heading
            r = [(client.parseDOM(i, 'a', ret='href'),
                  client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            # split 'Name - Season N' into (name, N)
            r = [(i[0], re.findall('(.+?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            # keep rows whose cleaned name and season number both match
            r = [
                i[0] for i in r
                if t == cleantitle.get(i[1]) and int(season) == int(i[2])
            ][0]
        else:
            url = urlparse.urljoin(
                self.base_link,
                self.search_link % urllib.quote_plus(
                    cleantitle.query(
                        '%s %01d' % (title.replace('\'', '-'), int(year)))))
            r = client.request(url, timeout='10')
            r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
            r = [(client.parseDOM(i, 'a', ret='href'),
                  client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            # split 'Name (YYYY)' into (name, year)
            r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            r = [
                i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]
            ][0]
        # strip scheme/host, keep the site-relative path
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        return url.encode('utf-8')
    except:
        return
def kickinradio():
    """List the top-level internet-radio.com station categories."""
    try:
        page = client.request('https://www.internet-radio.com/stations/')
        entries = client.parseDOM(page, 'dt', attrs={'style': 'font-size: 22px;'})
    except:
        return
    for entry in entries:
        try:
            link = client.parseDOM(entry, 'a', ret="href")[0]
            link = client.replaceHTMLCodes(link)
            link = link.encode('utf-8')
            # link[10:-1] drops the '/stations/' prefix and trailing slash
            addCategoryItem('[UPPERCASE]' + link[10:-1] + '[/UPPERCASE]',
                            'kickinradiocats', radiocasticon, radiocastfanart,
                            url=link)
        except:
            pass
    endDirectory()
def odnoklassniki(url):
    """Resolve an ok.ru video page to a list of {'quality','url'} dicts
    (all HD tiers plus at most one SD stream), or None."""
    try:
        vid = re.compile('//.+?/.+?/([\w]+)').findall(url)[0]
        meta = client.request('http://ok.ru/dk?cmd=videoPlayerMetadata&mid=%s' % vid)
        meta = re.sub(r'[^\x00-\x7F]+', ' ', meta)
        videos = json.loads(meta)['videos']

        # HD tiers, best first — same order the original scanned them in
        hd = []
        for name, quality in (('ultra', '4K'), ('quad', '1440p'),
                              ('full', '1080p'), ('hd', 'HD')):
            try: hd += [{'quality': quality, 'url': i['url']} for i in videos if i['name'] == name]
            except: pass

        # SD fallbacks, scanned best-first; only the first survives below
        sd = []
        for name in ('sd', 'low', 'lowest', 'mobile'):
            try: sd += [{'quality': 'SD', 'url': i['url']} for i in videos if i['name'] == name]
            except: pass

        streams = hd + sd[:1]
        if not streams == []: return streams
    except:
        return
def vk(url):
    """Resolve a vk.com video URL to [{'quality','url'}, ...] or None,
    preferring 720 > 540 > 480, then 360, then 240."""
    try:
        query = urlparse.parse_qs(urlparse.urlparse(url).query)
        try:
            oid, video_id = query['oid'][0], query['id'][0]
        except:
            oid, video_id = re.findall('\/video(.*)_(.*)', url)[0]
        sources_url = 'http://vk.com/al_video.php?act=show_inline&al=1&video=%s_%s' % (oid, video_id)
        html = client.request(sources_url)
        html = re.sub(r'[^\x00-\x7F]+', ' ', html)

        # new-style m3u8 entries first, then the legacy "urlNNN" map
        found = re.findall('(\d+)x\d+.+?(http.+?\.m3u8.+?)n', html)
        if not found:
            found = re.findall('"url(\d+)"\s*:\s*"(.+?)"', html)
        sources = dict((height, link.replace('\\', '')) for height, link in found)

        streams = []
        try: streams += [{'quality': 'HD', 'url': sources['720']}]
        except: pass
        try: streams += [{'quality': 'SD', 'url': sources['540']}]
        except: pass
        try: streams += [{'quality': 'SD', 'url': sources['480']}]
        except: pass
        if not streams == []: return streams
        try: streams += [{'quality': 'SD', 'url': sources['360']}]
        except: pass
        if not streams == []: return streams
        try: streams += [{'quality': 'SD', 'url': sources['240']}]
        except: pass
        if not streams == []: return streams
    except:
        return
def kickinradiocats(url):
    """List stations inside an internet-radio.com category page.

    For each (heading, samp) pair: the display name is the first anchor
    text, else the last path segment of the anchor href, else the raw
    heading. The stream URL is the first http token in the samp cell
    with any .pls/.m3u playlist suffix stripped. Appends a NEXT item
    when the page has a pagination link.
    """
    try:
        url = urlparse.urljoin('https://www.internet-radio.com', url)
        result = client.request(url)
        result = client.parseDOM(result, 'div', attrs={'class': 'col-md-7'})
        a = client.parseDOM(result, 'h4', attrs={'class': 'text-danger'})
        b = client.parseDOM(result, 'samp')
        items = zip(a, b)
    except:
        return
    for item in items:
        try:
            # candidate name 1: anchor text inside the heading
            try: a = client.parseDOM(item[0], 'a')[0]
            except: a = ''
            # candidate name 2: last non-empty path segment of the href
            try:
                b = [
                    i for i in client.parseDOM(item[0], 'a', ret='href')
                    [0].split('/') if not i == ''
                ][-1]
            except: b = ''
            if not a == '': name = a
            elif not b == '': name = b
            else: name = item[0]
            name = name.capitalize()
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')
            url = item[1].split()
            url = [i for i in url if i.startswith('http')][0]
            # drop playlist wrappers (.pls / .m3u) so the bare stream URL remains
            url = re.sub('[0-9a-zA-Z]+\.pls(?:.+|)|\.m3u(?:.+|)', '', url)
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            addDirectoryItem(name, url, '0', radiocasticon, radiocastfanart)
        except:
            pass
    try:
        next = client.parseDOM(result, 'ul', attrs={'class': 'pagination'})
        next = client.parseDOM(next, 'li', attrs={'class': 'next'})
        next = client.parseDOM(next, 'a', ret='href')[0]
        next = client.replaceHTMLCodes(next)
        next = next.encode('utf-8')
        addCategoryItem('[B][I]NEXT[/I][/B]', 'kickinradiocats', radiocasticon, radiocastfanart, url=next)
    except:
        pass
    endDirectory()
def radioResolve(url):
    """Resolve a radio stream URL and start playback, tagging the item
    with metadata taken from the focused list item's label and icon."""
    url = radio1fmResolve(url)
    url = client.request(url, output='geturl')
    label = control.infoLabel('ListItem.Label')
    icon = control.infoLabel('ListItem.Icon')
    tags = {'title': label, 'album': label, 'artist': label, 'comment': label}
    item = control.item(path=url, iconImage=icon, thumbnailImage=icon)
    item.setArt({'icon': icon})
    item.setInfo(type='Music', infoLabels=tags)
    control.player.play(url, item)
def geturl(url):
    """Follow redirects for `url`; if the final host differs from the
    original, retry the lookup through up to three random proxies.
    Returns the resolved URL or None."""
    try:
        resolved = client.request(url, output='geturl')
        if resolved == None: return resolved
        # second-level domain of each side, e.g. 'example' from 'www.example.com'
        src_host = re.findall('([\w]+)[.][\w]+$', urlparse.urlparse(url.strip().lower()).netloc)[0]
        dst_host = re.findall('([\w]+)[.][\w]+$', urlparse.urlparse(resolved.strip().lower()).netloc)[0]
        if src_host == dst_host: return resolved
        pool = sorted(get(), key=lambda x: random.random())
        pool = sorted(pool, key=lambda x: random.random())  # shuffled twice
        for p in pool[:3]:
            p += urllib.quote_plus(url)
            resolved = client.request(p, output='geturl')
            if not resolved == None: return parse(resolved)
    except:
        pass
def googlepass(url):
    """Resolve a google video link, forcing the scheme the stream
    demands (https when 'requiressl=yes') and re-attaching any piped
    request headers. Returns the URL or None."""
    try:
        # headers may ride after a '|' separator as a query string
        try:
            headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
        except:
            headers = None
        link = url.split('|')[0].replace('\\', '')
        link = client.request(link, headers=headers, output='geturl')
        if 'requiressl=yes' in link:
            link = link.replace('http://', 'https://')
        else:
            link = link.replace('https://', 'http://')
        if headers:
            link += '|%s' % urllib.urlencode(headers)
        return link
    except:
        return
def _getAniList(url):
    """GET an AniList API path using a cached OAuth bearer token."""
    try:
        endpoint = urlparse.urljoin('https://anilist.co', '/api%s' % url)
        auth = '%s %s' % cache.get(_getToken, 1)  # (token_type, access_token)
        return client.request(endpoint, headers={
            'Authorization': auth,
            'Content-Type': 'application/x-www-form-urlencoded'
        })
    except:
        pass
def resolve(url):
    """Resolve a filmon.com vod/tv/channel page to an m3u8 stream URL.

    Normalises the incoming URL to a vod-info or channel page, reads the
    channel id, then picks the last (url, watch-timeout) pair after
    sorting among the m3u8 entries. Returns None on failure.
    """
    try:
        if '/vod/' in url:
            vid = re.compile('/(\d+)').findall(url)[-1]
            url = 'http://www.filmon.com/vod/info/%s' % vid
        elif '/tv/' in url:
            url = url.replace('/tv/', '/channel/')
        elif not '/channel/' in url:
            raise Exception()

        headers = {'X-Requested-With': 'XMLHttpRequest'}
        cookie = client.request(url, output='cookie')
        cid = json.loads(client.request(url, headers=headers))['id']

        headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': url}
        info_url = 'http://www.filmon.com/ajax/getChannelInfo?channel_id=%s' % cid
        info = json.loads(client.request(info_url, cookie=cookie, headers=headers))
        try: streams = info['streams']
        except: streams = info['data']['streams']
        streams = [v for k, v in streams.items()]

        candidates = [(s['url'], int(s['watch-timeout'])) for s in streams]
        candidates = [c for c in candidates if '.m3u8' in c[0]]
        candidates.sort()
        return candidates[-1][0]
    except:
        return
def _getToken():
    """Fetch an AniList client-credentials token.

    Returns (token_type, access_token).
    """
    payload = urllib.urlencode({
        'grant_type': 'client_credentials',
        'client_id': 'kodiexodus-7erse',
        'client_secret': 'XelwkDEccpHX2uO8NpqIjVf6zeg'
    })
    response = client.request(
        'https://anilist.co/api/auth/access_token',
        post=payload,
        headers={'Content-Type': 'application/x-www-form-urlencoded'},
        error=True)
    token = utils.json_loads_as_str(response)
    return token['token_type'], token['access_token']
def episodeAbsoluteNumber(self, thetvdb, season, episode):
    """Map a season/episode pair to TheTVDB's absolute episode number.

    On any failure the current `episode` value is returned unchanged.
    """
    try:
        api_key = 'MUQ2MkYyRjkwMDMwQzQ0NA=='.decode('base64')
        url = 'http://thetvdb.com/api/%s/series/%s/default/%01d/%01d' % (
            api_key, thetvdb, int(season), int(episode))
        page = client.request(url)
        # deliberately reassign `episode` so a failed int() still falls
        # back to whatever was parsed (matches historical behaviour)
        episode = client.parseDOM(page, 'absolute_number')[0]
        return int(episode)
    except:
        pass
    return episode
def radio1fmResolve(url):
    """Translate a rad.io broadcast URL into its raw stream URL; URLs on
    other hosts pass through unchanged. Returns None on failure."""
    try:
        host = (urlparse.urlparse(url).netloc).lower()
        if not host == 'rad.io':
            return url
        # the base64 UA decodes to 'XBMC Addon Radio'
        data = client.request(url, headers={
            'User-Agent': base64.b64decode('WEJNQyBBZGRvbiBSYWRpbw==')
        })
        return json.loads(data)['streamURL']
    except:
        return
def search(self, url):
    """Search YouTube for the 'q' query carried in `url` and return the
    first video that self.resolve() accepts, or None."""
    try:
        query = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
        search_url = self.search_link % urllib.quote_plus(query) + self.key_link
        response = client.request(search_url)
        video_ids = [entry['id']['videoId'] for entry in json.loads(response)['items']]
        for vid in video_ids:
            resolved = self.resolve(vid)
            if not resolved is None:
                return resolved
    except:
        return
def cldmailru(url):
    """Resolve a cloud.mail.ru public link to a tokenised download URL,
    or None on failure."""
    try:
        public_part = url.split('public')[-1]
        page = client.request(url)
        page = re.sub(r'[^\x00-\x7F]+', ' ', page)
        token = re.findall('"tokens"\s*:\s*{\s*"download"\s*:\s*"([^"]+)', page)[0]
        base = re.findall('"weblink_get"\s*:\s*\[.+?"url"\s*:\s*"([^"]+)', page)[0]
        return '%s%s?key=%s' % (base, public_part, token)
    except:
        return
def getTrakt(url, post=None):
    """Issue a Trakt v2 API request (GET, or POST when `post` is given
    — the payload is JSON-encoded). Returns the raw response or None."""
    try:
        endpoint = urlparse.urljoin('http://api-v2launch.trakt.tv', url)
        headers = {
            'Content-Type': 'application/json',
            'trakt-api-key': 'c029c80fd3d3a5284ee820ba1cf7f0221da8976b8ee5e6c4af714c22fc4f46fa',
            'trakt-api-version': '2'
        }
        body = None if post == None else json.dumps(post)
        return client.request(endpoint, post=body, headers=headers)
    except:
        pass
def getOriginalTitle(self, imdb):
    """Look up a title's original-language name on TMDb via IMDb id.

    Tries movie_results first, then tv_results (the TV name wins when
    both exist). Returns the normalised title or None.
    """
    try:
        # base64 hides the TMDb find-by-external-id endpoint + api key
        tmdb_link = base64.b64decode(
            'aHR0cHM6Ly9hcGkudGhlbW92aWVkYi5vcmcvMy9maW5kLyVzP2FwaV9rZXk9MTBiYWIxZWZmNzZkM2NlM2EyMzQ5ZWIxMDQ4OTRhNmEmbGFuZ3VhZ2U9ZW4tVVMmZXh0ZXJuYWxfc291cmNlPWltZGJfaWQ='
        )
        response = client.request(tmdb_link % imdb, timeout='10')
        try: title = json.loads(response)['movie_results'][0]['original_title']
        except: pass
        try: title = json.loads(response)['tv_results'][0]['original_name']
        except: pass
        return cleantitle.normalize(title)
    except:
        return
def radio1fm():
    """Build the directory of 1.FM stations scraped from rad.io."""
    try:
        search_url = 'http://rad.io/info/index/searchembeddedbroadcast?q=1%20FM&streamcontentformats=aac%2Cmp3&start=0&rows=1000'
        response = client.request(search_url, headers={
            'User-Agent': base64.b64decode('WEJNQyBBZGRvbiBSYWRpbw==')  # 'XBMC Addon Radio'
        })
        index = []
        stations = json.loads(response)
    except:
        return
    for station in stations:
        try:
            name = station['name']
            # only 1.FM branded stations belong in this directory
            if not name.lower().startswith('1.fm'): raise Exception()
            name = name.split('-', 1)[-1].strip().capitalize()
            name = name.encode('utf-8')
            link = station['id']
            link = 'http://rad.io/info/broadcast/getbroadcastembedded?broadcast=%s' % link
            link = link.encode('utf-8')
            index.append({
                'name': name,
                'url': link,
                'thumb': '0',
                'image': radio1fmicon,
                'fanart': radio1fmfanart
            })
        except:
            pass
    index = [i for x, i in enumerate(index) if i not in index[x + 1:]]  # de-duplicate
    index = sorted(index, key=lambda k: k['name'])
    for entry in index:
        addDirectoryItem(entry['name'], entry['url'], entry['thumb'], entry['image'], entry['fanart'])
    endDirectory()
def radio181fm():
    """Build the directory of 181.fm stations from the mp3links page.

    The page is a table where each stream-URL cell is preceded by its
    station-name cell; for each URL the nearest preceding non-URL cell
    is used as the display name.
    """
    try:
        url = 'http://www.181.fm/index.php?p=mp3links'
        result = client.request(url)
        index = []
        items = client.parseDOM(result, 'td', attrs={'id': 'rightlinks'})
    except:
        # Fix: previously this was `except: pass`, leaving `items` unbound
        # and crashing the loop below with a NameError; bail out like the
        # sibling radio directory builders do.
        return
    for item in items:
        try:
            if not item.startswith('http://'): raise Exception()
            # name = nearest preceding cell that is not itself a URL
            name = items[:items.index(item)]
            name = [i for i in name if not 'http://' in i][-1]
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')
            url = item.split('<')[0].replace('///', '://')
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            index.append({
                'name': name,
                'url': url,
                'thumb': '0',
                'image': radio181fmicon,
                'fanart': radio181fmfanart
            })
        except:
            pass
    index = [i for x, i in enumerate(index) if i not in index[x + 1:]]  # de-duplicate
    index = sorted(index, key=lambda k: k['name'])
    for i in index:
        addDirectoryItem(i['name'], i['url'], i['thumb'], i['image'], i['fanart'])
    endDirectory()
def resolve(self, url):
    """Turn a YouTube URL or bare id into a plugin.video.youtube play
    URL, returning None when the watch page shows an unavailability
    notice or alert."""
    try:
        vid = url.split('?v=')[-1].split('/')[-1].split('?')[0].split('&')[0]
        page = client.request('http://www.youtube.com/watch?v=%s' % vid)
        message = ''.join(client.parseDOM(page, 'div', attrs={'id': 'unavailable-submessage'}))
        alert = client.parseDOM(page, 'div', attrs={'id': 'watch7-notification-area'})
        if len(alert) > 0: raise Exception()
        # any alphabetic text in the submessage means the video is blocked
        if re.search('[a-zA-Z]', message): raise Exception()
        return 'plugin://plugin.video.youtube/play/?video_id=%s' % vid
    except:
        return
def searchMovie(self, title, year):
    """Resolve a movie title+year to the site's relative URL.

    Returns a utf-8 path string, or None when no result heading matches
    the cleaned title and year exactly.
    """
    try:
        title = cleantitle.normalize(title)
        url = urlparse.urljoin(
            self.base_link,
            self.search_link % (cleantitle.geturl(title.replace('\'', '-'))))
        r = client.request(url, timeout='10')
        t = cleantitle.get(title)
        r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
        # (href, title) pairs from each result heading
        r = [(client.parseDOM(i, 'a', ret='href'),
              client.parseDOM(i, 'a', ret='title')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        # split 'Name (YYYY)' into (name, year)
        r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
        r = [
            i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]
        ][0]
        # strip scheme/host, keep the site-relative path
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        return url.encode('utf-8')
    except:
        return
def google(url):
    """Resolve google-hosted video links to [{'quality','url'}, ...].

    Handles four hosts (docs/drive, photos, picasaweb, plus), each with
    its own page-scraping branch, then keeps at most one stream per
    quality in 1080p > HD > SD order. Request headers (plus any
    Set-Cookie received) are appended to each URL after a '|'
    separator. Returns None when nothing is found.
    """
    try:
        netloc = urlparse.urlparse(url.strip().lower()).netloc
        netloc = netloc.split('.google')[0]
        if netloc == 'docs' or netloc == 'drive':
            # normalise preview links onto the docs host
            url = url.split('/preview', 1)[0]
            url = url.replace('drive.google.com', 'docs.google.com')
        headers = {'User-Agent': client.agent()}
        result = client.request(url, output='extended', headers=headers)
        try: headers['Cookie'] = result[2]['Set-Cookie']
        except: pass
        result = result[0]
        if netloc == 'docs' or netloc == 'drive':
            # fmt_stream_map: comma-separated 'itag|url' entries
            result = re.compile('"fmt_stream_map",(".+?")').findall(result)[0]
            result = json.loads(result)
            result = [i.split('|')[-1] for i in result.split(',')]
            result = sum([googletag(i) for i in result], [])
        elif netloc == 'photos':
            result = result.replace('\r', '').replace('\n', '').replace('\t', '')
            result = re.compile('"\d*/\d*x\d*.+?","(.+?)"').findall(result)[0]
            result = result.replace('\\u003d', '=').replace('\\u0026', '&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib.unquote(i) for i in result]
            result = [googletag(i)[0] for i in result]
        elif netloc == 'picasaweb':
            id = re.compile('#(\d*)').findall(url)[0]
            result = re.search('feedPreload:\s*(.*}]}})},', result, re.DOTALL).group(1)
            result = json.loads(result)['feed']['entry']
            if len(result) > 1:
                result = [i for i in result if str(id) in i['link'][0]['href']][0]
            elif len(result) == 1:
                result = result[0]
            result = result['media']['content']
            result = [i['url'] for i in result if 'video' in i['type']]
            result = sum([googletag(i) for i in result], [])
        elif netloc == 'plus':
            id = (urlparse.urlparse(url).path).split('/')[-1]
            result = result.replace('\r', '').replace('\n', '').replace('\t', '')
            result = result.split('"%s"' % id)[-1].split(']]')[0]
            result = result.replace('\\u003d', '=').replace('\\u0026', '&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib.unquote(i) for i in result]
            result = [googletag(i)[0] for i in result]
        # keep the first hit of each quality, best first
        url = []
        try: url += [[i for i in result if i['quality'] == '1080p'][0]]
        except: pass
        try: url += [[i for i in result if i['quality'] == 'HD'][0]]
        except: pass
        try: url += [[i for i in result if i['quality'] == 'SD'][0]]
        except: pass
        for i in url: i.update({'url': i['url'] + '|%s' % urllib.urlencode(headers)})
        if url == []: return
        return url
    except:
        return
def video_list(self, cid, url, pagination):
    """Append video entries for a YouTube feed to self.list and return it.

    When `pagination` is False, up to four nextPageToken continuation
    pages are merged in; when True, only the first page is read and a
    'next' token URL is attached to each item. Durations are fetched in
    50-id batches via worker threads and parsed from the ISO-8601
    PT#H#M#S form into seconds.
    """
    try:
        result = client.request(url)
        result = json.loads(result)
        items = result['items']
    except:
        # Fix: previously `items` stayed unbound on a failed request and
        # the loops below raised an uncaught NameError; fail soft instead.
        return self.list
    for i in range(1, 5):
        try:
            if pagination == True: raise Exception()
            if not 'nextPageToken' in result: raise Exception()
            page = url + '&pageToken=' + result['nextPageToken']
            result = client.request(page)
            result = json.loads(result)
            items += result['items']
        except:
            pass
    try:
        if pagination == False: raise Exception()
        next = cid + '&pageToken=' + result['nextPageToken']
    except:
        next = ''
    for item in items:
        try:
            title = item['snippet']['title']
            title = title.encode('utf-8')
            try: url = item['snippet']['resourceId']['videoId']
            except: url = item['id']['videoId']
            url = url.encode('utf-8')
            image = item['snippet']['thumbnails']['high']['url']
            # '/default.jpg' marks a missing/placeholder thumbnail
            if '/default.jpg' in image: raise Exception()
            image = image.encode('utf-8')
            append = {'title': title, 'url': url, 'image': image}
            if not next == '': append['next'] = next
            self.list.append(append)
        except:
            pass
    try:
        # batch ids 50 at a time and fetch contentDetails concurrently
        u = [
            range(0, len(self.list))[i:i + 50]
            for i in range(len(range(0, len(self.list))))[::50]
        ]
        u = [','.join([self.list[x]['url'] for x in i]) for i in u]
        u = [self.content_link % i + self.key_link for i in u]
        threads = []
        for i in range(0, len(u)):
            threads.append(workers.Thread(self.thread, u[i], i))
            self.data.append('')
        [i.start() for i in threads]
        [i.join() for i in threads]
        items = []
        for i in self.data: items += json.loads(i)['items']
    except:
        pass
    for item in range(0, len(self.list)):
        try:
            vid = self.list[item]['url']
            self.list[item]['url'] = self.play_link % vid
            d = [(i['id'], i['contentDetails']) for i in items]
            d = [i for i in d if i[0] == vid]
            d = d[0][1]['duration']
            # ISO-8601 duration (PT#H#M#S) -> total seconds
            duration = 0
            try: duration += 60 * 60 * int(re.findall('(\d*)H', d)[0])
            except: pass
            try: duration += 60 * int(re.findall('(\d*)M', d)[0])
            except: pass
            try: duration += int(re.findall('(\d*)S', d)[0])
            except: pass
            duration = str(duration)
            self.list[item]['duration'] = duration
        except:
            pass
    return self.list
def sources(self, url, hostDict, hostprDict):
    """Collect playable sources for a movie/episode page.

    `url` is a query string holding the page path and an episode number
    (0 for movies). Follows the site's load_player ajax endpoint; an
    openload redirect is appended as a hosted source, otherwise the
    returned JW playlist is mined for direct gvideo files. Always
    returns the (possibly empty) sources list.
    """
    try:
        sources = []
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        url = data['url']
        episode = int(data['episode'])
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        p = client.request(url, timeout='10')
        if episode > 0:
            # pick the episode link whose 'Episode N' label matches
            r = client.parseDOM(p, 'div', attrs={'class': 'ep_link.+?'})[0]
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
            r = [(i[0], re.findall('Episode\s+(\d+)', i[1])) for i in r]
            r = [(i[0], i[1][0]) for i in r]
            r = [i[0] for i in r if int(i[1]) == episode][0]
            p = client.request(r, timeout='10')
        p = re.findall('load_player\((\d+)\)', p)
        p = urllib.urlencode({'id': p[0]})
        headers = {'Referer': url}
        r = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v3')
        r = client.request(r, post=p, headers=headers, XHR=True, timeout='10')
        url = json.loads(r)['value']
        url = client.request(url, headers=headers, XHR=True, output='geturl', timeout='10')
        if 'openload.io' in url or 'openload.co' in url or 'oload.tv' in url:
            sources.append({
                'source': 'openload.co',
                'quality': 'HD',
                'language': 'en',
                'url': url,
                'direct': False,
                'debridonly': False
            })
            # openload found: jump straight to the return in the handler
            raise Exception()
        r = client.request(url, headers=headers, XHR=True, timeout='10')
        try:
            src = json.loads(r)['playlist'][0]['sources']
            links = [i['file'] for i in src if 'file' in i]
            for i in links:
                try:
                    sources.append({
                        'source': 'gvideo',
                        'quality': directstream.googletag(i)[0]['quality'],
                        'language': 'en',
                        'url': i,
                        'direct': True,
                        'debridonly': False
                    })
                except:
                    pass
        except:
            pass
        return sources
    except:
        return sources
def thread(self, url, i):
    """Worker entry point: fetch `url` and store the body at slot `i`
    of self.data (slot left untouched on failure)."""
    try:
        self.data[i] = client.request(url)
    except:
        return