def get(self, query):
    """Scrape Greek subtitle entries from subztv.online.

    query is '<title> (<year>)/imdb=<id>' for movies or
    '<title> S<ss>E<ee>/imdb=<id>' for TV episodes (the part after
    '/imdb=' may be an IMDB tt-id or, for TV, a TheTVDB numeric id —
    presumably; confirm against the caller).  Appends result dicts
    ({'name', 'url', 'source', 'rating'}) to self.list and returns it.
    Returns None if any step of the scrape fails (broad except below).
    """
    try:
        query, imdb = query.split('/imdb=')
        # Movie queries look like 'Title (2020)'; capture title + year.
        match = re.findall('^(?P<title>.+)[\s+\(|\s+](?P<year>\d{4})', query)
        # Prime the session so we hold the site cookies (__cfduid/PHPSESSID)
        # needed later to build the download URL payload.
        cookie = self.s.get('https://subztv.online/', headers=self.hdr).cookies
        cj = requests.utils.dict_from_cookiejar(cookie)
        if len(match) > 0:
            # ---- movie path ----
            title, year = match[0][0], match[0][1]
            if imdb.startswith('tt'):
                # Direct lookup by IMDB id.
                frame = 'https://subztv.online/view/%s' % imdb
                r = self.s.get(frame)
                # Strip non-ASCII bytes so parseDOM/regex work on py2 str.
                r = re.sub(r'[^\x00-\x7F]+', ' ', r.content)
            else:
                # Fall back to a title search and pick the entry whose
                # normalized title matches ours.
                url = 'https://subztv.online/search/%s/movies' % urllib.quote(
                    title)
                data = self.s.get(url).content
                data = client.parseDOM(data, 'span', attrs={'class': 'h5'})
                data = [(client.parseDOM(i, 'a')[0],
                         client.parseDOM(i, 'a', ret='href')[0])
                        for i in data if i]
                frame = [
                    i[1] for i in data
                    if cleantitle.get(i[0]) == cleantitle.get(title)
                ][0]
                r = self.s.get(frame).text
                r = re.sub(r'[^\x00-\x7F]+', ' ', r)
            # secCode is an anti-hotlink token embedded in the page; it is
            # required to form the /dll/ download URL below.
            secCode = client.parseDOM(r, 'input', ret='value',
                                      attrs={'id': 'secCode'})[0]
            items = client.parseDOM(r, 'tbody')[0]
            items = client.parseDOM(items, 'tr')
        else:
            # ---- episode path ----
            title, season, episode = re.findall(
                '^(?P<title>.+)\s+S(\d+)E(\d+)', query, re.I)[0]
            #xbmc.log('$#$MATCH-SUBZ: %s | %s | %s' % (title, season, episode), xbmc.LOGNOTICE)
            season, episode = '%01d' % int(season), '%01d' % int(episode)
            # Episode pages are linked as '...season-X-episode-Y...'.
            hdlr = 'season-%s-episode-%s' % (season, episode)
            if imdb.startswith('tt'):
                r = self.s.get('https://subztv.online/view/%s' % imdb).content
                # xbmc.log('$#$MATCH-SUBZ-RRR-source: %s' % r)
                #r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                frames = client.parseDOM(r, 'a', ret='href')
                frame = [i for i in frames if hdlr in i][0]
            else:
                # No IMDB id: resolve it via the TheTVDB v3 API using the
                # numeric series id we were given.  NOTE(review): the login
                # URL has a leading space — client.request apparently
                # tolerates it; confirm.
                baseurl = ' https://api.thetvdb.com/login'
                series_url = 'https://api.thetvdb.com/series/%s'
                greek_api = 'CAYAM6RT1K2SERUE'
                user_key = '7F5420E18BAD7762'
                username = '******'
                _headers = {
                    'Content-Type': 'application/json',
                    'Accept': 'application/json',
                    'Connection': 'close'
                }
                post = {
                    "apikey": greek_api,
                    "username": username,
                    "userkey": user_key
                }
                # data = requests.post(baseurl, data=json.dumps(post), headers=_headers).json()
                data = client.request(baseurl, post=json.dumps(post),
                                      headers=_headers)
                # JWT bearer token from the login response.
                auth = 'Bearer %s' % urllib.unquote_plus(
                    json.loads(data)['token'])
                _headers['Authorization'] = auth
                series_data = client.request(series_url % imdb,
                                             headers=_headers)
                imdb = json.loads(series_data)['data']['imdbId']
                #xbmc.log('$#$MATCH-SUBZ-RRR-IMDB: %s' % imdb)
                r = self.s.get('https://subztv.online/view/%s' % imdb).content
                # xbmc.log('$#$MATCH-SUBZ-RRR-source: %s' % r)
                #r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                frames = client.parseDOM(r, 'a', ret='href')
                frame = [i for i in frames if hdlr in i][0]
            #xbmc.log('$#$MATCH-SUBZ-λινκ: %s' % frame)
            r = self.s.get(frame).text
            r = re.sub(r'[^\x00-\x7F]+', ' ', r)
            secCode = client.parseDOM(r, 'input', ret='value',
                                      attrs={'id': 'secCode'})[0]
            items = client.parseDOM(r, 'tbody')[0]
            items = client.parseDOM(items, 'tr')
    except BaseException:
        # Any scrape/parse failure aborts the whole source silently.
        return
    for item in items:
        try:
            # Re-derive the IMDB id from the page URL we actually landed on.
            try:
                imdb = re.search('\/(tt\d+)\/', frame).groups()[0]
            except BaseException:
                imdb = re.search('\/(tt\d+)', frame).groups()[0]
            # downloadMe('<id>') onclick carries the subtitle id; the row
            # also holds the download count and the display name.
            data = re.findall(
                '''downloadMe\(['"](\w+\-\w+).+?label.+?>(\d+).+?<td>(.+?)</td''',
                str(item), re.I | re.DOTALL)[0]
            name = data[2]
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')
            url = 'https://subztv.online/dll/{}/0/{}'.format(data[0], secCode)
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            down = data[1]
            # Popularity (download count) is mapped to a rating elsewhere.
            rating = self._rating(down)
            self.list.append({
                'name': name,
                # Pipe-packed payload the downloader splits back apart:
                # page|dll-url|cf-cookie|session-cookie|name|imdb.
                'url': '%s|%s|%s|%s|%s|%s' % (frame.encode('utf-8'), url,
                                              cj['__cfduid'], cj['PHPSESSID'],
                                              name, imdb),
                'source': 'subztv',
                'rating': rating
            })
        except BaseException:
            # Skip rows that don't match the expected markup.
            pass
    return self.list
def get(self, query):
    """Scrape Greek subtitles from yifysubtitles.org (movies only).

    query is '<title> (<year>)/imdb=<ttid>'.  Episode-style queries are
    not supported by this source and return self.list unchanged.
    Appends {'name', 'url', 'source', 'rating'} dicts to self.list and
    returns it; returns None on a hard scrape failure.
    """
    try:
        query, imdb = query.split('/imdb=')
        match = re.findall(r'^(?P<title>.+)[\s+\(|\s+](?P<year>\d{4})', query)
        if len(match) > 0:
            title, year = match[0][0], match[0][1]
            if imdb.startswith('tt'):
                # Direct movie page by IMDB id.
                url = 'https://yifysubtitles.org/movie-imdb/{}'.format(imdb)
                r = six.ensure_text(client.request(url))
            else:
                # Title search, then refine to the movie page whose title
                # and year both match; on success r is replaced with the
                # movie page markup.
                url = urljoin(self.base_link,
                              self.search.format(quote_plus(title)))
                r = six.ensure_text(client.request(url))
                data = client.parseDOM(r, 'div', attrs={
                    'class': 'media-body'
                })  # <div class="media-body">
                for i in data:
                    try:
                        name = client.parseDOM(i, 'h3')[0].encode('utf-8')
                        if not cleantitle.get(title) == cleantitle.get(
                                client.replaceHTMLCodes(name)):
                            raise Exception()
                        y = re.search(r'">(\d{4})<small>year</small>',
                                      i).groups()[0]
                        if not year == y:
                            raise Exception()
                        url = client.parseDOM(i, 'a', ret='href')[0]
                        url = url.encode('utf-8')
                        url = urljoin(self.base_link, url)
                        r = client.request(url)
                    except BaseException:
                        # Non-matching candidate; keep scanning.
                        pass
            # Subtitle rows; keep only the Greek-language ones.
            data = client.parseDOM(r, 'tr', attrs={'data-id': r'\d+'})
            items = [i for i in data if 'greek' in i.lower()]
            # xbmc.log('$#$MATCH-YIFI-RRR: %s' % items)
            urls = []
            for item in items:
                try:
                    # rating = client.parseDOM(item, 'span', attrs={'title': 'rating'})[0]
                    name = client.parseDOM(item, 'a')[0]
                    # Strip markup and the literal word 'subtitle' from the
                    # display name.
                    name = re.sub(r'<.+?>', '', name).replace('subtitle', '')
                    name = client.replaceHTMLCodes(name)
                    url = client.parseDOM(item, 'a', ret='href')[0]
                    url = client.replaceHTMLCodes(url)
                    if six.PY2:
                        url = url.encode('utf-8')
                        name = name.encode('utf-8')
                    urls += [(name, url)]
                except BaseException:
                    pass
        else:
            # No '(year)' in the query => TV episode; unsupported here.
            return self.list
    except BaseException:
        return
    # Resolve each subtitle page to its actual download link.
    for i in urls:
        try:
            r = six.ensure_text(
                client.request(urljoin(self.base_link, i[1])))
            url = client.parseDOM(
                r, 'a', ret='href',
                attrs={'class': 'btn-icon download-subtitle'})[0]
            # Absolutize site-relative links.
            url = 'https://yifysubtitles.org/' + url if url.startswith(
                '/') else url
            self.list.append({
                'name': i[0],
                'url': url,
                'source': 'yifi',
                'rating': '5'
            })
        except BaseException:
            pass
    return self.list
def get(self, query):
    """Scrape Greek subtitles from subz.xyz for a movie or TV episode.

    query is '<title> (<year>)/imdb=' for movies or
    '<title> S<ss>E<ee>/imdb=' for episodes (the imdb id itself is not
    used by this source).  Candidate pages are matched via a cached
    title lookup (self.cache, 2200 min TTL).  Appends result dicts to
    self.list and returns it; returns None on any failure — including
    the case where no candidate matched and `item` is never bound
    (NameError is absorbed by the bare except).
    """
    try:
        match = re.findall('(.+?) \((\d{4})\)/imdb=$', query)
        if len(match) > 0:
            # ---- movie path ----
            title, year = match[0][0], match[0][1]
            # Normalize the title: percent-encode, blank out every escape
            # sequence, decode, and collapse whitespace (ASCII-only query).
            query = ' '.join(
                urllib.unquote_plus(
                    re.sub('%\w\w', ' ', urllib.quote_plus(title))).split())
            url = 'https://subz.xyz/search?q=%s' % urllib.quote_plus(query)
            result = client.request(url)
            result = re.sub(r'[^\x00-\x7F]+', ' ', result)
            url = client.parseDOM(result, 'section',
                                  attrs={'class': 'movies'})[0]
            url = re.findall('(/movies/\d+)', url)
            # De-duplicate while preserving order, keep first 3 candidates.
            url = [x for y, x in enumerate(url) if x not in url[:y]]
            url = [urljoin('https://subz.xyz', i) for i in url]
            url = url[:3]
            for i in url:
                # self.cache presumably returns (title, year) for a page
                # URL — TODO confirm against the cache helper.
                c = cache.get(self.cache, 2200, i)
                if c is not None:
                    if cleantitle.get(c[0]) == cleantitle.get(
                            title) and c[1] == year:
                        try:
                            # Reuse the page body the cache probe fetched,
                            # if it left one on self.r.
                            item = self.r
                        except:
                            item = client.request(i)
                        break
        else:
            # ---- episode path ----
            title, season, episode = re.findall(
                '(.+?) S(\d+)E(\d+)/imdb=$', query)[0]
            season, episode = '%01d' % int(season), '%01d' % int(episode)
            query = ' '.join(
                urllib.unquote_plus(
                    re.sub('%\w\w', ' ', urllib.quote_plus(title))).split())
            url = 'https://subz.xyz/search?q=%s' % urllib.quote_plus(query)
            result = client.request(url)
            result = re.sub(r'[^\x00-\x7F]+', ' ', result)
            url = client.parseDOM(result, 'section',
                                  attrs={'class': 'tvshows'})[0]
            url = re.findall('(/series/\d+)', url)
            url = [x for y, x in enumerate(url) if x not in url[:y]]
            url = [urljoin('https://subz.xyz', i) for i in url]
            url = url[:3]
            for i in url:
                c = cache.get(self.cache, 2200, i)
                if c is not None:
                    if cleantitle.get(c[0]) == cleantitle.get(title):
                        item = i
                        break
            # Deep-link straight to the episode page of the matched show.
            item = '%s/seasons/%s/episodes/%s' % (item, season, episode)
            item = client.request(item)
            item = re.sub(r'[^\x00-\x7F]+', ' ', item)
        # One <tr data-id=...> per subtitle on the page.
        items = client.parseDOM(item, 'tr', attrs={'data-id': '.+?'})
    except:
        return
    for item in items:
        try:
            # Last <td> of the row holds the download anchor.
            r = client.parseDOM(item, 'td', attrs={'class': '.+?'})[-1]
            url = client.parseDOM(r, 'a', ret='href')[0]
            url = client.replaceHTMLCodes(url)
            url = url.replace("'", "").encode('utf-8')
            # Display name is derived from the download filename.
            name = url.split('/')[-1].strip()
            name = re.sub('\s\s+', ' ', name)
            name = name.replace('_', '').replace('%20', '.')
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')
            self.list.append({
                'name': name,
                'url': url,
                'source': 'subzxyz',
                'rating': 5
            })
        except:
            pass
    return self.list
def get(self, query):
    """Scrape Greek subtitles from a YIFY-style site (movies only).

    Python-2 era variant using urlparse/urllib.  query is
    '<title> (<year>)/imdb=<ttid>'; episode queries return self.list
    unchanged.  Appends result dicts to self.list and returns it;
    returns None on a hard scrape failure.
    """
    try:
        query, imdb = query.split('/imdb=')
        match = re.findall('^(?P<title>.+)[\s+\(|\s+](?P<year>\d{4})', query)
        #xbmc.log('$#$MATCH-YIFI: %s' % match, xbmc.LOGNOTICE)
        if len(match) > 0:
            title, year = match[0][0], match[0][1]
            if imdb.startswith('tt'):
                # Direct movie page by IMDB id.
                r = client.request(self.base_link + 'movie-imdb/%s' % imdb)
            else:
                # Title search; refine to the candidate whose title and
                # year both match, replacing r with that movie's page.
                url = urlparse.urljoin(
                    self.base_link, self.search) % urllib.quote_plus(title)
                r = client.request(url)
                data = client.parseDOM(r, 'div', attrs={'id': 'content'})[0]
                data = client.parseDOM(data, 'li',
                                       attrs={'class': 'movie-wrapper'})
                for i in data:
                    try:
                        name = client.parseDOM(i, 'span',
                                               attrs={'class': 'title'
                                                      })[0].encode('utf-8')
                        if not cleantitle.get(title) == cleantitle.get(
                                client.replaceHTMLCodes(name)):
                            raise Exception()
                        y = client.parseDOM(
                            i, 'span',
                            attrs={'class': 'wrap-enlarge year'})[0]
                        y = re.search('(\d{4})', y).groups()[0]
                        if not year == y:
                            raise Exception()
                        url = client.parseDOM(i, 'a', ret='href')[0]
                        url = url.encode('utf-8')
                        url = urlparse.urljoin(self.base_link, url)
                        r = client.request(url)
                    except BaseException:
                        # Non-matching candidate; keep scanning.
                        pass
            # Subtitle rows; keep only the Greek-language ones.
            data = client.parseDOM(r, 'li', attrs={'data-id': '\d+'})
            items = [i for i in data if 'greek' in i.lower()]
            urls = []
            for item in items:
                try:
                    rating = client.parseDOM(item, 'span',
                                             attrs={'title': 'rating'})[0]
                    name = client.parseDOM(item, 'span',
                                           attrs={'class': 'subdesc'
                                                  })[0].replace(
                                                      'subtitle', '')
                    name = client.replaceHTMLCodes(name)
                    name = name.encode('utf-8')
                    url = client.parseDOM(item, 'a', ret='href',
                                          attrs={'class':
                                                 'subtitle-page'})[0]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    # NOTE: rating is collected here but a constant '5'
                    # is appended below — kept as-is.
                    urls += [(name, url, rating)]
                except BaseException:
                    pass
        else:
            # Episode-style query: unsupported by this source.
            return self.list
    except BaseException:
        return
    # Resolve each subtitle page to its actual download link.
    for i in urls:
        try:
            r = client.request(urlparse.urljoin(self.base_link, i[1]))
            url = client.parseDOM(
                r, 'a', ret='href',
                attrs={'class': 'dl-button blue download-subtitle'})[0]
            self.list.append({
                'name': i[0],
                'url': url,
                'source': 'yifi',
                'rating': '5'
            })
        except BaseException:
            pass
    return self.list
def get(self, query):
    """Scrape Greek subtitles from subztv.online (debug-logging variant).

    query is '<title> (<year>)/imdb=<ttid>' for movies or
    '<title> S<ss>E<ee>/imdb=<id>' for episodes.  Fetches the
    /rainbow/master-js endpoint first to obtain the session cookies
    (PHPSESSID) embedded in the result payload.  Appends result dicts
    to self.list and returns it; returns None on any scrape failure.
    """
    try:
        query, imdb = query.split('/imdb=')
        match = re.findall('^(?P<title>.+)[\s+\(|\s+](?P<year>\d{4})', query)
        #xbmc.log('$#$MATCH-SUBZ: %s' % match, xbmc.LOGNOTICE)
        if len(match) > 0:
            # ---- movie path ----
            title, year = match[0][0], match[0][1]
            # Cookie bootstrap endpoint (anti-bot js) — yields PHPSESSID.
            cj = requests.get('https://subztv.online/rainbow/master-js',
                              headers=self.hdr).cookies
            if imdb.startswith('tt'):
                r = requests.get('https://subztv.online/view/%s' % imdb,
                                 headers=self.hdr, cookies=cj).content
                # Strip non-ASCII bytes for the py2 regex/DOM helpers.
                r = re.sub(r'[^\x00-\x7F]+', ' ', r)
            else:
                # Title search; pick the hit with the matching clean title.
                url = 'https://subztv.online/search/%s/movies' % urllib.quote(
                    title)
                data = requests.get(url, headers=self.hdr).content
                data = client.parseDOM(data, 'span', attrs={'class': 'h5'})
                data = [(client.parseDOM(i, 'a')[0],
                         client.parseDOM(i, 'a', ret='href')[0])
                        for i in data if i]
                link = [
                    i[1] for i in data
                    if cleantitle.get(i[0]) == cleantitle.get(title)
                ][0]
                # Refresh cookies before hitting the movie page.
                cj = requests.get(
                    'https://subztv.online/rainbow/master-js',
                    headers=self.hdr).cookies
                r = requests.get(link, headers=self.hdr, cookies=cj).content
                r = re.sub(r'[^\x00-\x7F]+', ' ', r)
            # secCode: anti-hotlink token required for the /dll/ URL.
            secCode = client.parseDOM(r, 'input', ret='value',
                                      attrs={'id': 'secCode'})[0]
            items = client.parseDOM(r, 'tbody')[0]
            items = client.parseDOM(items, 'tr')
        else:
            # ---- episode path ----
            title, season, episode = re.findall(
                '^(?P<title>.+)\s+S(\d+)E(\d+)', query, re.I)[0]
            xbmc.log(
                '$#$MATCH-SUBZ: %s | %s | %s' % (title, season, episode),
                xbmc.LOGNOTICE)
            season, episode = '%01d' % int(season), '%01d' % int(episode)
            # Episode pages are linked as '...season-X-episode-Y...'.
            hdlr = 'season-%s-episode-%s' % (season, episode)
            cj = requests.get('https://subztv.online/rainbow/master-js',
                              headers=self.hdr).cookies
            xbmc.log('$#$MATCH-SUBZ-ΨΟΟΚΙΕΣ: %s' % hdlr)
            xbmc.log('$#$MATCH-SUBZ-IMDB: %s' % imdb)
            if imdb.startswith('tt'):
                xbmc.log('$#$MALAKASSSSSS')
                r = requests.get('https://subztv.online/view/%s' % imdb,
                                 headers=self.hdr).content
                # xbmc.log('$#$MATCH-SUBZ-RRR-source: %s' % r)
                r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                frames = client.parseDOM(r, 'a', ret='href')
                frame = [i for i in frames if hdlr in i][0]
                xbmc.log('$#$MATCH-SUBZ-IMDB: %s' % frame)
            else:
                # No IMDB id: search the TV section by title.
                url = 'https://subztv.online/search/%s/tv' % urllib.quote(
                    title)
                data = requests.get(url, headers=self.hdr).text
                data = re.sub(r'[^\x00-\x7F]+', ' ', data)
                # xbmc.log('$#$MATCH-SUBZ-DATA-source: %s' % data)
                data = client.parseDOM(data, 'span', attrs={'class': 'h5'})
                data = [(client.parseDOM(i, 'a')[0],
                         client.parseDOM(i, 'a', ret='href')[0])
                        for i in data if i]
                xbmc.log('$#$MATCH-SUBZ-DATA-list: %s' % data)
                try:
                    # Best case: a result link already points at the episode.
                    frame = [i[1] for i in data if hdlr in i[1]][0]
                    xbmc.log('$#$MATCH-SUBZ-TRYYYY: %s' % frame)
                except BaseException:
                    # Otherwise open the show page and pull the episode
                    # link out of the seasons section ('sessaon' is the
                    # site's own misspelling — kept verbatim).
                    link = [
                        i[1] for i in data
                        if cleantitle.get(i[0]) == cleantitle.get(title)
                    ][0]
                    xbmc.log('$#$MATCH-SUBZ-EXCEPT: %s' % link)
                    r = requests.get(link, headers=self.hdr,
                                     cookies=cj).text
                    r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                    url = client.parseDOM(
                        r,
                        'section',
                    )
                    url = [i for i in url if 'sessaon' in i][0]
                    url = client.parseDOM(url, 'a', ret='href')
                    frame = [i for i in url if hdlr in i][0]
                    xbmc.log('$#$MATCH-SUBZ-LINKKK: %s' % frame)
            r = requests.get(frame, headers=self.hdr).content
            r = re.sub(r'[^\x00-\x7F]+', ' ', r)
            secCode = client.parseDOM(r, 'input', ret='value',
                                      attrs={'id': 'secCode'})[0]
            items = client.parseDOM(r, 'tbody')[0]
            items = client.parseDOM(items, 'tr')
    except BaseException:
        # Any scrape/parse failure aborts the whole source silently.
        return
    for item in items:
        try:
            # downloadMe('<id>') onclick carries the subtitle id; row also
            # holds download count and display name.
            data = re.compile(
                '''downloadMe\(['"](\w+\-\w+).+?label.+?>(\d+).+?<td>(.+?)</td''',
                re.I | re.DOTALL).findall(str(item))[0]
            name = data[2]
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')
            url = 'https://subztv.online/' + 'dll/' + data[
                0] + '/0/' + secCode
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            down = data[1]
            rating = self._rating(down)
            self.list.append({
                'name': name,
                # Pipe-packed payload: dll-url|name|session-cookie|imdb.
                'url': '%s|%s|%s|%s' % (url, name, cj['PHPSESSID'], imdb),
                'source': 'subztv',
                'rating': rating
            })
        except BaseException:
            pass
    return self.list
def get(self, query):
    """Scrape Greek subtitles from subztv.club.

    query is '<title> (<year>)/imdb=<id>' for movies or
    '<title> S<ss>E<ee>/imdb=' for episodes.  /rainbow/master-js is
    fetched to obtain the session cookies (PHPSESSID) packed into each
    result's url payload.  Appends result dicts to self.list and
    returns it; returns None on any scrape failure.
    """
    try:
        match = re.findall('(.+?) \((\d{4})\)/imdb=(.+?)$', query)
        if len(match) > 0:
            # ---- movie path ----
            title, year, imdb = match[0][0], match[0][1], match[0][2]
            # Cookie bootstrap endpoint (anti-bot js) — yields PHPSESSID.
            cj = requests.get('https://subztv.club/rainbow/master-js',
                              headers=self.hdr).cookies
            if imdb.startswith('tt'):
                r = requests.get('https://subztv.club/view/%s' % imdb,
                                 headers=self.hdr, cookies=cj).content
                # Strip non-ASCII bytes for the py2 regex/DOM helpers.
                r = re.sub(r'[^\x00-\x7F]+', ' ', r)
            else:
                # Title search; pick the hit with the matching clean title.
                url = 'https://subztv.club/search/%s/movies' % urllib.quote(title)
                data = requests.get(url, headers=self.hdr).content
                data = client.parseDOM(data, 'span', attrs={'class': 'h5'})
                data = [(client.parseDOM(i, 'a')[0],
                         client.parseDOM(i, 'a', ret='href')[0])
                        for i in data if i]
                link = [i[1] for i in data
                        if cleantitle.get(i[0]) == cleantitle.get(title)][0]
                cj = requests.get('https://subztv.club/rainbow/master-js',
                                  headers=self.hdr).cookies
                r = requests.get(link, headers=self.hdr, cookies=cj).content
                r = re.sub(r'[^\x00-\x7F]+', ' ', r)
            # secCode: anti-hotlink token required for the /dll/ URL.
            secCode = client.parseDOM(r, 'input', ret='value',
                                      attrs={'id': 'secCode'})[0]
            items = client.parseDOM(r, 'tbody')[0]
            items = client.parseDOM(items, 'tr')
        else:
            # ---- episode path ----
            title, season, episode = re.findall('(.+?) S(\d+)E(\d+)/imdb=',
                                                query)[0]
            season, episode = '%01d' % int(season), '%01d' % int(episode)
            # Episode pages are linked as '...season-X-episode-Y...'.
            hdlr = 'season-%s-episode-%s' % (season, episode)
            url = 'https://subztv.club/search/%s/tv' % urllib.quote(title)
            data = requests.get(url, headers=self.hdr).content
            data = client.parseDOM(data, 'span', attrs={'class': 'h5'})
            data = [(client.parseDOM(i, 'a')[0],
                     client.parseDOM(i, 'a', ret='href')[0])
                    for i in data if i]
            try:
                # Best case: a result link already points at the episode.
                url = [i[1] for i in data if hdlr in i[1]][0]
            except:
                # Otherwise open the show page and pull the episode link
                # from the seasons section ('sessaon' is the site's own
                # misspelling — kept verbatim).
                link = [i[1] for i in data
                        if cleantitle.get(i[0]) == cleantitle.get(title)][0]
                cj = requests.get('https://subztv.club/rainbow/master-js',
                                  headers=self.hdr).cookies
                r = requests.get(link, headers=self.hdr, cookies=cj).content
                r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                url = client.parseDOM(r, 'section',)
                url = [i for i in url if 'sessaon' in i][0]
                url = client.parseDOM(url, 'a', ret='href')
                url = [i for i in url if hdlr in i][0]
            # Recover the IMDB id from the episode URL.
            imdb = re.findall('(tt\d+)', url)[0]
            cj = requests.get('https://subztv.club/rainbow/master-js',
                              headers=self.hdr).cookies
            r = requests.get(url, headers=self.hdr, cookies=cj).content
            r = re.sub(r'[^\x00-\x7F]+', ' ', r)
            secCode = client.parseDOM(r, 'input', ret='value',
                                      attrs={'id': 'secCode'})[0]
            items = client.parseDOM(r, 'tbody')[0]
            items = client.parseDOM(items, 'tr')
    except:
        # Any scrape/parse failure aborts the whole source silently.
        return
    for item in items:
        try:
            # downloadMe('<id>') onclick carries the subtitle id; row also
            # holds download count and display name.
            data = re.compile(
                '''downloadMe\(['"](\w+\-\w+).+?label.+?>(\d+).+?<td>(.+?)</td''',
                re.I | re.DOTALL).findall(str(item))[0]
            name = data[2]
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')
            url = 'https://subztv.club/' + 'dll/' + data[0] + '/0/' + secCode
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            down = data[1]
            rating = self._rating(down)
            self.list.append({
                'name': name,
                # Pipe-packed payload: dll-url|name|session-cookie|imdb.
                'url': '%s|%s|%s|%s' % (url, name, cj['PHPSESSID'], imdb),
                'source': 'subztv',
                'rating': rating
            })
        except:
            pass
    return self.list
def get(self, query):
    """Scrape Greek subtitles from the subztv site at self.baseurl.

    six-compatible (py2/py3) variant.  query is
    '<title> (<year>)/imdb=<id>' for movies or
    '<title> S<ss>E<ee>/imdb=<id>' for episodes; for TV without an
    IMDB tt-id a TheTVDB numeric id is resolved to one via the TheTVDB
    v3 API, and failing that a site title search is used.  Appends
    result dicts to self.list and returns it; returns None on any
    scrape failure.
    """
    try:
        query, imdb = query.split('/imdb=')
        match = re.findall(r'^(?P<title>.+)[\s+\(|\s+](?P<year>\d{4})', query)
        # xbmc.log('MATCH: {}'.format(match))
        # Prime the session and keep its cookies (PHPSESSID) for the
        # result payload built at the bottom.
        cookie = self.s.get(self.baseurl, headers=self.hdr)
        cj = requests.utils.dict_from_cookiejar(cookie.cookies)
        if len(match) > 0:
            # ---- movie path ----
            title, year = match[0][0], match[0][1]
            if imdb.startswith('tt'):
                frame = self.baseurl + 'view/{}'.format(imdb)
                r = self.s.get(frame).text
                if six.PY2:
                    # py2 regex/DOM helpers need ASCII-only input.
                    r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                # try:
                #     r = r.decode('utf-8', errors='replace')
                # except AttributeError:
                #     pass
            else:
                # Title search; pick the hit with the matching clean title.
                url = self.baseurl + 'search/{}/movies'.format(
                    quote(title))
                data = self.s.get(url).text
                data = client.parseDOM(data, 'span', attrs={'class': 'h5'})
                data = [(client.parseDOM(i, 'a')[0],
                         client.parseDOM(i, 'a', ret='href')[0])
                        for i in data if i]
                frame = [
                    i[1] for i in data
                    if cleantitle.get(i[0]) == cleantitle.get(title)
                ][0]
                r = self.s.get(frame).text
                if six.PY2:
                    r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                # try:
                #     r = r.decode('utf-8', errors='replace')
                # except AttributeError:
                #     pass
            # secCode: anti-hotlink token required for the /dll/ URL.
            secCode = client.parseDOM(r, 'input', ret='value',
                                      attrs={'id': 'secCode'})[0]
            items = client.parseDOM(r, 'tbody')[0]
            # xbmc.log('ITEMS: {}'.format(items))
            items = client.parseDOM(items, 'tr')
        else:
            # ---- episode path ----
            title, season, episode = re.findall(
                r'^(?P<title>.+)\s+S(\d+)E(\d+)', query, re.I)[0]
            # Episode pages are linked as '...season-X-episode-Y...'.
            hdlr = 'season-{}-episode-{}'.format(int(season), int(episode))
            if imdb.startswith('tt'):
                r = self.s.get(self.baseurl + 'view/{}'.format(imdb)).text
                # r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                frames = client.parseDOM(r, 'a', ret='href')
                link = [i for i in frames if hdlr in i]
                if not link:
                    # No direct episode link: fall back to the show page.
                    frame = self.baseurl + 'view/{}'.format(imdb)
                else:
                    frame = link[0]
            else:
                if len(imdb) > 1:
                    # Numeric TheTVDB id: resolve it to an IMDB id via the
                    # TheTVDB v3 API.  NOTE(review): the login URL has a
                    # leading space — client.request apparently tolerates
                    # it; confirm.
                    baseurl = ' https://api.thetvdb.com/login'
                    series_url = 'https://api.thetvdb.com/series/%s'
                    greek_api = '7d4261794838bb48a3122381811ecb42'
                    user_key = 'TJXB86PGDBYN0818'
                    username = '******'
                    _headers = {
                        'Content-Type': 'application/json',
                        'Accept': 'application/json',
                        'Connection': 'close'
                    }
                    post = {
                        "apikey": greek_api,
                        "username": username,
                        "userkey": user_key
                    }
                    # data = requests.post(baseurl, data=json.dumps(post), headers=_headers).json()
                    data = client.request(baseurl, post=json.dumps(post),
                                          headers=_headers)
                    # JWT bearer token from the login response.
                    auth = 'Bearer {}'.format(
                        unquote_plus(json.loads(data)['token']))
                    _headers['Authorization'] = auth
                    series_data = client.request(series_url % imdb,
                                                 headers=_headers)
                    imdb = json.loads(series_data)['data']['imdbId']
                    r = self.s.get(self.baseurl +
                                   'view/{}'.format(imdb)).text
                    # r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                    frames = client.parseDOM(r, 'a', ret='href')
                    frame = [i for i in frames if hdlr in i][0]
                else:
                    # No usable id at all: search the TV section by title
                    # and dig the IMDB id out of the matching show link.
                    url = self.baseurl + 'search/{}/tv'.format(
                        quote(title))
                    data = self.s.get(url).text
                    data = client.parseDOM(data, 'span',
                                           attrs={'class': 'h5'})
                    data = [(client.parseDOM(i, 'a')[0],
                             client.parseDOM(i, 'a', ret='href')[0])
                            for i in data if i]
                    serie_link = [
                        i[1] for i in data
                        if cleantitle.get(i[0]) == cleantitle.get(title)
                    ][0]
                    imdbid = re.findall(r'\/(tt\d+)\/', serie_link)[0]
                    r = self.s.get(self.baseurl +
                                   'view/{}'.format(imdbid)).text
                    frames = client.parseDOM(r, 'a', ret='href')
                    frame = [i for i in frames if hdlr in i][0]
            frame = client.replaceHTMLCodes(frame)
            frame = six.ensure_text(frame, encoding='utf-8')
            r = self.s.get(frame).text
            # r = re.sub(r'[^\x00-\x7F]+', ' ', r)
            secCode = client.parseDOM(r, 'input', ret='value',
                                      attrs={'id': 'secCode'})[0]
            items = client.parseDOM(r, 'tbody')[0]
            items = client.parseDOM(items, 'tr')
            # xbmc.log('ITEMS: {}'.format(items))
    except BaseException:
        # Any scrape/parse failure aborts the whole source silently.
        return
    for item in items:
        try:
            item = six.ensure_str(item, encoding='utf-8')
            # xbmc.log('$#$MATCH-SUBZ-ITEM: {}'.format(item))
            # Re-derive the IMDB id from the page URL we landed on.
            try:
                imdb = re.search(r'\/(tt\d+)\/', str(frame)).groups()[0]
            except BaseException:
                imdb = re.search(r'\/(tt\d+)', str(frame)).groups()[0]
            # downloadMe('<id>') onclick carries the subtitle id; row also
            # holds download count and display name.
            data = re.findall(
                r'''downloadMe\(['"](\w+-\w+).+?label.+?>(\d+).+?<td>(.+?)</td''',
                item, re.I | re.DOTALL)[0]
            name = data[2]
            name = client.replaceHTMLCodes(name)
            url = self.baseurl + 'dll/{}/0/{}'.format(data[0], secCode)
            url = client.replaceHTMLCodes(url)
            url = six.ensure_str(url, encoding='utf-8')
            url = six.ensure_str(url, encoding='utf-8')
            name = six.ensure_str(name)
            down = data[1]
            rating = str(self._rating(down))
            self.list.append({
                'name': name,
                # Pipe-packed payload the downloader splits back apart:
                # page|dll-url|session-cookie|name|imdb.
                'url': '{}|{}|{}|{}|{}'.format(frame, url, cj['PHPSESSID'],
                                               name, imdb),
                'source': 'subztv',
                'rating': rating
            })
        except BaseException:
            pass
    return self.list