def convert(self, items_list, thumb_quality='medium'):

    for item in items_list[:-1]:  # the last item of the page is dropped

        title = item['snippet']['title']
        url = self.play_link.format(py3_dec(item['id']['videoId']))
        image = py3_dec(item['snippet']['thumbnails'][thumb_quality]['url'])
        plot = item['snippet']['description']

        data = {'title': title, 'url': url, 'image': image, 'plot': plot}

        self.list.append(data)

    return self.list
def worker(self, name, url, append_string=''):

    try:

        if url.startswith(self.base_link):

            url = self.resolve(url)
            if url is None:
                raise Exception
            return url

        elif not url.startswith('http://'):

            # Anything that is not an http URL is treated as a bare
            # video id and wrapped into a play link before resolving.
            url = self.play_link.format(py3_dec(url))
            url = self.resolve(url)
            if url is None:
                raise Exception
            return url

        else:

            raise Exception

    except Exception:

        # Fall back to a title search when the URL cannot be resolved.
        query = ' '.join([name, append_string])
        query = self.youtube_search.format(py2_enc(query))
        url = self.search(query)
        if url is None:
            return
        return url
def resolve(self, url):

    try:
        vid = url.split('?v=')[-1].split('/')[-1].split('?')[0].split('&')[0]
        url = self.play_link.format(py3_dec(vid))
        return url
    except Exception:
        return
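# Illustrative usage sketch (commented out). It assumes the two methods
# above belong to the `youtube` helper class whose constructor appears
# further down; the video id and title are placeholders.
#
#   yt = youtube(key='MY_API_KEY', replace_url=False)
#   yt.resolve('https://www.youtube.com/watch?v=dQw4w9WgXcQ&t=42s')
#   # -> 'https://www.youtube.com/watch?v=dQw4w9WgXcQ'
#   yt.worker('Some video title', 'dQw4w9WgXcQ')
#   # -> play link built from the bare id, or a search fallback on failure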
def __init__(self):

    self.list = []
    self.data = []
    self.main_youtube_id = 'UCfU04d4DbqpyotwfgxRS6EQ'
    self.main_playlist_id = 'UUfU04d4DbqpyotwfgxRS6EQ'
    self.scramble = (
        'eJwVy80KgjAAAOBXkZ1TdCrTbmIhogVhYHUR24Yzl1ubP1n07uH9+75AU6zoALYGaNLkUJ6YyXEWeTebDZdsHqGHwcYAtWyrji4ri9JPXS'
        'yxSooS7eTcPsg9z0O2XI/v86vak1HESPBgXS1ZA7Rtzw2RGyAfmRPjyPFdSBWRsCGOpoSzafJF1wVKt8SqpdRWI0TD6aipwqIfaD9YWDzB'
        '7w/HIjj4')
    self.live_url = 'https://s1.cystream.net/live/faros1/playlist.m3u8'
    self.live_url_2 = 'https://s1.cystream.net/live/faros2/playlist.m3u8'
    self.radio_url = 'http://176.31.183.51:8300'
    # The API key ships base64-encoded and zlib-compressed; decode after
    # decompressing so zlib receives bytes.
    self.key = json.loads(
        py3_dec(decompress(b64decode(self.scramble))))['api_key']
def __init__(self, key='', api_key_setting='yt_api_key', replace_url=True,
             max_workers=5):

    self.list = []
    self.data = []
    self.max_workers = max_workers
    self.base_link = 'https://www.youtube.com/'
    self.base_addon = 'plugin://plugin.video.youtube/'
    self.google_base_link = 'https://www.googleapis.com/youtube/v3/'
    self.key_link = '&key={0}'.format(
        py3_dec(key) or control.setting(api_key_setting))
    self.playlists_link = ''.join([
        self.google_base_link,
        'playlists?part=snippet&maxResults=50&channelId={}'])
    self.playlist_link = ''.join([
        self.google_base_link,
        'playlistItems?part=snippet&maxResults=50&playlistId={}'])
    self.videos_link = ''.join([
        self.google_base_link,
        'search?part=snippet&order=date&maxResults=50&channelId={}'])
    self.content_link = ''.join([
        self.google_base_link, 'videos?part=contentDetails&id={}'])
    self.search_link = ''.join([
        self.google_base_link,
        'search?part=snippet&type=video&maxResults=5&q={}'])
    self.youtube_search = ''.join([self.google_base_link, 'search?q={}'])

    if not replace_url:
        self.play_link = ''.join([self.base_link, 'watch?v={}'])
    else:
        self.play_link = ''.join([self.base_addon, 'play/?video_id={}'])
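# A brief sketch of what `replace_url` toggles (constructor as defined
# above; the key and video id are placeholders):
#
#   youtube(key='MY_API_KEY', replace_url=True).play_link.format('abc123')
#   # -> 'plugin://plugin.video.youtube/play/?video_id=abc123'
#   youtube(key='MY_API_KEY', replace_url=False).play_link.format('abc123')
#   # -> 'https://www.youtube.com/watch?v=abc123'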
def request(url, close=True, redirect=True, error=False, proxy=None,
            post=None, headers=None, mobile=False, limit=None, referer=None,
            cookie=None, output='', timeout='30', username=None,
            password=None, verify=True, as_bytes=False, allow_caching=True):

    try:
        url = url.decode('utf-8')
    except Exception:
        pass

    if isinstance(post, dict):
        post = bytes(urlencode(post), encoding='utf-8')
    elif isinstance(post, str) and is_py3:
        post = bytes(post, encoding='utf-8')

    try:

        handlers = []

        if username is not None and password is not None and not proxy:

            passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
            passmgr.add_password(None, uri=url, user=username, passwd=password)
            handlers += [urllib2.HTTPBasicAuthHandler(passmgr)]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)

        if proxy is not None:

            if username is not None and password is not None:

                if is_py3:
                    passmgr = urllib2.HTTPPasswordMgr()
                    passmgr.add_password(None, uri=url, user=username, passwd=password)
                else:
                    passmgr = urllib2.ProxyBasicAuthHandler()
                    passmgr.add_password(None, uri=url, user=username, passwd=password)

                handlers += [
                    urllib2.ProxyHandler({'http': '{0}'.format(proxy)}),
                    urllib2.HTTPHandler,
                    urllib2.ProxyBasicAuthHandler(passmgr)
                ]

            else:

                handlers += [
                    urllib2.ProxyHandler({'http': '{0}'.format(proxy)}),
                    urllib2.HTTPHandler
                ]

            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)

        if output == 'cookie' or output == 'extended' or close is not True:

            cookies = cookielib.LWPCookieJar()
            handlers += [
                urllib2.HTTPHandler(),
                urllib2.HTTPSHandler(),
                urllib2.HTTPCookieProcessor(cookies)
            ]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)

        if not verify or ((2, 7, 8) < sys.version_info < (2, 7, 12)):

            # Disable certificate verification (also works around broken
            # SSL in CPython 2.7.9-2.7.11).
            try:
                ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = ssl.CERT_NONE
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                urllib2.install_opener(opener)
            except Exception:
                pass

        # Default to an empty header dict when none was supplied.
        if headers is None:
            headers = {}

        if 'User-Agent' in headers:
            pass
        elif mobile is not True:
            if allow_caching:
                from tulip import cache
                headers['User-Agent'] = cache.get(randomagent, 12)
            else:
                headers['User-Agent'] = CHROME
        else:
            if allow_caching:
                from tulip import cache
                headers['User-Agent'] = cache.get(random_mobile_agent, 12)
            else:
                headers['User-Agent'] = ANDROID

        if 'Referer' in headers:
            pass
        elif referer is None:
            headers['Referer'] = '%s://%s/' % (urlparse(url).scheme, urlparse(url).netloc)
        else:
            headers['Referer'] = referer

        if 'Accept-Language' not in headers:
            headers['Accept-Language'] = 'en-US'

        if 'Cookie' in headers:
            pass
        elif cookie is not None:
            headers['Cookie'] = cookie

        if redirect is False:

            class NoRedirectHandler(urllib2.HTTPRedirectHandler):

                def http_error_302(self, reqst, fp, code, msg, head):

                    infourl = addinfourl(fp, head, reqst.get_full_url())
                    infourl.status = code
                    infourl.code = code

                    return infourl

                http_error_300 = http_error_302
                http_error_301 = http_error_302
                http_error_303 = http_error_302
                http_error_307 = http_error_302

            opener = urllib2.build_opener(NoRedirectHandler())
            urllib2.install_opener(opener)

            try:
                del headers['Referer']
            except Exception:
                pass

        req = urllib2.Request(url, data=post, headers=headers)

        try:

            response = urllib2.urlopen(req, timeout=int(timeout))

        except HTTPError as response:

            if response.code == 503:
                # Cloudflare's browser-verification page cannot be passed here.
                if b'cf-browser-verification' in response.read(5242880):
                    if log_debug:
                        log_debug('This request cannot be handled due to human verification gate')
                    else:
                        print('This request cannot be handled due to human verification gate')
                    return
                elif error is False:
                    return
            elif error is False:
                return

        if output == 'cookie':

            try:
                result = '; '.join(['{0}={1}'.format(i.name, i.value) for i in cookies])
            except Exception:
                pass

        elif output == 'response':

            if limit == '0':
                result = (str(response.code), response.read(224 * 1024))
            elif limit is not None:
                result = (str(response.code), response.read(int(limit) * 1024))
            else:
                result = (str(response.code), response.read(5242880))

        elif output == 'chunk':

            try:
                content = int(response.headers['Content-Length'])
            except Exception:
                content = (2049 * 1024)

            if content < (2048 * 1024):
                return

            result = response.read(16 * 1024)

        elif output == 'extended':

            try:
                cookie = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            except Exception:
                pass

            content = response.headers
            result = response.read(5242880)

            if not as_bytes:
                result = py3_dec(result)

            return result, headers, content, cookie

        elif output == 'geturl':

            result = response.geturl()

        elif output == 'headers':

            content = response.headers

            if close:
                response.close()

            return content

        elif output == 'file_size':

            try:
                content = int(response.headers['Content-Length'])
            except Exception:
                content = '0'

            response.close()

            return content

        elif output == 'json':

            content = json.loads(response.read(5242880))

            response.close()

            return content

        else:

            if limit == '0':
                result = response.read(224 * 1024)
            elif limit is not None:
                if isinstance(limit, int):
                    result = response.read(limit * 1024)
                else:
                    result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)

        if close is True:
            response.close()

        if not as_bytes:
            result = py3_dec(result)

        return result

    except Exception as reason:

        _, __, tb = sys.exc_info()
        print(traceback.print_tb(tb))

        if log_debug:
            log_debug('Request failed, reason: ' + repr(reason) + ' on url: ' + url)
        else:
            print('Request failed, reason: ' + repr(reason) + ' on url: ' + url)

        return
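# Usage sketches for request() (commented out; example.com is a
# placeholder). Return shapes follow the `output` branches above:
#
#   html = request('https://example.com/')                    # decoded body text
#   data = request('https://example.com/api', output='json')  # parsed JSON
#   code, body = request('https://example.com/', output='response')
#   size = request('https://example.com/f.zip', output='file_size')
#   hdrs = request('https://example.com/', output='headers')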
def get(self, query):

    query = py3_dec(query)

    try:

        query = ' '.join(
            unquote_plus(re.sub(r'%\w\w', ' ', quote_plus(query))).split())
        url = ''.join([
            self.base_link, '/search.php?name={0}'.format(quote_plus(query))])
        result = client.request(url, timeout=control.setting('timeout'))

        try:
            result = result.decode('utf-8', errors='replace')
        except AttributeError:
            pass

        items = client.parseDOM(result, 'tr', attrs={'on.+?': '.+?'})

        if not items:
            log_debug('Subtitles.gr did not provide any results')
            return

    except Exception as e:

        _, __, tb = sys.exc_info()
        print(traceback.print_tb(tb))
        log_debug('Subtitles.gr failed at get function, reason: ' + str(e))
        return

    for item in items:

        try:

            if u'flags/el.gif' not in item:
                continue

            try:
                uploader = client.parseDOM(
                    item, 'a', attrs={'class': 'link_from'})[0].strip()
                uploader = client.replaceHTMLCodes(uploader)
            except IndexError:
                uploader = ''

            try:
                uploader = uploader.decode('utf-8')
            except AttributeError:
                pass

            if not uploader:
                uploader = 'other'

            try:
                downloads = client.parseDOM(
                    item, 'td', attrs={'class': 'latest_downloads'})[0].strip()
            except Exception:
                downloads = '0'

            downloads = re.sub('[^0-9]', '', downloads)

            name = client.parseDOM(item, 'a', attrs={'onclick': 'runme.+?'})[0]
            name = ' '.join(re.sub('<.+?>', '', name).split())
            name = client.replaceHTMLCodes(name)

            label = u'[{0}] {1} [{2} DLs]'.format(uploader, name, downloads)

            url = client.parseDOM(
                item, 'a', ret='href', attrs={'onclick': 'runme.+?'})[0]
            url = url.split('"')[0].split('\'')[0].split(' ')[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            rating = self._rating(downloads)

            self.list.append({
                'name': label, 'url': url, 'source': 'subtitlesgr',
                'rating': rating, 'title': name, 'downloads': downloads})

        except Exception as e:

            _, __, tb = sys.exc_info()
            print(traceback.print_tb(tb))
            log_debug('Subtitles.gr failed at self.list formation function, reason: ' + str(e))
            return

    return self.list
def get(self, query):

    query = py3_dec(query)

    try:

        query = ' '.join(
            unquote_plus(re.sub(r'%\w\w', ' ', quote_plus(query))).split())
        query_link = '&'.join([
            'keywords={keywords}', 'movie_type={movie_type}', 'language=!el',
            'seasons={seasons}', 'episodes={episodes}', 'year={year}',
            'type=', 'undefined=auto', 'undefined=en'])

        match = re.findall(
            r'(.+?)(?: -)?[ \.](?:\(?(\d{4})\)?|S?(\d{1,2})X?(?: |\.)?E?P?(\d{1,2})(?: \. (.+))?)',
            query, flags=re.I)

        if match:
            query, year, season, episode, episode_title = match[0]
            url = '?'.join([
                self.search_link,
                query_link.format(
                    keywords=quote_plus(query),
                    movie_type='movie' if year and not (season or episode) else 'tv-series',
                    seasons=season, episodes=episode, year=year)])
        else:
            url = '?'.join([
                self.search_link,
                query_link.format(
                    keywords=quote_plus(query), movie_type='', seasons='',
                    episodes='', year='')])

        result = client.request(
            url,
            headers={
                'Accept': 'text/html',
                'Accept-Language': 'en-US,en;q=0.9,el;q=0.8'},
            timeout=control.setting('timeout'), verify=False)

        try:
            result = result.decode('utf-8', errors='replace')
        except AttributeError:
            pass

        items = client.parseDOM(result, 'tr', attrs={'class': 'subtitle-entry'})

        if not items:
            log_debug('Podnapisi.net did not provide any results')
            return

    except Exception as e:

        _, __, tb = sys.exc_info()
        print(traceback.print_tb(tb))
        log_debug('Podnapisi.net failed at get function, reason: ' + str(e))
        return

    for item in items:

        try:

            if '<span>el</span>' not in item:
                continue

            table = client.parseDOM(item, 'td')

            try:
                downloads = [i.strip() for i in table]
                downloads = [i for i in downloads if i.isdigit()][1]
            except IndexError:
                downloads = '0'

            downloads = re.sub('[^0-9]', '', downloads)

            label = client.parseDOM(item, 'a', attrs={'alt': 'Subtitles\' page'})[0]
            label = client.replaceHTMLCodes(label)

            name = u'{0} [{1} DLs]'.format(label, downloads)

            url = [i for i in table if 'Download subtitles.' in i][0]
            url = client.parseDOM(url, 'a', ret='href')[0]
            url = ''.join([self.base_link, url])

            rating = [i for i in table if 'progress rating' in i][0]
            rating = client.parseDOM(
                rating, 'div', attrs={'class': 'progress rating'},
                ret='data-title')[0]
            rating = int(rating.partition('.')[0]) / 20

            self.list.append({
                'name': name, 'url': url, 'source': 'podnapisi',
                'rating': rating, 'title': label, 'downloads': downloads})

        except Exception as e:

            _, __, tb = sys.exc_info()
            print(traceback.print_tb(tb))
            log_debug('Podnapisi.net failed at self.list formation function, reason: ' + str(e))
            return

    return self.list
def get(self, query):

    query = py3_dec(query)

    try:

        try:
            title, season, episode = re.findall(
                r'(.+?)[ .]s?(\d{1,2})(?: |.)?(?:ep?|x|\.)?(\d{1,2})?',
                query, flags=re.I)[0]
        except (IndexError, TypeError):
            log_debug('Search query is not tv show related; xsubs.tv does not offer subs for movies')
            return

        if season.startswith('0'):
            season = season[-1]

        title = re.sub(r'^THE\s+|^A\s+', '', title.strip().upper())
        title = cleantitle.get(title)

        url = ''.join([self.base_link, '/series/all.xml'])
        srsid = self.cache(url)
        srsid = [i[0] for i in srsid if title == i[1]][0]

        url = ''.join([self.base_link, '/series/{0}/main.xml'.format(srsid)])
        result = client.request(url)

        try:
            ssnid = client.parseDOM(
                result, 'series_group', ret='ssnid', attrs={'ssnnum': season})[0]
        except IndexError:
            return

        url = ''.join([self.base_link, '/series/{0}/{1}.xml'.format(srsid, ssnid)])
        result = client.request(url)

        items = client.parseDOM(result, 'subg')
        items = [(client.parseDOM(i, 'etitle', ret='number'), i) for i in items]
        items = [i[1] for i in items if len(i[0]) > 0 and i[0][0] == episode][0]
        items = re.findall('(<sr .+?</sr>)', items)

    except Exception as e:

        _, __, tb = sys.exc_info()
        print(traceback.print_tb(tb))
        log_debug('Xsubs.tv failed at get function, reason: ' + str(e))
        return

    for item in items:

        try:

            p = client.parseDOM(item, 'sr', ret='published_on')[0]

            if p == '':
                continue

            name = client.parseDOM(item, 'sr')[0]
            name = name.rsplit('<hits>', 1)[0]

            label = re.sub('</.+?><.+?>|<.+?>', ' ', name).strip()
            label = client.replaceHTMLCodes(label)

            name = '{0} {1}'.format(client.replaceHTMLCodes(query), label)
            name = name.encode('utf-8')

            url = client.parseDOM(item, 'sr', ret='rlsid')[0]
            url = ''.join([self.base_link, '/xthru/getsub/{0}'.format(url)])
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            downloads = client.parseDOM(item, 'hits')[0]

            self.list.append({
                'name': name, 'url': url, 'source': 'xsubstv', 'rating': 5,
                'downloads': downloads, 'title': label})

        except Exception as e:

            _, __, tb = sys.exc_info()
            print(traceback.print_tb(tb))
            log_debug('Xsubs.tv failed at self.list formation function, reason: ' + str(e))
            return

    return self.list
def get(self, query):

    query = py3_dec(query)

    try:

        query = ' '.join(
            unquote_plus(re.sub(r'%\w\w', ' ', quote_plus(query))).split())

        match = re.findall(
            r'(.+?)(?: -)?[ \.](?:\(?(\d{4})\)?|S?(\d{1,2})X?(?: |\.)?E?P?(\d{1,2})(?: \. (.+))?)',
            query, flags=re.I)

        if match:
            query, year, season, episode, episode_title = match[0]
            search = quote_plus(' '.join(
                [query, 's', season, 'e', episode, episode_title]))
            url = self.search_show.format(search)
        else:
            url = self.search_movie.format(quote_plus(query))

        self.data = [client.request(url, timeout=control.setting('timeout'))]

        try:
            _next_button = client.parseDOM(
                self.data[0], 'a', attrs={'class': 'next page-numbers'},
                ret='href')[0]
        except IndexError:
            _next_button = None

        # Follow the "next page" links until they run out.
        while _next_button:

            self.data.append(
                client.request(_next_button, timeout=control.setting('timeout')))

            try:
                _next_button = client.parseDOM(
                    self.data[-1], 'a', attrs={'class': 'next page-numbers'},
                    ret='href')[0]
                control.sleep(200)
            except IndexError:
                _next_button = None
                break

        html = '\n'.join(self.data)

        items = client.parseDOM(html, 'div', attrs={'class': 'article__summary'})

        if not items:
            log_debug('Vipsubs.gr did not provide any results')
            return

    except Exception as e:

        _, __, tb = sys.exc_info()
        print(traceback.print_tb(tb))
        log_debug('Vipsubs.gr failed at get function, reason: ' + str(e))
        return

    for item in items:

        try:

            label = itertags_wrapper(item, 'a', attrs={'rel': "bookmark"})[0].text
            label = client.replaceHTMLCodes(label)

            url = itertags_wrapper(item, 'a', ret='href')[-1]

            if 'vipsubs.gr' in url:
                continue

            rating = 10.0

            self.list.append({
                'name': label, 'url': url, 'source': 'vipsubs',
                'rating': rating, 'title': label, 'downloads': '1000'})

        except Exception as e:

            _, __, tb = sys.exc_info()
            print(traceback.print_tb(tb))
            log_debug('Vipsubs.gr failed at self.list formation function, reason: ' + str(e))
            return

    return self.list
def _video_list(self, cid, url, pagination, limit):

    try:
        result = client.request(url)
        result = json.loads(result)
        items = result['items']
    except Exception:
        log_debug('Youtube: Could not fetch items from the cdn, invalid key or no quota left')
        return

    # Keep appending pages up to the limit, unless pagination is requested,
    # in which case only the first page plus a 'next' token is returned.
    for _ in range(1, limit):

        try:

            if pagination is True:
                raise Exception
            if 'nextPageToken' not in result:
                raise Exception

            page = url + '&pageToken=' + result['nextPageToken']
            result = client.request(page)
            result = json.loads(result)
            items += result['items']

        except Exception:
            pass

    try:
        if pagination is False:
            raise Exception
        next_link = cid + '&pageToken=' + result['nextPageToken']
    except Exception:
        next_link = ''

    for item in items:

        try:

            title = item['snippet']['title']
            try:
                title = py2_enc(title)
            except AttributeError:
                pass

            try:
                url = item['snippet']['resourceId']['videoId']
            except (KeyError, ValueError):
                url = item['id']['videoId']
            try:
                url = py2_enc(url)
            except AttributeError:
                pass

            image = item['snippet']['thumbnails']['high']['url']
            if '/default.jpg' in image:
                raise Exception
            try:
                image = py2_enc(image)
            except AttributeError:
                pass

            try:
                dateadded = item['snippet']['publishedAt']
                dateadded = str(iso8601.parse_date(dateadded).strftime('%Y-%m-%d %H:%M:%S'))
            except Exception:
                dateadded = str(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))

            date = '.'.join(dateadded.split()[0].split('-')[::-1])

            data = {
                'title': title, 'url': url, 'image': image,
                'dateadded': dateadded, 'date': date,
                'premiered': dateadded.split()[0],
                'aired': dateadded.split()[0],
                'year': int(dateadded[:4])
            }

            if next_link != '':
                data['next'] = next_link

            self.list.append(data)

        except Exception:
            pass

    try:

        # Fetch contentDetails for the collected ids in batches of 50,
        # in parallel.
        indices = list(range(len(self.list)))
        urls = [indices[i:i + 50] for i in range(0, len(self.list), 50)]
        urls = [','.join([self.list[x]['url'] for x in i]) for i in urls]
        urls = [self.content_link.format(''.join([i, self.key_link])) for i in urls]

        with concurrent_futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor:

            threads = [executor.submit(self.thread, u) for u in urls]

            for future in concurrent_futures.as_completed(threads):
                item = future.result()
                if not item:
                    continue
                self.data.append(item)

        items = []
        for i in self.data:
            items += json.loads(i)['items']

    except Exception:
        pass

    for item in range(len(self.list)):

        try:

            vid = self.list[item]['url']
            self.list[item]['url'] = self.play_link.format(py3_dec(vid))

            d = [(i['id'], i['contentDetails']) for i in items]
            d = [i for i in d if i[0] == vid]
            d = d[0][1]['duration']

            # Sum the ISO-8601 duration components into seconds.
            duration = 0
            try:
                duration += 60 * 60 * int(re.search(r'(\d*)H', d).group(1))
            except Exception:
                pass
            try:
                duration += 60 * int(re.search(r'(\d*)M', d).group(1))
            except Exception:
                pass
            try:
                duration += int(re.search(r'(\d*)S', d).group(1))
            except Exception:
                pass

            self.list[item]['duration'] = duration

        except Exception:
            pass

    return self.list
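# Worked example of the duration parsing above: for an API value of
# d = 'PT1H2M3S' the three regexes capture 1, 2 and 3, giving
# duration = 3600 * 1 + 60 * 2 + 3 = 3723 seconds.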
def run(self, query=None):

    if 'Greek' not in str(self.langs).split(','):
        control.directory(self.syshandle)
        control.infoDialog(control.lang(30002))
        return

    dup_removal = False

    if not query:

        title = match_title = control.infoLabel('{0}.Title'.format(infolabel_prefix))

        with concurrent_futures.ThreadPoolExecutor(5) as executor:

            if re.search(r'[^\x00-\x7F]+', title) is not None:
                title = control.infoLabel('{0}.OriginalTitle'.format(infolabel_prefix))
                title = unicodedata.normalize('NFKD', title).encode('ascii', 'ignore')
                title = py3_dec(title)

            year = control.infoLabel('{0}.Year'.format(infolabel_prefix))
            tvshowtitle = control.infoLabel('{0}.TVshowtitle'.format(infolabel_prefix))

            season = control.infoLabel('{0}.Season'.format(infolabel_prefix))
            if len(season) == 1:
                season = '0' + season

            episode = control.infoLabel('{0}.Episode'.format(infolabel_prefix))
            if len(episode) == 1:
                episode = '0' + episode
            if 's' in episode.lower():
                season, episode = '0', episode[-1:]

            if tvshowtitle != '':  # episode

                title_query = '{0} {1}'.format(tvshowtitle, title)
                season_episode_query = '{0} S{1} E{2}'.format(tvshowtitle, season, episode)
                season_episode_query_nospace = '{0} S{1}E{2}'.format(tvshowtitle, season, episode)

                threads = [
                    executor.submit(self.subtitlesgr, season_episode_query_nospace),
                    executor.submit(self.xsubstv, season_episode_query),
                    executor.submit(self.podnapisi, season_episode_query),
                    executor.submit(self.vipsubs, season_episode_query)
                ]
                dup_removal = True
                log_debug('Dual query used for subtitles search: '
                          + title_query + ' / ' + season_episode_query)

                if control.setting('queries') == 'true':
                    threads.extend([
                        executor.submit(self.subtitlesgr, title_query),
                        executor.submit(self.vipsubs, title_query),
                        executor.submit(self.podnapisi, title_query),
                        executor.submit(self.subtitlesgr, season_episode_query)
                    ])

            elif year != '':  # movie

                query = '{0} ({1})'.format(title, year)
                threads = [
                    executor.submit(self.subtitlesgr, query),
                    executor.submit(self.xsubstv, query),
                    executor.submit(self.vipsubs, query),
                    executor.submit(self.podnapisi, query)
                ]

            else:  # file

                query, year = control.cleanmovietitle(title)
                if year != '':
                    query = '{0} ({1})'.format(query, year)
                threads = [
                    executor.submit(self.subtitlesgr, query),
                    executor.submit(self.xsubstv, query),
                    executor.submit(self.vipsubs, query),
                    executor.submit(self.podnapisi, query)
                ]

            for future in concurrent_futures.as_completed(threads):
                item = future.result()
                if not item:
                    continue
                self.list.extend(item)

            if not dup_removal:
                log_debug('Query used for subtitles search: ' + query)
                self.query = query
                self.query = py3_dec(self.query)

    else:  # Manual query

        with concurrent_futures.ThreadPoolExecutor(5) as executor:

            query = match_title = py3_dec(query)
            threads = [
                executor.submit(self.subtitlesgr, query),
                executor.submit(self.xsubstv, query),
                executor.submit(self.vipsubs, query),
                executor.submit(self.podnapisi, query)
            ]

            for future in concurrent_futures.as_completed(threads):
                item = future.result()
                if not item:
                    continue
                self.list.extend(item)

    if len(self.list) == 0:
        control.directory(self.syshandle)
        return

    f = []
    # noinspection PyUnresolvedReferences
    f += [i for i in self.list if i['source'] == 'xsubstv']
    f += [i for i in self.list if i['source'] == 'subtitlesgr']
    f += [i for i in self.list if i['source'] == 'podnapisi']
    f += [i for i in self.list if i['source'] == 'vipsubs']

    self.list = f

    if dup_removal:
        self.list = [dict(t) for t in {tuple(d.items()) for d in self.list}]

    for i in self.list:

        try:
            if i['source'] == 'xsubstv':
                i['name'] = u'[xsubstv] {0}'.format(i['name'])
            elif i['source'] == 'podnapisi':
                i['name'] = u'[podnapisi] {0}'.format(i['name'])
            elif i['source'] == 'vipsubs':
                i['name'] = u'[vipsubs] {0}'.format(i['name'])
        except Exception:
            pass

    if control.setting('sorting') == '1':
        key = 'source'
    elif control.setting('sorting') == '2':
        key = 'downloads'
    elif control.setting('sorting') == '3':
        key = 'rating'
    else:
        key = 'title'

    # Cast to str before lower(); 'rating' values are numeric.
    self.list = sorted(
        self.list, key=lambda k: str(k[key]).lower(),
        reverse=control.setting('sorting') in ['1', '2', '3'])

    for i in self.list:

        u = {'action': 'download', 'url': i['url'], 'source': i['source']}
        u = '{0}?{1}'.format(self.sysaddon, urlencode(u))

        item = control.item(label='Greek', label2=i['name'])
        item.setArt({'icon': str(i['rating'])[:1], 'thumb': 'el'})

        if ratio(splitext(i['title'].lower())[0],
                 splitext(match_title)[0]) >= int(control.setting('sync_probability')):
            item.setProperty('sync', 'true')
        else:
            item.setProperty('sync', 'false')

        item.setProperty('hearing_imp', 'false')

        control.addItem(handle=self.syshandle, url=u, listitem=item, isFolder=False)

    control.directory(self.syshandle)