def submenu(item):
    # Build a filter submenu from the page's <select> dropdowns.
    # Returns locals(): the framework reads data / action / patronMenu /
    # itemlistHook from the returned dict, so those names are the interface.
    data = support.match(item.url + item.args).data
    action = 'filter'
    # One menu entry per dropdown: heading text, the <select> id (used as the
    # query parameter name) and the raw <option> markup.
    patronMenu = r'<h5 class="[^"]+">(?P<title>[^<]+)[^>]+>[^>]+>\s*<select id="(?P<parameter>[^"]+)"[^>]+>(?P<data>.*?)</select>'

    def itemlistHook(itemlist):
        # Prepend an unfiltered "Tutti" (All) entry and drop the last scraped
        # menu item.
        itemlist.insert(
            0,
            item.clone(title=support.typo('Tutti', 'bold'),
                       url=item.url + item.args,
                       action='peliculas'))
        return itemlist[:-1]
    return locals()
def __init__(self, itemlist, item=None):
    # Initialise renumbering state for an episode itemlist.
    # NOTE(review): nesting reconstructed from flattened source — the
    # context-menu loop is assumed to run only when no item is given;
    # confirm against callers.
    self.item = item
    self.itemlist = itemlist
    self.selectspecials = False
    self.manual = False
    self.auto = False
    if self.item:
        self.renumberdict = load(item)  # saved renumber config for this channel
        self.auto = config.get_setting('autorenumber', item.channel)
        self.title = self.item.fulltitle.strip()
        # Titles already in an "SxE"-like form need no renumbering at all.
        if match(self.itemlist[0].title, patron=r'[Ss]?(\d+)(?:x|_|\s+)[Ee]?[Pp]?(\d+)').match:
            item.exit = True
            return
        elif self.item.channel in self.item.channel_prefs and RENUMBER in self.item.channel_prefs[
                item.channel] and self.title not in self.renumberdict:
            # Show is in the videolibrary but has no saved renumber entry yet:
            # run the renumber options flow and refresh the library.
            from core.videolibrarytools import check_renumber_options
            from specials.videolibrary import update_videolibrary
            check_renumber_options(self.item)
            update_videolibrary(self.item)
        # Pull the per-show saved state (empty defaults when unknown).
        self.series = self.renumberdict.get(self.title, {})
        self.id = self.series.get(ID, 0)
        self.episodes = self.series.get(EPISODES, {})
        self.seasonsdict = self.series.get(SEASONSDICT, {})
        self.season = self.series.get(SEASON, -1)
        self.episode = self.series.get(EPISODE, -1)
        self.manual = self.series.get(MANUALMODE, False)
        self.specials = self.series.get(SPECIALEPISODES, {})
        if self.id and self.episodes and self.season >= 0 and self.episode >= 0:
            # Complete saved state: reconfigure on request, else apply it.
            if self.item.renumber:
                self.config()
            else:
                self.renumber()
        elif self.auto or self.item.renumber:
            # No usable saved episodes: start configuration from scratch.
            self.episodes = {}
            self.config()
    else:
        self.renumberdict = {}
        # No target item: just attach the "renumber" context-menu entry to
        # every non-movie element of the list.
        for item in self.itemlist:
            if item.contentType != 'movie':
                item.context = [{
                    "title": typo(config.get_localized_string(70585), 'bold'),
                    "action": "start",
                    "channel": "autorenumber",
                    "from_channel": item.channel,
                    "from_action": item.action
                }]
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve NinjaStream video urls.

    Parses the HTML-escaped JSON held in the page's stream="..." attribute,
    then reads the HLS index to emit one entry per advertised resolution.
    Returns a list of [label, url] pairs as expected by servertools.
    """
    global data
    logger.debug("URL", page_url)
    video_urls = []
    # The attribute value is HTML-escaped JSON. The capture group [^"]+ can
    # never contain a literal double quote, so the &quot; entities must be
    # decoded before json.loads — the previous replace('"', '"') was a no-op
    # and the parse could never succeed on escaped content.
    h = json.loads(
        support.match(data, patron='stream="([^"]+)"').match.replace(
            '&quot;', '"').replace('\\', ''))
    baseurl = h['host'] + h['hash']
    matches = support.match(baseurl + '/index.m3u8',
                            patron=r'RESOLUTION=\d+x(\d+)\s*([^\s]+)').matches
    for quality, url in matches:
        video_urls.append([
            "{} {}p [NinjaStream]".format(url.split('.')[-1], quality),
            '{}/{}'.format(baseurl, url)
        ])
    return video_urls
def getHeaders():
    """Populate the module-level `headers` dict once: fetch the host page to
    obtain the CSRF token and the session cookies the API requires."""
    global headers
    if headers:
        return
    session = requests.Session()
    response = session.get(host)
    csrf_token = support.match(
        response.text, patron='name="csrf-token" content="([^"]+)"').match
    cookie = '; '.join('{}={}'.format(c.name, c.value) for c in response.cookies)
    headers = {
        'content-type': 'application/json;charset=UTF-8',
        'Referer': host,
        'x-csrf-token': csrf_token,
        'Cookie': cookie
    }
def search(item, texto):
    """Search both the movie and the tvshow catalogues for `texto`.

    Returns the combined itemlist; on any scrape/parse error it logs the
    exception and returns [] so the global search can continue.
    """
    support.log(item.url, "search", texto)
    itemlist = []
    try:
        query = "originalTitle=" + texto + "&translations.name=" + texto
        for kind, endpoint in (('movie', '/api/movies?'), ('tvshow', '/api/shows?')):
            item.url = host + endpoint + query
            data = support.match(item.url, headers=headers).data
            json_object = jsontools.load(data)
            for result in json_object['hydra:member']:
                item.contentType = kind
                itemlist.extend(get_itemlist_element(result, item))
        return itemlist
    # Continua la ricerca in caso di errore
    except:
        import sys
        for line in sys.exc_info():
            support.logger.error("%s" % line)
        return []
def findvideos(item):
    """Collect the player iframes, fetch each one and hand every og:url
    target found inside them to support.server for resolution."""
    log()
    matches, data = support.match(
        item,
        '<iframe class="metaframe rptss" src="([^"]+)"[^>]+>',
        headers=headers)
    for iframe_url in matches:
        html = httptools.downloadpage(iframe_url, headers=headers).data
        targets = scrapertoolsV2.find_multiple_matches(
            html, '<meta name="og:url" content="([^"]+)">')
        data += str(targets)
    return support.server(item, data)
def findvideos(item):
    """Extract all links from the entry content and resolve is.gd
    shorteners by reading their redirect Location header."""
    support.log('findvideos ->', item)
    block = '<div class="entry-content">(?P<block>.*)<footer class="entry-footer">'
    matches, data = support.match(item, r'<a href="([^"]+)">', block, headers)
    if item.args != 'episodios':
        item.infoLabels['mediatype'] = 'episode'
    for url in matches:
        if 'is.gd' not in url:
            continue
        # Shortened link: the real target is in the redirect header.
        redirect = httptools.downloadpage(url, follow_redirects=False)
        data += redirect.headers.get("location", "") + '\n'
    return support.server(item, data)
def findvideos(item):
    """Build one direct-play item per `filename:` entry found on the page."""
    support.info()
    urls = support.match(item, patron=r'filename: "(.*?)"').matches
    itemlist = [
        item.clone(action="play",
                   title=support.config.get_localized_string(30137),
                   server='directo',
                   url=host + path)
        for path in urls
    ]
    return support.server(item, itemlist=itemlist)
def findvideos(item):
    """Route series pages to episodios(); otherwise emit a single
    direct-play item for the page's <source> url."""
    support.log()
    html = support.match(item, patron=r'TIPO:\s*</b>\s*([A-Za-z]+)')
    if html.match == 'TV' and item.contentType != 'episode':
        # Series page scraped as a movie: switch type and list episodes.
        item.contentType = 'tvshow'
        item.data = html.data
        return episodios(item)
    if item.contentType != 'episode':
        item.contentType = 'movie'
    video = support.match(html.data, patron=r'<source src="([^"]+)"').match
    itemlist = [
        support.Item(channel=item.channel,
                     action="play",
                     title='Diretto',
                     quality='',
                     url=video,
                     server='directo',
                     fulltitle=item.fulltitle,
                     show=item.show,
                     contentType=item.contentType,
                     folder=False)
    ]
    return support.server(item, itemlist=itemlist)
def lista_serie(item):
    """Paginated series list (15 per page).

    item.url may embed the page number after '{}' and may carry pre-scraped
    "url||title" pairs separated by blank lines instead of a page to scrape.
    """
    log()
    itemlist = []
    PERPAGE = 15
    page = 1
    if '{}' in item.url:
        item.url, page = item.url.split('{}')
        page = int(page)
    if '||' in item.url:
        # Pre-scraped entries embedded in the url itself.
        matches = [entry.split('||') for entry in item.url.split('\n\n')]
    else:
        # Scrape the category list.
        patron = r'<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
        matches = support.match(item, patron, headers=headers)[0]
    first = (page - 1) * PERPAGE
    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        if i < first:
            continue
        if i >= page * PERPAGE:
            break
        title = cleantitle(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="episodios",
                 title=title,
                 url=scrapedurl,
                 thumbnail="",
                 fulltitle=title,
                 show=title,
                 plot="",
                 contentType='episode',
                 originalUrl=scrapedurl,
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Paginazione: only when the current page was full.
    if len(matches) >= page * PERPAGE:
        support.nextPage(itemlist, item, next_page=(item.url + '{}' + str(page + 1)))
    return itemlist
def episodios(item):
    # Walk the external player's season/episode pages and build a
    # "SxE|url" line-per-episode blob; returns locals() so the framework
    # can parse it with `patron` and dispatch to `action`.
    data = ''
    # The player lives in an iframe; [1] is the page data of the match call.
    url = support.match(
        item,
        patronBlock=
        r'<iframe width=".+?" height=".+?" src="([^"]+)" allowfullscreen frameborder="0">'
    )[1]
    seasons = support.match(item, r'<a href="([^"]+)">(\d+)<',
                            r'<h3>STAGIONE</h3><ul>(.*?)</ul>', headers, url)[0]
    for season_url, season in seasons:
        season_url = support.urlparse.urljoin(url, season_url)
        episodes = support.match(item, r'<a href="([^"]+)">(\d+)<',
                                 '<h3>EPISODIO</h3><ul>(.*?)</ul>', headers,
                                 season_url)[0]
        for episode_url, episode in episodes:
            episode_url = support.urlparse.urljoin(url, episode_url)
            # e.g. "1x03|https://..." — one episode per line.
            title = season + "x" + episode.zfill(2)
            data += title + '|' + episode_url + '\n'
    support.log('DaTa= ', data)
    patron = r'(?P<title>[^\|]+)\|(?P<url>[^\n]+)\n'
    action = 'findvideos'
    return locals()
def search_movie_by_genre(item):
    """Build one 'peliculas' entry per movie genre returned by the API."""
    support.log()
    data = support.match(item.url, headers=headers).data
    genres = jsontools.load(data)['hydra:member']
    itemlist = [
        Item(channel=item.channel,
             action="peliculas",
             title=support.typo(genre['name'], 'bold'),
             contentType='movie',
             url="%s/api/movies?genres.id=%s" % (host, genre['id']),
             extra=item.extra)
        for genre in genres
    ]
    return support.thumb(itemlist, True)
def findvideos(item):
    """Collect episode links (resolving is.gd redirects), then append a
    'go to series' entry pointing back at the show page."""
    support.info('findvideos ->', item)
    html = support.match(
        item,
        patron=r'<a href="([^"]+)">',
        patronBlock='<div class="entry-content">(?P<block>.*)<footer class="entry-footer">',
        headers=headers)
    data = html.data
    if item.args != 'episodios':
        item.infoLabels['mediatype'] = 'episode'
    for url in html.matches:
        if 'is.gd' in url:
            # Shortener: the real link is in the redirect Location header.
            resp = httptools.downloadpage(url, follow_redirects=False)
            data += resp.headers.get("location", "") + '\n'
    itemlist = support.server(item, data)
    # Derive the parent series from the post's category link.
    data = support.match(item.url).data
    series = scrapertools.find_single_match(
        data,
        r'>Posted in <a href="https?://fastsubita.com/serietv/([^/]+)/(?:[^"]+)?"')
    titles = support.typo(series.upper().replace('-', ' '), 'bold color kod')
    goseries = support.typo("Vai alla Serie:", ' bold color kod')
    itemlist.append(
        item.clone(channel=item.channel,
                   title=goseries + titles,
                   fulltitle=titles,
                   show=series,
                   contentType='tvshow',
                   contentSerieName=series,
                   url=host + "/serietv/" + series,
                   action='episodios',
                   contentTitle=titles,
                   plot="Vai alla Serie " + titles + " con tutte le puntate"))
    return itemlist
def play(item):
    # Resolve the playable stream: a direct HLS master playlist when the page
    # exposes a plain mp4 asset path, otherwise a Widevine-protected DASH
    # stream whose licence request is pre-authorised via a token call.
    support.info()
    data = support.match(item).data
    match = support.match(data, patron='/content/entry/data/(.*?).mp4').match
    if match:
        # Plain asset: rebuild the packager's HLS master playlist url.
        url = 'https://awsvodpkg.iltrovatore.it/local/hls/,/content/entry/data/' + support.match(item, patron='/content/entry/data/(.*?).mp4').match + '.mp4.urlset/master.m3u8'
        item = item.clone(title='Direct', url=url, server='directo', action='play')
    else:
        preurl = support.match(data, patron=r'preTokenUrl = "(.+?)"').match
        url = support.match(data, patron=r'''["]?dash["]?\s*:\s*["']([^"']+)["']''').match
        tokenHeader = {
            'host': headers['host_token'],
            'user-agent': headers['user-agent'],
            'accept': headers['accept'],
            'accept-language': headers['accept-language'],
            'dnt': headers['dnt'],
            'te': headers['te'],
            'origin': headers['origin'],
            'referer': headers['referer'],
        }
        # First obtain the pre-auth token that gates the licence server.
        preAuthToken = requests.get(preurl, headers=tokenHeader, verify=False).json()['preAuthToken']
        licenseHeader = {
            'host': headers['host_license'],
            'user-agent': headers['user-agent'],
            'accept': headers['accept'],
            'accept-language': headers['accept-language'],
            'preAuthorization': preAuthToken,
            'origin': headers['origin'],
            'referer': headers['referer'],
        }
        # Licence url format: <license_url>|<headers>|R{SSM}| — R{SSM} is
        # filled in by the player with the licence challenge.
        preLic = '&'.join(['%s=%s' % (name, value) for (name, value) in licenseHeader.items()])
        tsatmp = str(int(support.time()))
        license_url = key_widevine + '?d=%s' % tsatmp
        lic_url = '%s|%s|R{SSM}|' % (license_url, preLic)
        item.drm = DRM
        item.license = lic_url
    return support.servertools.find_video_items(item, data=url)
def episodios(item):
    # Build the episode list from the show JSON. Episode titles either carry
    # an explicit "Stagione N ... Episodio M" pattern or are plain numbered
    # episodes matched by season_id.
    itemlist = []
    json_file = current_session.get(item.url, headers=headers, params=payload).json()
    show_id = str(json_file['data'][0]['show_id'])
    season_id = str(json_file['data'][0]['season_id'])
    episodes = []
    support.info('SEASON ID= ', season_id)
    for episode in json_file['data']:
        episodes.append(episode['episodes'])
    for episode in episodes:
        for key in episode:
            if 'stagione' in encode(key['title']).lower():
                season = support.match(encode(key['title']), patron=r'[Ss]tagione\s*(\d+)').match
                # NOTE: rebinds the loop variable `episode` — intentional in
                # the original, the outer value is no longer needed here.
                episode = support.match(encode(key['title']), patron=r'[Ee]pisodio\s*(\d+)').match
                if season and episode:
                    title = season + 'x' + episode + ' - ' + item.fulltitle
                    make_item = True
                # NOTE(review): when the regexes fail, make_item keeps its
                # value from the previous iteration — confirm intended.
            elif int(key['season_id']) == int(season_id):
                try:
                    # py2 leftover: .encode('utf8') fails on py3 str.
                    title = 'Episodio ' + key['number'] + ' - ' + key['title'].encode('utf8')
                except:
                    title = 'Episodio ' + key['number'] + ' - ' + key['title']
                make_item = True
            else:
                make_item = False
            if make_item == True:
                if type(title) == tuple:
                    title = title[0]
                itemlist.append(
                    item.clone(title=title,
                               url=host + show_id + '/season/' + str(key['season_id']) + '/',
                               action='findvideos',
                               video_id=key['video_id']))
    autorenumber.renumber(itemlist, item, 'bold')
    # Offer the videolibrary entry only when numbering is usable.
    if autorenumber.check(item) == True \
            or support.match(itemlist[0].title, patron=r"(\d+x\d+)").match:
        support.videolibrary(itemlist, item)
    return itemlist
def peliculas(item):
    """List titles from one of three sources: an embedded slider JSON
    (item.args is its index), the browse API (genre filter), or the search
    API. Items are built concurrently, then sorted back into scrape order."""
    getHeaders()
    logger.debug()
    itemlist = []
    videoType = 'movie' if item.contentType == 'movie' else 'tv'
    page = item.page if item.page else 0
    offset = page * 60
    if type(item.args) == int:
        # Sliders are serialised as JSON inside an html attribute.
        data = support.scrapertools.decodeHtmlentities(support.match(item).data)
        records = json.loads(support.match(
            data,
            patron=r'slider-title titles-json="(.*?)" slider-name="').matches[item.args])
    elif not item.search:
        payload = json.dumps({'type': videoType, 'offset': offset, 'genre': item.args})
        records = session.post(host + '/api/browse', headers=headers, data=payload).json()['records']
    else:
        payload = json.dumps({'q': item.search})
        records = session.post(host + '/api/search', headers=headers, data=payload).json()['records']
    # Some endpoints return a list of lists: flatten one level.
    if records and type(records[0]) == list:
        js = [entry for sub in records for entry in sub]
    else:
        js = records
    with futures.ThreadPoolExecutor() as executor:
        pending = [executor.submit(makeItem, i, it, item) for i, it in enumerate(js)]
        for done in futures.as_completed(pending):
            itemlist.append(done.result())
    itemlist.sort(key=lambda entry: entry.n)
    if len(itemlist) >= 60:
        # A full page implies there may be another one.
        itemlist.append(item.clone(
            title=support.typo(support.config.get_localized_string(30992), 'color kod bold'),
            thumbnail=support.thumb(),
            page=page + 1))
    support.tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def findvideos(item):
    # Normalise the episode link (protocol-relative, site-relative and
    # shortener urls) and emit one direct-play item per button link.
    support.log(item)
    itemlist = []
    if item.number:
        # Pick the link from the table row of the requested episode number.
        item.url = support.match(item, r'<a href="([^"]+)"[^>]*>', patronBlock=r'Episodio %s(.*?)</tr>' % item.number)[0][0]
    if 'http' not in item.url:
        if '//' in item.url[:2]:
            item.url = 'http:' + item.url
        elif host not in item.url:
            item.url = host + item.url
    if 'adf.ly' in item.url:
        item.url = adfly.get_long_url(item.url)
    elif 'bit.ly' in item.url:
        item.url = support.httptools.downloadpage(
            item.url, only_headers=True, follow_redirects=False).headers.get("location")
    matches = support.match(item, r'button"><a href="([^"]+)"')[0]
    for video in matches:
        itemlist.append(
            support.Item(channel=item.channel,
                         action="play",
                         title='diretto',
                         url=video,
                         server='directo'))
    # NOTE(review): support.server's return value is discarded here, unlike
    # the sibling channels that return it — presumably it mutates itemlist in
    # place; confirm before changing.
    support.server(item, itemlist=itemlist)
    return itemlist
def findvideos(item):
    # Resolve VVVVID streams: decode embed_info, convert the f4m manifest to
    # an m3u8 master, and handle both plain and key-protected urls.
    from lib import vvvvid_decoder
    itemlist = []
    if item.contentType == 'movie':
        # Movies carry the show json url; jump to its first episode entry.
        json_file = current_session.get(item.url, headers=headers, params=payload).json()
        item.url = host + str(
            json_file['data'][0]['show_id']) + '/season/' + str(
                json_file['data'][0]['episodes'][0]['season_id']) + '/'
        item.video_id = json_file['data'][0]['episodes'][0]['video_id']
    logger.info('url=', item.url)
    json_file = current_session.get(item.url, headers=headers, params=payload).json()
    for episode in json_file['data']:
        logger.info(episode)
        if episode['video_id'] == item.video_id:
            url = vvvvid_decoder.dec_ei(episode['embed_info'] or episode['embed_info_sd'])
            if 'youtube' in url:
                item.url = url
            # NOTE(review): the youtube assignment above is immediately
            # overwritten here — looks unintentional; confirm whether youtube
            # links are ever expected to play.
            item.url = url.replace('manifest.f4m', 'master.m3u8').replace(
                'http://', 'https://').replace('/z/', '/i/')
            if 'https' not in item.url:
                # Bare media id: ask the top-ix wowza host for its playlist
                # and take the last (highest) variant.
                url = support.match(
                    'https://or01.top-ix.org/videomg/_definst_/mp4:' + item.url + '/playlist.m3u').data
                url = url.split()[-1]
                itemlist.append(
                    item.clone(
                        action='play',
                        title='direct',
                        url='https://or01.top-ix.org/videomg/_definst_/mp4:' + item.url + '/' + url,
                        server='directo'))
            else:
                # Protected stream: fetch the kenc key and append it as query.
                key_url = 'https://www.vvvvid.it/kenc?action=kt&conn_id=' + conn_id + '&url=' + item.url.replace(
                    ':', '%3A').replace('/', '%2F')
                key = vvvvid_decoder.dec_ei(
                    current_session.get(key_url, headers=headers, params=payload).json()['message'])
                itemlist.append(
                    item.clone(action='play',
                               title='direct',
                               url=item.url + '?' + key,
                               server='directo'))
    return support.server(item, itemlist=itemlist, Download=False)
def peliculas_tv(item):
    """Scrape episode posts; derive show title and season/episode info from
    each post title. Posts without an NxM marker (season packs etc.) are
    skipped."""
    log()
    itemlist = []
    matches, data = support.match(
        item,
        '<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>',
        headers=headers)
    for scrapedurl, scrapedtitle in matches:
        if scrapedtitle in ["FACEBOOK", "RAPIDGATOR", "WELCOME!"]:
            continue
        scrapedtitle = cleantitle(scrapedtitle)
        episode = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')
        if not episode:
            # workaround per quando mettono le serie intere o altra roba,
            # sarebbero da intercettare TODO
            continue
        episode = episode[0]
        title = scrapedtitle.split(" S0")[0].strip()
        title = title.split(" S1")[0].strip()
        title = title.split(" S2")[0].strip()
        infoLabels = {
            'season': episode[1],
            'episode': episode[2].zfill(2),
        }
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=title + " - " + episode[0] + " " + support.typo("Sub-ITA", '_ [] color kod'),
                 url=scrapedurl,
                 thumbnail="",
                 contentSerieName=title,
                 contentLanguage='Sub-ITA',
                 plot="",
                 infoLabels=infoLabels,
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Paginazione
    support.nextPage(itemlist, item, data,
                     r'<strong class="on">\d+</strong>\s<a href="([^<]+)">\d+</a>')
    return itemlist
def Trailer(info):
    # Fill the global trailer list — TMDB videos first, YouTube search as a
    # fallback — then open the modal trailer window.
    global info_list, trailers
    trailers = []
    trailers_list = []
    Type = info.getProperty('mediatype')
    if Type != "movie":
        Type = "tv"
    trailers_list = tmdb.Tmdb(id_Tmdb=info.getProperty('tmdb_id'), tipo=Type).get_videos()
    if trailers_list:
        for i, trailer in enumerate(trailers_list):
            item = xbmcgui.ListItem(trailer['name'])
            # NOTE(review): 'tile' looks like a typo for 'title', but the same
            # key is used consistently in both branches — confirm against
            # TrailerWindow.xml before renaming.
            item.setProperties({
                'tile': trailer['name'],
                'url': trailer['url'],
                'thumbnail': 'http://img.youtube.com/vi/' + trailer['url'].split('=')[-1] + '/0.jpg',
                'fanart': info.getProperty('fanart'),
                'position': '%s/%s' % (i + 1, len(trailers_list))
            })
            trailers.append(item)
    else:
        # TRY youtube search
        patron = r'thumbnails":\[\{"url":"(https://i.ytimg.com/vi[^"]+).*?'
        patron += r'text":"([^"]+).*?'
        patron += r'simpleText":"[^"]+.*?simpleText":"([^"]+).*?'
        patron += r'url":"([^"]+)'
        matches = support.match(
            'https://www.youtube.com/results?search_query=' + info.getProperty('title').replace(' ', '+') + '+trailer+ita',
            patron=patron).matches
        i = 0
        for thumb, title, text, url in matches:
            i += 1
            item = xbmcgui.ListItem(title + ' - ' + text)
            item.setProperties({
                'tile': title + ' - ' + text,
                'url': url,
                'thumbnail': thumb,
                'fanart': info.getProperty('fanart'),
                'position': '%s/%s' % (i, len(matches))
            })
            trailers.append(item)
    main = TrailerWindow('TrailerWindow.xml', config.get_runtime_path())
    add({'class': main, 'info': trailers, 'id': RECOMANDED, TRAILERS: 0})
    modal()
def calculateToken():
    """Build the '?token=...&expires=...' query string for media requests.

    token = url-safe base64(md5("{expires}{client_ip} {secret}")), where
    expires is 48 hours from now (epoch seconds).
    """
    from time import time
    from base64 import b64encode as b64
    import hashlib

    hours_valid = 48
    client_ip = support.match(host + '/client-address').data
    secret = 'Yc8U6r8KjAKAepEA'
    expires = int(time() + (3600 * hours_valid))
    payload = '{}{} {}'.format(expires, client_ip, secret)
    digest = hashlib.md5(payload.encode()).digest()
    # Url-safe base64: strip padding, '+' -> '-', '/' -> '_'.
    # The original replaced '\\' instead of '/', which is a no-op — b64encode
    # output never contains a backslash — so '/' could leak into the token.
    token = b64(digest).decode().replace('=', '').replace('+', '-').replace('/', '_')
    return '?token={}&expires={}'.format(token, expires)
def filter(item):
    """Turn the cached <option> entries into one filtered 'peliculas' item
    each, carrying the option value as a query parameter."""
    options = support.match(
        item.data if item.data else item.url,
        patron=r'<option value="(?P<value>[^"]+)"[^>]*>(?P<title>[^<]+)').matches
    itemlist = [
        item.clone(title=support.typo(name, 'bold'),
                   url='{}{}&{}%5B0%5D={}'.format(host, item.args, item.parameter, value),
                   action='peliculas',
                   args='filter')
        for value, name in options
    ]
    support.thumb(itemlist, genre=True)
    return itemlist
def years(item):
    """One 'peliculas' entry per year, newest first, back to the oldest
    anime date advertised by the site."""
    support.info()
    from datetime import datetime
    itemlist = []
    newest = datetime.today().year
    oldest = int(
        support.match(response.text, patron='anime_oldest_date="([^"]+)').match)
    for year in range(newest, oldest - 1, -1):
        item.args['year'] = year
        itemlist.append(
            item.clone(title=support.typo(year, 'bold'), action='peliculas'))
    return itemlist
def genres(item):
    """Build one 'peliculas' entry per genre parsed from the page attribute."""
    support.info()
    # support.dbg()
    itemlist = []
    # genres="..." holds HTML-escaped JSON; the capture group [^"]+ can never
    # include a literal quote, so the &quot; entities must be decoded before
    # json.loads — the previous replace('"', '"') was a no-op and the parse
    # could never succeed on escaped content.
    genres = json.loads(
        support.match(response.text,
                      patron='genres="([^"]+)').match.replace('&quot;', '"'))
    for genre in genres:
        item.args['genres'] = [genre]
        itemlist.append(
            item.clone(title=support.typo(genre['name'], 'bold'),
                       action='peliculas'))
    return support.thumb(itemlist)
def renumber(self):
    # Prefix plain-numbered episode titles with their renumbered "SxE" form.
    if not self.item.renumber and self.itemlist:
        # Season may be "season|offset"; the offset shifts absolute numbering.
        if '|' in self.Season:
            season = int(self.Season.split('|')[0])
            addNumber = int(self.Season.split('|')[-1]) - 1
        else:
            season = int(self.Season)
            addNumber = 0
        for item in self.itemlist:
            # Skip titles that already carry an SxE / S..E.. marker.
            if not match(
                    item.title,
                    patron=r'[Ss]?(\d+)(?:x|_|\s+)[Ee]?[Pp]?(\d+)').match:
                number = match(item.title, patron=r'(\d+)').match.lstrip('0')
                if number:
                    if number in self.Episodes:
                        if season > 0:
                            item.title = typo(
                                self.Episodes[number] + ' - ',
                                'bold') + item.title
                        else:
                            # Season 0 (specials): absolute number + offset.
                            item.title = typo(
                                '0x%s - ' % str(int(number) + addNumber),
                                'bold') + item.title
                    else:
                        # Unknown episode: rebuild the mapping, then retry.
                        # NOTE(review): duplicates the branch above and still
                        # raises KeyError if makelist() does not add `number`
                        # to self.Episodes — confirm makelist() guarantees it.
                        self.makelist()
                        if season > 0:
                            item.title = typo(
                                self.Episodes[number] + ' - ',
                                'bold') + item.title
                        else:
                            item.title = typo(
                                '0x%s - ' % str(int(number) + addNumber),
                                'bold') + item.title
                else:
                    # Title has no number at all: just refresh the mapping.
                    self.makelist()
def episodios(item):
    """Build the full episode list, season by season, from the show's JSON."""
    support.info()
    itemlist = []
    data = support.match(item.url, headers=headers).data
    for season in jsontools.load(data)['seasons']:
        season_url = host + season['@id'] + '/releases'
        season_items = get_season(item, season_url, season['seasonNumber'])
        if season_items:
            itemlist.extend(season_items)
    support.videolibrary(itemlist, item, 'color kod bold')
    support.download(itemlist, item)
    return itemlist
def load_links(itemlist, re_txt, desc_txt, quality=""):
    """Append one play item per link found inside the block of the page
    (module-level `data`/`item`) matched by `re_txt`."""
    streaming = scrapertools.find_single_match(data, re_txt).replace('"', '')
    logger.debug('STREAMING=', streaming)
    links = support.match(
        streaming, patron=r'<td><a.*?href=([^ ]+) [^>]+>([^<]+)<').matches
    for link_url, link_title in links:
        logger.debug("##### findvideos %s ## %s ## %s ##" % (desc_txt, link_url, link_title))
        itemlist.append(
            item.clone(action="play",
                       title=link_title,
                       url=link_url,
                       server=link_title,
                       quality=quality))
def findvideos(item):
    # Resolve the episode player page (site cookies required) down to the raw
    # <source> url and return it as a single direct item.
    support.log(item)
    itemlist = []
    if item.args == 'updated':
        # "Updated" entries link the list page: rebuild the real episode url
        # from the episode number in the title.
        ep = support.match(item.fulltitle, patron=r'(\d+)').match
        item.url = support.re.sub(r'episodio-\d+-|oav-\d+-' + ep, '', item.url)
        if 'streaming' not in item.url:
            item.url = item.url.replace('sub-ita', 'sub-ita-streaming')
        item.url = support.match(item, patron=ep + r'[^>]+>[^>]+>[^>]+><a href="([^"]+)"').match
    # post
    url = host + '/' + support.match(
        item.url, patron=r'(episodio\d*.php.*?)"').match.replace(
            '%3F', '?').replace('%3D', '=')
    headers['Referer'] = url
    cookies = ""
    # Replay the site cookies from the framework's Netscape-format cookie jar
    # (fields 5/6 of each tab-separated line are name/value).
    matches = support.re.compile(
        '(.%s.*?)\n' % host.replace("http://", "").replace("www.", ""),
        support.re.DOTALL).findall(support.config.get_cookie_data())
    for cookie in matches:
        cookies += cookie.split('\t')[5] + "=" + cookie.split('\t')[6] + ";"
    headers['Cookie'] = cookies[:-1]
    url = support.match(url, patron=r'<source src="([^"]+)"[^>]+>').match
    itemlist.append(
        support.Item(channel=item.channel,
                     action="play",
                     title='Diretto',
                     url=url + '|' + support.urllib.urlencode(headers),
                     server='directo'))
    return support.server(item, itemlist=itemlist)
def episodios(item):
    # Scrape the episode list; returns locals() so the framework reads
    # patronBlock / patron / itemHook from the returned dict.
    # The <option> dropdown maps list position -> real season number.
    seasons = support.match(
        item, patron=r'<option value="(\d+)"[^>]*>\D+(\d+)').matches
    patronBlock = r'</select><div style="clear:both"></div></h2>(?P<block>.*?)<div id="trailer" class="tab">'
    patron = r'(?:<div class="list (?:active)?")?\s*<a data-id="\d+(?:[ ](?P<lang>[SuUbBiItTaA\-]+))?"(?P<other>[^>]+)>.*?Episodio [0-9]+\s?(?:<br>(?P<title>[^<]+))?.*?Stagione (?P<season>[0-9]+) , Episodio - (?P<episode>[0-9]+).*?<(?P<url>.*?<iframe)'

    def itemHook(item):
        # Rewrite "positionxE" prefixes into "seasonxE" using the dropdown map.
        for value, season in seasons:
            info(value)
            info(season)
            item.title = item.title.replace(value + 'x', season + 'x')
        item.url += '\n' + item.other
        return item
    return locals()
def load_vid_series(html, item, itemlist, blktxt):
    """Append a play item for every server link found in `html`;
    `blktxt` is used as the quality label."""
    support.info('HTML', html)
    # Estrae i contenuti
    links = support.match(
        html,
        patron=r'<a href=(?:")?([^ "]+)[^>]+>(?!<!--)(.*?)(?:</a>|<img)').matches
    for link_url, server_name in links:
        entry = item.clone(action="play",
                           title=server_name,
                           url=link_url,
                           server=server_name,
                           quality=blktxt)
        if 'swzz' in entry.url:
            entry.url = support.swzz_get_url(entry)
        itemlist.append(entry)