def resolve(url):

    url = RESOLVE_LINK.format(url)
    result = client.request(url, mobile=True)
    result = json.loads(result.replace(' ', ''))

    item = result['media'][0]

    url = item['mediaUrl'][0]['liveURL']
    if not url.startswith('http://'):
        url = '{0}{1}'.format('http://', url)
    url = client.replaceHTMLCodes(url)
    # url = client.request(url, output='geturl')

    title = item['name'].strip()
    title = client.replaceHTMLCodes(title)

    image = item['logo']
    image = IMAGE_LINK.format(image)
    image = image.replace('/promo/', '/500/')
    if image.endswith('/nologo.png'):
        image = '0'
    image = client.replaceHTMLCodes(image)

    return title, url, image
def directory_list(url):

    self_list = []

    result = client.request(url, mobile=True, output='json')

    if 'categories' in result:
        items = result['categories']
    else:
        items = result['countries']

    for item in items:

        if 'categoryName' in item:
            title = item['categoryName']
        else:
            title = item['regionName']
        title = client.replaceHTMLCodes(title)

        if 'categoryID' in item:
            url = CATEGORY_LINK.format(str(item['categoryID']))
        elif 'regionID' in item:
            url = REGION_LINK.format(str(item['regionID']))
        url = client.replaceHTMLCodes(url)

        self_list.append({'title': title, 'url': url})

    return self_list
def radios_list(url):

    result = client.request(url, mobile=True)
    result = json.loads(result)

    items = result['media']

    self_list = []

    for item in items:

        title = item['name'].strip()
        title = client.replaceHTMLCodes(title)

        url = str(item['stationID'])
        url = client.replaceHTMLCodes(url)

        image = item['logo']
        image = IMAGE_LINK.format(image)
        image = image.replace('/promo/', '/500/')
        if image.endswith('/nologo.png'):
            image = '0'
        image = client.replaceHTMLCodes(image)

        self_list.append({'title': title, 'url': url, 'image': image})

    return self_list
def index_cy(self, url):

    html = client.request(url)

    items = [i for i in client.parseDOM(html, 'div', attrs={'class': 'box'}) if urlparse(url).path in i]

    try:
        next_link = client.parseDOM(html, 'a', attrs={'class': 'pager__link pager__link--next'}, ret='href')[0]
        next_link = urljoin(url.partition('?')[0], next_link)
    except Exception:
        next_link = None

    for item in items:

        try:
            title_field = client.parseDOM(item, 'div', {'class': 'box__overlay-title'})[0]
        except IndexError:
            continue

        title = client.replaceHTMLCodes(client.parseDOM(title_field, 'a')[0]).replace(u'ᵒ', u' μοίρες').strip()
        subtitle = client.replaceHTMLCodes(client.parseDOM(item, 'div', {'class': 'box__overlay-subtitle'})[0])
        label = ' | '.join([title, subtitle])

        url = client.parseDOM(title_field, 'a', ret='href')[0]
        url = urljoin(self.basecy_link, url + '/webtv')

        image = client.parseDOM(item, 'img', ret='src')[0]

        data = {'title': label, 'image': image, 'url': url, 'name': title}

        if next_link:
            data.update({'next': next_link})

        self.list.append(data)

    return self.list
def items_list(self, url):

    page = url

    result = client.request(page)

    try:

        if "contentContainer_totalpages" in result:

            totalPages = int(re.search(r'contentContainer_totalpages = (\d+);', result).group(1))
            seriesId = re.search(r'/templates/data/morevideos\?aid=(\d+)', result).group(1)

            items = []
            threads = []

            for i in list(range(1, totalPages + 1)):
                threads.append(workers.Thread(self.thread, self.more_videos + seriesId + "&p=" + str(i), i - 1))
                self.data.append('')

            [i.start() for i in threads]
            [i.join() for i in threads]

            for i in self.data:
                items.extend(client.parseDOM(i, "article"))

        else:

            items = client.parseDOM(result, "article")

    except:

        pass

    for item in items:

        try:

            title = client.parseDOM(item, "h2")[0]
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')

            link = client.parseDOM(item, "a", ret="href")[0]

            if re.match(r'/.+/(\d+)/.+', link) is not None:

                episodeId = re.search(r'/.+/(\d+)/.+', link).group(1)

                episodeJSON = client.request(self.episodes_link + episodeId)
                episodeJSON = json.loads(episodeJSON)

                url = episodeJSON['url']
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

            else:

                url = self.base_link + link + '/videos'

            image = client.parseDOM(item, "img", ret="src")[0]
            image = client.replaceHTMLCodes(image)
            image = image.encode('utf-8')

            self.list.append({'title': title, 'url': url, 'image': image})

        except:

            pass

    return self.list
def item_list_1(self, url, lang):

    try:
        request = urllib.urlencode({'request': self.post_link % (url, lang)})
        result = client.request(self.api_link, post=request)
        result = json.loads(result)
        items = result['programs']
    except:
        return

    for item in items:

        try:

            title = item['title']
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')

            url = str(item['pId'])
            url = self.program_link % url
            url = url.encode('utf-8')

            image = item['img']
            image = self.img_link % image
            image = image.encode('utf-8')

            self.list.append({'title': title, 'url': url, 'image': image})

        except:

            pass

    return self.list
def news_episodes_listing(self, query):

    threads = []

    for i in list(range(1, 101)):
        threads.append(workers.Thread(self.thread, i, self.newsgr_link_ajax.format(page=str(i), category=query)))
        self.data.append('')

    [i.start() for i in threads]
    [i.join() for i in threads]

    html = '\n'.join(self.data)

    items = client.parseDOM(html, 'div', attrs={'class': 'newsItem'})

    for item in items:

        label = client.replaceHTMLCodes(client.parseDOM(item, 'a')[1])
        title = u'[CR]'.join([label, client.parseDOM(item, 'time')[0]])
        image = client.parseDOM(item, 'img', ret='src')[0]
        url = client.parseDOM(item, 'a', ret='href')[1]

        self.list.append({'title': title, 'image': image, 'url': url})

    return self.list
def list_live_items():

    html = client.request(main_link)

    items = parseDOM2(html, 'article', attrs={'id': re.compile(r'stream-.+')})

    self_list = []

    for item in items:

        url = parseDOM2(item.content, 'a', attrs={'class': 'live-stream-button.+'})[0].attrs.get('data-source')

        title = parseDOM2(item.content, 'div', attrs={'class': 'a2a_kit a2a_kit_size_24 addtoany_list'})[0].attrs.get('data-a2a-title')
        title = client.replaceHTMLCodes(title)

        image = parseDOM2(item.content, 'img', attrs={'class': 'horizontal-thumbnail'})[0].attrs.get('data-src')

        data = {'title': title, 'image': image, 'url': url}

        self_list.append(data)

    return self_list
def gm_sports(self):

    if CACHE_DEBUG:
        html = root(SPORTS)
    else:
        html = cache.get(root, 48, SPORTS)

    options = re.compile('(<option value.+?</option>)', re.U).findall(html)

    icons = [
        'https://www.shareicon.net/data/256x256/2015/11/08/157712_sport_512x512.png',
        'https://www.shareicon.net/data/256x256/2015/12/07/196797_ball_256x256.png'
    ]

    items = zip(options, icons)

    for item, image in items:

        title = client.parseDOM(item, 'option')[0]

        url = client.parseDOM(item, 'option', ret='value')[0]
        url = client.replaceHTMLCodes(url)
        index = urljoin(GM_BASE, url)

        data = {'title': title, 'action': 'listing', 'url': index, 'image': image}

        self.list.append(data)

    directory.add(self.list)
def archive(self):

    self.list = cache.get(youtube.youtube(key=self.yt_key).playlists, 12, self.yt_channel)

    if self.list is None:
        return

    for i in self.list:

        i['title'] = client.replaceHTMLCodes(i['title'])
        i.update({'action': 'episodes'})

        bookmark = dict((k, v) for k, v in iteritems(i) if not k == 'next')
        bookmark['bookmark'] = i['url']

        i.update({'cm': [{'title': 32501, 'query': {'action': 'addBookmark', 'url': json.dumps(bookmark)}}]})

    control.sortmethods('title')

    directory.add(self.list, content='videos')
def recent(self):

    if control.setting('region') == 'CY':
        url = self.yt_id_cy
    else:
        url = self.yt_id_gr

    self.list = self.yt_videos(url)

    if self.list is None:
        return

    for i in self.list:
        i.update({'title': client.replaceHTMLCodes(py2_decode(i['title'])), 'action': 'play', 'isFolder': 'False'})

    if len(self.list) > int(control.setting('pagination_integer')) and control.setting('paginate') == 'true':

        try:
            pages = utils.list_divider(self.list, int(control.setting('pagination_integer')))
            self.list = pages[int(control.setting('page'))]
            reset = False
        except Exception:
            pages = utils.list_divider(self.list, int(control.setting('pagination_integer')))
            self.list = pages[0]
            reset = True

        self.list.insert(0, self.page_menu(len(pages), reset=reset))

    directory.add(self.list)
def _cartoon_various(self, url):

    if url is None:
        url = '{0}/genre/gamato/'.format(GK_BASE)

    html = client.request(url)

    next_link = client.parseDOM(html, 'a', attrs={'class': 'arrow_pag'}, ret='href')[-1]

    html = client.parseDOM(html, 'div', attrs={'class': 'items'})[0]

    items = client.parseDOM(html, 'article', attrs={'id': r'post-\d+'})

    for item in items:

        h3 = client.parseDOM(item, 'h3')[0]

        title = client.parseDOM(h3, 'a')[0]
        title = client.replaceHTMLCodes(title)

        url = client.parseDOM(h3, 'a', ret='href')[0]

        meta = client.parseDOM(item, 'div', attrs={'class': 'metadata'})[0]

        try:
            span = client.parseDOM(meta, 'span')
            etos = [s for s in span if len(s) == 4][0]
            plot = client.parseDOM(item, 'div', attrs={'class': 'texto'})[0]
            duration = [s for s in span if s.endswith('min')][0]
            duration = int(re.search(r'(\d+)', duration).group(1)) * 60
        except IndexError:
            plot = u'Μεταγλωτισμένο'
            etos = '2022'
            duration = 3600

        year = ''.join(['(', etos, ')'])
        label = ' '.join([title, year])

        image = client.parseDOM(item, 'img', ret='data-lazy-src')[0]

        i = {
            'title': label,
            'url': url,
            'image': image,
            'nextlabel': 30334,
            'next': next_link,
            'plot': plot,
            'duration': duration,
            'year': int(etos),
            'nexticon': iconname('next')
        }

        self.list.append(i)

    return self.list
def _plot(url):

    load = client.request(url.partition('?')[0], post=url.partition('?')[2], timeout=20)

    description = parseDOM(load, 'div', {'class': 'video-description'})[-1]
    paragraphs = [client.stripTags(p) for p in parseDOM(description, 'p')]
    plot = client.replaceHTMLCodes('[CR]'.join(paragraphs))

    return plot
def videos_list(self, url, lang):

    try:

        request = urlencode({'request': self.post_link % (url, lang)})
        result = client.request(self.api_link, post=request)
        result = json.loads(result)

        items = []

        if 'themedetailslist' in result:
            items = result['themedetailslist']
        elif 'programDetailsList' in result:
            items = result['programDetailsList']
        elif 'homelist' in result:
            items = result['homelist']

    except:

        return

    for item in items:

        try:

            title = item['title']
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')

            url = str(item['id'])
            url = url.encode('utf-8')

            image = self.img2_link % (url, url)
            image = image.encode('utf-8')

            self.list.append({'title': title, 'url': url, 'image': image})

        except:

            pass

    threads = []

    for i in list(range(0, len(self.list))):
        threads.append(workers.Thread(self.list_worker, i, lang))

    [i.start() for i in threads]
    [i.join() for i in threads]

    self.list = [i for i in self.list if 'check' in i and not (i['check'] == '' or i['check'] is None)]

    return self.list
def loop(self, item, header, count, next_url=None):

    data_id = item.attributes['data-id']

    img = item.attributes['style']
    image = re.search(r'url\((.+)\)', img).group(1)

    url = [i for i in itertags_wrapper(item.text, 'a', ret='href') if 'https' in i][0]

    meta_url = '?'.join([self.ajax_url, self.load_search.format(data_id=data_id)])

    if 'inside-page-thumb-titles' in item.text and control.setting('metadata') == 'false':

        fanart = None
        plot = None

        title = parseDOM(item.text, 'div', attrs={'class': 'inside-page-thumb-titles'})[0]
        title = client.replaceHTMLCodes(parseDOM(title, 'a')[0])

    else:

        load = client.request(self.ajax_url, post=self.load_search.format(data_id=data_id), timeout=20)

        title = parseDOM(load, 'p', {'class': 'video-title'})[0].strip()
        title = client.replaceHTMLCodes(title)

        description = parseDOM(load, 'div', {'class': 'video-description'})[-1]
        paragraphs = [client.stripTags(p) for p in parseDOM(description, 'p')]
        plot = client.replaceHTMLCodes('[CR]'.join(paragraphs))

        f = parseDOM(load, 'div', attrs={'class': 'cover'}, ret='style')[0]
        fanart = re.search(r'url\((.+)\)', f).group(1)

    data = {'title': title, 'image': image, 'url': url, 'code': count, 'meta_url': meta_url}

    if next_url:
        data.update({'next': next_url})

    if header in [
        u'ΞΕΝΕΣ ΣΕΙΡΕΣ ΠΛΗΡΕΙΣ', u'ΨΥΧΑΓΩΓΙΑ', u'ΣΥΝΕΝΤΕΥΞΕΙΣ', u'ΕΛΛΗΝΙΚΑ ΝΤΟΚΙΜΑΝΤΕΡ',
        u'ΞΕΝΑ ΝΤΟΚΙΜΑΝΤΕΡ', u'ΠΑΙΔΙΚΑ', u'Η ΕΡΤ ΘΥΜΑΤΑΙ', u'ΑΘΛΗΤΙΚΑ',
        u'ΞΕΝΕΣ ΣΕΙΡΕΣ CATCH-UP', u'WEB ΣΕΙΡΕΣ', u'ΝΕΕΣ ΕΛΛΗΝΙΚΕΣ ΣΕΙΡΕΣ'
    ] and 'archeio' not in url and header is not None:
        data.update({'playable': 'false'})

    if fanart:
        data.update({'fanart': fanart})

    if plot:
        data.update({'plot': plot})

    self.list.append(data)
def account_info():

    url = reddit_url('/api/v1/me')

    json_obj = client.request(url, headers=request_headers())

    name = json.loads(json_obj)['name']
    icon_img = json.loads(json_obj)['icon_img']

    return {'name': name, 'icon_img': client.replaceHTMLCodes(icon_img)}
def t5_kind(children_data, next_url):

    display_name = client.replaceHTMLCodes(children_data['display_name'])
    title = client.replaceHTMLCodes(children_data['title'])
    public_description = legacy_replace(children_data['public_description'])
    description = legacy_replace(children_data['description'])

    plot = json.dumps({'title': title, 'public_description': public_description, 'description': description})

    subscribers = str(children_data['subscribers'])
    url = base_link() + children_data['url']
    name = children_data['name']

    image, fanart = image_generator(children_data)

    pairs = {
        'title': title + ' | ' + subscribers + self.formatting + '[I]' + display_name + '[/I]',
        'url': url,
        'image': image,
        'next': next_url,
        'fanart': fanart,
        'display_name': display_name,
        'name': name,
        'kind': 't5',
        'plot': plot
    }

    return pairs
def remote(url):

    if ('pastebin' in url or 'hastebin' in url or 'osmc.tv' in url) and 'raw' not in url:
        address = re.sub(r'(^.+?\.(?:com|tv)/)(\w+)', r'\1raw/\2', url)
    elif 'debian' in url and 'plain' not in url:
        address = re.sub(r'(^.+?\.net/)(\w+)', r'\1plain/\2', url)
    else:
        address = url

    if 'ubuntu' in url and 'plain' not in url:
        html = client.request(address)
        text = client.parseDOM(html, 'pre')[1]
        text = client.replaceHTMLCodes(text)
    else:
        text = client.request(address)

    if not text:
        return

    text = text.strip('\r\n')

    if len(text.splitlines()) in (3, 4):

        keys = text.splitlines()

    elif text.startswith('<?xml'):

        keys = [
            client.parseDOM(text, 'id')[0],
            client.parseDOM(text, 'api_key')[0],
            client.parseDOM(text, 'secret')[0]
        ]

    elif address.endswith('.json') or 'installed' in text:

        payload = json.loads(text)

        if 'installed' in payload:

            payload = payload['installed']

            if 'api_key' not in payload:

                control.okDialog(heading='Youtube Setup', line1=control.lang(30023))
                api_key = control.inputDialog()

                if not api_key:
                    return

            else:

                api_key = payload['api_key']

            keys = [payload['client_id'], api_key, payload['client_secret']]

        else:

            keys = None

    else:

        keys = None

    return keys
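# Hedged usage sketch for remote() above (not part of the original module): it
# shows how the returned keys are meant to be consumed. The paste URL is a
# placeholder, and the [client_id, api_key, client_secret] ordering is assumed
# from the XML/JSON branches of remote().
def _remote_usage_example():

    keys = remote('https://pastebin.com/XXXXXXXX')  # placeholder paste URL

    if not keys:
        return None

    client_id, api_key, client_secret = keys[:3]

    return client_id, api_key, client_secret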
def video_listing(self, url):

    html = client.request(url)

    try:
        nexturl = ''.join([
            self.old_base, '/videos',
            client.parseDOM(html, 'a', attrs={'rel': 'next'}, ret='href')[0].replace('&amp;', '&')
        ])
    except IndexError:
        nexturl = None

    video_list = client.parseDOM(html, 'div', attrs={'class': 'videoItem cell'}, ret='data-video-url')
    thumbnails = client.parseDOM(html, 'div', attrs={'class': 'videoItem cell'}, ret='data-video-poster')
    titles = client.parseDOM(html, 'div', attrs={'class': 'videoItem cell'}, ret='data-video-name')
    dates = client.parseDOM(html, 'div', attrs={'class': 'videoItem cell'}, ret='data-video-date')

    listing = list(zip(titles, dates, thumbnails, video_list))

    for title, date, image, video in listing:

        title = client.replaceHTMLCodes(title)
        label = ''.join([title, ' ', '(', date, ')'])

        self.list.append({
            'title': label,
            'image': image,
            'url': video,
            'next': nexturl,
            'nextlabel': 32500,
            'nextaction': 'videos'
        })

    return self.list
def recent_list(self):

    result = client.request(self.recent_link)

    items = parseDOM(result, 'item')

    for item in items:

        title = client.replaceHTMLCodes(parseDOM(item, 'title')[0])
        url = parseDOM(item, 'link')[0]
        image = parseDOM(item, 'img', ret='src')[0]

        self.list.append({'title': title, 'url': url, 'image': image})

    return self.list
def resolve(self, url):

    referer = url

    if '.m3u8' in url or '.mp4' in url or url.startswith('plugin'):
        return url

    html = client.request(url)

    if url == self.live_link_gr:

        url = client.parseDOM(html, 'div', attrs={'class': 'livePlayer'}, ret='data-liveurl')[0]

    elif url == self.live_link_cy:

        url = re.search(r'hls: [\'"](.+?)[\'"]', html).group(1)

    elif 'cloudskep' in html:

        url = client.parseDOM(html, 'a', {'class': 'player-play-inline hidden'}, ret='href')[0]
        signature = client.parseDOM(html, 'footer', {'class': 'footer'}, ret='player-signature')

        if signature:
            url = '?wmsAuthSign='.join([url, signature[0]])

    else:

        if 'data-plugin-player' not in html:

            qs = parse_qs(urlparse(url).query)

            video_id = qs['vid'][0]
            year = qs['year'][0]
            show_id = qs['showId'][0]

            html = client.request(self.player_query.format(video_id=video_id, show_id=show_id, year=year))

        try:
            object_ = client.parseDOM(html, 'div', attrs={'id': 'Video-1'}, ret='data-plugin-player')[0]
        except Exception:
            object_ = client.parseDOM(html, 'div', attrs={'id': 'currentvideourl'}, ret='data-plugin-player')[0]

        url = json.loads(client.replaceHTMLCodes(object_))['Url']

    if len(url) == 11:
        return self.yt_session(url)

    return url + user_agents.spoofer(referer=True, ref_str=referer)
def event(self, url):

    html = client.request(url)

    event_id = client.parseDOM(html, 'div', attrs={'id': 'event_id'})[0]
    teama_id = client.parseDOM(html, 'div', attrs={'id': 'teama_id'})[0]
    teamb_id = client.parseDOM(html, 'div', attrs={'id': 'teamb_id'})[0]

    items = client.request(self.event_link.format(event=event_id, team_a=teama_id, team_b=teamb_id), output='json')

    videos = [i for i in items if ('Has_Videos' in i and i['Has_Videos']) or ('MediaType' in i and i['MediaType'] == 'video')]

    for video in videos:

        title = client.replaceHTMLCodes(video['Title'])

        try:

            image = video['ImageLowQuality']

            if image:
                image = ''.join([self.base_link, image])
            else:
                image = control.icon()

            fanart = video['Image']

            if fanart:
                fanart = ''.join([self.base_link, fanart])
            else:
                fanart = None

        except KeyError:

            image = video['Image']

            if image:
                image = ''.join([self.base_link, image])
            else:
                image = control.icon()

            fanart = None

        url = ''.join([self.base_link, video['Link']])

        data = {'title': title, 'image': image, 'url': url, 'action': 'play', 'isFolder': 'False'}

        if fanart:
            data.update({'fanart': fanart})

        self.list.append(data)

    directory.add(self.list)
def front_pages(self):

    html = client.request(self.fp_link)

    try:
        groups = client.parseDOM(html.decode('utf-8'), 'div', attrs={'class': 'tabbertab.+?'})
    except (UnicodeEncodeError, UnicodeDecodeError, AttributeError):
        groups = client.parseDOM(html, 'div', attrs={'class': 'tabbertab.+?'})

    for group, papers in list(enumerate(groups)):

        items = client.parseDOM(papers, 'div', attrs={'class': 'thumber'})

        for i in items:

            title = client.parseDOM(i, 'img', attrs={'style': 'padding:5px.+?'}, ret='alt')[0]
            title = client.replaceHTMLCodes(title)

            image = client.parseDOM(i, 'img', attrs={'style': 'padding:5px.+?'}, ret='src')[0]
            image = ''.join([self.fp_link, image])

            link = image.replace('300.jpg', 'I.jpg')

            data = {'title': title, 'image': image, 'url': link, 'group': group}

            self.list.append(data)

    return self.list
def index_listing():

    html = client.request(INDEX_LINK)

    li = parseDOM(html, 'li')
    li.extend(parseDOM(html, 'li', attrs={'class': 'hideli'}))

    items = [i for i in li if 'title' in i]

    self_list = []

    for item in items:

        title = client.replaceHTMLCodes(parseDOM(item, 'a')[0])
        url = parseDOM(item, 'a', ret='href')[0]

        self_list.append({'title': title, 'url': url})

    self_list.sort(key=lambda k: k['title'].lower())

    return self_list
def index_listing(self):

    html = client.request(self.index_link)

    div = parseDOM(html, 'div', attrs={'class': 'wpb_wrapper'})[0]

    li = parseDOM(div, 'li')
    li.extend(parseDOM(div, 'li', attrs={'class': 'hideli'}))

    items = [i for i in li if 'category' in i and 'title' in i]

    for item in items:

        title = client.replaceHTMLCodes(parseDOM(item, 'a')[0])
        url = parseDOM(item, 'a', ret='href')[0]

        self.list.append({'title': title, 'url': url})

    self.list = sorted(self.list, key=lambda k: k['title'].lower())

    return self.list
def index_gr(self, url):

    html = client.request(url)

    items = [i for i in re.findall(r'(<a.+?/a>)', html, re.S) if 'BLOCK_SERIES' not in i]

    for item in items:

        url = client.parseDOM(item, 'a', ret='href')[0]

        title = client.replaceHTMLCodes(client.parseDOM(item, 'h3')[0]).replace(u'ᵒ', u' μοίρες')

        image = client.parseDOM(item, 'div', attrs={'class': 'tvShowImg'}, ret='style')[0]
        image = re.search(r'\([\'"](.+?)[\'"]\)', image).group(1)

        data = {'title': title, 'image': image, 'url': url}

        if 'alpha-news' in url:
            data.update({'action': 'news'})

        self.list.append(data)

    return self.list
def videos():

    self_list = yt_videos()

    if self_list is None:
        return

    for v in self_list:

        try:
            title = v['title'].decode('utf-8')
        except AttributeError:
            title = v['title']

        v.update({'action': 'play', 'isFolder': 'False', 'title': client.replaceHTMLCodes(title)})

    for item in self_list:

        bookmark = dict((k, v) for k, v in iteritems(item) if not k == 'next')
        bookmark['bookmark'] = item['url']

        bm_cm = {'title': 30006, 'query': {'action': 'addBookmark', 'url': json.dumps(bookmark)}}
        refresh = {'title': 30008, 'query': {'action': 'refresh'}}
        cache_clear = {'title': 30005, 'query': {'action': 'cache_clear'}}

        item.update({'cm': [refresh, cache_clear, bm_cm]})

    directory.add(self_list)
def recursive_list_items(url):

    page = 1

    if url.startswith('https'):

        if BASE_API_LINK not in url:

            html = client.request(url)

            script = [i for i in client.parseDOM(html, 'script') if 'INITIAL_STATE' in i][0]
            script = re.sub(r'var _*?\w+_*? = ', '', script).replace(';</script>', '')

            if script.endswith(';'):
                script = script[:-1]

            _json = json.loads(script)

        else:

            _json = client.request(url, output='json')

        if '/list' in url:

            codename = split(url)[1].partition('=')[2]

            total_pages = _json['pages']['sectionsByCodename'][codename]['totalPages']
            page = _json['pages']['sectionsByCodename'][codename]['fetchedPage']
            tiles = _json['pages']['sectionsByCodename'][codename]['tilesIds']

            tiles_post_list = [{'id': i} for i in tiles]

        else:

            tiles = []

            if 'GetSeriesDetails' in url:

                episode_groups = _json['episodeGroups']

                for group in episode_groups:

                    episodes = group['episodes']

                    for episode in episodes:
                        codename = episode['id']
                        tiles.append(codename)

                tiles_post_list = [{'id': i} for i in tiles]
                total_pages = 1

            else:

                codenames = list(_json['pages']['sectionsByCodename'].keys())

                for codename in codenames:
                    tiles_list = _json['pages']['sectionsByCodename'][codename]['tilesIds']
                    tiles.extend(tiles_list)

                tiles_post_list = [{'id': i} for i in tiles]
                total_pages = 1

    else:

        if url.startswith('{"platformCodename":"www"'):

            collection_json = json.loads(url)

            url = collection_json['orCollectionCodenames']
            page = collection_json['page']

        filter_tiles = client.request(FILTER_TILES, post=collection_post(url, page), output='json')

        total_pages = filter_tiles['pagination']['totalPages']
        page = filter_tiles['pagination']['page']

        tiles = filter_tiles['tiles']

        tiles_post_list = [{'id': i['id']} for i in tiles]

    if total_pages > 1 and page < total_pages:
        page = page + 1
        next_post = collection_post(url, page)
    else:
        next_post = None

    get_tiles = client.request(GET_TILES, post=tiles_post(tiles_post_list), output='json')

    tiles_list = get_tiles['tiles']

    self_list = []

    for tile in tiles_list:

        if tile['isRegionRestrictionEnabled'] and not geo_detect:
            continue

        title = tile['title']

        if 'subtitle' in tile:
            title = ' - '.join([title, tile['subtitle']])

        try:

            if tile.get('isEpisode'):

                try:
                    season = ' '.join([control.lang(30063), str(tile['season']['seasonNumber'])])
                except KeyError:
                    season = None

                if not season:
                    subtitle = ' '.join([control.lang(30064), str(tile['episodeNumber'])])
                else:
                    try:
                        subtitle = ''.join([season, ', ', control.lang(30064), ' ', str(tile['episodeNumber'])])
                    except KeyError:
                        subtitle = tile['publishDate'].partition('T')[0]
                        subtitle = '/'.join(subtitle.split('-')[::-1])

                title = '[CR]'.join([title, subtitle])

        except Exception:

            pass

        images = tile['images']
        fanart = control.fanart()

        if len(images) == 1:

            image = images[0]['url']

        else:

            image_list = [
                [i['url'] for i in images if i['isMain']],
                [i['url'] for i in images if i['role'] == 'hbbtv-icon'],
                [i['url'] for i in images if i['role'] == 'photo'],
                [i['url'] for i in images if i['role'] == 'hbbtv-background']
            ]

            image = images[0]['url']

            for i in image_list:
                if i:
                    image = i[0]
                    break

            fanart_list = [
                [i['url'] for i in images if i['role'] == 'photo-details'],
                [i['url'] for i in images if i['role'] == 'hbbtv-background'],
                [i['url'] for i in images if i['role'] == 'photo' and 'ertflix-background' in i['url']]
            ]

            for f in fanart_list:
                if f and len(f) > 1:
                    fanart = f[1]
                    break
                elif f and len(f) == 1:
                    fanart = f[0]
                    break

        codename = tile['codename']
        vid = tile['id']

        plots = [
            tile.get('description'), tile.get('shortDescription'), tile.get('tinyDescription'),
            tile.get('subtitle'), tile.get('subTitle')
        ]

        plot = control.lang(30014)

        for p in plots:
            if p:
                plot = client.stripTags(p)
                plot = client.replaceHTMLCodes(plot)
                break

        year = tile.get('year')

        if not year:
            try:
                year = int(tile.get('productionYears')[:4])
            except Exception:
                year = 2021

        if tile.get('hasPlayableStream') and not tile.get('type') == 'ser':
            url = VOD_LINK.format('-'.join([vid, codename]))
        else:
            url = GET_SERIES_DETAILS.format(vid)

        data = {'title': title, 'image': image, 'fanart': fanart, 'url': url, 'plot': plot, 'year': year}

        if tile.get('durationSeconds'):
            data.update({'duration': tile.get('durationSeconds')})

        if next_post:
            data.update({
                'next': next_post,
                'nextaction': 'listing',
                'nextlabel': 30500,
                'nexticon': control.addonmedia('next.jpg')
            })

        if tile.get('hasPlayableStream') and not tile.get('type') == 'ser':
            data.update({'action': 'play', 'isFolder': 'False'})
        else:
            data.update({'action': 'listing'})

        self_list.append(data)

    return self_list
def sub_index_listing(url):

    html = client.request(url)

    name = client.parseDOM(html, 'h1', attrs={'class': 'tdb-title-text'})[0]
    name = client.replaceHTMLCodes(name)

    links = [l for l in list(itertags(html, 'a')) if 'su-button' in l.attributes.get('class', '')]

    if not links:
        links = [l for l in list(itertags(html, 'a')) if l.text and u'Επεισόδια' in l.text]

    description = client.replaceHTMLCodes(
        client.stripTags(client.parseDOM(html, 'div', attrs={'class': 'tdb-block-inner td-fix-index'})[-2]))

    if '</div>' in description:
        description = client.stripTags(description.partition('</div>')[2])
    else:
        description = client.stripTags(description)

    image_div = [i for i in list(itertags(html, 'div')) if 'sizes' in i.text]

    image = re.search(r'w, (http.+?\.(?:jpg|png)) 300w', image_div[0].text).group(1)
    fanart = re.search(r'(http.+?\.(?:jpg|png))', image_div[0].text).group(1)

    self_list = []

    for link in links:

        title = ' - '.join([name, client.stripTags(link.text).strip()])
        url = client.replaceHTMLCodes(link.attributes['href'])

        action = 'listing'

        if 'series' in link.attributes['href']:
            url = split(url)[1].split('-')[0]
            url = GET_SERIES_DETAILS.format(url)
        elif 'vod' in link.attributes['href']:
            action = 'play'

        data = {
            'title': title,
            'url': url,
            'image': image,
            'fanart': fanart,
            'plot': description,
            'action': action
        }

        if data['action'] == 'play':
            data.update({'title': name, 'label': title, 'isFolder': 'False'})

        self_list.append(data)

    if not self_list:
        self_list.append({
            'title': ''.join([name, ' - ', control.lang(30022)]),
            'action': 'read_plot',
            'isFolder': 'False',
            'isPlayable': 'False',
            'plot': description,
            'image': image,
            'fanart': fanart
        })

    plot_item = {
        'title': ''.join(['[B]', name, ' - ', control.lang(30021), '[/B]']),
        'action': 'read_plot',
        'isFolder': 'False',
        'isPlayable': 'False',
        'plot': description,
        'image': image,
        'fanart': fanart
    }

    self_list.append(plot_item)

    return self_list
def t3_kind(children_data, next_url):

    title = client.replaceHTMLCodes(children_data['title'])
    name = children_data['name']
    author = children_data['author']
    domain = children_data['domain']
    num_comments = str(children_data['num_comments'])

    try:
        if domain.startswith('self.'):
            selftext = legacy_replace(children_data['selftext'])
            if selftext == '':
                selftext = title
        else:
            selftext = None
    except KeyError:
        selftext = None

    subreddit = children_data['subreddit']
    subreddit_id = children_data['subreddit_id']
    url = children_data['url']
    permalink = base_link() + children_data['permalink']

    image, fanart = image_generator(children_data)

    if access_boolean() and 'reddit' in url and 'video' not in url:
        url = url.replace('www.reddit', 'oauth.reddit')

    label = title + ' | ' + subreddit + ' | ' + '[B]' + author + '[/B]' + self.formatting + '[I]' + domain + '[/I]' + ' | ' + '[B]' + control.lang(30103) + num_comments + '[/B]'

    pairs = {
        'label': label,
        'title': title,
        'url': url,
        'image': image,
        'fanart': fanart,
        'next': next_url,
        'subreddit_id': subreddit_id,
        'subreddit': subreddit,
        'subreddit_url': base_link() + '/r/' + subreddit,
        'kind': 't3',
        'permalink': permalink,
        'domain': domain,
        'name': name,
        'selftext': selftext,
        'author': author,
        'plot': selftext,
        'query': client.quote_paths(permalink)
    }

    return pairs