def directory_picker(url, argv):
    """Build and render a playable 'movies' directory for *url*.

    Parses the plugin query string out of argv[2], fetches the listing and
    decorates every entry with a play action plus playlist context-menu items.
    """
    query = dict(parse_qsl(argv[2][1:]))
    listing = items_directory(url, query)

    if listing is None:
        return

    for entry in listing:
        # Fresh context-menu dicts per entry, as in the original implementation.
        context_menu = [
            {'title': 30226, 'query': {'action': 'add_to_playlist'}},
            {'title': 30227, 'query': {'action': 'clear_playlist'}},
        ]
        entry['cm'] = context_menu
        entry['action'] = 'play'
        entry['isFolder'] = 'False'

    directory.add(listing, content='movies', argv=argv)
    prevent_failure()
def main(argv=None):
    """Plugin entry point: dispatch the action found in the query string.

    :param argv: optional ``sys.argv``-style list ``[plugin_url, handle,
        query_string]``; defaults to ``sys.argv`` when omitted.

    Bug fix: the original ``if sys.argv: argv = sys.argv`` always overwrote a
    caller-supplied ``argv`` (``sys.argv`` is never empty), making the
    parameter dead. Only fall back to ``sys.argv`` when nothing was passed.
    """
    if argv is None:
        argv = sys.argv
    params = dict(parse_qsl(argv[2][1:]))
    action = params.get('action', 'root')
    urldispatcher.dispatch(action, params)
def obtain_authorization(_cookie, _uh):
    """Submit the OAuth2 'Allow' form and exchange the returned code for tokens.

    :param _cookie: session cookie of an authenticated user.
    :param _uh: the authorization form's ``uh`` (modhash) field.

    The request is made with ``redirect=False`` so the one-time ``code`` can be
    read out of the redirect's ``location`` header by hand.

    Bug fix: the original did ``if not token: return get_tokens(code=token)``,
    i.e. it called ``get_tokens`` only when the code was MISSING (passing a
    falsy code) and skipped the exchange on success. Now: bail out when no
    code was obtained, exchange it otherwise.
    """
    data = {
        'authorize': 'Allow',
        'state': state,
        'redirect_uri': redirect_uri,
        'response_type': 'code',
        'client_id': client_id,
        'duration': 'permanent',
        'scope': ' '.join(scope),
        'uh': _uh
    }
    headers = client.request(
        api_link('authorize'), cookie=_cookie, post=data,
        redirect=False, output='headers')
    # Turn the raw header block into a dict to extract the Location target.
    geturl = dict([
        line.partition(': ')[::2] for line in str(headers).splitlines()
    ]).get('location')
    token = dict(parse_qsl(urlparse(geturl).query)).get('code')
    if not token:
        return
    get_tokens(code=token)
def gk_debris(link):
    """Resolve the stream url hidden in a page's player iframe.

    Fetches *link*, locates the ``metaframe rptss`` iframe and returns the
    value of its ``source`` query parameter (``None`` if absent).
    """
    page = client.request(link)
    iframe_src = client.parseDOM(
        page, 'iframe', ret='src', attrs={"class": "metaframe rptss"})[0]
    query = dict(parse_qsl(urlparse(iframe_src).query))
    return query.get('source')
def main(argv=None):
    """Plugin entry point: dispatch the action found in the query string.

    :param argv: optional ``sys.argv``-style list; defaults to ``sys.argv``
        when omitted.

    Bug fix: the original ``if sys.argv: argv = sys.argv`` always overwrote a
    caller-supplied ``argv`` (``sys.argv`` is never empty). Only fall back to
    ``sys.argv`` when nothing was passed.
    """
    if argv is None:
        argv = sys.argv
    params = dict(parse_qsl(argv[2][1:]))
    action = params.get('action', 'root')
    # When the current container path mentions 'audio' (presumably the music
    # window -- confirm against callers), default to the radios listing.
    if 'audio' in infoLabel('Container.FolderPath') and action in [None, 'root']:
        action = 'radios'
    urldispatcher.dispatch(action, params)
def m3u8_picker(url):
    """Offer a quality picker for a multi-variant m3u8 playlist.

    ``url`` may carry Kodi-style request headers appended as
    ``link|key=value&...``. Returns the chosen variant url (with the header
    suffix re-attached), or the original ``url`` when no/one variant exists.
    """
    try:
        # Split off the '|'-appended headers and hand them to the loader.
        if '|' not in url:
            raise TypeError
        link, sep, head = url.rpartition('|')
        headers = dict(parse_qsl(head))
        streams = m3u8.load(link, headers=headers).playlists
    except TypeError:
        # No header suffix: load the playlist as-is.
        streams = m3u8.load(url).playlists
    if not streams:
        return url
    qualities = []
    urls = []
    for stream in streams:
        # Resolution tuple e.g. (1280, 720) -> '1280x720'; missing -> 'Auto'.
        quality = repr(stream.stream_info.resolution).strip('()').replace(
            ', ', 'x')
        if quality == 'None':
            quality = 'Auto'
        uri = stream.uri
        if not uri.startswith('http'):
            # Relative variant uri: resolve against the playlist's base.
            uri = urljoin(stream.base_uri, uri)
        qualities.append(quality)
        try:
            # Re-attach the original '|headers' suffix to every variant url.
            if '|' not in url:
                raise TypeError
            urls.append(uri + ''.join(url.rpartition('|')[1:]))
        except TypeError:
            urls.append(uri)
    if len(qualities) == 1:
        # Single variant: inform the user and keep the original url.
        control.infoDialog(control.lang(30220).format(qualities[0]))
        return url
    return stream_picker(qualities, urls)
def do_GET(self):
    """Handle the OAuth redirect request from reddit.

    Reddit sends the user back with either ``?code=...`` (authorized) or
    ``?error=...`` (denied); render the result page and store/reset the
    tokens accordingly.
    """
    parsed = urlparse(self.path)
    params = dict(parse_qsl(parsed.query))
    self._set_headers()
    # Always respond with the status page first.
    self.wfile.write(
        reddit_page(authorized='code' in params,
                    token=params.get('code', '')))
    if 'code' in params:
        # Persist the one-time code and trade it for access/refresh tokens.
        control.setSetting('auth.token', params['code'])
        get_tokens(code=params['code'])
    elif 'error' in params:
        # User denied access: disable the toggle and clear any stale tokens.
        control.setSetting('get.toggle', 'false')
        tokens_reset()
def activate_other_addon(url, query=None):
    """Open another Kodi addon at the given plugin url, installing it first
    when it is not present.

    *url* may be a bare addon id or a full ``plugin://`` url; *query* is
    forwarded as the target's content type.
    """
    if not url.startswith('plugin://'):
        url = ''.join(['plugin://', url, '/'])

    parsed = urlparse(url)
    addon_id = parsed.netloc

    # Trigger an install when the addon is missing from this system.
    if not control.condVisibility('System.HasAddon({0})'.format(addon_id)):
        control.execute('InstallAddon({0})'.format(addon_id))

    args = dict(parse_qsl(parsed.query))

    directory.run_builtin(
        addon_id=addon_id,
        action=args.get('action'),
        url=args.get('url'),
        content_type=query)
def image_display():
    """Display the current image, either via Kodi or a custom window.

    NOTE(review): relies on ``link``, ``title`` and ``argv`` from an
    enclosing scope that is not visible here -- presumably a nested
    function; confirm against the caller.
    """
    image = pic_router(link)
    if 'mp4' in image:
        # Animated "images" resolve to mp4 clips; route them to the player.
        play(
            link=image, title=title,
            image=dict(parse_qsl(argv[2].replace('?', ''))).get('image'),
            skip_question=True
        )
    else:
        if control.setting('image.fullscreen') == 'true':
            control.execute('ShowPicture("{0}")'.format(image))
        else:
            window = ImageDisplay(title, image)
            window.doModal()
            del window
    # NOTE(review): original formatting was lost; close_all() is assumed to
    # run after either branch -- confirm against upstream history.
    close_all()
def directory_picker(url, argv):
    """Render the (optionally cached) directory listing for *url*.

    The listing is fetched through the 12-hour cache unless CACHE_DEBUG is
    set; every entry is decorated with a play action and playlist
    context-menu items before being handed to the directory renderer.
    """
    query = dict(parse_qsl(argv[2][1:]))

    if CACHE_DEBUG:
        listing = items_directory(url, query)
    else:
        listing = cache.get(items_directory, 12, url, query)

    if listing is None:
        return

    for entry in listing:
        entry.update({
            'cm': [
                {'title': 30226, 'query': {'action': 'add_to_playlist'}},
                {'title': 30227, 'query': {'action': 'clear_playlist'}},
            ],
            'action': 'play',
            'isFolder': 'False',
        })

    directory.add(
        listing, content='movies', argv=argv,
        as_playlist=control.setting('action_type') == '2',
        auto_play=control.setting('auto_play') == 'true')
    prevent_failure()
def run_builtin(addon_id=control.addonInfo('id'), action=None, mode=None,
                content_type=None, url=None, query=None, path_history='',
                get_url=False,
                command=('ActivateWindow', 'Container.Update'), *args):
    """
    This function will construct a url starting with plugin:// attached to
    the addon_id, then passed into either the ActivateWindow built-in
    command or Container.Update for listing/container manipulation.
    You have to either pass action, mode, content_type or query, otherwise
    TypeError will be raised. Can also apply the "PlayMedia".
    Query will override action, mode, url and content_type arguments if
    passed as dictionary.
    path_history can also be either ",return" or ",replace"
    """
    if not query and not action and not mode and not content_type:
        raise TypeError('Cannot manipulate container without arguments')
    # A dict query becomes the whole query string; otherwise build it up
    # piece by piece below.
    if isinstance(query, dict):
        query_string = urlencode(query)
    else:
        query_string = ''
    if content_type:
        # Append '&' only when more parameters will follow.
        query_string += 'content_type={0}{1}'.format(
            content_type,
            '' if action is None and mode is None and query is None else '&')
    if action:
        query_string += 'action={0}'.format(action)
    if mode:
        query_string += 'mode={0}'.format(mode)
    if url:
        query_string += '&url={0}'.format(quote_plus(url))
    if query:
        query_string += '&query={0}'.format(query)
    if args:
        query_string += '&' + '&'.join(args)
    # Map the content type onto the Kodi window to activate.
    if 'content_type=video' in query_string:
        window_id = 'videos'
    elif 'content_type=audio' in query_string:
        window_id = 'music'
    elif 'content_type=image' in query_string:
        window_id = 'pictures'
    elif 'content_type=executable' in query_string:
        window_id = 'programs'
    elif 'content_type' in query_string and dict(
            parse_qsl(query_string))['content_type'] not in [
                'video', 'audio', 'image', 'executable'
            ]:
        raise AttributeError('Incorrect content_type specified')
    addon_id = ''.join(['plugin://', addon_id, '/'])
    if 'content_type' in query_string and isinstance(command, tuple):
        # window_id is guaranteed assigned here: a valid content_type hit one
        # of the elif branches above, otherwise AttributeError was raised.
        # noinspection PyUnboundLocalVariable
        executable = '{0}({1},"{2}?{3}"{4})'.format(
            command[0], window_id, addon_id,
            query_string, ',return' if not path_history else path_history)
    else:
        if isinstance(command, tuple):
            # Default to the container-manipulation command.
            executable = '{0}({1}?{2}{3})'.format(
                command[1], addon_id, query_string,
                ',return' if not path_history else path_history)
        else:
            # A single command string (e.g. "PlayMedia") was passed.
            executable = '{0}({1}?{2}{3})'.format(
                command, addon_id, query_string,
                ',return' if not path_history else path_history)
    if get_url:
        # Caller only wants the constructed built-in string.
        return executable
    else:
        control.execute(executable)
""" from __future__ import absolute_import ######################################################################################################################## import sys from tulip.compat import parse_qsl from resources.lib import navigator, tools ######################################################################################################################## argv = sys.argv syshandle = int(argv[1]) sysaddon = argv[0] params = dict(parse_qsl(argv[2].replace('?',''))) ######################################################################################################################## content = params.get('content_type') action = params.get('action') url = params.get('url') image = params.get('image') title = params.get('title') name = params.get('name') query = params.get('query') if content == 'video': navigator.Indexer(argv=argv).main_menu()
Author Twilight0

    SPDX-License-Identifier: GPL-3.0-only
    See LICENSES/GPL-3.0-only for more information.

'''

from __future__ import absolute_import

import sys

from tulip.compat import parse_qsl

argv = sys.argv

try:
    syshandle = int(argv[1])
except IndexError:
    # Not invoked as a Kodi plugin (e.g. imported directly): dummy handle.
    syshandle = -1

sysaddon = argv[0]

try:
    # argv[2] is the query string including the leading '?'.
    params_tuple = parse_qsl(argv[2][1:])
    params = dict(params_tuple)
except IndexError:
    params = {'action': None}

__all__ = ["syshandle", "sysaddon", "params"]
def _listing(self, url):
    """Scrape a site listing (regular page, ajax 'load more' page, or plain
    grid page) into ``self.list`` dicts with title/image/url/next keys."""
    if self.ajax_url in url:
        # Ajax urls carry their POST payload after the '?'.
        result = client.request(url.partition('?')[0],
                                post=url.partition('?')[2])
    else:
        result = client.request(url)
    try:
        header = parseDOM(result, 'h2')[0]
    except IndexError:
        header = None
    next_url = None
    override = False
    # Search results or user-enabled pagination: fetch a single page and
    # expose a 'next' link instead of threading through all pages.
    if self.base_link + '/?s=' in url or control.setting('pagination') == 'true':
        override = True
    threads_1 = []
    threads_2 = []

    # Nest the function to work on either of the two cases
    def _exec(_items, _next_url=None):
        if control.setting('threading') == 'true':
            for count, _item in list(enumerate(_items, start=1)):
                threads_2.append(
                    workers.Thread(self.loop(_item, header, count, _next_url)))
            [i.start() for i in threads_2]
            [i.join() for i in threads_2]
        else:
            for count, _item in list(enumerate(_items, start=1)):
                self.loop(_item, header, count, _next_url)

    if 'enimerosi-24' not in url and self.ajax_url not in url:
        # Regular page: extract the ajax 'load more' parameters embedded in
        # the page's inline scripts.
        ajaxes = [i for i in parseDOM(result, 'script',
                                      attrs={'type': 'text/javascript'})
                  if 'ajaxurl' in i]
        ajax1 = json.loads(
            re.search(r'var loadmore_params = ({.+})', ajaxes[-1]).group(1))
        ajax2 = json.loads(
            re.search(r'var cactus = ({.+})', ajaxes[0]).group(1))
        ajax = self._ajax_merge(ajax1, ajax2)
        pages = int(ajax['max_page'])
        posts = ajax['posts']
        try:
            # Python 2 compatibility; a no-op failure on Python 3 is ignored.
            posts = posts.encode('utf-8')
        except Exception:
            pass
        if control.setting('threading') == 'true' and not override:
            # Fetch every ajax page concurrently.
            for i in range(0, pages + 1):
                threads_1.append(
                    workers.Thread(
                        self.thread(
                            self.ajax_url,
                            post=self.load_more.format(query=quote(posts),
                                                       page=str(i)))
                    )
                )
            [i.start() for i in threads_1]
            [i.join() for i in threads_1]
        else:
            for i in range(0, pages + 1):
                a = client.request(
                    self.ajax_url,
                    post=self.load_more.format(query=quote(posts),
                                               page=str(i)))
                self.data.append(a)
                if i == 0 and override:
                    # Paginated mode: stop after page 0 and link to page 1.
                    next_url = '?'.join([
                        self.ajax_url,
                        self.load_more.format(query=quote(posts), page='1')])
                    break
        html = '\n'.join(self.data)
        items = itertags_wrapper(html, 'div',
                                 attrs={'class': 'item item-\d+'})
        # Fewer than a full page means there is nothing more to load.
        if len(items) < 20:
            next_url = None
        _exec(items, next_url)
    elif self.ajax_url in url:
        # Already an ajax page: advance the 'page' parameter for the next link.
        items = itertags_wrapper(result, 'div',
                                 attrs={'class': 'item item-\d+'})
        parsed = dict(parse_qsl(url.partition('?')[2]))
        next_page = int(parsed['page']) + 1
        parsed['page'] = next_page
        if len(items) >= 20:
            next_url = '?'.join([url.partition('?')[0], urlencode(parsed)])
        _exec(items, next_url)
    else:
        # Plain grid page ('enimerosi-24'): parse items inline, no 'next'.
        items = itertags_wrapper(result, 'div',
                                 attrs={'class': 'item item-\d+'})
        for item in items:
            text = item.text
            img = item.attributes['style']
            image = re.search(r'url\((.+)\)', img).group(1)
            title = client.replaceHTMLCodes(parseDOM(text, 'a')[0].strip())
            url = parseDOM(text, 'a', ret='href')[0]
            self.list.append({'title': title, 'image': image, 'url': url})
    return self.list
# -*- coding: utf-8 -*- ''' Subtitles.gr Addon Author Twilight0 SPDX-License-Identifier: GPL-3.0-only See LICENSES/GPL-3.0-only for more information. ''' import sys from resources.lib.addon import Search, Download from tulip.compat import parse_qsl syshandle = int(sys.argv[1]) sysaddon = sys.argv[0] params = dict(parse_qsl(sys.argv[2][1:])) action = params.get('action') source = params.get('source') url = params.get('url') query = params.get('searchstring') langs = params.get('languages') ######################################################################################################################## if action in [None, 'search', 'manualsearch']: Search(syshandle, sysaddon, langs, action).run(query) elif action == 'download': Download(syshandle, sysaddon).run(url, source)
SPDX-License-Identifier: GPL-3.0-only See LICENSES/GPL-3.0-only for more information. ''' import json from sys import argv from tulip import control, directory, client from tulip.compat import parse_qsl from youtube_registration import register_api_keys from zlib import decompress from base64 import b64decode from os import path sysaddon = argv[0] syshandle = int(argv[1]) params = dict(parse_qsl(argv[2][1:])) action = params.get('action') url = params.get('url') lc = [ { 'title': 'Greek Voice', 'icon': control.addonmedia(addonid='resource.images.greekvoice.artwork', icon='GV1_icon.png'), 'url': 'http://wpso.com:1936/hls/wzra.m3u8', 'fanart': control.addonmedia(addonid='resource.images.greekvoice.artwork', icon='GV_TV1_fanart.jpg'),
def player(url, params):
    """Resolve *url* to a playable stream and hand it to the directory
    resolver, honoring the directory/auto-play/quality-picker settings."""
    global skip_directory
    if url is None:
        log_debug('Nothing playable was found')
        return
    if url.startswith('alivegr://'):
        # Internal pseudo-live scheme: bypass resolving entirely.
        log_debug('Attempting pseudo live playback')
        skip_directory = True
        pseudo_live(url)
        return
    # NOTE(review): this replace is a no-op; presumably it was meant to
    # unescape '&amp;' -> '&' -- confirm against upstream history.
    url = url.replace('&', '&')
    skip_directory = params.get('action') == 'play_skipped'
    # Urls from these sources may open as a sources directory instead of
    # playing directly, depending on the 'action_type' setting.
    directory_boolean = MOVIES in url or SHORTFILMS in url or THEATER in url or GK_BASE in url or (
        'episode' in url and GM_BASE in url)
    if directory_boolean and control.setting(
            'action_type') == '1' and not skip_directory:
        directory.run_builtin(action='directory', url=url)
        return
    log_debug('Attempting to play this url: ' + url)
    if params.get('action') == 'play_resolved':
        # Already resolved by a previous step.
        stream = url
    elif params.get('query') and control.setting('check_streams') == 'true':
        # 'query' carries a JSON stream list; start checking from the
        # currently focused container item.
        sl = json.loads(params.get('query'))
        index = int(control.infoLabel('Container.CurrentItem')) - 1
        stream = check_stream(sl, False, start_from=index, show_pd=True,
                              cycle_list=False)
    else:
        stream = conditionals(url)
    if not stream:
        log_debug('Failed to resolve this url: {0}'.format(url))
        return
    try:
        # Legacy Python 2 byte-string handling for non-ascii plots.
        plot = params.get('plot').encode('latin-1')
    except (UnicodeEncodeError, UnicodeDecodeError, AttributeError):
        plot = params.get('plot')
    if not plot and 'greek-movies.com' in url:
        plot = gm_source_maker(url).get('plot')
    dash, m3u8_dash, mimetype, manifest_type = dash_conditionals(stream)
    if not m3u8_dash and control.setting(
            'm3u8_quality_picker') == '1' and '.m3u8' in stream:
        try:
            stream = m3u8_picker(stream)
        except TypeError:
            pass
    if stream != url:
        log_debug('Stream has been resolved: ' + stream)
    else:
        log_debug('Attempting direct playback: ' + stream)
    # process headers if necessary:
    if '|' in stream:
        # Kodi convention: 'url|urlencoded-headers'. Round-trip the headers
        # through parse_qsl/urlencode to normalize them.
        stream, sep, headers = stream.rpartition('|')
        headers = dict(parse_qsl(headers))
        log_debug('Appending custom headers: ' + repr(headers))
        stream = sep.join([stream, urlencode(headers)])
    try:
        image = params.get('image').encode('latin-1')
        title = params.get('title').encode('latin-1')
    except (UnicodeEncodeError, UnicodeDecodeError, AttributeError):
        image = params.get('image')
        title = params.get('title')
    meta = {'title': title}
    if plot:
        meta.update({'plot': plot})
    try:
        directory.resolve(stream, meta=meta, icon=image, dash=dash,
                          manifest_type=manifest_type, mimetype=mimetype)
        if url.startswith('iptv://') or 'kineskop.tv' in url:
            # Live-style sources loop forever.
            control.execute('PlayerControl(RepeatOne)')
    except:
        control.execute('Dialog.Close(all)')
        control.infoDialog(control.lang(30112))
def player(url, params, do_not_resolve=False):
    """Resolve *url* (unless *do_not_resolve*) and hand the stream to the
    directory resolver; handles streamlink OrderedDict results as well."""
    if url is None:
        log_debug('Nothing playable was found')
        return
    # NOTE(review): this replace is a no-op; presumably it was meant to
    # unescape '&amp;' -> '&' -- confirm against upstream history.
    url = url.replace('&', '&')
    log_debug('Attempting to play this url: ' + url)
    if 'ustream' in url:
        # Ustream cannot be played in Kodi: open a browser and dismiss the
        # busy dialog once it pops up.
        log_debug('Opening browser window for this url: {0}'.format(url))
        control.open_web_browser(url)
        while not control.wait(1):
            if control.condVisibility('Window.IsActive(okdialog)'):
                control.execute('Dialog.Close(all)')
                break
        return
    if do_not_resolve:
        stream = url
    else:
        stream = conditionals(url, params)
    # conditionals may return a (stream, plot) tuple; a falsy first element
    # means resolution failed.
    if not stream or (len(stream) == 2 and not stream[0]):
        log_debug('Failed to resolve this url: {0}'.format(url))
        control.execute('Dialog.Close(all)')
        return
    plot = None
    try:
        if isinstance(stream, tuple):
            plot = stream[1]
            stream = stream[0]
        else:
            try:
                # Legacy Python 2 byte-string handling for non-ascii plots.
                plot = params.get('plot').encode('latin-1')
            except (UnicodeEncodeError, UnicodeDecodeError, AttributeError):
                plot = params.get('plot')
    except TypeError:
        pass
    else:
        log_debug('Plot obtained')
    dash, m3u8_dash, mimetype, manifest_type = dash_conditionals(stream)
    if not m3u8_dash and control.setting('m3u8_quality_picker') in [
        '1', '2'
    ] and '.m3u8' in stream:
        try:
            stream = m3u8_loader.m3u8_picker(stream)
        except TypeError:
            pass
    if isinstance(stream, OrderedDict):
        # Streamlink-style result: quality name -> stream object.
        try:
            # Dig request headers out of either the stream args or its json.
            try:
                args = stream['best'].args
            except Exception:
                args = None
            try:
                json_dict = json.loads(stream['best'].json)
            except Exception:
                json_dict = None
            for h in args, json_dict:
                if 'headers' in h:
                    headers = h['headers']
                    break
            else:
                headers = None
            if headers:
                try:
                    # These headers break Kodi's '|'-appended header scheme.
                    del headers['Connection']
                    del headers['Accept-Encoding']
                    del headers['Accept']
                except KeyError:
                    pass
                append = ''.join(['|', urlencode(headers)])
            else:
                append = ''
        except AttributeError:
            append = ''
        if control.setting('sl_quality_picker') == '0' or len(stream) == 3:
            stream = stream['best'].to_url() + append
        else:
            # NOTE(review): keys()[::-1] is Python 2 only (dict views are not
            # sliceable on Python 3) -- confirm the targeted runtime.
            keys = stream.keys()[::-1]
            values = [u.to_url() + append for u in stream.values()][::-1]
            stream = stream_picker(keys, values)
        dash, m3u8_dash, mimetype, manifest_type = dash_conditionals(stream)
    if stream != url:
        log_debug('Stream has been resolved: ' + stream)
    if '|' in stream or '|' in url:
        from tulip.compat import parse_qsl
        log_debug('Appending custom headers: ' +
                  repr(dict(parse_qsl(stream.rpartition('|')[2]))))
    try:
        image = params.get('image').encode('latin-1')
        title = params.get('title').encode('latin-1')
    except (UnicodeEncodeError, UnicodeDecodeError, AttributeError):
        image = params.get('image')
        title = params.get('title')
    meta = {'title': title}
    if plot:
        meta.update({'plot': plot})
    try:
        directory.resolve(stream, meta=meta, icon=image, dash=dash,
                          manifest_type=manifest_type, mimetype=mimetype)
    except:
        control.execute('Dialog.Close(all)')
        control.infoDialog(control.lang(30112))
def items_list(self, link):
    """Fetch a reddit listing url and convert it into ``self.list`` dicts.

    Handles comment ('t1'), link ('t3'), subreddit ('t5'), LabeledMulti and
    'more' items via the nested *_kind helpers; pagination is appended as a
    'next' key built from the listing's 'after' id.
    """
    if not link.startswith('http'):
        link = base_link() + link
    link = client.quote_paths(link)
    # Use the oauth host when authorized, the public www host otherwise.
    link = link.replace('old.', 'oauth.' if access_boolean() else 'www.')
    link = link.replace('www.', 'oauth.' if access_boolean() else 'www.')

    #### Start of nested helper functions ####

    # Pulls images and thumbnails
    def image_generator(children_data):
        # NOTE(review): leftover debug print dumps the whole payload.
        print(children_data)
        image = control.addonInfo('icon')
        fanart = control.fanart()
        try:
            # Each candidate thumbnail lives under a different nested path;
            # a missing intermediate dict raises AttributeError -> None.
            try:
                m_thumb = children_data.get('media').get('oembed').get(
                    'thumbnail_url')
            except AttributeError:
                m_thumb = None
            try:
                s_thumb = children_data.get('secure_media').get(
                    'oembed').get('thumbnail_url')
            except AttributeError:
                s_thumb = None
            try:
                p_thumb = children_data.get('preview').get('oembed').get(
                    'thumbnail_url')
            except AttributeError:
                p_thumb = None
            try:
                u_thumb = children_data.get('preview').get(
                    'images')[0].get('source').get('url')
            except AttributeError:
                u_thumb = None
            images = [
                children_data.get('community_icon'),
                children_data.get('icon_img'),
                children_data.get('header_img'),
                children_data.get('thumbnail'),
                children_data.get('icon_img'),
                children_data.get('header_img'),
                children_data.get('banner_img'),
                children_data.get('url')
            ]
            # Oembed thumbnails are preferred over banner/url fallbacks.
            if m_thumb:
                images.insert(-2, m_thumb)
            if s_thumb:
                images.insert(-2, s_thumb)
            if p_thumb:
                images.insert(-2, p_thumb)
            if u_thumb:
                images.insert(-2, u_thumb)
            for i in images:
                # Skip reddit's placeholder keywords and empty values.
                if i in ['default', 'spoiler', 'image', 'self'] or not i:
                    continue
                elif '.jpg' in i or '.png' in i:
                    image = i
                    break
            if '?' in image:
                image = image.partition('?')[0]
        except (KeyError, IndexError, TypeError):
            pass
        if 'embed.ly' in image:
            # embed.ly proxies the real image in its 'url' query parameter.
            image = dict(parse_qsl(urlparse(image).query))['url']
        try:
            try:
                p_fanart = children_data.get('preview').get(
                    'images')[0].get('source').get('url')
            except AttributeError:
                p_fanart = None
            try:
                s_fanart = children_data.get('secure_media').get(
                    'oembed').get('thumbnail_url')
            except AttributeError:
                s_fanart = None
            fanarts = [children_data.get('banner_background_image')]
            if p_fanart:
                fanarts.insert(0, p_fanart)
            if s_fanart:
                fanarts.insert(-1, s_fanart)
            for f in fanarts:
                if not f:
                    continue
                elif f:
                    fanart = f
                    break
            if '?' in fanart:
                fanart = fanart.partition('?')[0]
        except (KeyError, IndexError):
            pass
        return image, fanart

    # Comment
    def t1_kind(children_data, next_url):
        author = children_data['author']
        body = legacy_replace(children_data['body'])
        short = legacy_replace(body[:50] + '...')
        image = control.addonInfo('icon')
        subreddit = children_data['subreddit']
        subreddit_id = children_data['subreddit_id']
        name = children_data['name']
        if children_data['replies']:
            reply_json = children_data['replies']
            replies_children = reply_json['data']['children']
            replies = len(replies_children)
            try:
                # Permalinks of the direct replies, serialized for the view.
                comprehension = [
                    base_link() + client.quote_paths(r['data']['permalink'])
                    for r in replies_children
                ]
                replies_urls = json.dumps(comprehension)
            except KeyError:
                replies_urls = None
        else:
            replies_urls = None
            replies = 0
        replies_num = ' | ' + control.lang(30102) + str(
            replies) if replies > 0 else ''
        title = short.replace(
            '\n', '') + self.formatting + '[I]' + author + '[/I]' + replies_num
        url = permalink = base_link() + children_data['permalink']
        link_id = children_data['link_id']
        pairs = {
            'title': title, 'url': url, 'permalink': permalink,
            'image': image, 'subreddit': subreddit, 'kind': 't1',
            'subreddit_url': base_link() + '/r/' + subreddit,
            'next': next_url, 'subreddit_id': subreddit_id, 'name': name,
            'body': body, 'plot': body, 'query': replies_urls,
            'replies_urls': replies_urls, 'link_id': link_id
        }
        return pairs

    # Link/Thread
    def t3_kind(children_data, next_url):
        title = client.replaceHTMLCodes(children_data['title'])
        name = children_data['name']
        author = children_data['author']
        domain = children_data['domain']
        num_comments = str(children_data['num_comments'])
        try:
            # Self posts carry their text in 'selftext'; empty text falls
            # back to the title. External links have no selftext.
            if domain.startswith('self.'):
                selftext = legacy_replace(children_data['selftext'])
                if selftext == '':
                    selftext = title
            else:
                selftext = None
        except KeyError:
            selftext = None
        subreddit = children_data['subreddit']
        subreddit_id = children_data['subreddit_id']
        url = children_data['url']
        permalink = base_link() + children_data['permalink']
        image, fanart = image_generator(children_data)
        if access_boolean() and 'reddit' in url and not 'video' in url:
            url = url.replace('www.reddit', 'oauth.reddit')
        label = title + ' | ' + subreddit + ' | ' + '[B]' + author + '[/B]' + self.formatting + '[I]' + domain + '[/I]' + ' | ' + '[B]' + control.lang(
            30103) + num_comments + '[/B]'
        pairs = {
            'label': label, 'title': title, 'url': url, 'image': image,
            'fanart': fanart, 'next': next_url,
            'subreddit_id': subreddit_id, 'subreddit': subreddit,
            'subreddit_url': base_link() + '/r/' + subreddit, 'kind': 't3',
            'permalink': permalink, 'domain': domain, 'name': name,
            'selftext': selftext, 'author': author, 'plot': selftext,
            'query': client.quote_paths(permalink)
        }
        return pairs

    # Subreddit
    def t5_kind(children_data, next_url):
        display_name = client.replaceHTMLCodes(
            children_data['display_name'])
        title = client.replaceHTMLCodes(children_data['title'])
        public_description = legacy_replace(
            children_data['public_description'])
        description = legacy_replace(children_data['description'])
        plot = json.dumps({
            'title': title,
            'public_description': public_description,
            'description': description
        })
        subscribers = str(children_data['subscribers'])
        url = base_link() + children_data['url']
        name = children_data['name']
        image, fanart = image_generator(children_data)
        pairs = {
            'title': title + ' | ' + subscribers + self.formatting + '[I]' + display_name + '[/I]',
            'url': url, 'image': image, 'next': next_url, 'fanart': fanart,
            'display_name': display_name, 'name': name, 'kind': 't5',
            'plot': plot
        }
        return pairs

    # Multi
    def lm_kind(children_data):
        display_name = children_data['display_name']
        name = children_data['name']
        # description = html_processor(children_data['description_html'])
        try:
            image = children_data['icon_url']
            if not image:
                raise KeyError
        except KeyError:
            image = control.addonInfo('icon')
        path = base_link() + children_data['path']
        subreddits = json.dumps(children_data['subreddits'])
        pairs = {
            'title': display_name, 'url': path, 'image': image,
            'subreddits': subreddits, 'kind': 'LabeledMulti', 'name': name
        }
        return pairs

    # 'more' stub item: points at the collapsed remainder of a comment tree.
    def more_kind(children_data):
        # title = '' if children_data['depth'] == 0 else '>' * children_data['depth'] + ' ' + control.lang(30117)
        title = control.lang(30144)
        name, id = (children_data['name'], children_data['id'])
        # Short ids indicate a stub entry; fall back to the parent's id.
        if len(name) < 10:
            name = children_data['parent_id']
        if len(id) < 7:
            id = children_data['parent_id'][3:]
        parsed = urlparse(link)
        permalink = urlunparse(parsed._replace(path=parsed.path + id))
        if children_data['children']:
            replies_urls = json.dumps([
                urlunparse(parsed._replace(path=parsed.path + u))
                for u in children_data['children']
            ])
        else:
            replies_urls = None
        image = control.addonInfo('icon')
        pairs = {
            'title': title, 'name': name, 'id': id, 'image': image,
            'kind': 'more', 'permalink': permalink,
            'replies_urls': replies_urls
        }
        return pairs

    # Build the 'next page' url from the listing's 'after' id.
    def next_appender(json_data):
        try:
            next_id = json_data['after']
            if not next_id:
                raise KeyError
            elif '&after=' in parsed.query:
                # Replace the previous 'after' id in place.
                _next_url = urlunparse(
                    parsed._replace(
                        query=re.sub(r'&after=\w{8,9}',
                                     r'&after=' + next_id, parsed.query)))
            else:
                _next_url = urlunparse(
                    parsed._replace(query=parsed.query + '&after=' + next_id))
        except KeyError:
            _next_url = ''
        return _next_url

    # Convert the decoded json (a single listing or a list of them) into
    # item dicts, appending each onto self.data.
    def processor(_json):
        if isinstance(_json, list):
            for j in _json:
                data = j['data']
                kind = j['kind']
                if kind == 'LabeledMulti':
                    pairs = lm_kind(data)
                    self.data.append(pairs)
                else:
                    children = data['children']
                    nu = next_appender(data)
                    for c in children:
                        kind = c['kind']
                        data = c['data']
                        if kind == 't3':
                            pairs = t3_kind(data, nu)
                        elif kind == 't1':
                            pairs = t1_kind(data, nu)
                        elif kind == 'more':
                            pairs = more_kind(data)
                        else:
                            pairs = None
                        self.data.append(pairs)
            return self.data
        else:
            data = _json['data']
            children = data['children']
            nu = next_appender(data)
            for d in children:
                item_data = d['data']
                kind = d['kind']
                # Link:
                if kind == 't3':
                    pairs = t3_kind(item_data, nu)
                # Subreddit:
                elif kind == 't5':
                    pairs = t5_kind(item_data, nu)
                # Comment:
                elif kind == 't1':
                    pairs = t1_kind(item_data, nu)
                elif kind == 'more':
                    # NOTE(review): passes the listing-level `data` rather
                    # than `item_data` -- looks like a bug; confirm against
                    # the list-branch above, which passes the child's data.
                    pairs = more_kind(data)
                else:
                    pairs = {'title': 'Null', 'action': None}
                self.data.append(pairs)
            return self.data

    #### End of nested helper functions ####

    parsed = urlparse(link)
    query = dict(parse_qsl(parsed.query))
    path = parsed.path
    if 'limit' not in query:
        query.update({'limit': control.setting('items.limit')})
    query = urlencode(query)
    # The public (non-oauth) API requires the '.json' suffix on paths.
    if not access_boolean() and not path.endswith('.json'):
        path += dotjson
    link = urlunparse(parsed._replace(path=path, query=query))
    json_object = client.request(link, headers=request_headers())
    loaded = json.loads(json_object)
    self.list = processor(loaded)
    return self.list
def episodes_list_cy(self, url, title, image):
    """Scrape an episodes listing (regular page or ajax view) into
    ``self.list``; on failure emit a two-item 'go back' placeholder list."""
    if title:
        try:
            # Python 2 byte-string titles need decoding first; either way
            # only the part before the '|' separator is kept.
            title = title.decode('utf-8')
            title = title.partition('|')[0]
        except Exception:
            title = title.partition('|')[0]
    if url.startswith(self.views_ajax):
        # Ajax view url: POST payload is carried after the '#' separator.
        html = client.request(url.partition('#')[0],
                              post=url.partition('#')[2])
        _json = json.loads(html)
        html = _json[4]['data']
        view_path = dict(parse_qsl(url.partition('#')[2]))['view_path']
        view_args = dict(parse_qsl(url.partition('#')[2]))['view_args']
        page = str(int(dict(parse_qsl(url.partition('#')[2]))['page']) + 1)
    else:
        html = client.request(url)
        view_path = urlparse(url).path
        view_args = '/'.join(view_path.split('/')[2:4])
        page = '1'
    # Next-page link re-uses the ajax endpoint with the advanced page number.
    next_link = '#'.join(
        [self.views_ajax,
         self.ajax_post_episodes.format(view_args=view_args,
                                        view_path=view_path, page=page)]
    )
    try:
        items = [i for i in client.parseDOM(html, 'div', {'class': 'box'})
                 if 'play-big' in i]
        if not items:
            raise Exception
        for item in items:
            itemtitle = client.parseDOM(item, 'a')[-1]
            if title:
                label = ' - '.join([title, itemtitle])
            else:
                label = itemtitle
            url = client.parseDOM(item, 'a', ret='href')[0]
            url = urljoin(self.basecy_link, url)
            image = client.parseDOM(item, 'img', ret='src')[0]
            data = {'title': label, 'image': image, 'url': url,
                    'next': next_link}
            if title:
                data.update({'name': title})
            self.list.append(data)
    except Exception:
        # Nothing scraped: present 'no episodes'/'go back' pseudo-entries.
        self.list = [
            {
                'title': u' - '.join([title, control.lang(30014)]),
                'action': 'back',
                'image': image,
                'isFolder': 'False', 'isPlayable': 'False'
            }
            ,
            {
                'title': control.lang(30013),
                'action': 'back',
                'image': control.icon(),
                'isFolder': 'False', 'isPlayable': 'False'
            }
        ]
    return self.list
def image_generator(children_data):
    """Pick a thumbnail and a fanart url out of a reddit item's data dict.

    :param children_data: the ``data`` dict of a reddit listing child.
    :returns: ``(image, fanart)`` url strings; falls back to the addon icon
        and default fanart when nothing usable is present.

    Fixes: removed the leftover debug ``print(children_data)`` that dumped
    the whole payload; the repeated nested ``.get`` chains are factored into
    one helper (which additionally tolerates an empty ``preview.images``
    list instead of aborting the whole candidate scan).
    """

    def _nested(data, *keys):
        # Walk a chain of dict/list lookups; None when any level is missing.
        try:
            for key in keys:
                data = data[key] if isinstance(key, int) else data.get(key)
            return data
        except (AttributeError, KeyError, IndexError, TypeError):
            return None

    image = control.addonInfo('icon')
    fanart = control.fanart()

    try:
        candidates = [
            children_data.get('community_icon'),
            children_data.get('icon_img'),
            children_data.get('header_img'),
            children_data.get('thumbnail'),
            children_data.get('icon_img'),
            children_data.get('header_img'),
            children_data.get('banner_img'),
            children_data.get('url')
        ]
        # Oembed/preview thumbnails are preferred over banner/url fallbacks;
        # insertion order matches the original (media, secure_media,
        # preview-oembed, preview-source), each placed before the last two.
        for thumb in (
                _nested(children_data, 'media', 'oembed', 'thumbnail_url'),
                _nested(children_data, 'secure_media', 'oembed', 'thumbnail_url'),
                _nested(children_data, 'preview', 'oembed', 'thumbnail_url'),
                _nested(children_data, 'preview', 'images', 0, 'source', 'url')):
            if thumb:
                candidates.insert(-2, thumb)
        for candidate in candidates:
            # Skip reddit placeholder keywords and empty values.
            if candidate in ['default', 'spoiler', 'image', 'self'] or not candidate:
                continue
            elif '.jpg' in candidate or '.png' in candidate:
                image = candidate
                break
        if '?' in image:
            image = image.partition('?')[0]
    except (KeyError, IndexError, TypeError):
        pass

    if 'embed.ly' in image:
        # embed.ly proxies the real image in its 'url' query parameter.
        image = dict(parse_qsl(urlparse(image).query))['url']

    try:
        backdrops = [children_data.get('banner_background_image')]
        p_fanart = _nested(children_data, 'preview', 'images', 0, 'source', 'url')
        if p_fanart:
            backdrops.insert(0, p_fanart)
        s_fanart = _nested(children_data, 'secure_media', 'oembed', 'thumbnail_url')
        if s_fanart:
            backdrops.insert(-1, s_fanart)
        for backdrop in backdrops:
            if backdrop:
                fanart = backdrop
                break
        if '?' in fanart:
            fanart = fanart.partition('?')[0]
    except (KeyError, IndexError):
        pass

    return image, fanart
Author Twilight0

    SPDX-License-Identifier: GPL-3.0-only
    See LICENSES/GPL-3.0-only for more information.

'''

from __future__ import absolute_import

import sys

from tulip.compat import parse_qsl

argv = sys.argv

try:
    syshandle = int(argv[1])
except IndexError:
    # Not invoked as a Kodi plugin (e.g. imported directly): dummy handle.
    syshandle = -1

sysaddon = argv[0]

try:
    # argv[2] is the query string; strip its leading '?' before parsing.
    params_tuple = parse_qsl(argv[2].replace('?',''))
    params = dict(params_tuple)
except IndexError:
    params = {'action': None}

__all__ = ["syshandle", "sysaddon", "params"]