def retriever(source, destination, user_agent=None, referer=None,
              reporthook=None, data=None, **kwargs):

    if user_agent is None:
        user_agent = CHROME

    if referer is None:
        referer = '{0}://{1}/'.format(
            urlparse(source).scheme, urlparse(source).netloc)

    class Opener(URLopener):

        version = user_agent

        def __init__(self, **x509):
            # A single base-class init is enough; the original called both
            # URLopener.__init__ and super().__init__, initializing twice.
            URLopener.__init__(self, **x509)
            headers = [
                ('User-Agent', self.version),
                ('Accept', '*/*'),
                ('Referer', referer)
            ]
            if kwargs:
                headers.extend(iteritems(kwargs))
            self.addheaders = headers

    Opener().retrieve(source, destination, reporthook, data)
def pseudo_live(url):

    if url.endswith('fifties'):
        url = '{0}movies.php?y=7&l=&g=&p='.format(GM_BASE)
    elif url.endswith('sixties'):
        url = '{0}movies.php?y=6&l=&g=&p='.format(GM_BASE)
    elif url.endswith('seventies'):
        url = '{0}movies.php?y=5&l=&g=&p='.format(GM_BASE)
    elif url.endswith('eighties'):
        url = '{0}movies.php?y=4&l=&g=&p='.format(GM_BASE)
    else:
        url = '{0}movies.php?g=8&y=&l=&p='.format(GM_BASE)

    if 'channel' in url:
        movie_list = list_channel_videos(urlparse(url).path[1:])
    elif 'playlist' in url:
        movie_list = list_playlist_videos(urlparse(url).path[1:])
    else:
        movie_list = gm_indexer().listing(url, get_listing=True)

    if 'youtube' in url:
        movie_list = [i for i in movie_list if i['duration'] >= 240]

    if not url.endswith('kids') and 'youtube' not in url:
        movie_list = [i for i in movie_list if i['url'] not in blacklister()]

    for i in movie_list:
        i.update({'action': 'play_skipped', 'isFolder': 'False'})

    plot = None

    if control.setting('pseudo_live_mode') == '0':
        choice = random_choice(movie_list)
        meta = {'title': choice['title'], 'image': choice['image']}
        if 'youtube' not in url:
            plot = gm_source_maker(choice['url']).get('plot')
        if plot:
            meta.update({'plot': plot})
        player(choice['url'], meta)
    else:
        shuffle(movie_list)
        directory.add(movie_list, as_playlist=True, auto_play=True)
def index_cy(self, url):

    html = client.request(url)

    items = [
        i for i in client.parseDOM(html, 'div', attrs={'class': 'box'})
        if urlparse(url).path in i
    ]

    try:
        next_link = client.parseDOM(
            html, 'a', attrs={'class': 'pager__link pager__link--next'},
            ret='href')[0]
        next_link = urljoin(url.partition('?')[0], next_link)
    except Exception:
        next_link = None

    for item in items:

        try:
            title_field = client.parseDOM(
                item, 'div', {'class': 'box__overlay-title'})[0]
        except IndexError:
            continue

        title = client.replaceHTMLCodes(
            client.parseDOM(title_field, 'a')[0]
        ).replace(u'ᵒ', u' μοίρες').strip()
        subtitle = client.replaceHTMLCodes(
            client.parseDOM(item, 'div', {'class': 'box__overlay-subtitle'})[0])
        label = ' | '.join([title, subtitle])
        url = client.parseDOM(title_field, 'a', ret='href')[0]
        url = urljoin(self.basecy_link, url + '/webtv')
        image = client.parseDOM(item, 'img', ret='src')[0]

        data = {'title': label, 'image': image, 'url': url, 'name': title}

        if next_link:
            data.update({'next': next_link})

        self.list.append(data)

    return self.list
def quote_paths(url):

    """
    This function will quote paths **only** in a given url
    :param url: string or unicode
    :return: joined url string
    """

    try:
        url = py2_enc(url)
        if url.startswith('http'):
            parsed = urlparse(url)
            processed_path = '/'.join(
                [quote(i) for i in parsed.path.split('/')])
            url = urlunparse(parsed._replace(path=processed_path))
            return url
        else:
            path = '/'.join([quote(i) for i in url.split('/')])
            return path
    except Exception:
        return url
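# Usage sketch (hypothetical URL): only the path segments get percent-quoted,
# while scheme, netloc and query are left untouched.
# quote_paths('https://example.com/some path/αρχείο.mp4')
# -> 'https://example.com/some%20path/%CE%B1%CF%81%CF%87%CE%B5%CE%AF%CE%BF.mp4'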
def obtain_authorization(_cookie, _uh):

    data = {
        'authorize': 'Allow',
        'state': state,
        'redirect_uri': redirect_uri,
        'response_type': 'code',
        'client_id': client_id,
        'duration': 'permanent',
        'scope': ' '.join(scope),
        'uh': _uh
    }

    headers = client.request(
        api_link('authorize'), cookie=_cookie, post=data, redirect=False,
        output='headers')

    geturl = dict([
        line.partition(': ')[::2] for line in str(headers).splitlines()
    ]).get('location')

    token = dict(parse_qsl(urlparse(geturl).query)).get('code')

    if not token:
        return

    get_tokens(code=token)
def gk_debris(link):

    html = client.request(link)
    url = client.parseDOM(
        html, 'iframe', ret='src', attrs={'class': 'metaframe rptss'})[0]
    url = dict(parse_qsl(urlparse(url).query)).get('source')

    return url
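# Usage sketch (hypothetical markup): given an embed iframe such as
# <iframe class="metaframe rptss" src="//player.example.com/?source=http%3A%2F%2Fhost%2Fv.mp4">
# gk_debris returns the decoded 'source' query value, here 'http://host/v.mp4'
# (parse_qsl unquotes the percent-encoding).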
def retriever(source, destination, user_agent=None, referer=None,
              reporthook=None, data=None, allow_caching=True, **kwargs):

    if user_agent is None:
        if allow_caching:
            from tulip import cache
            user_agent = cache.get(randomagent, 12)
        else:
            user_agent = CHROME

    if referer is None:
        referer = '{0}://{1}/'.format(
            urlparse(source).scheme, urlparse(source).netloc)

    class Opener(URLopener):

        version = user_agent

        def __init__(self):
            URLopener.__init__(self)
            headers = [
                ('User-Agent', self.version),
                ('Accept', '*/*'),
                ('Referer', referer)
            ]
            if kwargs:
                headers.extend(iteritems(kwargs))
            self.addheaders = headers

    Opener().retrieve(source, destination, reporthook, data)
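# Usage sketch (hypothetical URL and path): download a file with a cached
# random User-Agent and an auto-derived Referer; extra headers go in **kwargs.
# retriever('http://example.com/file.zip', '/tmp/file.zip',
#           **{'Accept-Encoding': 'identity'})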
def do_GET(self):

    parsed = urlparse(self.path)
    params = dict(parse_qsl(parsed.query))

    self._set_headers()
    self.wfile.write(
        reddit_page(authorized='code' in params, token=params.get('code', '')))

    if 'code' in params:
        control.setSetting('auth.token', params['code'])
        get_tokens(code=params['code'])
    elif 'error' in params:
        control.setSetting('get.toggle', 'false')
        tokens_reset()
def resolve(self, url):

    referer = url

    if '.m3u8' in url or '.mp4' in url or url.startswith('plugin'):
        return url

    html = client.request(url)

    if url == self.live_link_gr:
        url = client.parseDOM(
            html, 'div', attrs={'class': 'livePlayer'}, ret='data-liveurl')[0]
    elif url == self.live_link_cy:
        url = re.search(r'hls: [\'"](.+?)[\'"]', html).group(1)
    elif 'cloudskep' in html:
        url = client.parseDOM(
            html, 'a', {'class': 'player-play-inline hidden'}, ret='href')[0]
        signature = client.parseDOM(
            html, 'footer', {'class': 'footer'}, ret='player-signature')
        if signature:
            url = '?wmsAuthSign='.join([url, signature[0]])
    else:
        if 'data-plugin-player' not in html:
            qs = parse_qs(urlparse(url).query)
            video_id = qs['vid'][0]
            year = qs['year'][0]
            show_id = qs['showId'][0]
            html = client.request(
                self.player_query.format(
                    video_id=video_id, show_id=show_id, year=year))
        try:
            object_ = client.parseDOM(
                html, 'div', attrs={'id': 'Video-1'},
                ret='data-plugin-player')[0]
        except Exception:
            object_ = client.parseDOM(
                html, 'div', attrs={'id': 'currentvideourl'},
                ret='data-plugin-player')[0]
        url = json.loads(client.replaceHTMLCodes(object_))['Url']

    # An 11-character result is a bare YouTube video id
    if len(url) == 11:
        return self.yt_session(url)

    return url + user_agents.spoofer(referer=True, ref_str=referer)
def search(self, url):

    try:
        query = parse_qs(urlparse(url).query)['q'][0]
        url = self.search_link % quote_plus(query) + self.key_link
        result = client.request(url)
        items = json.loads(result)['items']
        items = [i['id']['videoId'] for i in items]
        for url in items:
            url = self.resolve(url)
            if url is not None:
                return url
    except Exception:
        return
def activate_other_addon(url, query=None):

    if not url.startswith('plugin://'):
        url = ''.join(['plugin://', url, '/'])

    parsed = urlparse(url)

    if not control.condVisibility(
            'System.HasAddon({0})'.format(parsed.netloc)):
        control.execute('InstallAddon({0})'.format(parsed.netloc))

    params = dict(parse_qsl(parsed.query))
    action = params.get('action')
    url = params.get('url')

    directory.run_builtin(
        addon_id=parsed.netloc, action=action, url=url, content_type=query)
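# Usage sketch (hypothetical add-on id): the add-on is installed when missing,
# then invoked with the action/url taken from the plugin:// query string.
# activate_other_addon(
#     'plugin://plugin.video.example/?action=play&url=http%3A%2F%2Fhost%2Fvideo.mp4')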
def request(url, close=True, redirect=True, error=False, proxy=None,
            post=None, headers=None, mobile=False, limit=None, referer=None,
            cookie=None, output='', timeout='30', username=None,
            password=None, verify=True, as_bytes=False):

    try:
        url = url.decode('utf-8')
    except Exception:
        pass

    if isinstance(post, dict):
        post = bytes(urlencode(post), encoding='utf-8')
    elif isinstance(post, basestring) and is_py3:
        post = bytes(post, encoding='utf-8')

    try:
        handlers = []

        if username is not None and password is not None and not proxy:
            passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
            passmgr.add_password(None, uri=url, user=username, passwd=password)
            handlers += [urllib2.HTTPBasicAuthHandler(passmgr)]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)

        if proxy is not None:
            if username is not None and password is not None:
                passmgr = urllib2.ProxyBasicAuthHandler()
                passmgr.add_password(
                    None, uri=url, user=username, passwd=password)
                handlers += [
                    urllib2.ProxyHandler({'http': '{0}'.format(proxy)}),
                    urllib2.HTTPHandler,
                    urllib2.ProxyBasicAuthHandler(passmgr)
                ]
            else:
                handlers += [
                    urllib2.ProxyHandler({'http': '{0}'.format(proxy)}),
                    urllib2.HTTPHandler
                ]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)

        if output == 'cookie' or output == 'extended' or close is not True:
            cookies = cookielib.LWPCookieJar()
            handlers += [
                urllib2.HTTPHandler(), urllib2.HTTPSHandler(),
                urllib2.HTTPCookieProcessor(cookies)
            ]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)

        try:
            import platform
            is_XBOX = platform.uname()[1] == 'XboxOne'
        except Exception:
            is_XBOX = False

        if not verify and sys.version_info >= (2, 7, 12):
            try:
                import ssl
                ssl_context = ssl._create_unverified_context()
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                urllib2.install_opener(opener)
            except Exception:
                pass
        elif verify and ((2, 7, 8) < sys.version_info < (2, 7, 12) or is_XBOX):
            try:
                import ssl
                try:
                    import _ssl
                    CERT_NONE = _ssl.CERT_NONE
                except Exception:
                    CERT_NONE = ssl.CERT_NONE
                ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = CERT_NONE
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                urllib2.install_opener(opener)
            except Exception:
                pass

        # If headers is None, .update raises and a fresh dict is used instead
        try:
            headers.update(headers)
        except Exception:
            headers = {}

        if 'User-Agent' in headers:
            pass
        elif mobile is not True:
            # headers['User-Agent'] = agent()
            headers['User-Agent'] = cache.get(randomagent, 12)
        else:
            headers['User-Agent'] = cache.get(random_mobile_agent, 12)

        if 'Referer' in headers:
            pass
        elif referer is None:
            headers['Referer'] = '%s://%s/' % (
                urlparse(url).scheme, urlparse(url).netloc)
        else:
            headers['Referer'] = referer

        if 'Accept-Language' not in headers:
            headers['Accept-Language'] = 'en-US'

        if 'Cookie' in headers:
            pass
        elif cookie is not None:
            headers['Cookie'] = cookie

        if redirect is False:

            class NoRedirectHandler(urllib2.HTTPRedirectHandler):

                def http_error_302(self, reqst, fp, code, msg, head):
                    infourl = addinfourl(fp, head, reqst.get_full_url())
                    infourl.status = code
                    infourl.code = code
                    return infourl

                http_error_300 = http_error_302
                http_error_301 = http_error_302
                http_error_303 = http_error_302
                http_error_307 = http_error_302

            opener = urllib2.build_opener(NoRedirectHandler())
            urllib2.install_opener(opener)

            try:
                del headers['Referer']
            except Exception:
                pass

        req = urllib2.Request(url, data=post, headers=headers)

        try:
            response = urllib2.urlopen(req, timeout=int(timeout))
        except urllib2.HTTPError as response:
            if response.code == 503:
                if 'cf-browser-verification' in response.read(5242880):
                    netloc = '{0}://{1}'.format(
                        urlparse(url).scheme, urlparse(url).netloc)
                    cf = cache.get(
                        cfcookie, 168, netloc, headers['User-Agent'], timeout)
                    headers['Cookie'] = cf
                    req = urllib2.Request(url, data=post, headers=headers)
                    response = urllib2.urlopen(req, timeout=int(timeout))
                elif error is False:
                    return
            elif error is False:
                return

        if output == 'cookie':
            try:
                result = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except Exception:
                pass
            try:
                result = cf
            except Exception:
                pass
        elif output == 'response':
            if limit == '0':
                result = (str(response.code), response.read(224 * 1024))
            elif limit is not None:
                result = (str(response.code), response.read(int(limit) * 1024))
            else:
                result = (str(response.code), response.read(5242880))
        elif output == 'chunk':
            try:
                content = int(response.headers['Content-Length'])
            except Exception:
                content = (2049 * 1024)
            if content < (2048 * 1024):
                return
            result = response.read(16 * 1024)
        elif output == 'extended':
            try:
                cookie = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except Exception:
                pass
            try:
                cookie = cf
            except Exception:
                pass
            content = response.headers
            result = response.read(5242880)
            return result, headers, content, cookie
        elif output == 'geturl':
            result = response.geturl()
        elif output == 'headers':
            content = response.headers
            return content
        else:
            if limit == '0':
                result = response.read(224 * 1024)
            elif limit is not None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)

        if close is True:
            response.close()

        if is_py3 and not as_bytes and isinstance(result, bytes):
            return result.decode('utf-8')
        else:
            return result

    except Exception as reason:
        log('Client module failed, reason of failure: ' + repr(reason))
        return
def items_list(self, link):

    if not link.startswith('http'):
        link = base_link() + link

    link = client.quote_paths(link)
    link = link.replace('old.', 'oauth.' if access_boolean() else 'www.')
    link = link.replace('www.', 'oauth.' if access_boolean() else 'www.')

    #### Start of nested helper functions ####

    # Pulls images and thumbnails
    def image_generator(children_data):

        print(children_data)

        image = control.addonInfo('icon')
        fanart = control.fanart()

        try:
            try:
                m_thumb = children_data.get('media').get('oembed').get(
                    'thumbnail_url')
            except AttributeError:
                m_thumb = None
            try:
                s_thumb = children_data.get('secure_media').get(
                    'oembed').get('thumbnail_url')
            except AttributeError:
                s_thumb = None
            try:
                p_thumb = children_data.get('preview').get('oembed').get(
                    'thumbnail_url')
            except AttributeError:
                p_thumb = None
            try:
                u_thumb = children_data.get('preview').get(
                    'images')[0].get('source').get('url')
            except AttributeError:
                u_thumb = None

            images = [
                children_data.get('community_icon'),
                children_data.get('icon_img'),
                children_data.get('header_img'),
                children_data.get('thumbnail'),
                children_data.get('icon_img'),
                children_data.get('header_img'),
                children_data.get('banner_img'),
                children_data.get('url')
            ]

            if m_thumb:
                images.insert(-2, m_thumb)
            if s_thumb:
                images.insert(-2, s_thumb)
            if p_thumb:
                images.insert(-2, p_thumb)
            if u_thumb:
                images.insert(-2, u_thumb)

            for i in images:
                if i in ['default', 'spoiler', 'image', 'self'] or not i:
                    continue
                elif '.jpg' in i or '.png' in i:
                    image = i
                    break

            if '?' in image:
                image = image.partition('?')[0]

        except (KeyError, IndexError, TypeError):
            pass

        if 'embed.ly' in image:
            image = dict(parse_qsl(urlparse(image).query))['url']

        try:
            try:
                p_fanart = children_data.get('preview').get(
                    'images')[0].get('source').get('url')
            except AttributeError:
                p_fanart = None
            try:
                s_fanart = children_data.get('secure_media').get(
                    'oembed').get('thumbnail_url')
            except AttributeError:
                s_fanart = None

            fanarts = [children_data.get('banner_background_image')]

            if p_fanart:
                fanarts.insert(0, p_fanart)
            if s_fanart:
                fanarts.insert(-1, s_fanart)

            for f in fanarts:
                if not f:
                    continue
                elif f:
                    fanart = f
                    break

            if '?' in fanart:
                fanart = fanart.partition('?')[0]

        except (KeyError, IndexError):
            pass

        return image, fanart

    # Comment
    def t1_kind(children_data, next_url):

        author = children_data['author']
        body = legacy_replace(children_data['body'])
        short = legacy_replace(body[:50] + '...')
        image = control.addonInfo('icon')
        subreddit = children_data['subreddit']
        subreddit_id = children_data['subreddit_id']
        name = children_data['name']

        if children_data['replies']:
            reply_json = children_data['replies']
            replies_children = reply_json['data']['children']
            replies = len(replies_children)
            try:
                comprehension = [
                    base_link() + client.quote_paths(r['data']['permalink'])
                    for r in replies_children
                ]
                replies_urls = json.dumps(comprehension)
            except KeyError:
                replies_urls = None
        else:
            replies_urls = None
            replies = 0

        replies_num = ' | ' + control.lang(30102) + str(
            replies) if replies > 0 else ''

        title = short.replace(
            '\n', '') + self.formatting + '[I]' + author + '[/I]' + replies_num

        url = permalink = base_link() + children_data['permalink']
        link_id = children_data['link_id']

        pairs = {
            'title': title, 'url': url, 'permalink': permalink,
            'image': image, 'subreddit': subreddit, 'kind': 't1',
            'subreddit_url': base_link() + '/r/' + subreddit,
            'next': next_url, 'subreddit_id': subreddit_id, 'name': name,
            'body': body, 'plot': body, 'query': replies_urls,
            'replies_urls': replies_urls, 'link_id': link_id
        }

        return pairs

    # Link/Thread
    def t3_kind(children_data, next_url):

        title = client.replaceHTMLCodes(children_data['title'])
        name = children_data['name']
        author = children_data['author']
        domain = children_data['domain']
        num_comments = str(children_data['num_comments'])

        try:
            if domain.startswith('self.'):
                selftext = legacy_replace(children_data['selftext'])
                if selftext == '':
                    selftext = title
            else:
                selftext = None
        except KeyError:
            selftext = None

        subreddit = children_data['subreddit']
        subreddit_id = children_data['subreddit_id']
        url = children_data['url']
        permalink = base_link() + children_data['permalink']

        image, fanart = image_generator(children_data)

        if access_boolean() and 'reddit' in url and 'video' not in url:
            url = url.replace('www.reddit', 'oauth.reddit')

        label = (
            title + ' | ' + subreddit + ' | ' + '[B]' + author + '[/B]' +
            self.formatting + '[I]' + domain + '[/I]' + ' | ' +
            '[B]' + control.lang(30103) + num_comments + '[/B]'
        )

        pairs = {
            'label': label, 'title': title, 'url': url, 'image': image,
            'fanart': fanart, 'next': next_url, 'subreddit_id': subreddit_id,
            'subreddit': subreddit,
            'subreddit_url': base_link() + '/r/' + subreddit, 'kind': 't3',
            'permalink': permalink, 'domain': domain, 'name': name,
            'selftext': selftext, 'author': author, 'plot': selftext,
            'query': client.quote_paths(permalink)
        }

        return pairs

    # Subreddit
    def t5_kind(children_data, next_url):

        display_name = client.replaceHTMLCodes(children_data['display_name'])
        title = client.replaceHTMLCodes(children_data['title'])
        public_description = legacy_replace(
            children_data['public_description'])
        description = legacy_replace(children_data['description'])
        plot = json.dumps({
            'title': title,
            'public_description': public_description,
            'description': description
        })
        subscribers = str(children_data['subscribers'])
        url = base_link() + children_data['url']
        name = children_data['name']

        image, fanart = image_generator(children_data)

        pairs = {
            'title': title + ' | ' + subscribers + self.formatting +
                     '[I]' + display_name + '[/I]',
            'url': url, 'image': image, 'next': next_url, 'fanart': fanart,
            'display_name': display_name, 'name': name, 'kind': 't5',
            'plot': plot
        }

        return pairs

    # Multi
    def lm_kind(children_data):

        display_name = children_data['display_name']
        name = children_data['name']
        # description = html_processor(children_data['description_html'])

        try:
            image = children_data['icon_url']
            if not image:
                raise KeyError
        except KeyError:
            image = control.addonInfo('icon')

        path = base_link() + children_data['path']
        subreddits = json.dumps(children_data['subreddits'])

        pairs = {
            'title': display_name, 'url': path, 'image': image,
            'subreddits': subreddits, 'kind': 'LabeledMulti', 'name': name
        }

        return pairs

    def more_kind(children_data):

        # title = '' if children_data['depth'] == 0 else '>' * children_data['depth'] + ' ' + control.lang(30117)
        title = control.lang(30144)
        name, id = (children_data['name'], children_data['id'])

        if len(name) < 10:
            name = children_data['parent_id']
        if len(id) < 7:
            id = children_data['parent_id'][3:]

        parsed = urlparse(link)
        permalink = urlunparse(parsed._replace(path=parsed.path + id))

        if children_data['children']:
            replies_urls = json.dumps([
                urlunparse(parsed._replace(path=parsed.path + u))
                for u in children_data['children']
            ])
        else:
            replies_urls = None

        image = control.addonInfo('icon')

        pairs = {
            'title': title, 'name': name, 'id': id, 'image': image,
            'kind': 'more', 'permalink': permalink,
            'replies_urls': replies_urls
        }

        return pairs

    def next_appender(json_data):

        try:
            next_id = json_data['after']
            if not next_id:
                raise KeyError
            elif '&after=' in parsed.query:
                _next_url = urlunparse(
                    parsed._replace(query=re.sub(
                        r'&after=\w{8,9}', r'&after=' + next_id,
                        parsed.query)))
            else:
                _next_url = urlunparse(
                    parsed._replace(query=parsed.query + '&after=' + next_id))
        except KeyError:
            _next_url = ''

        return _next_url

    def processor(_json):

        if isinstance(_json, list):
            for j in _json:
                data = j['data']
                kind = j['kind']
                if kind == 'LabeledMulti':
                    pairs = lm_kind(data)
                    self.data.append(pairs)
                else:
                    children = data['children']
                    nu = next_appender(data)
                    for c in children:
                        kind = c['kind']
                        data = c['data']
                        if kind == 't3':
                            pairs = t3_kind(data, nu)
                        elif kind == 't1':
                            pairs = t1_kind(data, nu)
                        elif kind == 'more':
                            pairs = more_kind(data)
                        else:
                            pairs = None
                        self.data.append(pairs)
            return self.data
        else:
            data = _json['data']
            children = data['children']
            nu = next_appender(data)
            for d in children:
                item_data = d['data']
                kind = d['kind']
                # Link:
                if kind == 't3':
                    pairs = t3_kind(item_data, nu)
                # Subreddit:
                elif kind == 't5':
                    pairs = t5_kind(item_data, nu)
                # Comment:
                elif kind == 't1':
                    pairs = t1_kind(item_data, nu)
                elif kind == 'more':
                    # The item's own payload carries the 'more' fields,
                    # not the enclosing listing's data
                    pairs = more_kind(item_data)
                else:
                    pairs = {'title': 'Null', 'action': None}
                self.data.append(pairs)
            return self.data

    #### End of nested helper functions ####

    parsed = urlparse(link)
    query = dict(parse_qsl(parsed.query))
    path = parsed.path

    if 'limit' not in query:
        query.update({'limit': control.setting('items.limit')})

    query = urlencode(query)

    if not access_boolean() and not path.endswith('.json'):
        path += dotjson

    link = urlunparse(parsed._replace(path=path, query=query))

    json_object = client.request(link, headers=request_headers())
    loaded = json.loads(json_object)

    self.list = processor(loaded)

    return self.list
def root(url):

    root_list = []
    groups_list = []

    html = client.request(url)

    if url == SPORTS:
        sports_index = client.parseDOM(
            html, 'div', attrs={'class': 'col-xs-6 text-center'})[0]
        return sports_index
    elif url == MUSIC:
        music_index = client.parseDOM(
            html, 'div', attrs={'class': 'col-sm-5 col-md-4'})[0]
        return music_index
    else:
        result = client.parseDOM(
            html, 'div',
            attrs={'class': 'row', 'style': 'margin-bottom: 20px;'})[0]
        items = re.findall('(<option ?value=.*?</option>)', result, re.U)
        groups = client.parseDOM(
            result, 'option', attrs={'selected value': '.+?'})

        for group in groups:
            if group == u'ΑΡΧΙΚΑ':
                group = group.replace(u'ΑΡΧΙΚΑ', '30213')
            elif group == u'ΕΤΟΣ':
                group = group.replace(u'ΕΤΟΣ', '30090')
            elif group == u'ΚΑΝΑΛΙ':
                group = group.replace(u'ΚΑΝΑΛΙ', '30211')
            elif group == u'ΕΙΔΟΣ':
                group = group.replace(u'ΕΙΔΟΣ', '30200')
            elif group == u'ΠΑΡΑΓΩΓΗ':
                group = group.replace(u'ΠΑΡΑΓΩΓΗ', '30212')
            groups_list.append(group)

        for item in items:

            name = client.parseDOM(
                item, 'option', attrs={'value': '.+?.php.+?'})[0]
            name = name.replace(u'σήμερα', control.lang(30268))
            title = name[0].capitalize() + name[1:]
            link = client.parseDOM(item, 'option', ret='value')[0]
            indexer = urlparse(link).query
            index = urljoin(GM_BASE, link)

            if indexer.startswith('l='):
                group = '30213'
            elif indexer.startswith('y='):
                group = '30090'
            elif indexer.startswith('c='):
                group = '30211'
            elif indexer.startswith('g='):
                group = '30200'
            elif indexer.startswith('p='):
                group = '30212'
            else:
                group = ''

            root_list.append({'title': title, 'group': group, 'url': index})

        return root_list, groups_list
def items_list(self, url, post=None):

    indexer = urlparse(url).query

    # Pages of year filters to iterate per index type. This logic was left
    # commented out upstream, which leaves 'length' undefined for the loop
    # below; it is restored here. Note: the upstream comment used all([...]),
    # which can never be true for a single url, so any() is assumed instead.
    if 'movies.php' in url:
        length = 9
    elif any(['shortfilm.php' in url, 'theater.php' in url]):
        length = 6
    else:
        length = 2

    for year in list(range(1, length)):
        if indexer.startswith('l='):
            p = 'y=' + str(year) + '&g=&p='
        elif indexer.startswith('g='):
            p = 'y=' + str(year) + '&l=&p='
        elif indexer.startswith('p='):
            p = 'y=' + str(year) + '&l=&g='
        elif indexer.startswith('c='):
            p = 'y=' + str(year) + '&l=&g='
        else:
            p = ''
        self.years.append(p)

    if indexer.startswith(
            ('l=', 'g=', 's=', 'p=', 'c=')
    ) and 'movies.php' in url or 'shortfilm.php' in url or 'theater.php' in url:

        for content in self.years:
            links = GM_BASE + url.rpartition('/')[2].partition(
                '&')[0] + '&' + content
            try:
                htmls = client.request(links).decode('utf-8')
            except AttributeError:
                htmls = client.request(links)
            self.data.append(htmls)

        result = u''.join(self.data)
        content = client.parseDOM(
            result, 'div', attrs={'class': 'col-xs-6 col-sm-4 col-md-3'})

    else:

        html = client.request(url, post=post)
        content = client.parseDOM(
            html, 'div', attrs={'class': 'col-xs-6 col-sm-4 col-md-3'})

    contents = ''.join(content)
    items = re.findall('(<a.*?href.*?div.*?</a>)', contents, re.U)

    for item in items:

        title = client.parseDOM(item, 'h4')[0]
        image = client.parseDOM(item, 'img', ret='src')[0]
        name = title.rpartition(' (')[0]
        image = urljoin(GM_BASE, image)
        link = client.parseDOM(item, 'a', ret='href')[0]
        link = urljoin(GM_BASE, link)
        year = re.findall(r'.*?\((\d{4})', title, re.U)[0]

        self.list.append({
            'title': title, 'url': link, 'image': image,
            'year': int(year), 'name': name
        })

    return self.list
def _parsed_url(url):

    parsed_url = urlparse(url)
    prefix = parsed_url.scheme + '://' + parsed_url.netloc
    base_path = posixpath.normpath(parsed_url.path + '/..')

    return urljoin(prefix, base_path)
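# Usage sketch (hypothetical URL): strips the last path segment, yielding the
# parent "directory" of a URL, handy for resolving relative media paths.
# _parsed_url('http://example.com/live/stream/index.m3u8')
# -> 'http://example.com/live/stream'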
def conditionals(url):

    add_plugin_dirs(control.transPath(PLUGINS_PATH))

    def yt(uri):
        if uri.startswith('plugin://'):
            return uri
        if len(uri) == 11:
            uri = YT_URL + uri
        try:
            return youtube.wrapper(uri)
        except YouTubeException as exp:
            log_debug('Youtube resolver failure, reason: ' + repr(exp))
            return

    if 'youtu' in url or len(url) == 11:
        log_debug('Resolving with youtube addon...')
        return yt(url)
    elif HOSTS(url) and HostedMediaFile(url).valid_url():
        try:
            stream = resolve_url(url)
            log_debug('Resolving with Resolveurl...')
        except HTTPError:
            return url
        return stream
    elif HostedMediaFile(url).valid_url():
        if control.setting('show_alt_vod') == 'true':
            try:
                stream = resolve_url(url)
                log_debug('Resolving with Resolveurl...')
            except ResolverError:
                return
            except HTTPError:
                return url
            return stream
        else:
            control.okDialog('AliveGR', control.lang(30354))
            return 'https://static.adman.gr/inpage/blank.mp4'
    elif GM_BASE in url:
        sources = gm_source_maker(url)
        stream = mini_picker(sources['links'])
        return conditionals(stream)
    elif urlparse(GK_BASE).netloc in url:
        streams = gk_source_maker(url)
        stream = mini_picker(streams['links'])
        if control.setting('check_streams') == 'true':
            return stream
        else:
            return conditionals(stream)
    else:
        log_debug('Passing direct link...')
        return url
def my_account(self):

    choices = [
        control.lang(30091), control.lang(30092), control.lang(30093)
    ]

    username = control.setting('username.string')
    icon_img = control.setting('avatar.url')

    if not username or not icon_img:
        ai = account_info()
        username, icon_img = (ai['name'], ai['icon_img'])
        parsed = urlparse(icon_img)
        query = 'fit=crop&crop=faces%2Centropy&arh=1.0&w=256&h=256&s='
        icon_img = urlunparse(
            parsed._replace(query=query + icon_img.rpartition('s=')[2]))
        control.setSetting('username.string', username)
        control.setSetting('avatar.url', icon_img)

    if control.setting('window.action') == '0':

        choice = control.selectDialog(choices)

        if choice == 0:
            self.listing(reddit_url('/user/{0}/saved/'.format(username)))
        elif choice == 1:
            self.bookmarks(table='reddit')
        elif choice == 2:
            self.listing(reddit_url('/api/multi/mine/'))
        else:
            control.execute('Dialog.Close(all)')
            return

    else:

        self.list = [
            {
                'title': choices[0],
                'action': 'listing',
                'url': reddit_url('/user/{0}/saved/'.format(username)),
                'image': icon_img
            },
            {
                'title': choices[1],
                'action': 'subscribed_subreddits',
                'image': icon_img
            },
            {
                'title': choices[2],
                'action': 'listing',
                'url': reddit_url('/api/multi/mine/'),
                'image': icon_img
            }
        ]

        if control.setting('user.icon') == 'false':
            for i in self.list:
                del i['image']

        directory.add(self.list)
def request(url, close=True, redirect=True, error=False, proxy=None,
            post=None, headers=None, mobile=False, limit=None, referer=None,
            cookie=None, output='', timeout='30', username=None,
            password=None, verify=True, as_bytes=False, allow_caching=True):

    try:
        url = url.decode('utf-8')
    except Exception:
        pass

    if isinstance(post, dict):
        post = bytes(urlencode(post), encoding='utf-8')
    elif isinstance(post, str) and is_py3:
        post = bytes(post, encoding='utf-8')

    try:
        handlers = []

        if username is not None and password is not None and not proxy:
            passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
            passmgr.add_password(None, uri=url, user=username, passwd=password)
            handlers += [urllib2.HTTPBasicAuthHandler(passmgr)]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)

        if proxy is not None:
            if username is not None and password is not None:
                if is_py3:
                    passmgr = urllib2.HTTPPasswordMgr()
                    passmgr.add_password(
                        None, uri=url, user=username, passwd=password)
                else:
                    passmgr = urllib2.ProxyBasicAuthHandler()
                    passmgr.add_password(
                        None, uri=url, user=username, passwd=password)
                handlers += [
                    urllib2.ProxyHandler({'http': '{0}'.format(proxy)}),
                    urllib2.HTTPHandler,
                    urllib2.ProxyBasicAuthHandler(passmgr)
                ]
            else:
                handlers += [
                    urllib2.ProxyHandler({'http': '{0}'.format(proxy)}),
                    urllib2.HTTPHandler
                ]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)

        if output == 'cookie' or output == 'extended' or close is not True:
            cookies = cookielib.LWPCookieJar()
            handlers += [
                urllib2.HTTPHandler(), urllib2.HTTPSHandler(),
                urllib2.HTTPCookieProcessor(cookies)
            ]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)

        if not verify or ((2, 7, 8) < sys.version_info < (2, 7, 12)):
            try:
                ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = ssl.CERT_NONE
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                urllib2.install_opener(opener)
            except Exception:
                pass

        # If headers is None, .update raises and a fresh dict is used instead
        try:
            headers.update(headers)
        except Exception:
            headers = {}

        if 'User-Agent' in headers:
            pass
        elif mobile is not True:
            if allow_caching:
                from tulip import cache
                headers['User-Agent'] = cache.get(randomagent, 12)
            else:
                headers['User-Agent'] = CHROME
        else:
            if allow_caching:
                from tulip import cache
                headers['User-Agent'] = cache.get(random_mobile_agent, 12)
            else:
                headers['User-Agent'] = ANDROID

        if 'Referer' in headers:
            pass
        elif referer is None:
            headers['Referer'] = '%s://%s/' % (
                urlparse(url).scheme, urlparse(url).netloc)
        else:
            headers['Referer'] = referer

        if 'Accept-Language' not in headers:
            headers['Accept-Language'] = 'en-US'

        if 'Cookie' in headers:
            pass
        elif cookie is not None:
            headers['Cookie'] = cookie

        if redirect is False:

            class NoRedirectHandler(urllib2.HTTPRedirectHandler):

                def http_error_302(self, reqst, fp, code, msg, head):
                    infourl = addinfourl(fp, head, reqst.get_full_url())
                    infourl.status = code
                    infourl.code = code
                    return infourl

                http_error_300 = http_error_302
                http_error_301 = http_error_302
                http_error_303 = http_error_302
                http_error_307 = http_error_302

            opener = urllib2.build_opener(NoRedirectHandler())
            urllib2.install_opener(opener)

            try:
                del headers['Referer']
            except Exception:
                pass

        req = urllib2.Request(url, data=post, headers=headers)

        try:
            response = urllib2.urlopen(req, timeout=int(timeout))
        except HTTPError as response:
            if response.code == 503:
                if 'cf-browser-verification' in response.read(5242880):
                    if log_debug:
                        log_debug(
                            'This request cannot be handled due to human verification gate')
                    else:
                        print(
                            'This request cannot be handled due to human verification gate')
                    return
                elif error is False:
                    return
            elif error is False:
                return

        if output == 'cookie':
            try:
                result = '; '.join(
                    ['{0}={1}'.format(i.name, i.value) for i in cookies])
            except Exception:
                pass
        elif output == 'response':
            if limit == '0':
                result = (str(response.code), response.read(224 * 1024))
            elif limit is not None:
                result = (str(response.code), response.read(int(limit) * 1024))
            else:
                result = (str(response.code), response.read(5242880))
        elif output == 'chunk':
            try:
                content = int(response.headers['Content-Length'])
            except Exception:
                content = (2049 * 1024)
            if content < (2048 * 1024):
                return
            result = response.read(16 * 1024)
        elif output == 'extended':
            try:
                cookie = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except Exception:
                pass
            content = response.headers
            result = response.read(5242880)
            if not as_bytes:
                result = py3_dec(result)
            return result, headers, content, cookie
        elif output == 'geturl':
            result = response.geturl()
        elif output == 'headers':
            content = response.headers
            if close:
                response.close()
            return content
        elif output == 'file_size':
            try:
                content = int(response.headers['Content-Length'])
            except Exception:
                content = '0'
            response.close()
            return content
        elif output == 'json':
            content = json.loads(response.read(5242880))
            response.close()
            return content
        else:
            if limit == '0':
                result = response.read(224 * 1024)
            elif limit is not None:
                if isinstance(limit, int):
                    result = response.read(limit * 1024)
                else:
                    result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)

        if close is True:
            response.close()

        if not as_bytes:
            result = py3_dec(result)

        return result

    except Exception as reason:
        _, __, tb = sys.exc_info()
        print(traceback.print_tb(tb))
        if log_debug:
            log_debug(
                'Request failed, reason: ' + repr(reason) + ' on url: ' + url)
        else:
            print('Request failed, reason: ' + repr(reason) + ' on url: ' + url)
        return
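# Usage sketch (hypothetical URLs): fetch a page as text, read only the
# headers, or decode a JSON endpoint directly via the output modes above.
# html = request('http://example.com/')
# hdrs = request('http://example.com/', output='headers')
# data = request('http://example.com/api.json', output='json')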
def cfcookie(netloc, ua, timeout):

    try:
        headers = {'User-Agent': ua}
        req = urllib2.Request(netloc, headers=headers)

        try:
            urllib2.urlopen(req, timeout=int(timeout))
        except urllib2.HTTPError as response:
            result = response.read(5242880)

        jschl = re.findall(r'name="jschl_vc" value="(.+?)"/>', result)[0]
        init = re.findall(
            r'setTimeout\(function\(\){\s*.*?.*:(.*?)};', result)[-1]
        builder = re.findall(r"challenge-form\'\);\s*(.*)a.v", result)[0]

        decryptVal = parseJSString(init)
        lines = builder.split(';')

        for line in lines:
            if len(line) > 0 and '=' in line:
                sections = line.split('=')
                line_val = parseJSString(sections[1])
                decryptVal = int(eval(
                    str(decryptVal) + str(sections[0][-1]) + str(line_val)))

        answer = decryptVal + len(urlparse(netloc).netloc)

        query = '%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (
            netloc, jschl, answer)

        if 'type="hidden" name="pass"' in result:
            passval = re.findall(r'name="pass" value="(.*?)"', result)[0]
            query = '%s/cdn-cgi/l/chk_jschl?pass=%s&jschl_vc=%s&jschl_answer=%s' % (
                netloc, quote_plus(passval), jschl, answer)

        time.sleep(5)

        cookies = cookielib.LWPCookieJar()
        handlers = [
            urllib2.HTTPHandler(), urllib2.HTTPSHandler(),
            urllib2.HTTPCookieProcessor(cookies)
        ]
        opener = urllib2.build_opener(*handlers)
        urllib2.install_opener(opener)

        try:
            req = urllib2.Request(query, headers=headers)
            urllib2.urlopen(req, timeout=int(timeout))
        except Exception:
            pass

        cookie = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])

        return cookie
    except Exception:
        pass
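# Usage sketch: the client caches the solved challenge cookie per netloc
# (as in the 503 handler of request above, hypothetical netloc shown):
# cf = cache.get(cfcookie, 168, 'http://example.com', headers['User-Agent'], '30')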
def gm_source_maker(url):

    if 'episode' in url:
        html = client.request(
            url=url.partition('?')[0], post=url.partition('?')[2])
    else:
        html = client.request(url)

    html = py2_uni(html)

    if 'episode' in url:

        episodes = re.findall(r'''(?:<a.+?/a>|<p.+?/p>)''', html)

        hl = []
        links = []

        for episode in episodes:
            if '<p style="margin-top:0px; margin-bottom:4px;">' in episode:
                host = client.parseDOM(episode, 'p')[0].split('<')[0]
                pts = client.parseDOM(episode, 'a')
                lks = client.parseDOM(episode, 'a', ret='href')
                for p in pts:
                    hl.append(u''.join([host, control.lang(30225), p]))
                for link_ in lks:
                    links.append(link_)
            else:
                pts = client.parseDOM(episode, 'a')
                lks = client.parseDOM(episode, 'a', ret='href')
                for p in pts:
                    hl.append(p)
                for link_ in lks:
                    links.append(link_)

        links = [urljoin(GM_BASE, link) for link in links]
        hosts = [
            host.replace(u'προβολή στο ', control.lang(30015)) for host in hl
        ]

        links_list = list(zip(hosts, links))

        data = {'links': links_list}

        if '<p class="text-muted text-justify">' in html:
            plot = client.parseDOM(html, 'p')[0]
            data.update({'plot': plot})

        return data

    elif 'view' in url:

        link = client.parseDOM(
            html, 'a', ret='href', attrs={'class': 'btn btn-primary'})[0]
        host = urlparse(link).netloc.replace('www.', '').capitalize()

        return {'links': [(''.join([control.lang(30015), host]), link)]}

    elif 'music' in url:

        title = re.search(r'''search\(['"](.+?)['"]\)''', html).group(1)
        link = list_search(query=title, limit=1)[0]['url']

        return {'links': [(''.join([control.lang(30015), 'Youtube']), link)]}

    else:

        try:
            info = client.parseDOM(
                html, 'h4', attrs={'style': 'text-indent:10px;'})
            if ',' in info[1]:
                genre = info[1].lstrip(u'Είδος:').split(',')
                genre = random.choice(genre)
                genre = genre.strip()
            else:
                genre = info[1].lstrip(u'Είδος:').strip()
        except Exception:
            genre = control.lang(30147)

        div_tags = parsers.itertags(html, 'div')
        buttons = [
            i.text for i in list(div_tags)
            if 'margin: 0px 0px 10px 10px;' in i.attributes.get('style', '')
        ]

        links = []
        hl = []

        for button in buttons:
            if 'btn btn-primary dropdown-toggle' in button:
                host = client.stripTags(
                    client.parseDOM(button, 'button')[0]).strip()
                parts = client.parseDOM(button, 'li')
                for part in parts:
                    part_ = client.parseDOM(part, 'a')[0]
                    link = client.parseDOM(part, 'a', ret='href')[0]
                    hl.append(', '.join([host, part_]))
                    links.append(link)
            else:
                host = client.parseDOM(button, 'a')[0]
                link = client.parseDOM(button, 'a', ret='href')[0]
                hl.append(host)
                links.append(link)

        links = [urljoin(GM_BASE, link) for link in links]
        hosts = [
            host.replace(
                u'προβολή στο ', control.lang(30015)
            ).replace(
                u'προβολή σε ', control.lang(30015)
            ).replace(
                u'μέρος ', control.lang(30225)
            ) for host in hl
        ]

        links_list = list(zip(hosts, links))

        data = {'links': links_list, 'genre': genre}

        if 'text-align: justify' in html:
            plot = client.parseDOM(
                html, 'p', attrs={'style': 'text-align: justify'})[0]
        elif 'text-justify' in html:
            plot = client.parseDOM(
                html, 'p', attrs={'class': 'text-justify'})[0]
        else:
            plot = control.lang(30085)

        data.update({'plot': plot})

        imdb_code = re.search(r'imdb.+?/title/([\w]+?)/', html)

        if imdb_code:
            code = imdb_code.group(1)
            data.update({'code': code})

        return data
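# Usage sketch: the returned dict carries a 'links' list of (label, url)
# tuples plus optional 'plot'/'genre'/'code' keys, and feeds the stream
# picker as in conditionals above:
# sources = gm_source_maker(url)
# stream = mini_picker(sources['links'])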
def episodes_list_cy(self, url, title, image):

    if title:
        try:
            title = title.decode('utf-8')
            title = title.partition('|')[0]
        except Exception:
            title = title.partition('|')[0]

    if url.startswith(self.views_ajax):
        html = client.request(
            url.partition('#')[0], post=url.partition('#')[2])
        _json = json.loads(html)
        html = _json[4]['data']
        view_path = dict(parse_qsl(url.partition('#')[2]))['view_path']
        view_args = dict(parse_qsl(url.partition('#')[2]))['view_args']
        page = str(int(dict(parse_qsl(url.partition('#')[2]))['page']) + 1)
    else:
        html = client.request(url)
        view_path = urlparse(url).path
        view_args = '/'.join(view_path.split('/')[2:4])
        page = '1'

    next_link = '#'.join([
        self.views_ajax,
        self.ajax_post_episodes.format(
            view_args=view_args, view_path=view_path, page=page)
    ])

    try:

        items = [
            i for i in client.parseDOM(html, 'div', {'class': 'box'})
            if 'play-big' in i
        ]

        if not items:
            raise Exception

        for item in items:

            itemtitle = client.parseDOM(item, 'a')[-1]

            if title:
                label = ' - '.join([title, itemtitle])
            else:
                label = itemtitle

            url = client.parseDOM(item, 'a', ret='href')[0]
            url = urljoin(self.basecy_link, url)
            image = client.parseDOM(item, 'img', ret='src')[0]

            data = {
                'title': label, 'image': image, 'url': url,
                'next': next_link
            }

            if title:
                data.update({'name': title})

            self.list.append(data)

    except Exception:

        self.list = [
            {
                'title': u' - '.join([title, control.lang(30014)]),
                'action': 'back',
                'image': image,
                'isFolder': 'False',
                'isPlayable': 'False'
            },
            {
                'title': control.lang(30013),
                'action': 'back',
                'image': control.icon(),
                'isFolder': 'False',
                'isPlayable': 'False'
            }
        ]

    return self.list
def download(self, path, url):

    if url.startswith('http'):
        log_debug(
            'Vipsubs.gr: Attempting downloading from this url: {0}'.format(url))
        _filename = unquote('.'.join(urlparse(url).path.split('/')[3:5]))
        filename = control.join(path, _filename)
    else:
        filename = control.join(path, url)

    try:

        if url.startswith('http'):

            if 'dropbox' in url:
                url = client.request(
                    url, output='geturl', timeout=control.setting('timeout'))

            req = Request(url)
            req.add_header('User-Agent', randomagent())
            opener = urlopen(req)
            data = opener.read()
            zip_file = zipfile.ZipFile(BytesIO(data))
            opener.close()

            if control.setting('keep_zips') == 'true':

                if control.setting('output_folder').startswith('special://'):
                    output_path = control.transPath(
                        control.setting('output_folder'))
                else:
                    output_path = control.setting('output_folder')

                if not os.path.exists(output_path):
                    control.makeFile(output_path)

                # noinspection PyUnboundLocalVariable
                output_filename = control.join(output_path, _filename)

                with open(output_filename, 'wb') as f:
                    f.write(data)

                if control.setting('extract') == 'true':
                    zip_file = zipfile.ZipFile(output_filename)
                    output_path = control.join(
                        output_path, os.path.splitext(_filename)[0])
                    if not os.path.exists(output_path):
                        control.makeFile(output_path)
                    zip_file.extractall(output_path)
                    control.infoDialog(control.lang(30007))

        else:

            if zipfile.is_zipfile(filename):
                zip_file = zipfile.ZipFile(filename)
            else:
                log_debug(
                    'Failed to load zip with regular python library, attempting built-in method')
                control.execute('Extract({0},{1})'.format(filename, path))
                zip_file = None

        if zip_file:
            files = zip_file.namelist()
            subs = [i for i in files if i.endswith(('.srt', '.sub', '.zip'))]
        else:
            subs = []
            for root, _, file_ in os.walk(path):
                for f in file_:
                    subs.append(os.path.join(root, f))

        subtitle = multichoice(subs)

        if not subtitle:
            return

        if zip_file:
            try:
                zip_file.extract(subtitle, path)
            except Exception:
                path = path.encode('utf-8')
                zip_file.extract(subtitle, path)
            subtitle = control.join(path, subtitle)

        if subtitle.endswith('.zip'):

            return self.download(path, subtitle)

        else:

            try:
                with closing(control.openFile(subtitle)) as fn:
                    try:
                        output = bytes(fn.readBytes())
                    except Exception:
                        output = bytes(fn.read())
                content = output.decode('utf-16')
                with closing(control.openFile(subtitle, 'w')) as subFile:
                    subFile.write(bytearray(content.encode('utf-8')))
            except Exception:
                pass

            return subtitle

    except Exception as e:

        _, __, tb = sys.exc_info()
        print(traceback.print_tb(tb))
        log_debug(
            'Vipsubs.gr subtitle download failed for the following reason: ' + str(e))

        return