def apiSearchSeason(season):
    """List the ribbon directories for one season of a show.

    Fetches the show page (premium endpoint when ``ispremium`` is set; the
    module-level ``param`` holds the show slug), locates the page whose
    ``seasonNr`` matches *season*, collects its ribbon ids, then adds one
    directory entry per ribbon.  Uses module globals: ``api_url``,
    ``base_url``, ``ispremium``, ``param``, ``jwtToken``.
    """
    r = client.request("%s%s/search/%s" % (api_url, "/premium" if ispremium else "", param), cookie="jwt=%s" % jwtToken if jwtToken else None)
    data = json.loads(r)
    ribbons = []
    index = 0
    plot = ''
    thumb = ''
    if data["contentType"] == "channel":
        # Channels expose their ribbon ids directly on the content object.
        ribbons = data["ribbonIds"]
    else:
        if "seasonNumbers" in data and len(data["seasonNumbers"]) > 0:
            # Walk the pages until the requested season is found; ``index``
            # stays 0 when the show has no season list.
            for page in data["pages"]:
                if page["seasonNr"] == season:
                    break
                index += 1
        for tab in data["pages"][index]["tabs"]:
            if tab["tabType"] == "RIBBON":
                ribbons += tab["ribbonIds"]
            if tab["tabType"] == 'SHOW_INFO':
                # Take plot/thumbnail from the first SHOW_INFO tab that has them.
                if plot == '' and "description" in tab["showData"]:
                    plot = tab["showData"]["description"].encode('utf-8')
                if thumb == '' and "imageUrl" in tab["showData"]:
                    thumb = "%s/%s" % (base_url, tab["showData"]["imageUrl"]) if "https://" not in tab["showData"]["imageUrl"] else tab["showData"]["imageUrl"]
    for ribbon in ribbons:
        r = client.request("%s/ribbons/%s" % (api_url, ribbon), cookie="jwt=%s" % jwtToken if jwtToken else None)
        if r:
            data = json.loads(r)
            # NOTE(review): the '¶m' inside the action string looks like
            # mojibake of '&param' -- confirm against the plugin's URL router.
            addDirectoryItem(data["title"].encode("utf-8"), "apiribbons¶m=%s&page=0" % data["id"], thumb, "DefaultFolder.png", meta={'title': data["title"].encode("utf-8"), 'plot': plot})
    endDirectory(type="tvshows")
def resolve(url):
    """Turn an openload embed/file URL into a direct download link.

    Requests a download ticket, lets the user solve the captcha when one
    is required, waits the mandated delay, then exchanges the ticket for
    the final URL.  Returns None on any failure.
    """
    try:
        if check(url) == False:
            return
        file_id = re.compile('//.+?/(?:embed|f)/([0-9a-zA-Z-_]+)').findall(url)[0]
        ticket_url = 'https://api.openload.io/1/file/dlticket?file=%s' % file_id
        ticket = json.loads(client.request(ticket_url))
        cap_answer = ticket['result']['captcha_url']
        if cap_answer is not None:
            cap_answer = captcha.keyboard(cap_answer)
        time.sleep(ticket['result']['wait_time'])
        dl_url = 'https://api.openload.io/1/file/dl?file=%s&ticket=%s' % (
            file_id, ticket['result']['ticket'])
        if cap_answer is not None:
            dl_url += '&captcha_response=%s' % urllib.quote(cap_answer)
        final = json.loads(client.request(dl_url))
        return final['result']['url'] + '?mime=true'
    except:
        return
def resolve(url):
    """Resolve an openload embed/file link to a playable download URL.

    Obtains a ticket from the dlticket endpoint, handles an optional
    captcha via ``captcha.keyboard``, sleeps for the server-imposed wait
    time, and finally asks the dl endpoint for the real URL.
    Returns None when anything goes wrong.
    """
    try:
        if check(url) == False:
            return
        vid = re.compile('//.+?/(?:embed|f)/([0-9a-zA-Z-_]+)').findall(url)[0]
        resp = client.request('https://api.openload.io/1/file/dlticket?file=%s' % vid)
        resp = json.loads(resp)
        answer = resp['result']['captcha_url']
        if answer is not None:
            answer = captcha.keyboard(answer)
        time.sleep(resp['result']['wait_time'])
        target = 'https://api.openload.io/1/file/dl?file=%s&ticket=%s' % (vid, resp['result']['ticket'])
        if answer is not None:
            target += '&captcha_response=%s' % urllib.quote(answer)
        payload = json.loads(client.request(target))
        return payload['result']['url'] + '?mime=true'
    except:
        return
def solvemedia(data):
    """Solve a SolveMedia captcha embedded in *data* (an HTML page).

    Finds the SolveMedia iframe, loads the puzzle image, asks the user to
    type it via ``keyboard``, replays the hidden fields of the noscript
    verification form, and returns the challenge/response pair expected by
    downstream hosters.  Returns None when no SolveMedia iframe is present;
    all errors are swallowed.
    """
    try:
        url = client.parseDOM(data, 'iframe', ret='src')
        url = [i for i in url if 'api.solvemedia.com' in i]
        if not len(url) > 0:
            return
        result = client.request(url[0], referer='')
        response = client.parseDOM(result, 'iframe', ret='src')
        response += client.parseDOM(result, 'img', ret='src')
        response = [i for i in response if '/papi/media' in i][0]
        response = 'http://api.solvemedia.com' + response
        # Show the puzzle image and collect the user's typed answer.
        response = keyboard(response)
        post = {}
        # Copy every hidden input of the noscript form into the POST body.
        f = client.parseDOM(result, 'form', attrs={'action': 'verify.noscript'})[0]
        k = client.parseDOM(f, 'input', ret='name', attrs={'type': 'hidden'})
        for i in k:
            post.update({i: client.parseDOM(f, 'input', ret='value', attrs={'name': i})[0]})
        post.update({'adcopy_response': response})
        client.request('http://api.solvemedia.com/papi/verify.noscript', post=urllib.urlencode(post))
        # 'manual_challenge' signals the hoster that the captcha was solved by hand.
        return {'adcopy_challenge': post['adcopy_challenge'], 'adcopy_response': 'manual_challenge'}
    except:
        pass
def get_Tv():
    """Play a live TV stream from mediaklikk.

    Fetches the player page (module global ``url`` is the video token),
    extracts the master playlist URL, and either plays it directly (when
    no bandwidth variants are listed) or lets the user / autopick setting
    choose a quality variant.

    Fix: when the variant list was empty the original did
    ``direct_url = direct_url[0]``, which takes the first *character* of
    the URL string and produces a broken playback URL; the URL is now
    kept as-is in that case.
    """
    m = client.request('https://player.mediaklikk.hu/playernew/player.php?video=' + url)
    m = m.replace('\\', '')
    direct_url = re.search('"file"\s*:\s*"([^"]+)', m).group(1)
    chunk_list = client.request('http:%s' % direct_url)
    chunk_list = chunk_list.replace('\n', '')
    chunk_list = re.compile('BANDWIDTH=([0-9]+)(.+?m3u8)').findall(chunk_list)
    if chunk_list:
        # Sort variants by bandwidth, highest first.
        chunk_list = [(int(i[0]), i[1]) for i in chunk_list]
        chunk_list = sorted(chunk_list, reverse=True)
        q_list = [str(i[0]) for i in chunk_list]
        # Map known bandwidth values to human-readable quality labels.
        q_list = [q.replace('3000000', '720p').replace('1600000', '480p').replace('1200000', '360p').replace('800000', '290p').replace('400000', '180p') for q in q_list]
        auto_pick = control.setting('autopick') == '1'
        if auto_pick == True:
            stream = chunk_list[0][1]
        else:
            q = xbmcgui.Dialog().select(u'Min\u0151s\u00E9g', q_list)
            if q == -1:
                return
            else:
                stream = chunk_list[q][1]
        direct_url = direct_url.split('playlist')[0] + stream
    # else: the master URL is the only stream -- play it unchanged.
    play_url('http:%s' % direct_url, iconimage, name)
def get_tvshows(self, page=1):
    """Scrape one page of TV-show subtitles from torec and enrich via IMDB/TVDB.

    For every subtitle box on the listing page, follows the series page,
    extracts the IMDB id, and builds a tvshow dict either from the cache
    (keyed by a sha224 of "torec::<imdbid>") or by querying the imdb and
    tvdb helpers.  Returns the accumulated list; aborts early (sys.exit)
    when Kodi signals shutdown.

    :param page: 1-based listing page number.
    :return: list of tvshow metadata dicts.
    """
    tvshows = []
    re_sub_box = "<a href=\"/sub.asp\?sub_id=(?P<id>\d+)\""
    re_series = "<div class=\"sub_tbox\".*?{0}.*?</div>".format(re_sub_box)
    re_imdb = "<a href=\"http\://www\.imdb\.com/title/tt(?P<imdbid>\d+)/"
    re_sub_rank = "<div class=\"sub_rank_div\".*?{0}.*?</div>".format(re_imdb)
    try:
        page_content = str(client.request(self.torec_link % page))
        for item in re.finditer(re_series, page_content, re.DOTALL):
            import xbmc
            import sys
            # Bail out promptly when Kodi is shutting down.
            if xbmc.abortRequested:
                return sys.exit()
            series_link = self.torec_series_link % str(item.group('id').strip())
            series_content = str(client.request(series_link))
            for series_item in re.finditer(re_sub_rank, series_content, re.DOTALL):
                import xbmc
                import sys
                if xbmc.abortRequested:
                    return sys.exit()
                imdbid = "tt" + series_item.group('imdbid').strip()
                tvshow = {}
                try:
                    import hashlib
                    tvshowhash = "torec::" + hashlib.sha224("%s" % (imdbid)).hexdigest()
                    fromcache = cache.get(tvshowhash)
                    if fromcache is not None:
                        # SECURITY NOTE(review): eval() on cached text executes
                        # arbitrary code if the cache is ever tampered with.
                        tvshow = eval(fromcache[1].encode('utf-8'))
                        tvshow['cached'] = "true"
                    else:
                        from libra.imdb import imdb
                        imdb_data = imdb(imdbid=imdbid)
                        tvshow['title'] = imdb_data.get_title()
                        tvshow['year'] = imdb_data.get_year()
                        tvshow['imdbid'] = imdb_data.get_imdbid()
                        tvshow['runtime'] = imdb_data.get_runtime()
                        tvshow['rating'] = imdb_data.get_rating()
                        tvshow['genres'] = imdb_data.get_genres()
                        tvshow['name'] = "%s (%s)" % (tvshow['title'], tvshow['year'])
                        from libra.tvdb import tvdb
                        tvdb_data = tvdb(imdbid=imdbid)
                        tvshow['tvdb'] = tvdb_data.get_seriesid()
                        cache.set(tvshowhash, tvshow)
                except:
                    # Any metadata failure skips this series entirely.
                    continue
                tvshows.append(tvshow)
    except Exception, e:
        import xbmc
        import traceback
        map(xbmc.log, traceback.format_exc().split("\n"))
        raise
def apiRibbons():
    """List the cards of one ribbon page and offer a next-page entry.

    Uses module globals ``api_url``, ``base_url``, ``param`` (ribbon id)
    and ``page``.  Cards with a ``contentLength`` are playable items
    (episodes/movies); other non-ARTICLE cards become show sub-directories.
    Probes the next page and adds a "next page" item only when it exists.
    """
    r = client.request("%s/ribbons/%s/%s" % (api_url, param, page))
    data = json.loads(r)
    dirType = 'videos'
    for card in data["cards"]:
        thumb = "%s/%s" % (base_url, card["imageUrl"]) if "https://" not in card["imageUrl"] else card["imageUrl"]
        title = card["title"].encode('utf-8')
        if "contentLength" in card:
            plot = ""
            if control.setting('fillLead') == 'true':
                # Optional extra request per card to fill the plot field.
                try:
                    r = client.request("%s/search/%s" % (api_url, card["slug"]))
                    episode = json.loads(r)
                    plot = episode["lead"] if "lead" in episode else ""
                    # Strip a single wrapping <p>...</p> from the lead text.
                    if plot.startswith("<p>"):
                        plot = plot[3:]
                    if plot.endswith("</p>"):
                        plot = plot[:-4]
                except:
                    pass
            if 'EPISODE' in card['cardType']:
                dirType = 'episodes'
            if 'MOVIE' in card['cardType']:
                dirType = 'movies'
            # NOTE(review): '¶m' in the action strings looks like mojibake of
            # '&param' -- confirm against the plugin's URL router.
            addDirectoryItem(title, "playvideo¶m=%s" % card["slug"], thumb, "DefaultFolder.png", meta={'title': title, 'duration': int(card["contentLength"]), 'plot': plot}, isFolder=False)
        else:
            if card["cardType"] != "ARTICLE":
                addDirectoryItem(title, "apisearch¶m=%s" % quote_plus(card["slug"]), thumb, "DefaultFolder.png", meta={'title': title, 'plot': card["lead"].encode('utf-8') if "lead" in card else ''})
    # Only advertise a next page when the API actually returns one.
    r = client.request("%s/ribbons/%s/%d" % (api_url, param, int(page) + 1))
    if r != None:
        addDirectoryItem(u'[I]K\u00F6vetkez\u0151 oldal >>[/I]', 'apiribbons¶m=%s&page=%d' % (param, int(page) + 1), '', 'DefaultFolder.png')
    endDirectory(type=dirType)
def request(url, post=None, headers=None, mobile=False, safe=False, timeout='30'):
    """Fetch *url* through a cached Cloudflare challenge cookie.

    Reuses a cached user agent and clearance cookie (168 h TTL); on an HTTP
    503 response the cache entries are refreshed (TTL 0 forces recompute)
    and the request is retried once.  Returns the response body, or None
    on any error.
    """
    try:
        #control.log('[cloudflare] request %s' % url)
        # Quirk: update(headers) with itself is a no-op for a dict, but
        # raises for None -- this is effectively "headers = headers or {}".
        try:
            headers.update(headers)
        except:
            headers = {}
        agent = cache.get(cloudflareAgent, 168)
        if not 'User-Agent' in headers:
            headers['User-Agent'] = agent
        # Cookie is keyed on the scheme+host only, not the full URL.
        u = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
        cookie = cache.get(cloudflareCookie, 168, u, post, headers, mobile, safe, timeout)
        result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout, output='response', error=True)
        if result[0] == '503':
            # Challenge expired: force-refresh agent + cookie and retry once.
            agent = cache.get(cloudflareAgent, 0)
            headers['User-Agent'] = agent
            cookie = cache.get(cloudflareCookie, 0, u, post, headers, mobile, safe, timeout)
            result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout)
        else:
            result = result[1]
        #control.log('[cloudflare] result %s' % result)
        return result
    except:
        return
def getVideo():
    """Resolve and play a mediaklikk video page (module global ``url``).

    Extracts the player token from the page, loads the player, pulls the
    stream URL out of the player config, normalizes its scheme, and hands
    it to ``get_Stream``/``resolve``.

    Fix: the scheme check used ``or`` between two negated startswith
    tests, which is always True, so "https:" was prepended even to
    absolute http/https URLs ("https:http://...").  The check now only
    prepends a scheme when the link has none.
    """
    r = client.request(url)
    token = re.search('[\'"]token[\'"]\s*:\s*[\'"]([^\'"]+)', r).group(1)
    m = client.request('http://player.mediaklikk.hu/playernew/player.php?video=' + token)
    link = re.search('"file"\s*:\s*"([^"]+)', m).group(1)
    link = link.replace('\\', '')
    # Only protocol-relative / schemeless links need a scheme prefix.
    if not link.startswith(("http:", "https:")):
        link = "%s%s" % ("https:", link)
    stream = get_Stream(link)
    if stream:
        resolve(stream, image, title)
    else:
        return
def login():
    """Log into TV2 Play with the credentials stored in the addon settings.

    Posts the login payload, and on success stores the premium flag from
    the user-info endpoint.  Returns the JWT token, or "" when the login
    failed for any reason.
    """
    payload = json.dumps({"email": control.setting("email"), "password": control.setting("password"), "stayLoggedIn": True, "deviceToken": {"token": None, "platform": "web"}}).encode('utf-8')
    token = ""
    try:
        answer = json.loads(client.request(auth_url, post=payload, headers={"Content-Type": "application/json;charset=UTF-8", "Content-Length": len(payload)}))
        if answer["success"]:
            token = answer["token"]
            info = json.loads(client.request(userinfo_url, cookie="jwt=%s" % token))
            if info["success"]:
                # Remember whether the account has premium access.
                control.setSetting("hasPremium", str(info["isPremium"]))
    except:
        pass
    return token
def playVideo():
    """Resolve the stream for a TV2 Play item (module global ``param``) and play it.

    Looks up the item to get its playerId, asks the streaming-url endpoint
    for the stream descriptor, parses the HLS master playlist, sorts the
    variants by horizontal resolution (best first), lets the user pick a
    quality unless autopick is enabled, then resolves the Kodi list item.
    Shows an error notification on any failure.
    """
    from resources.lib import m3u8_parser
    try:
        r = client.request("%s/search/%s" % (api_url, param))
        data = json.loads(r)
        playerId = data["playerId"]
        title = data["title"]
        thumb = "%s/%s" % (base_url, data["imageUrl"]) if "https://" not in data["imageUrl"] else data["imageUrl"]
        r = client.request("%s/streaming-url?playerId=%s" % (api_url, playerId))
        data = json.loads(r)
        r = client.request(data["url"])
        json_data = json.loads(r)
        m3u_url = json_data['bitrates']['hls']
        # Normalize protocol-relative URLs to https.
        m3u_url = json_url = re.sub('^//', 'https://', m3u_url)
        r = client.request(m3u_url)
        root = os.path.dirname(m3u_url)
        sources = m3u8_parser.parse(r)
        try:
            # Best resolution first; harmless to skip if resolution is absent.
            sources.sort(key=lambda x: int(x['resolution'].split('x')[0]), reverse=True)
        except:
            pass
        auto_pick = control.setting('autopick') == '1'
        if len(sources) == 1 or auto_pick == True:
            source = sources[0]['uri']
        else:
            result = xbmcgui.Dialog().select(u'Min\u0151s\u00E9g', [str(source['resolution']) if 'resolution' in source else 'Unknown' for source in sources])
            # Cancel falls back to the best variant instead of aborting.
            if result == -1:
                source = sources[0]['uri']
            else:
                source = sources[result]['uri']
        stream_url = root + '/' + source
        item = control.item(path=stream_url)
        item.setArt({'icon': thumb, 'thumb': thumb})
        item.setInfo(type='Video', infoLabels={'Title': title})
        control.resolve(int(sys.argv[1]), True, item)
    except:
        xbmcgui.Dialog().notification("TV2 Play", "Hiba a forrás elérésekor! Nem található?", xbmcgui.NOTIFICATION_ERROR)
        return
def cloudflareCookie(url, post, headers, mobile, safe, timeout):
    """Solve the legacy Cloudflare IUAM JavaScript challenge and return the clearance cookie.

    Fetches the challenge page, extracts the ``jschl_vc`` token and the
    obfuscated arithmetic, evaluates it (challenge answer = computed value
    + length of the hostname), waits the mandatory 5 seconds, then submits
    the answer and returns the cookie issued by the check endpoint.
    Returns None on failure.
    """
    try:
        result = client.request(url, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout, error=True)
        jschl = re.compile('name="jschl_vc" value="(.+?)"/>').findall(result)[0]
        # Seed expression of the obfuscated arithmetic.
        init = re.compile('setTimeout\(function\(\){\s*.*?.*:(.*?)};').findall(result)[0]
        # The sequence of +=/-=/*= steps applied to the seed.
        builder = re.compile(r"challenge-form\'\);\s*(.*)a.v").findall(result)[0]
        decryptVal = parseJSString(init)
        lines = builder.split(';')
        for line in lines:
            if len(line) > 0 and '=' in line:
                sections = line.split('=')
                line_val = parseJSString(sections[1])
                # sections[0][-1] is the operator character (+, -, *).
                decryptVal = int(eval(str(decryptVal) + sections[0][-1] + str(line_val)))
        # Cloudflare expects the result plus the hostname length.
        answer = decryptVal + len(urlparse.urlparse(url).netloc)
        query = '%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (url, jschl, answer)
        if 'type="hidden" name="pass"' in result:
            passval = re.compile('name="pass" value="(.*?)"').findall(result)[0]
            query = '%s/cdn-cgi/l/chk_jschl?pass=%s&jschl_vc=%s&jschl_answer=%s' % (url, urllib.quote_plus(passval), jschl, answer)
        # Cloudflare rejects answers submitted in under ~4 seconds.
        time.sleep(5)
        cookie = client.request(query, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout, output='cookie', error=True)
        return cookie
    except:
        pass
def get_liveRadio():
    """Find and play the live radio stream URL on a mediaklikk page."""
    page_src = client.request(mklikk_url + url)
    page_src = page_src.replace('\'', '')
    matches = re.compile('radioStreamUrl\s*?=\s*?(http.+?mp3)').findall(page_src)
    # Play the first stream URL found, if any.
    if matches:
        play_url(matches[0], iconimage, name)
    return
def musorok():
    """List every TV2 Play show, sorted by title.

    Pages through the Gravity recommendation service until all items are
    fetched (allItemCount is read from the first response), then adds one
    directory entry per show, skipping premium items when the account has
    no premium access.
    """
    pageOffset = 0
    allItemCnt = -1
    allItems = []
    # Keep fetching until we have collected allItemCount items.
    while len(allItems) != allItemCnt:
        r = client.request("https://tv2-bud.gravityrd-services.com/grrec-tv2-war/JSServlet4?rd=0,TV2_W_CONTENT_LISTING,800,[*platform:web;*domain:tv2play;*currentContent:SHOW;*country:HU;*userAge:16;*pagingOffset:%d],[displayType;channel;title;itemId;duration;isExtra;ageLimit;showId;genre;availableFrom;director;isExclusive;lead;url;contentType;seriesTitle;availableUntil;showSlug;videoType;series;availableEpisode;imageUrl;totalEpisode;category;playerId;currentSeasonNumber;currentEpisodeNumber;part;isPremium]" % pageOffset)
        # The payload is embedded as "var data = {...};" inside a JS snippet.
        matches = re.search(r'(.*)var data = (.*)};(.*)', r, re.S)
        if matches:
            result = json.loads("%s}" % matches.group(2))
            if allItemCnt == -1:
                # The total item count arrives with the first response.
                onv = result["recommendationWrappers"][0]["recommendation"]["outputNameValues"]
                for variable in onv:
                    if variable["name"] == "allItemCount":
                        allItemCnt = int(variable["value"])
                        break
            allItems.extend(result["recommendationWrappers"][0]["recommendation"]["items"])
        else:
            # Unparseable response: stop the loop cleanly.
            allItemCnt = 0
            allItems = []
        pageOffset = len(allItems)
    allItemsSorted = sorted(allItems, key=lambda k: k["title"])
    for item in allItemsSorted:
        # Hide premium-only shows from non-premium accounts.
        if hasPremium or not "isPremium" in item or item["isPremium"] == "false":
            # NOTE(review): '¶m' in the action string looks like mojibake of
            # '&param' -- confirm against the plugin's URL router.
            addDirectoryItem(item["title"].encode("utf-8"), "apisearch¶m=%s&ispremium=%s" % (quote_plus(item["url"]), item["isPremium"] if "isPremium" in item else ""), ("%s/%s" % (base_url, item["imageUrl"]) if "https://" not in item["imageUrl"] else item["imageUrl"]) if "imageUrl" in item else "", "DefaultFolder.png", meta={'title': item["title"].encode("utf-8"), 'plot': item["lead"].encode('utf-8') if "lead" in item else ''})
    endDirectory(type="tvshows")
def recaptcha(data):
    """Solve a legacy Google reCAPTCHA referenced by *data*.

    *data* is either the challenge script URL itself or an HTML page whose
    script tags point at it.  Shows the puzzle image via ``keyboard`` and
    returns the challenge/response fields; None when no challenge found.
    """
    try:
        candidates = []
        if data.startswith('http://www.google.com'):
            candidates.append(data)
        candidates += client.parseDOM(data, 'script', ret='src', attrs={'type': 'text/javascript'})
        candidates = [i for i in candidates if 'http://www.google.com' in i]
        if not candidates:
            return
        script = client.request(candidates[0])
        challenge = re.compile("challenge\s+:\s+'(.+?)'").findall(script)[0]
        image_url = 'http://www.google.com/recaptcha/api/image?c=' + challenge
        answer = keyboard(image_url)
        # Both old and new field names are filled for compatibility.
        return {'recaptcha_challenge_field': challenge, 'recaptcha_challenge': challenge, 'recaptcha_response_field': answer, 'recaptcha_response': answer}
    except:
        pass
def resolve(url):
    """Return the first <source> video src of the mobile page, or None."""
    try:
        markup = client.request(url, mobile=True)
        sources = client.parseDOM(markup, 'source', ret='src', attrs={'type': 'video.+?'})
        return sources[0]
    except:
        return
def logout():
    """Ask for confirmation, notify the server, then clear the local session."""
    if control.yesnoDialog("Valóban ki szeretne jelentkezni?") == 1:
        # Best-effort server-side logout; local cleanup happens regardless.
        try:
            client.request(logout_url, cookie="jwt=%s" % jwtToken if jwtToken else None)
        except:
            pass
        doLogout()
def supertv2_videok():
    """List SuperTV2 video entries from the current listing page.

    Parses the small list items (with and without bottom margin), adds a
    playable file per item, and appends a next-page entry while a pager
    is present.

    Fix: removed a dead statement -- the original fetched the
    'listablokk_wrapper' div into ``result`` and immediately overwrote it
    by re-parsing the whole page, so the first parse had no effect.
    """
    r = client.request(url + page)
    result = client.parseDOM(r, 'div', attrs={'class': 'listaelem_kicsi\s*'})
    result += client.parseDOM(r, 'div', attrs={'class': 'listaelem_kicsi\s*margin10b\s*'})
    for i in result:
        try:
            img = client.parseDOM(i, 'img', ret='src')[0]
            cim = client.parseDOM(i, 'div', attrs={'class': 'cim'})[0]
            name = client.parseDOM(cim, 'a')[0]
            link = client.parseDOM(cim, 'a', ret='href')[0]
            addFile(name.encode('utf-8'), "%s%s" % (base_url, link), 20, "%s%s" % (base_url, img), '', 'TV2', IsPlayable=True)
        except:
            pass
    if 'class="pager"' in r:
        addDir('[COLOR green]' 'Következő oldal' '[/COLOR]', url, 27, '', '', '', str(int(page) + 1))
    return
def supertv2_filmek():
    """List SuperTV2 movie entries from the current listing page."""
    markup = client.request(url + page)
    blocks = client.parseDOM(markup, 'div', attrs={'id': 'leftblock'})
    blocks = client.parseDOM(blocks, 'div', attrs={'class': 'cikk_listaelem_nagy'})
    for block in blocks:
        try:
            title = client.parseDOM(block, 'a', attrs={'class': 'cim'})[0]
            title = client.replaceHTMLCodes(title)
            href = client.parseDOM(block, 'a', ret='href')[0]
            image = client.parseDOM(block, 'img', ret='src')[0]
            addFile(title.encode('utf-8'), "%s%s" % (base_url, href), 20, "%s%s" % (base_url, image), '', 'TV2', IsPlayable=True)
        except:
            pass
    # A "next" marker in the page means there is another listing page.
    if 'következő' in markup:
        addDir('[COLOR green]' 'Következő oldal' '[/COLOR]', url, 26, '', '', '', str(int(page) + 1))
    return
def resolve(url):
    """Resolve a VK video link into a quality-labelled URL list.

    Extracts oid/id (from the query string or the /videoOID_ID path),
    obtains the embed hash, queries the getEmbed API (falling back to the
    private-video scraper), and returns [{'quality', 'url'}, ...] picking
    the best available of 720/540/480, then 360, then 240.
    Returns None on failure.
    """
    try:
        try:
            oid, id = urlparse.parse_qs(urlparse.urlparse(url).query)['oid'][0], urlparse.parse_qs(urlparse.urlparse(url).query)['id'][0]
        except:
            # Fall back to the /video<oid>_<id> path form.
            oid, id = re.compile('\/video(.*)_(.*)').findall(url)[0]
        try:
            hash = urlparse.parse_qs(urlparse.urlparse(url).query)['hash'][0]
        except:
            hash = _hash(oid, id)
        u = 'http://api.vk.com/method/video.getEmbed?oid=%s&video_id=%s&embed_hash=%s' % (oid, id, hash)
        result = client.request(u)
        # Strip non-ASCII bytes before JSON parsing.
        result = re.sub(r'[^\x00-\x7F]+', ' ', result)
        try:
            result = json.loads(result)['response']
        except:
            # Private videos are not served by the API; scrape them instead.
            result = _private(oid, id)
        url = []
        try: url += [{'quality': 'HD', 'url': result['url720']}]
        except: pass
        try: url += [{'quality': 'SD', 'url': result['url540']}]
        except: pass
        try: url += [{'quality': 'SD', 'url': result['url480']}]
        except: pass
        if not url == []: return url
        try: url += [{'quality': 'SD', 'url': result['url360']}]
        except: pass
        if not url == []: return url
        try: url += [{'quality': 'SD', 'url': result['url240']}]
        except: pass
        if not url == []: return url
    except:
        return
def tv2_filmek():
    """List TV2 movie search results for the current page."""
    markup = client.request("%s%s" % (url, page)).decode('iso-8859-2').encode('utf-8')
    container = client.parseDOM(markup, 'div', attrs={'class': 'oldalbefoglalo'})[0]
    entries = client.parseDOM(container, 'div', attrs={'class': 'listaelem_kereses[^"]*'})
    for entry in entries:
        try:
            # The site serves ISO-8859-2; titles are re-encoded for addFile.
            title = client.parseDOM(entry, 'a', attrs={'class': 'cim'})[0].encode('iso-8859-2')
            href = client.parseDOM(entry, 'a', ret='href')[0]
            image = client.parseDOM(entry, 'img', ret='src')[0]
            addFile(title, '%s%s' % (base_url, href), 20, "%s%s" % (base_url, image), '', 'TV2', IsPlayable=True)
        except:
            pass
    # Offer a next page only if the page links to it.
    if '/search/teljes film/oldal%s' % str(int(page) + 1) in markup:
        addDir('[COLOR green]' 'Következő oldal' '[/COLOR]', url, 24, '', '', '', str(int(page) + 1))
    return
def epizod_lista_TV2_class():
    """List TV2 Klasszikusok episodes for the current show page."""
    markup = client.request(url + '/oldal' + page)
    entries = client.parseDOM(markup, 'div', attrs={'class': 'listaelem_kicsi\s*'})
    entries += client.parseDOM(markup, 'div', attrs={'class': 'listaelem_kicsi\s*margin10b\s*'})
    for entry in entries:
        try:
            labels = client.parseDOM(entry, 'span')
            labels = client.parseDOM(labels, 'a')
            image = client.parseDOM(entry, 'img', ret='src')[0]
            href = client.parseDOM(entry, 'a', ret='href')[0]
            # Entry title is "<show> - <episode>" from the two anchor texts.
            addFile(labels[0].encode('utf-8') + ' - ' + labels[1].encode('utf-8'), "%s%s" % (base_url, href), 20, "%s%s" % (base_url, image), 'TV2 Klasszikusok', '', IsPlayable=True)
        except:
            pass
    if 'következő oldal' in markup or 'következő »' in markup:
        addDir('[COLOR green]' 'Következő oldal' '[/COLOR]', url, 8, '', '', '', str(int(page) + 1))
def get_Stream(url):
    """Let the user pick a variant from an HLS master playlist.

    Returns *url* unchanged when quality selection is disabled in the
    settings or the playlist has no variants; returns the chosen variant
    URL, or None when the dialog is cancelled.

    Fix: the original guarded the list access with ``q <= len(qualities)``,
    which would raise IndexError for q == len(qualities); the guard is now
    a proper half-open range check (behavior is unchanged for the -1..len-1
    values Dialog.select can actually return).
    """
    if xbmcaddon.Addon().getSetting('quality') == 'true':
        return url
    result = client.request(url)
    from resources.lib import m3u8_parser
    playlist = m3u8_parser.parse(result)['playlists']
    if not playlist:
        return url
    try:
        # Highest bandwidth first; skip sorting if the key is missing.
        playlist = sorted(playlist, key=lambda tup: tup['stream_info']['bandwidth'], reverse=True)
    except:
        pass
    # Prefer resolution labels when the playlist provides them.
    qkey = 'resolution' if 'resolution' in playlist[0]['stream_info'] else 'bandwidth'
    qualities = []
    urls = []
    for item in playlist:
        quality = str(item['stream_info'][qkey])
        uri = item['uri']
        uri = urlparse.urljoin(url, uri)
        qualities.append(quality)
        urls.append(uri)
    dialog = xbmcgui.Dialog()
    q = dialog.select('Minőség', qualities)
    if 0 <= q < len(qualities):
        return urls[q]
    else:
        return None
def epizod_lista_izaura():
    """List Izaura TV episode entries for the current page."""
    markup = client.request(url + page)
    markup = client.parseDOM(markup, 'div', attrs={'class': 'leftblock'})
    markup = markup[0].replace('\n', '')
    entries = client.parseDOM(markup, 'div', attrs={'class': 'cikk_listaelem'})
    for entry in entries:
        try:
            title = client.parseDOM(entry, 'a', attrs={'class': 'cim'})[0]
            title = client.replaceHTMLCodes(title)
            href = client.parseDOM(entry, 'a', ret='href')[0]
            image = client.parseDOM(entry, 'img', ret='src')[0]
            addFile(title.encode('utf-8'), "%s%s" % (base_url, href), 20, "%s%s" % (base_url, image), '', 'Izaura TV', IsPlayable=True)
        except:
            pass
    # The pager link signals that another page exists.
    if '/pager_next' in markup:
        addDir('[COLOR green]' 'Következő oldal' '[/COLOR]', url, 12, '', '', '', str(int(page) + 1))
    return
def playVideo():
    """Resolve and play a (possibly premium) TV2 Play item given by ``param``.

    Quotes the last path segment of the slug, fetches the item (premium
    endpoint + jwt cookie when applicable), aborts with a notification on
    geo-blocking, parses the HLS master playlist, sorts variants by
    horizontal resolution, lets the user / autopick choose, and resolves
    the Kodi list item.  Shows an error notification on any failure.
    """
    from resources.lib import m3u8_parser
    try:
        # Only the final path segment may contain characters needing quoting.
        splittedParam = param.split("/")
        splittedParam[-1] = quote_plus(splittedParam[-1])
        joinedParam = "/".join(splittedParam)
        r = client.request("%s%s/search/%s" % (api_url, "/premium" if ispremium else "", joinedParam), cookie="jwt=%s" % jwtToken if jwtToken else None)
        data = json.loads(r)
        playerId = data["playerId"]
        title = data["title"]
        plot = data["lead"] if "lead" in data else ""
        thumb = "%s/%s" % (base_url, data["imageUrl"]) if "https://" not in data["imageUrl"] else data["imageUrl"]
        r = client.request("%s%s/streaming-url?playerId=%s&stream=undefined" % (api_url, "/premium" if ispremium else "", playerId), cookie="jwt=%s" % jwtToken if jwtToken else None)
        data = json.loads(r)
        if (data["geoBlocked"] != False):
            xbmcgui.Dialog().notification("TV2 Play", "A tartalom a tartózkodási helyedről sajnos nem elérhető!", xbmcgui.NOTIFICATION_ERROR)
            return
        r = client.request(data["url"])
        json_data = json.loads(r)
        m3u_url = json_data['bitrates']['hls']
        # Normalize protocol-relative URLs to https.
        m3u_url = json_url = re.sub('^//', 'https://', m3u_url)
        r = client.request(m3u_url)
        root = os.path.dirname(m3u_url)
        sources = m3u8_parser.parse(r)
        try:
            # Best resolution first; harmless to skip if resolution is absent.
            sources.sort(key=lambda x: int(x['resolution'].split('x')[0]), reverse=True)
        except:
            pass
        auto_pick = control.setting('autopick') == '1'
        if len(sources) == 1 or auto_pick == True:
            source = sources[0]['uri']
        else:
            result = xbmcgui.Dialog().select(u'Min\u0151s\u00E9g', [str(source['resolution']) if 'resolution' in source else 'Unknown' for source in sources])
            # Cancel falls back to the best variant instead of aborting.
            if result == -1:
                source = sources[0]['uri']
            else:
                source = sources[result]['uri']
        stream_url = root + '/' + source
        item = control.item(path=stream_url)
        item.setArt({'icon': thumb, 'thumb': thumb})
        item.setInfo(type='Video', infoLabels={'Title': title, 'Plot': plot})
        control.resolve(int(sys.argv[1]), True, item)
    except:
        xbmcgui.Dialog().notification("TV2 Play", "Hiba a forrás elérésekor! Nem található?", xbmcgui.NOTIFICATION_ERROR)
def getVideo():
    """Resolve and play the video embedded on page ``url`` (module global).

    Finds the player JSON descriptor URL either from a ``jsonUrl``
    assignment or from HTML-entity-encoded ``originalUrl`` markers, parses
    the HLS master playlist, sorts variants by horizontal resolution, lets
    the user / autopick choose one, and resolves the Kodi list item.
    Returns silently on failure.
    """
    from resources.lib import m3u8_parser
    r = client.request(url)
    try:
        try:
            json_url = re.search('jsonUrl\s*=\s*[\'"]([^\'"]+)', r).group(1)
            json_url = re.sub('^//', 'https://', json_url)
        except:
            # Fallback: entity-encoded Angular state ("&q;" is a quote,
            # "&a;" is an ampersand); take the last originalUrl found.
            json_url = re.findall(r'&q;originalUrl&q;:&q;([^}]*)&q;', r)
            json_url = json_url[len(json_url) - 1].replace("&a;", "&")
        r = client.request(json_url)
        json_data = json.loads(r)
        m3u_url = json_data['bitrates']['hls']
        m3u_url = json_url = re.sub('^//', 'https://', m3u_url)
        r = client.request(m3u_url)
        root = os.path.dirname(m3u_url)
        sources = m3u8_parser.parse(r)
        try:
            sources.sort(key=lambda x: int(x['resolution'].split('x')[0]), reverse=True)
        except:
            pass
        auto_pick = control.setting('autopick') == '1'
        if len(sources) == 1 or auto_pick == True:
            source = sources[0]['uri']
        else:
            result = xbmcgui.Dialog().select(u'Min\u0151s\u00E9g', [str(source['resolution']) if 'resolution' in source else 'Unknown' for source in sources])
            if result == -1:
                source = sources[0]['uri']
            else:
                source = sources[result]['uri']
        stream_url = root + '/' + source
        item = control.item(path=stream_url)
        item.setArt({'icon': iconimage, 'thumb': iconimage})
        item.setInfo(type='Video', infoLabels={'Title': name})
        control.resolve(int(sys.argv[1]), True, item)
    except:
        return
def check(url):
    """Return True when the openload file behind *url* is still available.

    Extracts the file id, loads the embed page, and reports False when the
    request fails, the page is missing, or openload shows its removal
    notice.

    Improvements: ``is None`` instead of ``== None`` (identity comparison
    for None), and the local no longer shadows the ``id`` builtin.
    """
    try:
        file_id = re.compile('//.+?/(?:embed|f)/([0-9a-zA-Z-_]+)').findall(url)[0]
        embed_url = 'https://openload.co/embed/%s/' % file_id
        result = client.request(embed_url)
        if result is None:
            return False
        if '>We are sorry!<' in result:
            return False
        return True
    except:
        return False
def resolve(url):
    """Resolve a streamin.to embed URL to its direct file URL, or None."""
    try:
        cleaned = url.replace('/embed-', '/')
        media_id = re.compile('//.+?/([\w]+)').findall(cleaned)[0]
        embed = 'http://streamin.to/embed-%s.html' % media_id
        markup = client.request(embed, mobile=True)
        # The last file: entry on the page is the playable one.
        return re.compile("file *: *[\'|\"](http.+?)[\'|\"]").findall(markup)[-1]
    except:
        return
def _private(oid, id):
    """Scrape the player vars of a private VK video; None on failure."""
    try:
        page = client.request('http://vk.com/al_video.php?act=show_inline&al=1&video=%s_%s' % (oid, id))
        raw = re.compile('var vars *= *({.+?});').findall(page)[0]
        # Drop non-ASCII bytes so json.loads does not choke.
        raw = re.sub(r'[^\x00-\x7F]+', ' ', raw)
        return json.loads(raw)
    except:
        return
def resolve(url):
    """Turn a streamin.to embed link into the direct stream URL.

    Normalizes the embed form, re-requests the canonical embed page with
    a mobile user agent, and returns the last "file:" URL found; None on
    any failure.
    """
    try:
        path = url.replace('/embed-', '/')
        code = re.compile('//.+?/([\w]+)').findall(path)[0]
        page = client.request('http://streamin.to/embed-%s.html' % code, mobile=True)
        hits = re.compile("file *: *[\'|\"](http.+?)[\'|\"]").findall(page)
        return hits[-1]
    except:
        return
def musor_lista_TV2_class():
    """List the TV2 Klasszikusok shows from the dropdown menu."""
    markup = client.request(url)
    dropdown = client.parseDOM(markup, 'div', attrs={'id': 'musorokdropdown'})
    titles = client.parseDOM(dropdown, 'a')
    links = client.parseDOM(dropdown, 'a', ret='href')
    for name, link in zip(titles, links):
        # Decode HTML entities in the title; keep the raw text on failure.
        try:
            name = client.replaceHTMLCodes(name)
        except:
            pass
        addDir(name.encode('utf-8'), "%s%s" % (base_url, link), 8, iconimage, fanart, '', '1')
def resolve(url):
    """Resolve a VK video link into a quality-labelled URL list.

    Extracts oid/id (query string or /videoOID_ID path), fetches the embed
    hash, calls the getEmbed API (falling back to the private-video
    scraper), and returns [{'quality', 'url'}, ...] preferring
    720/540/480, then 360, then 240.  Returns None on failure.
    """
    try:
        try:
            oid, id = urlparse.parse_qs(urlparse.urlparse(url).query)['oid'][0], urlparse.parse_qs(urlparse.urlparse(url).query)['id'][0]
        except:
            # Fall back to the /video<oid>_<id> path form.
            oid, id = re.compile('\/video(.*)_(.*)').findall(url)[0]
        try:
            hash = urlparse.parse_qs(urlparse.urlparse(url).query)['hash'][0]
        except:
            hash = _hash(oid, id)
        u = 'http://api.vk.com/method/video.getEmbed?oid=%s&video_id=%s&embed_hash=%s' % (oid, id, hash)
        result = client.request(u)
        # Strip non-ASCII bytes before JSON parsing.
        result = re.sub(r'[^\x00-\x7F]+', ' ', result)
        try:
            result = json.loads(result)['response']
        except:
            # Private videos are not served by the API; scrape them instead.
            result = _private(oid, id)
        url = []
        try: url += [{'quality': 'HD', 'url': result['url720']}]
        except: pass
        try: url += [{'quality': 'SD', 'url': result['url540']}]
        except: pass
        try: url += [{'quality': 'SD', 'url': result['url480']}]
        except: pass
        if not url == []: return url
        try: url += [{'quality': 'SD', 'url': result['url360']}]
        except: pass
        if not url == []: return url
        try: url += [{'quality': 'SD', 'url': result['url240']}]
        except: pass
        if not url == []: return url
    except:
        return
def epizod_lista():
    """List mediaklikk episodes for one show (TV) or channel (radio).

    Module globals: ``description`` selects the media kind, ``url`` is the
    show/channel id.  TV items link to the player token; radio items are
    built into direct archive mp3 URLs on the connectmedia CDN.
    """
    if description == 'Tv':
        r = client.request(mklikk_url + '/wp-content/plugins/hms-mediaklikk/interfaces/mediaStoreData.php?action=videos&id=' + url)
        m = json.loads(r)['Items']
        for i in m:
            try:
                title = i['Date'] + ' - ' + i['Title']
                addFile(title.encode('utf-8'), i['Token'].encode('utf-8'), 3, i['Image'].encode('utf-8'), description)
            except:
                pass
    elif description == 'Rádió':
        r = client.request(mklikk_url + '/wp-content/plugins/hms-mediaklikk/interfaces/mediaStoreData.php?action=audios&id=' + url + '&page=0&count=100')
        result = json.loads(r)['Items']
        for item in result:
            try:
                # Archive mp3 path is <start>/<end>/<channel>.mp3 where the
                # timestamps are the DataDate fields with punctuation removed.
                dataDate = re.sub(r'[\W_]+', '', item['DataDate'])
                dataEndDate = re.sub(r'[\W_]+', '', item['DataDateEnd'])
                date = item['BeginDate']
                file = re.search('from=([^&]+)', item['PlayParam']).group(1)
                addFile(name + ' - ' + date.encode('utf-8'), 'https://hangtar-cdn.connectmedia.hu/' + dataDate + '/' + dataEndDate + '/' + item['DataCh'] + '.mp3', 14, iconimage, description)
            except:
                pass
def _hash(oid, id):
    """Scrape the embed hash for a VK video; None when not found."""
    try:
        page = client.request('http://vk.com/al_video.php?act=show_inline&al=1&video=%s_%s' % (oid, id))
        page = page.replace('\'', '"').replace(' ', '')
        # Prefer hash2 over hash when both are present.
        candidates = re.compile('"hash2":"(.+?)"').findall(page)
        candidates += re.compile('"hash":"(.+?)"').findall(page)
        return candidates[0]
    except:
        return
def _private(oid, id):
    """Fetch the inline player vars of a private VK video as a dict.

    Returns None when the page cannot be fetched or parsed.
    """
    try:
        markup = client.request('http://vk.com/al_video.php?act=show_inline&al=1&video=%s_%s' % (oid, id))
        blob = re.compile('var vars *= *({.+?});').findall(markup)[0]
        # json.loads cannot handle stray non-ASCII bytes -- blank them out.
        blob = re.sub(r'[^\x00-\x7F]+', ' ', blob)
        return json.loads(blob)
    except:
        return
def getLegenda(url):
    """Return the subtitle track URL of a videomega embed, or '-' on failure."""
    try:
        ref = urlparse.parse_qsl(urlparse.urlparse(url).query)[0][1]
        markup = client.request('http://videomega.tv/cdn.php?ref=%s' % ref, mobile=True)
        return client.parseDOM(markup, 'track', ret='src')[0]
    except:
        return '-'
def musor_lista():
    """List mediaklikk shows filtered to TV or radio channels.

    The module global ``description`` selects which channel whitelist is
    used; entries outside the whitelist are skipped.
    """
    radio_list = ['ks', 'pf', 'br', 'dk', 'nm', 'pm']
    tv_list = ['m1', 'm2', 'm4', 'dn', 'dw','m5']
    chan_list = tv_list if description == 'Tv' else radio_list
    markup = client.request(url)
    shows = re.compile('\{"Id":([0-9]+),"Channel":"(.+?)","Title":"(.+?)"').findall(markup)
    for show_id, channel, title in shows:
        try:
            if channel in chan_list:
                # Titles arrive with JSON \uXXXX escapes -- decode them first.
                addDir(title.decode('unicode-escape').encode('utf-8'), show_id, 2, iconimage, description, '')
        except:
            pass
def getLegenda(url):
    """Fetch the first <track> subtitle source for a videomega reference.

    Falls back to '-' when the reference cannot be parsed or no track
    element is present.
    """
    try:
        query = urlparse.urlparse(url).query
        ref_value = urlparse.parse_qsl(query)[0][1]
        page = client.request('http://videomega.tv/cdn.php?ref=%s' % ref_value, mobile=True)
        tracks = client.parseDOM(page, 'track', ret='src')
        return tracks[0]
    except:
        return '-'
def resolve(url):
    """Resolve a hoster page to its file URL by unpacking eval'd JS.

    Tries each packed eval() block on the page, returning the first
    "file:" URL found; otherwise searches the raw page as a fallback.
    Returns None on failure.

    Fix: the fallback searched the undefined name ``html`` -- a NameError
    silently swallowed by the bare except, so the fallback never worked.
    It now searches the fetched page ``result``.
    """
    try:
        result = client.request(url, referer=url)
        for match in re.finditer('(eval\(function.*?)</script>', result, re.DOTALL):
            js_data = jsunpack.unpack(match.group(1))
            r = re.search('file\s*:\s*"([^"]+)', js_data)
            if r:
                return r.group(1)
        # Fallback: the file URL may appear unpacked in the page itself.
        r = re.search('file\s*:\s*"([^"]+)', result)
        if r:
            return r.group(1)
    except:
        return
def __init__(self, imdbid): self.imdbid = imdbid self.tvdb_data = {} if not imdbid.startswith("tt"): self.imdbid = "tt%s" % self.imdbid self.tvdb_link = "http://thetvdb.com/api/GetSeriesByRemoteID.php?imdbid=%s" % imdbid try: self.tvdb_data = client.request(self.tvdb_link) except Exception, e: import xbmc import traceback map(xbmc.log, traceback.format_exc().split("\n")) raise
def recaptcha(data):
    """Handle a legacy Google reCAPTCHA found in *data*.

    Accepts either the challenge URL directly or an HTML fragment whose
    script tags reference it; prompts the user with the puzzle image and
    returns the challenge/response dict, or None when nothing was found.
    """
    try:
        scripts = []
        if data.startswith('http://www.google.com'):
            scripts = [data]
        scripts = scripts + client.parseDOM(data, 'script', ret='src', attrs={'type': 'text/javascript'})
        scripts = [s for s in scripts if 'http://www.google.com' in s]
        if len(scripts) == 0:
            return
        body = client.request(scripts[0])
        token = re.compile("challenge\s+:\s+'(.+?)'").findall(body)[0]
        typed = keyboard('http://www.google.com/recaptcha/api/image?c=' + token)
        # Old and new field names are both returned for compatibility.
        return {'recaptcha_challenge_field': token, 'recaptcha_challenge': token, 'recaptcha_response_field': typed, 'recaptcha_response': typed}
    except:
        pass
def get_movies(self, page=1):
    """Scrape one page of movie subtitles from ktuvit and enrich via OMDb/IMDB.

    For every movie table on the listing page, builds a movie dict either
    from the cache (keyed by a sha224 of "ktuvit::<title> (<year>)") or by
    querying the imdb helper by title/year.  Returns the accumulated list;
    aborts early (sys.exit) when Kodi signals shutdown.

    :param page: 1-based listing page number.
    :return: list of movie metadata dicts.
    """
    movies = []
    re_movie_anchor = "<a href=\"view.php\?id=(?P<id>\d+)#\d+\" title=\"(?P<nameHe>.*?)\|(?P<nameEn>.*?)\|(?P<year>.*?)\""
    re_movie = "<table.*?{0}.*?Israel-Flag.png.*?</table>".format(re_movie_anchor)
    try:
        page_content = str(client.request(self.ktuvit_link % page))
        for item in re.finditer(re_movie, page_content, re.DOTALL):
            import xbmc
            import sys
            # Bail out promptly when Kodi is shutting down.
            if xbmc.abortRequested:
                return sys.exit()
            movie = {'title': item.group('nameEn').strip(), 'year': item.group('year').strip()}
            movie['name'] = "%s (%s)" % (movie['title'], movie['year'])
            try:
                import hashlib
                moviehash = "ktuvit::" + hashlib.sha224("%s (%s)" % (movie['title'], movie['year'])).hexdigest()
                fromcache = cache.get(moviehash)
                if fromcache is not None:
                    # SECURITY NOTE(review): eval() on cached text executes
                    # arbitrary code if the cache is ever tampered with.
                    movie = eval(fromcache[1].encode('utf-8'))
                    movie['cached'] = "true"
                else:
                    from libra.imdb import imdb
                    imdb_data = imdb(title=movie['title'], year=movie['year'])
                    # Skip titles OMDb could not find.
                    if imdb_data.imdb_data.get('Error'):
                        continue
                    movie['imdbid'] = imdb_data.get_imdbid()
                    movie['runtime'] = imdb_data.get_runtime()
                    movie['rating'] = imdb_data.get_rating()
                    movie['genres'] = imdb_data.get_genres()
                    cache.set(moviehash, movie)
            except:
                continue
            movies.append(movie)
    except Exception, e:
        import xbmc
        import traceback
        map(xbmc.log, traceback.format_exc().split("\n"))
        raise
def resolve(url):
    """Resolve a Dailymotion video link.

    Live content returns a single HLS URL; on-demand content returns a
    quality-labelled list [{'quality', 'url'}, ...] preferring 720/480,
    then 380, then 240 (each followed through its redirect via
    output='geturl').  Returns None on failure.
    """
    try:
        try:
            # Some callers wrap the real link in an urlback query parameter.
            url = urlparse.parse_qs(urlparse.urlparse(url).query)['urlback'][0]
        except:
            pass
        id = re.compile('/video/([\w]+)').findall(url)[0]
        u = 'http://www.dailymotion.com/sequence/full/%s' % id
        result = client.request(u)
        # Normalize escapes/quotes so the regexes below can match.
        result = urllib.unquote(result).replace('\\/', '/').replace('\n', '').replace('\'', '"').replace(' ', '')
        content = re.compile('"content_type":"(.+?)"').findall(result)
        content = '' if len(content) == 0 else content[0]
        if content == 'live':
            url = re.compile('"autoURL":"(.+?)"').findall(result)[0]
            # Force the HLS protocol variant and resolve the final URL.
            protocol = urlparse.parse_qs(urlparse.urlparse(url).query)['protocol'][0]
            url = url.replace('protocol=%s' % protocol, 'protocol=hls')
            url += '&redirect=0'
            url = client.request(url)
            return url
        else:
            u = 'http://www.dailymotion.com/embed/video/%s' % id
            # ff=off disables the family filter.
            result = client.request(u, cookie='ff=off')
            result = urllib.unquote(result).replace('\\/', '/').replace('\n', '').replace('\'', '"').replace(' ', '')
            url = []
            try: url += [{'quality': 'HD', 'url': client.request(re.compile('"720":.+?"url":"(.+?)"').findall(result)[0], output='geturl')}]
            except: pass
            try: url += [{'quality': 'SD', 'url': client.request(re.compile('"480":.+?"url":"(.+?)"').findall(result)[0], output='geturl')}]
            except: pass
            if not url == []: return url
            try: url += [{'quality': 'SD', 'url': client.request(re.compile('"380":.+?"url":"(.+?)"').findall(result)[0], output='geturl')}]
            except: pass
            if not url == []: return url
            try: url += [{'quality': 'SD', 'url': client.request(re.compile('"240":.+?"url":"(.+?)"').findall(result)[0], output='geturl')}]
            except: pass
            if url == []: return
            return url
    except:
        return
def keyboard(response):
    """Show a captcha image and return what the user types, or None.

    Downloads the image at *response* into the addon data directory,
    displays it inside a window dialog, and opens a keyboard for the
    answer.  Returns None on cancel, empty input, or any error.
    """
    try:
        i = os.path.join(control.dataPath, 'img')
        f = control.openFile(i, 'w')
        f.write(client.request(response))
        f.close()
        f = control.image(450, 5, 375, 115, i)
        d = control.windowDialog
        d.addControl(f)
        # NOTE(review): the temp file is deleted before the dialog is shown --
        # presumably the image control has already loaded it; confirm this
        # works on all platforms.
        control.deleteFile(i)
        d.show()
        t = 'Type the letters in the image'
        k = control.keyboard('', t)
        k.doModal()
        c = k.getText() if k.isConfirmed() else None
        if c == '':
            c = None
        d.removeControl(f)
        d.close()
        return c
    except:
        return
def __init__(self, imdbid=None, title=None, year=None):
    """Fetch movie metadata from OMDb by IMDB id or by title+year.

    Normalizes a bare numeric imdbid to the "tt"-prefixed form and stores
    the parsed JSON in ``self.imdb_data`` ({} on any failure).

    Fix: the id-based query URL was built from the raw ``imdbid``
    parameter, so a bare numeric id was sent unprefixed even though
    ``self.imdbid`` had been normalized; the URL now uses the normalized
    value.
    """
    self.imdbid = imdbid
    self.title = title
    self.year = year
    self.imdb_data = {}
    if imdbid is not None:
        if not imdbid.startswith("tt"):
            self.imdbid = "tt%s" % self.imdbid
        self.omdb_link = "http://www.omdbapi.com/?i=%s" % self.imdbid
    elif title is not None and year is not None:
        # OMDb expects '+' for spaces in the title query.
        title = re.sub('\s', '+', title)
        self.omdb_link = "http://www.omdbapi.com/?t=%s&y=%s" % (title, year)
    try:
        omdb_info = client.request(self.omdb_link)
        self.imdb_data = json.loads(omdb_info)
    except:
        pass
def resolve(url):
    """Resolve a turbovideos embed URL to a direct stream URL, or None.

    First looks for a plain "file:" entry; if the page is packed, unpacks
    the last eval() blob and searches both embed src attributes and
    "file:" entries, skipping subtitle (.srt) links.
    """
    try:
        media_id = re.compile('//.+?/([\w]+)').findall(url.replace('/embed-', '/'))[0]
        page = client.request('http://turbovideos.net/embed-%s.html' % media_id)
        direct = re.compile('file *: *"(.+?)"').findall(page)
        if direct:
            return direct[0]
        packed = re.compile('(eval.*?\)\)\))').findall(page)[-1]
        unpacked = jsunpack.unpack(packed)
        candidates = client.parseDOM(unpacked, 'embed', ret='src')
        candidates += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(unpacked)
        candidates = [c for c in candidates if not c.endswith('.srt')]
        # Normalize the scheme to plain http.
        return 'http://' + candidates[0].split('://', 1)[-1]
    except:
        return
def resolve(url):
    """Resolve a Google Drive/Docs preview link into quality-tagged URLs.

    Reads the fmt_stream_map from the docs page, tags each stream via
    ``tag``, and returns at most one entry each for 1080p, HD and SD
    (best first); None when nothing playable was found or on error.
    """
    try:
        doc_url = url.split('/preview', 1)[0].replace('drive.google.com', 'docs.google.com')
        page = client.request(doc_url)
        stream_map = re.compile('"fmt_stream_map",(".+?")').findall(page)[0]
        decoded = json.loads(stream_map)
        streams = [entry.split('|')[-1] for entry in decoded.split(',')]
        tagged = sum([tag(s) for s in streams], [])
        picks = []
        for label in ('1080p', 'HD', 'SD'):
            try:
                picks += [[t for t in tagged if t['quality'] == label][0]]
            except:
                pass
        if picks == []:
            return
        return picks
    except:
        return