def supertv2_filmek():
    r = client.request(url + page)
    result = client.parseDOM(r, 'div', attrs={'id': 'leftblock'})
    result = client.parseDOM(result, 'div', attrs={'class': 'cikk_listaelem_nagy'})
    for i in result:
        try:
            name = client.parseDOM(i, 'a', attrs={'class': 'cim'})[0]
            name = client.replaceHTMLCodes(name)
            link = client.parseDOM(i, 'a', ret='href')[0]
            img = client.parseDOM(i, 'img', ret='src')[0]
            addFile(name.encode('utf-8'), "%s%s" % (base_url, link), 20,
                    "%s%s" % (base_url, img), '', 'TV2', IsPlayable=True)
        except:
            pass
    if 'következő' in r:
        addDir('[COLOR green]Következő oldal[/COLOR]', url, 26, '', '', '',
               str(int(page) + 1))
    return
def epizod_lista_izaura():
    r = client.request(url + page)
    r = client.parseDOM(r, 'div', attrs={'class': 'leftblock'})
    r = r[0].replace('\n', '')
    result = client.parseDOM(r, 'div', attrs={'class': 'cikk_listaelem'})
    for i in result:
        try:
            name = client.parseDOM(i, 'a', attrs={'class': 'cim'})[0]
            name = client.replaceHTMLCodes(name)
            link = client.parseDOM(i, 'a', ret='href')[0]
            img = client.parseDOM(i, 'img', ret='src')[0]
            addFile(name.encode('utf-8'), "%s%s" % (base_url, link), 20,
                    "%s%s" % (base_url, img), '', 'Izaura TV', IsPlayable=True)
        except:
            pass
    if '/pager_next' in r:
        addDir('[COLOR green]Következő oldal[/COLOR]', url, 12, '', '', '',
               str(int(page) + 1))
    return
def tv2_filmek():
    r = client.request("%s%s" % (url, page)).decode('iso-8859-2').encode('utf-8')
    result = client.parseDOM(r, 'div', attrs={'class': 'oldalbefoglalo'})[0]
    result = client.parseDOM(result, 'div', attrs={'class': 'listaelem_kereses[^"]*'})
    for i in result:
        try:
            name = client.parseDOM(i, 'a', attrs={'class': 'cim'})[0].encode('iso-8859-2')
            link = client.parseDOM(i, 'a', ret='href')[0]
            img = client.parseDOM(i, 'img', ret='src')[0]
            addFile(name, '%s%s' % (base_url, link), 20,
                    "%s%s" % (base_url, img), '', 'TV2', IsPlayable=True)
        except:
            pass
    if '/search/teljes film/oldal%s' % str(int(page) + 1) in r:
        addDir('[COLOR green]Következő oldal[/COLOR]', url, 24, '', '', '',
               str(int(page) + 1))
    return
def musor_lista_TV2_class():
    # TV2 Klasszikusok
    r = client.request(url)
    m = client.parseDOM(r, 'div', attrs={'id': 'musorokdropdown'})
    m = client.parseDOM(m, 'a'), client.parseDOM(m, 'a', ret='href')
    m = zip(m[0], m[1])
    for name, link in m:
        try:
            name = client.replaceHTMLCodes(name)
        except:
            pass
        addDir(name.encode('utf-8'), "%s%s" % (base_url, link), 8, iconimage,
               fanart, '', '1')
def get_list(channel):
    list = []
    try:
        items = cache.get(list_cache, 15, channel)
        if len(items) == 0:
            raise Exception()
        for i in items:
            try:
                start = client.parseDOM(i, 'Date')[0]
                try:
                    dstart = datetime.datetime.strptime(start, "%Y-%m-%d %H:%M:%S")
                except TypeError:
                    dstart = datetime.datetime(
                        *(time.strptime(start, "%Y-%m-%d %H:%M:%S")[0:6]))
                start = datetime.datetime.strftime(dstart, "%H:%M")
                stop = client.parseDOM(i, 'Length')[0]
                stop = stop.split(':')
                # Length is HH:MM:SS; timedelta takes (days, seconds, us, ms, minutes, hours)
                stop = dstart + datetime.timedelta(0, int(stop[2]), 0, 0,
                                                   int(stop[1]), int(stop[0]))
                stop = datetime.datetime.strftime(stop, "%H:%M")
                try:
                    title2 = client.parseDOM(i, 'Title')[0]
                except:
                    title2 = '0'
                title2 = title2.encode('utf-8')
                try:
                    title1 = client.parseDOM(i, 'SeriesTitle')[0]
                except:
                    title1 = '0'
                title1 = title1.encode('utf-8')
                channel = channel.encode('utf-8')
                if not title2 == '0' and not title1 == '0':
                    title = '%s - %s' % (title1, title2)
                elif title2 == '0' and not title1 == '0':
                    title = title1
                elif title1 == '0' and not title2 == '0':
                    title = title2
                else:
                    title = ''
                list.append({'channel': channel, 'title': title,
                             'start': start, 'stop': stop})
            except:
                pass
        return list
    except:
        return list
def live_tv():
    r = client.request(mklikk_url)
    m = client.parseDOM(r, 'div', attrs={'class': 'liveStreamsMenu'})[0]
    m = client.parseDOM(m, 'div', attrs={'class': 'col'})[0]
    m = client.parseDOM(m, 'li')
    for item in m:
        url = client.parseDOM(item, 'a', ret='href')[0]
        if "-elo" in url:
            if not url.startswith('http:'):
                url = 'http:' + url
            name = client.parseDOM(item, 'a')[0]
            name = name.split('>')[-1].strip()
            try:
                label = epglist.get_epg(name, active=True)
            except:
                label = name
            addFile(label, url, 7, os.path.join(MediaDir, name.lower() + '.png'), '')
    return
def resolve(url):
    try:
        result = client.request(url, mobile=True)
        url = client.parseDOM(result, 'source', ret='src', attrs={'type': 'video.+?'})[0]
        return url
    except:
        return
def musor_lista_izaura():
    addDir('[COLOR orange]Videók[/COLOR]', url + '/search/', 28,
           os.path.join(artPath, 'video.png'), fanart, '', '1')
    r = client.request(url)
    m = client.parseDOM(r, 'div', attrs={'id': 'dropdown_sorozataink'})
    m = client.parseDOM(m, 'a'), client.parseDOM(m, 'a', ret='href')
    m = zip(m[0], m[1])
    for name, link in m:
        try:
            name = client.replaceHTMLCodes(name)
        except:
            pass
        addDir(name.encode('utf-8'), "%s%s/oldal" % (base_url, link), 12, iconimage,
               fanart, '', '1')
def recaptcha(data):
    try:
        url = []
        if data.startswith('http://www.google.com'):
            url += [data]
        url += client.parseDOM(data, 'script', ret='src',
                               attrs={'type': 'text/javascript'})
        url = [i for i in url if 'http://www.google.com' in i]
        if not len(url) > 0:
            return
        result = client.request(url[0])
        challenge = re.compile("challenge\s+:\s+'(.+?)'").findall(result)[0]
        response = 'http://www.google.com/recaptcha/api/image?c=' + challenge
        response = keyboard(response)
        return {'recaptcha_challenge_field': challenge,
                'recaptcha_challenge': challenge,
                'recaptcha_response_field': response,
                'recaptcha_response': response}
    except:
        pass
def supertv2_videok():
    r = client.request(url + page)
    result = client.parseDOM(r, 'div', attrs={'id': 'listablokk_wrapper'})
    result = client.parseDOM(r, 'div', attrs={'class': 'listaelem_kicsi\s*'})
    result += client.parseDOM(r, 'div', attrs={'class': 'listaelem_kicsi\s*margin10b\s*'})
    for i in result:
        try:
            img = client.parseDOM(i, 'img', ret='src')[0]
            cim = client.parseDOM(i, 'div', attrs={'class': 'cim'})[0]
            name = client.parseDOM(cim, 'a')[0]
            link = client.parseDOM(cim, 'a', ret='href')[0]
            addFile(name.encode('utf-8'), "%s%s" % (base_url, link), 20,
                    "%s%s" % (base_url, img), '', 'TV2', IsPlayable=True)
        except:
            pass
    if 'class="pager"' in r:
        addDir('[COLOR green]Következő oldal[/COLOR]', url, 27, '', '', '',
               str(int(page) + 1))
    return
def capimage(data):
    try:
        url = client.parseDOM(data, 'img', ret='src')
        url = [i for i in url if 'captcha' in i]
        if not len(url) > 0:
            return
        response = keyboard(url[0])
        return {'code': response}
    except:
        pass
def epizod_lista_kids():
    r = client.request("%s%s/oldal%s" % (url, keyword, page))
    if len(keyword) == 0:
        addDir('[COLOR orange]Szűkítés[/COLOR]', url, 29, '', '', description, '15')
    else:
        addDir('[COLOR orange]TV2 Kids szűrés: [COLOR lime]%s[/COLOR][/COLOR]'
               % urllib.unquote_plus(keyword), url, 15,
               os.path.join(artPath, 'kids.png'), '', description, page, keyword)
    r = client.parseDOM(r, 'div', attrs={'class': 'leftblock'})
    r = r[0].replace('\n', '')
    result = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
    for i in result:
        try:
            name = client.parseDOM(i[1], 'div', attrs={'class': 'cim'})[0]
            name = client.replaceHTMLCodes(name)
            img = client.parseDOM(i[1], 'img', ret='src')[0]
            img = urlparse.urljoin(url, img)
            addFile(name.encode('utf-8'), "%s%s" % (base_url, i[0]), 20, img, '',
                    'Kiwi TV', IsPlayable=True)
        except:
            pass
    if '/pager_next' in r:
        addDir('[COLOR green]Következő oldal[/COLOR]', url, 15, '', '', description,
               str(int(page) + 1), keyword)
    return
def epizod_lista_TV2_class():
    r = client.request(url + '/oldal' + page)
    result = client.parseDOM(r, 'div', attrs={'class': 'listaelem_kicsi\s*'})
    result += client.parseDOM(r, 'div', attrs={'class': 'listaelem_kicsi\s*margin10b\s*'})
    for i in result:
        try:
            name = client.parseDOM(i, 'span')
            name = client.parseDOM(name, 'a')
            img = client.parseDOM(i, 'img', ret='src')[0]
            link = client.parseDOM(i, 'a', ret='href')[0]
            addFile(name[0].encode('utf-8') + ' - ' + name[1].encode('utf-8'),
                    "%s%s" % (base_url, link), 20, "%s%s" % (base_url, img),
                    'TV2 Klasszikusok', '', IsPlayable=True)
        except:
            pass
    if 'következő oldal' in r or 'következő »' in r:
        addDir('[COLOR green]Következő oldal[/COLOR]', url, 8, '', '', '',
               str(int(page) + 1))
def solvemedia(data):
    # SolveMedia captcha: show the challenge image to the user via keyboard()
    # and post the typed answer back to the verify endpoint.
    try:
        url = client.parseDOM(data, 'iframe', ret='src')
        url = [i for i in url if 'api.solvemedia.com' in i]
        if not len(url) > 0:
            return
        result = client.request(url[0], referer='')
        response = client.parseDOM(result, 'iframe', ret='src')
        response += client.parseDOM(result, 'img', ret='src')
        response = [i for i in response if '/papi/media' in i][0]
        response = 'http://api.solvemedia.com' + response
        response = keyboard(response)
        post = {}
        f = client.parseDOM(result, 'form', attrs={'action': 'verify.noscript'})[0]
        k = client.parseDOM(f, 'input', ret='name', attrs={'type': 'hidden'})
        for i in k:
            post.update({i: client.parseDOM(f, 'input', ret='value',
                                            attrs={'name': i})[0]})
        post.update({'adcopy_response': response})
        client.request('http://api.solvemedia.com/papi/verify.noscript',
                       post=urllib.urlencode(post))
        return {'adcopy_challenge': post['adcopy_challenge'],
                'adcopy_response': 'manual_challenge'}
    except:
        pass
def epizod_lista_sef():
    r = client.request("%s%s/oldal%s" % (url, keyword, page))
    if len(keyword) == 0:
        addDir('[COLOR orange]Szűkítés[/COLOR]', url, 29, '', '', description, '14')
    else:
        addDir('[COLOR orange]TV2 Séf szűrés: [COLOR lime]%s[/COLOR][/COLOR]'
               % urllib.unquote_plus(keyword), url, 14,
               os.path.join(artPath, 'sef.png'), '', description, page, keyword)
    result = client.parseDOM(r, 'div', attrs={'class': 'leftblock'})
    result = client.parseDOM(result, 'div', attrs={'class': 'cikk_listaelem'})
    for i in result:
        try:
            name = client.parseDOM(i, 'div', attrs={'class': 'cim'})[0]
            name = re.search('>([^<]+)', name).group(1)
            name = client.replaceHTMLCodes(name)
            link = client.parseDOM(i, 'a', ret='href')[0]
            img = client.parseDOM(i, 'img', ret='src')[0]
            addFile(name.encode('utf-8'), "%s%s" % (base_url, link), 20,
                    "%s%s" % (base_url, img), '', 'TV2 Séf', IsPlayable=True)
        except:
            pass
    if '/pager_next' in r:
        addDir('[COLOR green]Következő oldal[/COLOR]', url, 14, '', '', description,
               str(int(page) + 1), keyword)
    return
def getLegenda(url):
    try:
        url = urlparse.urlparse(url).query
        url = urlparse.parse_qsl(url)[0][1]
        url = 'http://videomega.tv/cdn.php?ref=%s' % url
        result = client.request(url, mobile=True)
        sub = client.parseDOM(result, 'track', ret='src')[0]
        return sub
    except:
        return '-'
def epizod_lista_zenebutik():
    r = client.request("%s%s/oldal%s" % (url, keyword, page))
    if len(keyword) == 0:
        addDir('[COLOR orange]Szűkítés[/COLOR]', url, 29, '', '', description, '17')
    else:
        addDir('[COLOR orange]Zenebutik szűrés: [COLOR lime]%s[/COLOR][/COLOR]'
               % urllib.unquote_plus(keyword), url, 17,
               os.path.join(artPath, 'zenebutik.png'), '', description, page, keyword)
    result = client.parseDOM(r, 'div', attrs={'class': 'pagewrapper'})
    result = client.parseDOM(result, 'div', attrs={'class': 'cikk_listaelem'})
    for i in result:
        try:
            name = client.parseDOM(i, 'a', attrs={'class': 'cim'})[0]
            name = client.replaceHTMLCodes(name)
            link = client.parseDOM(i, 'a', ret='href')[0]
            img = client.parseDOM(i, 'img', ret='src')[0]
            addFile(name.encode('utf-8'), "%s%s" % (base_url, link), 20,
                    "%s%s" % (base_url, img), '', 'Zenebutik', IsPlayable=True)
        except:
            pass
    if '/assets/next' in r:
        addDir('[COLOR green]Következő oldal[/COLOR]', url, 17, '', '', description,
               str(int(page) + 1), keyword)
    return
def epizod_lista_sTV2():
    r = client.request(
        "%s/videok/oldal%s?keyword=%s&datumtol=2000-01-01&datumig=%s&musorid=%s"
        % (url, page, keyword, current_date, description))
    result = client.parseDOM(r, 'div', attrs={'class': 'listaelem_kicsi[^"]*'})
    if len(keyword) == 0:
        addDir('[COLOR orange]Szűkítés[/COLOR]', url, 29, '', '', description, '7')
    else:
        if description:
            musorid = client.parseDOM(r, 'select', attrs={'name': 'musorid'})[0]
            option = client.parseDOM(musorid, 'option',
                                     attrs={'value': description})[0].strip().encode('utf-8')
        else:
            option = "Összes videó"
        addDir('[COLOR orange]%s szűrés: [COLOR lime]%s[/COLOR][/COLOR]'
               % (option, urllib.unquote_plus(keyword)), url, 7,
               os.path.join(artPath, 'supertv2.png'), '', description, page, keyword)
    for i in result:
        try:
            img = client.parseDOM(i, 'img', ret='src')[0]
            cim = client.parseDOM(i, 'div', attrs={'class': 'cim'})[0]
            name = client.parseDOM(cim, 'a')[0]
            link = client.parseDOM(cim, 'a', ret='href')[0]
            addFile(name.encode('utf-8'), "%s%s" % (base_url, link), 20,
                    "%s%s" % (base_url, img), '', 'SuperTV2', IsPlayable=True)
        except:
            pass
    if 'következő oldal' in r or 'következő »' in r:
        addDir('[COLOR green]Következő oldal[/COLOR]', url, 9, '', '', description,
               str(int(page) + 1))
def resolve(url):
    try:
        url = url.replace('/embed-', '/')
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        url = 'http://vidto.me/embed-%s.html' % url
        result = client.request(url)
        result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
        result = jsunpack.unpack(result)
        url = client.parseDOM(result, 'embed', ret='src')
        url += re.compile("file *: *[\'|\"](http.+?)[\'|\"]").findall(result)
        url = [i for i in url if not i.endswith('.srt')]
        url = 'http://' + url[0].split('://', 1)[-1]
        return url
    except:
        return
def resolve(url):
    try:
        url = url.replace('/embed-', '/')
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        url = 'http://turbovideos.net/embed-%s.html' % url
        result = client.request(url)
        url = re.compile('file *: *"(.+?)"').findall(result)
        if len(url) > 0:
            return url[0]
        result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
        result = jsunpack.unpack(result)
        url = client.parseDOM(result, 'embed', ret='src')
        url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
        url = [i for i in url if not i.endswith('.srt')]
        url = 'http://' + url[0].split('://', 1)[-1]
        return url
    except:
        return
def epizod_lista_moziverzum():
    r = client.request("%s%s/oldal%s" % (url, keyword, page))
    if len(keyword) == 0:
        addDir('[COLOR orange]Szűkítés[/COLOR]', url, 29, '', '', description, '16')
    else:
        addDir('[COLOR orange]Moziverzum szűrés: [COLOR lime]%s[/COLOR][/COLOR]'
               % urllib.unquote_plus(keyword), url, 16,
               os.path.join(artPath, 'moziverzum.png'), '', description, page, keyword)
    result = client.parseDOM(r, 'div', attrs={'class': 'leftblock'})
    result = client.parseDOM(result, 'div', attrs={'class': 'cikk_listaelem'})
    for i in result:
        try:
            img = client.parseDOM(i, 'img', ret='src')[0]
            cim = client.parseDOM(i, 'div', attrs={'class': 'cim'})[0]
            name = client.parseDOM(cim, 'a')[0]
            link = client.parseDOM(cim, 'a', ret='href')[0]
            addFile(name.encode('utf-8'), "%s%s" % (base_url, link), 20,
                    "%s%s" % (base_url, img), '', 'Moziverzum', IsPlayable=True)
        except:
            pass
    if 'class="next"' in r:
        addDir('[COLOR green]Következő oldal[/COLOR]', url, 16, '', '', description,
               str(int(page) + 1), keyword)
    return
def getChannelVideo(item):
    # Resolve a live channel item: follow the embedded iframe, submit the hidden
    # form, then de-rotate and AES-decrypt the obfuscated token to get the stream URL.
    item = eval(item)
    myurl, info, key = item['url'].split('@')
    s = requests.Session()
    header = {'User-Agent': UA, 'Referer': myurl}
    content = s.get(myurl, headers=header)
    links = client.parseDOM(content.text, 'iframe', ret='src')
    link = [i for i in links if '.pw' in i][0]
    # xbmc.log('@#@DEROT-LINK: %s' % link, xbmc.LOGNOTICE)
    if link:
        header['Referer'] = item.get('url')
        # link = re.sub(r'&#(\d+);', lambda x: chr(int(x.group(1))), link[0])
        data = requests.get(link).text
        f = re.compile(r'.*?name="f"\s*value=["\']([^"\']+)["\']').findall(data)
        d = re.compile(r'.*?name="d"\s*value=["\']([^"\']+)["\']').findall(data)
        r = re.compile(r'.*?name="r"\s*value=["\']([^"\']+)["\']').findall(data)
        # b = re.compile('.*?name="b"\s*value=["\']([^"\']+)["\']').findall(data)
        action = re.compile(r'[\'"]action[\'"][,\s]*[\'"](http.*?)[\'"]').findall(data)
        srcs = re.compile(r'src=[\'"](.*?)[\'"]').findall(data)
        if f and r and d and action:
            header['Referer'] = link
            # payload = urllib.urlencode({'b': b[0], 'd': d[0], 'f': f[0], 'r': r[0]})
            payload = urllib.urlencode({'f': f[0], 'd': d[0], 'r': r[0]})
            data2, c = getUrlc(action[0], payload, header=header, usecookies=True)
            # xbmc.log('@#@DEROT-DATA2: %s' % data2, xbmc.LOGNOTICE)
            script = re.findall(
                r'src="(http://s1.medianetworkinternational.com/js/\w+.js\?.+?)">',
                data2, re.DOTALL)[0]
            # xbmc.log('@#@DEROT-script: %s' % script, xbmc.LOGNOTICE)
            html = getUrl(script, header=header)
            xset = re.findall(r'var\s+xset=(\[[^\]]+\]);', html)[0]
            # xbmc.log('@#@DEROT-XSET: %s' % xset, xbmc.LOGNOTICE)
            hset = re.findall(r'var\s+hset=(\[[^\]]+\]);', html)[0]
            # xbmc.log('@#@DEROT-HSET: %s' % hset, xbmc.LOGNOTICE)
            try:
                # ads banners
                bheaders = header
                bheaders['Referer'] = action[0]
                banner = re.findall(r'<script\s*src=[\'"](.+?)[\'"]', data2)[-1]
                # xbmc.log('@#@BANNER-LINK: %s' % banner, xbmc.LOGNOTICE)
                bsrc = s.get(banner, headers=bheaders).content
                # xbmc.log('@#@BANNER-DATA: %s' % bsrc, xbmc.LOGNOTICE)
                banner = re.findall(r"url:'([^']+)", bsrc)[0]
                # xbmc.log('@#@BANNER-LINK2: %s' % banner, xbmc.LOGNOTICE)
                bsrc = s.get(banner, headers=bheaders).content
                # xbmc.log('@#@BANNER-DATA2: %s' % bsrc, xbmc.LOGNOTICE)
                bheaders['Referer'] = banner
                banner = re.findall(r"img\s+src='([^']+)", bsrc)[0]
                banner = banner.replace('amp;', '')
                # xbmc.log('@#@BANNER-LINK3: %s' % banner, xbmc.LOGNOTICE)
                bsrc = s.get(banner).status_code
            except BaseException:
                pass
            src = re.findall(
                r"function\(\)\s*{\s*[a-z0-9]{43}\(.*?,.*?,\s*'([^']+)'", data2)[0]
            # xbmc.log('@#@FUNCTION: %s' % src, xbmc.LOGNOTICE)
            s = derot(xset, hset, src)
            key = key[:32]
            decrypter = pyAES.Decrypter(pyAES.AESModeOfOperationECB(key))
            data = decrypter.feed(s.decode('hex').strip())
            data += decrypter.feed()
            fstream = data.decode('hex')
            # fstream = aes.AESModeOfOperationCBC(key, info).decrypt(s.decode('hex'))
            # xbmc.log('getStreams-Final-data: %s' % fstream, level=xbmc.LOGNOTICE)
            # fstream = re.findall('([a-f0-9]+)', fstream)[0].decode('hex')
            # xbmc.log('@#@DAAAATAAA-2---LINK: %s' % fstream, xbmc.LOGNOTICE)
            # enc_data = json.loads(base64.b64decode(link[0]))
            # ciphertext = 'Salted__' + enc_data['s'].decode('hex') + base64.b64decode(enc_data['ct'])
            # src = jscrypto.decode(enc_data["ct"], item['key'], enc_data["s"].decode("hex"))
            # src = src.replace('"', '').replace('\\', '').encode('utf-8')
            if fstream.startswith('http'):
                href = fstream
                return href, srcs[-1], header, item['title'], myurl
    return ''
def list_cache(channel):
    # Fetch the Mediaklikk broadcast XML for the current day and channel,
    # returning its <Item> nodes for get_list() to parse.
    r = client.request(
        'http://www.mediaklikk.hu/iface/broadcast/%s/broadcast_%s.xml'
        % (current_date, broadcast[channel.lower()]))
    items = client.parseDOM(r, 'Item')
    return items
def get_seriesid(self):
    try:
        return client.parseDOM(self.tvdb_data, 'seriesid')[0]
    except:
        return False

def get_series_name(self):
    try:
        return client.parseDOM(self.tvdb_data, 'SeriesName')[0]
    except:
        return False

def get_year(self):
    try:
        first_aired = client.parseDOM(self.tvdb_data, 'FirstAired')[0]
        return first_aired.split("-")[0]
    except:
        return False