def Browse_PlayAnimezone(url, page='', content='episodes', view='515'):
    # Resolve and play an episode from animezone.pl: scrape the list of
    # available players from the episode page, let the user pick a quality
    # via a Kodi select dialog, POST the chosen player token back to the
    # page, extract the resulting <iframe> URL and hand it to PlayFromHost.
    #
    # url     -- episode page URL; an empty string aborts immediately.
    # page    -- unused here (kept for a uniform browse-function signature).
    # content -- unused here (kept for a uniform browse-function signature).
    # view    -- unused here (kept for a uniform browse-function signature).
    if url == '':
        return
    import requests
    # Headers mimic a desktop Chrome XHR; the site rejects requests that
    # don't look like its own AJAX calls (note X-Requested-With + Referer).
    headers = {
        'Pragma': 'no-cache',
        'Origin': 'http://www.animezone.pl',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'pl-PL,pl;q=0.8,en-US;q=0.6,en;q=0.4',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Accept': '*/*',
        'Cache-Control': 'no-cache',
        'X-Requested-With': 'XMLHttpRequest',
        'Connection': 'keep-alive',
        'Referer': url,
    }
    s = requests.Session()
    # Touching the statistics beacon first establishes the session cookies
    # the site appears to require before it will serve player data.
    s.get('http://www.animezone.pl/images/statistics.gif')
    players = s.get(url, headers=headers)
    players = players.text
    # Isolate the players table and strip quotes/whitespace so the regex
    # below can match the collapsed markup (e.g. '<tdclass' with no space).
    players = GetDataBeetwenMarkers(players, '<tbody>', "</table>")[1]
    players = players.replace('\'', '')
    players = players.replace('\n', '')
    players = players.replace('\r', '')
    players = players.replace('\t', '')
    players = players.replace(' ', '')
    # print players.encode('ascii','ignore')
    # Each match: (label, td-class tail, data-attr tail, player token).
    lista = re.compile('<td>(.+?)</td><tdclass(.+?)"data(.+?)="(.+?)"><iclass'
                       ).findall(players)
    import xbmcgui
    d = xbmcgui.Dialog()
    # "Wybór jakości" = quality selection dialog; returns -1 on cancel.
    item = d.select("Wybór jakości", getItemTitles(lista))
    if item != -1:
        # POST the selected player token back to the same URL; the response
        # contains the embed iframe for that player.
        player = str(lista[item][3])
        data = {'data': player}
        r = s.post(url, headers=headers, data=data)
        players = r.text
        players = players.lower()
        print players.encode('ascii', 'ignore')
        lista = re.compile('<iframe src="(.+?)"').findall(players)
        # Keep the last iframe URL found (loop deliberately overwrites url).
        for item in lista:
            url = item
        from common import PlayFromHost
        PlayFromHost(url)
    eod()
def Browse_Itemscen(html, name, metamethod='', content='movies', view='515'):
    # Build a Kodi directory listing of series/episodes scraped from a
    # 'content_episode' HTML section, then append a "Next page" entry when
    # the page indicates more pages exist.
    #
    # html       -- raw page HTML; an empty string aborts immediately.
    # name       -- unused here (kept for a uniform browse-function signature).
    # metamethod -- unused here (kept for a uniform browse-function signature).
    # content    -- passed to set_view for the listing layout.
    # view       -- unused here (kept for a uniform browse-function signature).
    #
    # NOTE(review): this function reads `url`, `site`, `section`, `fanart`,
    # `nexticon`, `_addon`, `set_view`, `addst`, `eod` from module scope --
    # `url` in particular is not a parameter; presumably a module global set
    # by the caller. Verify against the dispatcher.
    if len(html) == 0:
        return
    html = GetDataBeetwenMarkers(html, '<div class="content_episode">',
                                 '<div class="clr"></div>', False)[1]
    # Flatten the markup so the single regex below can span what were
    # originally multiple lines.
    html = html.replace('\'', '')
    html = html.replace('\n', '')
    html = html.replace('\r', '')
    html = html.replace('\t', '')
    # (A dangling `html.encode("utf-8")` no-op was removed here: its result
    # was discarded, so it had no effect on the str being parsed.)
    # Each match: (link, title, anchor body, background-image URL).
    data = re.compile(
        '<a href="(.+?)" title="(.+?)">(.+?)url\((.+?)\);">').findall(html)
    ItemCount = len(data)
    for item in data:
        img = item[3].replace(' ', '%20')  # URL-escape spaces in image path
        strona = item[0]
        name2 = item[1].encode("utf-8")
        plot = ''
        labs = {}
        try:
            labs['plot'] = plot
        except Exception:
            labs['plot'] = ''
        ##
        pars = {
            'mode': 'Episodes4fun',
            'site': site,
            'section': section,
            'title': name2,
            'url': strona,
            'img': img,
            'fanart': fanart
        }
        contextLabs = {
            'title': name2,
            'url': strona,
            'img': img,
            'fanart': fanart,
            'todoparams': _addon.build_plugin_url(pars),
            'site': site,
            'section': section,
            'plot': labs['plot']
        }
        if section == 'anime4fun':
            contextMenuItems = ContextMenu_Series(contextLabs)
        else:
            contextMenuItems = []
        labs['title'] = name2
        _addon.add_directory(pars,
                             labs,
                             is_folder=True,
                             fanart=fanart,
                             img=img,
                             contextmenu_items=contextMenuItems,
                             total_items=ItemCount)
    # next page: increment the trailing page number in the URL. The old
    # `url[:-1] + str(int(url[-1:]) + 1)` only rewrote the last character,
    # which broke for multi-digit pages (".../10" became ".../11" by luck,
    # ".../19" became ".../110" never -- it produced ".../110"-style junk).
    npage = re.sub(r'(\d+)$', lambda m: str(int(m.group(1)) + 1), url)
    # "do strony " (Polish: "to page ") marks the pagination footer.
    if -1 != html.find("do strony "):
        _addon.add_directory(
            {
                'mode': 'Page4fun',
                'site': site,
                'section': section,
                'url': npage,
                'page': npage
            }, {'title': "Next page"},
            is_folder=True,
            fanart=fanart,
            img=nexticon)
    set_view(content, view_mode=addst('links-view'))
    eod()
def Browse_ItemAol(html, url, metamethod='', content='movies', view='515'): if (len(html) == 0): return html = GetDataBeetwenMarkers(html, 'Anime na liter', '<ul class="pagination">', False)[1] data = re.findall('<a href="(.+?)">(.+?)</a>', html) ItemCount = len(data) for item in data: strona = mainSite7 + item[0] name = item[1].encode("utf-8") name = ParseDescription(name) ### scraper if (tfalse(addst("zone-thumbs")) == True): import scraper scrap = scraper.scraper_check(host, name) try: if (name not in scrap): html = nURL(strona) html = GetDataBeetwenMarkers(html, 'og:image', '<h5>Odcinki</h5>', False)[1] html = html.replace('\'', '') html = html.replace('\n', '') html = html.replace('\r', '') html = html.replace('\t', '') print html.encode('ascii', 'ignore') data = re.findall('content="(.+?)"></head>', html) ItemCount = len(data) if len(data) > 0: for item in data: img = item else: img = '' data = re.findall('summary">(.+?)<div class', html) ItemCount = len(data) if len(data) > 0: for item in data: plot = ParseDescription(item) else: plot = '' scraper.scraper_add(host, name, img, plot, '') scrap = scraper.scraper_check(host, name) except: scrap = '' try: img = scrap[1] except: img = '' try: plot = scrap[2] except: plot = '' else: img = '' plot = '' fanart = fanartAol labs = {} try: labs['plot'] = plot except: labs['plot'] = '' ### pars = { 'mode': 'Episodeszone', 'site': site, 'section': section, 'title': name, 'url': strona, 'img': img, 'fanart': fanart } contextLabs = { 'title': name, 'url': strona, 'img': img, 'fanart': fanart, 'todoparams': _addon.build_plugin_url(pars), 'site': site, 'section': section, 'plot': labs['plot'] } if section == 'animezone': contextMenuItems = ContextMenu_Series(contextLabs) else: contextMenuItems = [] labs['title'] = name _addon.add_directory(pars, labs, is_folder=True, fanart=fanart, img=img, contextmenu_items=contextMenuItems, total_items=ItemCount) # next page npage = url[:-1] + str(int(url[-1:]) + 1) # if -1 != html.find("do 
strony "): _addon.add_directory( { 'mode': 'Pagezone', 'site': site, 'section': section, 'url': npage, 'page': npage }, {'title': "Next page"}, is_folder=True, fanart=fanartAol, img=nexticon) set_view(content, view_mode=addst('tvshows-view'))